/cvs/IO-AIO/AIO.xs

Comparing IO-AIO/AIO.xs (file contents):
Revision 1.64 by root, Mon Oct 23 23:54:41 2006 UTC vs.
Revision 1.75 by root, Thu Oct 26 06:44:48 2006 UTC
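
The main structural change in this revision range is that the single linked request and result lists of 1.64 are replaced by a small per-priority FIFO queue ("reqq", with its qs/qe/size fields and the reqq_push/reqq_shift helpers shown in the listing below). The following stand-alone sketch only illustrates that scheme: the names queue, node, queue_push, queue_shift, payload and the demo in main are made up for this example and are not part of AIO.xs, though NUM_PRI mirrors the PRI_MAX + PRI_BIAS + 1 value the diff introduces.

/* Minimal sketch of a priority-bucketed FIFO queue in the style of the
 * "reqq" structure added by this revision.  Each priority level owns a
 * singly linked FIFO: push appends at the tail of its bucket, shift scans
 * the bucket heads from the highest priority downwards. */
#include <stdio.h>
#include <stdlib.h>

#define NUM_PRI 9                      /* PRI_MAX + PRI_BIAS + 1 in the diff */

typedef struct node {
  struct node *next;
  int pri;                             /* already biased into 0 .. NUM_PRI-1 */
  int payload;
} node;

typedef struct {
  node *qs[NUM_PRI], *qe[NUM_PRI];     /* per-priority head and tail */
  int size;
} queue;

/* append n to the tail of its priority bucket; returns the number of
 * entries that were queued before it (0 means the queue was empty) */
static int queue_push (queue *q, node *n)
{
  int pri = n->pri;
  n->next = 0;

  if (q->qe[pri])
    q->qe[pri]->next = n;
  else
    q->qs[pri] = n;

  q->qe[pri] = n;
  return q->size++;
}

/* remove and return the oldest entry of the highest non-empty priority */
static node *queue_shift (queue *q)
{
  int pri;

  if (!q->size)
    return 0;

  --q->size;

  for (pri = NUM_PRI; pri--; )
    if (q->qs[pri])
      {
        node *n = q->qs[pri];

        if (!(q->qs[pri] = n->next))
          q->qe[pri] = 0;

        return n;
      }

  abort ();                            /* size and buckets disagree */
}

int main (void)
{
  queue q = { { 0 }, { 0 }, 0 };
  node a = { 0, 4, 1 }, b = { 0, 8, 2 }, c = { 0, 4, 3 };
  node *n;

  queue_push (&q, &a);
  queue_push (&q, &b);
  queue_push (&q, &c);

  while ((n = queue_shift (&q)))
    printf ("%d ", n->payload);        /* prints "2 1 3" */

  putchar ('\n');
  return 0;
}

Within one priority the order stays FIFO, and a shift only has to probe at most NUM_PRI bucket heads, which is what the "<20 insns per shift" comment in the diff refers to.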

1/* solaris */
2#define _POSIX_PTHREAD_SEMANTICS 1
3
1#if __linux 4#if __linux
2# define _GNU_SOURCE 5# define _GNU_SOURCE
3#endif 6#endif
4 7
5#define _REENTRANT 1 8#define _REENTRANT 1
44/* used for struct dirent, AIX doesn't provide it */ 47/* used for struct dirent, AIX doesn't provide it */
45#ifndef NAME_MAX 48#ifndef NAME_MAX
46# define NAME_MAX 4096 49# define NAME_MAX 4096
47#endif 50#endif
48 51
52#ifndef PTHREAD_STACK_MIN
53/* care for broken platforms, e.g. windows */
54# define PTHREAD_STACK_MIN 16384
55#endif
56
49#if __ia64 57#if __ia64
50# define STACKSIZE 65536 58# define STACKSIZE 65536
59#elif __i386 || __x86_64 /* 16k is unreasonably high :( */
60# define STACKSIZE PTHREAD_STACK_MIN
51#else 61#else
52# define STACKSIZE 8192 62# define STACKSIZE 16384
53#endif 63#endif
64
65/* buffer size for various temporary buffers */
66#define AIO_BUFSIZE 65536
67
68#define dBUF \
69 char *aio_buf; \
70 LOCK (wrklock); \
71 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
72 UNLOCK (wrklock); \
73 if (!aio_buf) \
74 return -1;
54 75
55enum { 76enum {
56 REQ_QUIT, 77 REQ_QUIT,
57 REQ_OPEN, REQ_CLOSE, 78 REQ_OPEN, REQ_CLOSE,
58 REQ_READ, REQ_WRITE, REQ_READAHEAD, 79 REQ_READ, REQ_WRITE, REQ_READAHEAD,
61 REQ_FSYNC, REQ_FDATASYNC, 82 REQ_FSYNC, REQ_FDATASYNC,
62 REQ_UNLINK, REQ_RMDIR, REQ_RENAME, 83 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
63 REQ_READDIR, 84 REQ_READDIR,
64 REQ_LINK, REQ_SYMLINK, 85 REQ_LINK, REQ_SYMLINK,
65 REQ_GROUP, REQ_NOP, 86 REQ_GROUP, REQ_NOP,
66 REQ_SLEEP, 87 REQ_BUSY,
67}; 88};
68 89
69#define AIO_REQ_KLASS "IO::AIO::REQ" 90#define AIO_REQ_KLASS "IO::AIO::REQ"
70#define AIO_GRP_KLASS "IO::AIO::GRP" 91#define AIO_GRP_KLASS "IO::AIO::GRP"
71 92
105 PRI_MIN = -4, 126 PRI_MIN = -4,
106 PRI_MAX = 4, 127 PRI_MAX = 4,
107 128
108 DEFAULT_PRI = 0, 129 DEFAULT_PRI = 0,
109 PRI_BIAS = -PRI_MIN, 130 PRI_BIAS = -PRI_MIN,
131 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
110}; 132};
111 133
112static int next_pri = DEFAULT_PRI + PRI_BIAS; 134static int next_pri = DEFAULT_PRI + PRI_BIAS;
113 135
114static int started, wanted; 136static int started, wanted;
115static volatile int nreqs; 137static volatile int nreqs;
116static int max_outstanding = 1<<30;
117static int respipe [2]; 138static int respipe [2];
118 139
119#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP) 140#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
120# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP 141# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
121#else 142#else
122# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER 143# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
123#endif 144#endif
124 145
146#define LOCK(mutex) pthread_mutex_lock (&(mutex))
147#define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
148
149/* worker thread management */
150static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
151
152typedef struct worker {
153 /* locked by wrklock */
154 struct worker *prev, *next;
155
156 pthread_t tid;
157
158 /* locked by reslock, reqlock or wrklock */
159 aio_req req; /* currently processed request */
160 void *dbuf;
161 DIR *dirp;
162} worker;
163
164static worker wrk_first = { &wrk_first, &wrk_first, 0 };
165
166static void worker_clear (worker *wrk)
167{
168 if (wrk->dirp)
169 {
170 closedir (wrk->dirp);
171 wrk->dirp = 0;
172 }
173
174 if (wrk->dbuf)
175 {
176 free (wrk->dbuf);
177 wrk->dbuf = 0;
178 }
179}
180
181static void worker_free (worker *wrk)
182{
183 wrk->next->prev = wrk->prev;
184 wrk->prev->next = wrk->next;
185
186 free (wrk);
187}
188
125static pthread_mutex_t reslock = AIO_MUTEX_INIT; 189static pthread_mutex_t reslock = AIO_MUTEX_INIT;
126static pthread_mutex_t reqlock = AIO_MUTEX_INIT; 190static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
127static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER; 191static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
128 192
129static volatile aio_req reqs, reqe; /* queue start, queue end */ 193/*
130static volatile aio_req ress, rese; /* queue start, queue end */ 194 * a somewhat faster data structure might be nice, but
195 * with 8 priorities this actually needs <20 insns
196 * per shift, the most expensive operation.
197 */
198typedef struct {
199 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
200 int size;
201} reqq;
131 202
203static reqq req_queue;
204static reqq res_queue;
205
206int reqq_push (reqq *q, aio_req req)
207{
208 int pri = req->pri;
209 req->next = 0;
210
211 if (q->qe[pri])
212 {
213 q->qe[pri]->next = req;
214 q->qe[pri] = req;
215 }
216 else
217 q->qe[pri] = q->qs[pri] = req;
218
219 return q->size++;
220}
221
222aio_req reqq_shift (reqq *q)
223{
224 int pri;
225
226 if (!q->size)
227 return 0;
228
229 --q->size;
230
231 for (pri = NUM_PRI; pri--; )
232 {
233 aio_req req = q->qs[pri];
234
235 if (req)
236 {
237 if (!(q->qs[pri] = req->next))
238 q->qe[pri] = 0;
239
240 return req;
241 }
242 }
243
244 abort ();
245}
246
247static int poll_cb ();
132static void req_invoke (aio_req req); 248static void req_invoke (aio_req req);
133static void req_free (aio_req req); 249static void req_free (aio_req req);
250static void req_cancel (aio_req req);
134 251
135/* must be called at most once */ 252/* must be called at most once */
136static SV *req_sv (aio_req req, const char *klass) 253static SV *req_sv (aio_req req, const char *klass)
137{ 254{
138 if (!req->self) 255 if (!req->self)
169 ENTER; 286 ENTER;
170 SAVETMPS; 287 SAVETMPS;
171 PUSHMARK (SP); 288 PUSHMARK (SP);
172 XPUSHs (req_sv (grp, AIO_GRP_KLASS)); 289 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
173 PUTBACK; 290 PUTBACK;
174 call_sv (grp->fh2, G_VOID | G_EVAL); 291 call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
175 SPAGAIN; 292 SPAGAIN;
176 FREETMPS; 293 FREETMPS;
177 LEAVE; 294 LEAVE;
178 } 295 }
179 296
206{ 323{
207 fd_set rfd; 324 fd_set rfd;
208 325
209 while (nreqs) 326 while (nreqs)
210 { 327 {
211 aio_req req; 328 int size;
212#if !(__x86 || __x86_64) /* safe without semaphore on this archs */ 329#if !(__i386 || __x86_64) /* safe without semaphore on these archs */
213 pthread_mutex_lock (&reslock); 330 LOCK (reslock);
214#endif 331#endif
215 req = ress; 332 size = res_queue.size;
216#if !(__x86 || __x86_64) /* safe without semaphore on this archs */ 333#if !(__i386 || __x86_64) /* safe without semaphore on these archs */
217 pthread_mutex_unlock (&reslock); 334 UNLOCK (reslock);
218#endif 335#endif
219 336
220 if (req) 337 if (size)
221 return; 338 return;
222 339
223 FD_ZERO(&rfd); 340 FD_ZERO(&rfd);
224 FD_SET(respipe [0], &rfd); 341 FD_SET(respipe [0], &rfd);
225 342
228} 345}
229 346
230static void req_invoke (aio_req req) 347static void req_invoke (aio_req req)
231{ 348{
232 dSP; 349 dSP;
233 int errorno = errno;
234 350
235 if (req->flags & FLAG_CANCELLED || !SvOK (req->callback)) 351 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
236 return; 352 {
237
238 errno = req->errorno; 353 errno = req->errorno;
239 354
240 ENTER; 355 ENTER;
241 SAVETMPS; 356 SAVETMPS;
242 PUSHMARK (SP); 357 PUSHMARK (SP);
243 EXTEND (SP, 1); 358 EXTEND (SP, 1);
244 359
245 switch (req->type) 360 switch (req->type)
246 {
247 case REQ_READDIR:
248 { 361 {
249 SV *rv = &PL_sv_undef; 362 case REQ_READDIR:
250
251 if (req->result >= 0)
252 { 363 {
253 char *buf = req->data2ptr; 364 SV *rv = &PL_sv_undef;
254 AV *av = newAV ();
255 365
256 while (req->result) 366 if (req->result >= 0)
257 { 367 {
368 int i;
369 char *buf = req->data2ptr;
370 AV *av = newAV ();
371
372 av_extend (av, req->result - 1);
373
374 for (i = 0; i < req->result; ++i)
375 {
258 SV *sv = newSVpv (buf, 0); 376 SV *sv = newSVpv (buf, 0);
259 377
260 av_push (av, sv); 378 av_store (av, i, sv);
261 buf += SvCUR (sv) + 1; 379 buf += SvCUR (sv) + 1;
262 req->result--; 380 }
381
382 rv = sv_2mortal (newRV_noinc ((SV *)av));
263 } 383 }
264 384
265 rv = sv_2mortal (newRV_noinc ((SV *)av)); 385 PUSHs (rv);
266 } 386 }
387 break;
267 388
268 PUSHs (rv); 389 case REQ_OPEN:
390 {
391 /* convert fd to fh */
392 SV *fh;
393
394 PUSHs (sv_2mortal (newSViv (req->result)));
395 PUTBACK;
396 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
397 SPAGAIN;
398
399 fh = SvREFCNT_inc (POPs);
400
401 PUSHMARK (SP);
402 XPUSHs (sv_2mortal (fh));
403 }
404 break;
405
406 case REQ_GROUP:
407 req->fd = 2; /* mark group as finished */
408
409 if (req->data)
410 {
411 int i;
412 AV *av = (AV *)req->data;
413
414 EXTEND (SP, AvFILL (av) + 1);
415 for (i = 0; i <= AvFILL (av); ++i)
416 PUSHs (*av_fetch (av, i, 0));
417 }
418 break;
419
420 case REQ_NOP:
421 case REQ_BUSY:
422 break;
423
424 default:
425 PUSHs (sv_2mortal (newSViv (req->result)));
426 break;
269 } 427 }
270 break;
271 428
272 case REQ_OPEN:
273 {
274 /* convert fd to fh */
275 SV *fh;
276 429
277 PUSHs (sv_2mortal (newSViv (req->result)));
278 PUTBACK; 430 PUTBACK;
279 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
280 SPAGAIN;
281
282 fh = SvREFCNT_inc (POPs);
283
284 PUSHMARK (SP);
285 XPUSHs (sv_2mortal (fh));
286 }
287 break;
288
289 case REQ_GROUP:
290 req->fd = 2; /* mark group as finished */
291
292 if (req->data)
293 {
294 int i;
295 AV *av = (AV *)req->data;
296
297 EXTEND (SP, AvFILL (av) + 1);
298 for (i = 0; i <= AvFILL (av); ++i)
299 PUSHs (*av_fetch (av, i, 0));
300 }
301 break;
302
303 case REQ_NOP:
304 case REQ_SLEEP:
305 break;
306
307 default:
308 PUSHs (sv_2mortal (newSViv (req->result)));
309 break;
310 }
311
312
313 PUTBACK;
314 call_sv (req->callback, G_VOID | G_EVAL); 431 call_sv (req->callback, G_VOID | G_EVAL);
315 SPAGAIN; 432 SPAGAIN;
316 433
317 FREETMPS; 434 FREETMPS;
318 LEAVE; 435 LEAVE;
319
320 errno = errorno;
321
322 if (SvTRUE (ERRSV))
323 { 436 }
324 req_free (req);
325 croak (0);
326 }
327}
328 437
329static void req_free (aio_req req)
330{
331 if (req->grp) 438 if (req->grp)
332 { 439 {
333 aio_req grp = req->grp; 440 aio_req grp = req->grp;
334 441
335 /* unlink request */ 442 /* unlink request */
340 grp->grp_first = req->grp_next; 447 grp->grp_first = req->grp_next;
341 448
342 aio_grp_dec (grp); 449 aio_grp_dec (grp);
343 } 450 }
344 451
452 if (SvTRUE (ERRSV))
453 {
454 req_free (req);
455 croak (0);
456 }
457}
458
459static void req_free (aio_req req)
460{
345 if (req->self) 461 if (req->self)
346 { 462 {
347 sv_unmagic (req->self, PERL_MAGIC_ext); 463 sv_unmagic (req->self, PERL_MAGIC_ext);
348 SvREFCNT_dec (req->self); 464 SvREFCNT_dec (req->self);
349 } 465 }
352 SvREFCNT_dec (req->fh); 468 SvREFCNT_dec (req->fh);
353 SvREFCNT_dec (req->fh2); 469 SvREFCNT_dec (req->fh2);
354 SvREFCNT_dec (req->callback); 470 SvREFCNT_dec (req->callback);
355 Safefree (req->statdata); 471 Safefree (req->statdata);
356 472
357 if (req->type == REQ_READDIR && req->result >= 0) 473 if (req->type == REQ_READDIR)
358 free (req->data2ptr); 474 free (req->data2ptr);
359 475
360 Safefree (req); 476 Safefree (req);
361} 477}
362 478
479static void req_cancel_subs (aio_req grp)
480{
481 aio_req sub;
482
483 if (grp->type != REQ_GROUP)
484 return;
485
486 SvREFCNT_dec (grp->fh2);
487 grp->fh2 = 0;
488
489 for (sub = grp->grp_first; sub; sub = sub->grp_next)
490 req_cancel (sub);
491}
492
363static void req_cancel (aio_req req) 493static void req_cancel (aio_req req)
364{ 494{
365 req->flags |= FLAG_CANCELLED; 495 req->flags |= FLAG_CANCELLED;
366 496
367 if (req->type == REQ_GROUP) 497 req_cancel_subs (req);
368 {
369 aio_req sub;
370
371 for (sub = req->grp_first; sub; sub = sub->grp_next)
372 req_cancel (sub);
373 }
374} 498}
375 499
376static int poll_cb () 500static int poll_cb ()
377{ 501{
378 dSP; 502 dSP;
380 int do_croak = 0; 504 int do_croak = 0;
381 aio_req req; 505 aio_req req;
382 506
383 for (;;) 507 for (;;)
384 { 508 {
385 pthread_mutex_lock (&reslock); 509 LOCK (reslock);
386 req = ress; 510 req = reqq_shift (&res_queue);
387 511
388 if (req) 512 if (req)
389 { 513 {
390 ress = req->next;
391
392 if (!ress) 514 if (!res_queue.size)
393 { 515 {
394 /* read any signals sent by the worker threads */ 516 /* read any signals sent by the worker threads */
395 char buf [32]; 517 char buf [32];
396 while (read (respipe [0], buf, 32) == 32) 518 while (read (respipe [0], buf, 32) == 32)
397 ; 519 ;
398
399 rese = 0;
400 } 520 }
401 } 521 }
402 522
403 pthread_mutex_unlock (&reslock); 523 UNLOCK (reslock);
404 524
405 if (!req) 525 if (!req)
406 break; 526 break;
407 527
408 --nreqs; 528 --nreqs;
443static void *aio_proc(void *arg); 563static void *aio_proc(void *arg);
444 564
445static void start_thread (void) 565static void start_thread (void)
446{ 566{
447 sigset_t fullsigset, oldsigset; 567 sigset_t fullsigset, oldsigset;
448 pthread_t tid;
449 pthread_attr_t attr; 568 pthread_attr_t attr;
569
570 worker *wrk = calloc (1, sizeof (worker));
571
572 if (!wrk)
573 croak ("unable to allocate worker thread data");
450 574
451 pthread_attr_init (&attr); 575 pthread_attr_init (&attr);
452 pthread_attr_setstacksize (&attr, STACKSIZE); 576 pthread_attr_setstacksize (&attr, STACKSIZE);
453 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED); 577 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
454 578
455 sigfillset (&fullsigset); 579 sigfillset (&fullsigset);
580
581 LOCK (wrklock);
456 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset); 582 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
457 583
458 if (pthread_create (&tid, &attr, aio_proc, 0) == 0) 584 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
585 {
586 wrk->prev = &wrk_first;
587 wrk->next = wrk_first.next;
588 wrk_first.next->prev = wrk;
589 wrk_first.next = wrk;
459 started++; 590 started++;
591 }
592 else
593 free (wrk);
460 594
461 sigprocmask (SIG_SETMASK, &oldsigset, 0); 595 sigprocmask (SIG_SETMASK, &oldsigset, 0);
596 UNLOCK (wrklock);
462} 597}
463 598
464static void req_send (aio_req req) 599static void req_send (aio_req req)
465{ 600{
466 while (started < wanted && nreqs >= started) 601 while (started < wanted && nreqs >= started)
467 start_thread (); 602 start_thread ();
468 603
469 ++nreqs; 604 ++nreqs;
470 605
471 pthread_mutex_lock (&reqlock); 606 LOCK (reqlock);
472 607 reqq_push (&req_queue, req);
473 req->next = 0;
474
475 if (reqe)
476 {
477 reqe->next = req;
478 reqe = req;
479 }
480 else
481 reqe = reqs = req;
482
483 pthread_cond_signal (&reqwait); 608 pthread_cond_signal (&reqwait);
484 pthread_mutex_unlock (&reqlock); 609 UNLOCK (reqlock);
485
486 if (nreqs > max_outstanding)
487 for (;;)
488 {
489 poll_cb ();
490
491 if (nreqs <= max_outstanding)
492 break;
493
494 poll_wait ();
495 }
496} 610}
497 611
498static void end_thread (void) 612static void end_thread (void)
499{ 613{
500 aio_req req; 614 aio_req req;
615
501 Newz (0, req, 1, aio_cb); 616 Newz (0, req, 1, aio_cb);
617
502 req->type = REQ_QUIT; 618 req->type = REQ_QUIT;
619 req->pri = PRI_MAX + PRI_BIAS;
503 620
504 req_send (req); 621 req_send (req);
505} 622}
506 623
507static void min_parallel (int nthreads) 624static void min_parallel (int nthreads)
559static ssize_t pread (int fd, void *buf, size_t count, off_t offset) 676static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
560{ 677{
561 ssize_t res; 678 ssize_t res;
562 off_t ooffset; 679 off_t ooffset;
563 680
564 pthread_mutex_lock (&preadwritelock); 681 LOCK (preadwritelock);
565 ooffset = lseek (fd, 0, SEEK_CUR); 682 ooffset = lseek (fd, 0, SEEK_CUR);
566 lseek (fd, offset, SEEK_SET); 683 lseek (fd, offset, SEEK_SET);
567 res = read (fd, buf, count); 684 res = read (fd, buf, count);
568 lseek (fd, ooffset, SEEK_SET); 685 lseek (fd, ooffset, SEEK_SET);
569 pthread_mutex_unlock (&preadwritelock); 686 UNLOCK (preadwritelock);
570 687
571 return res; 688 return res;
572} 689}
573 690
574static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset) 691static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
575{ 692{
576 ssize_t res; 693 ssize_t res;
577 off_t ooffset; 694 off_t ooffset;
578 695
579 pthread_mutex_lock (&preadwritelock); 696 LOCK (preadwritelock);
580 ooffset = lseek (fd, 0, SEEK_CUR); 697 ooffset = lseek (fd, 0, SEEK_CUR);
581 lseek (fd, offset, SEEK_SET); 698 lseek (fd, offset, SEEK_SET);
582 res = write (fd, buf, count); 699 res = write (fd, buf, count);
583 lseek (fd, offset, SEEK_SET); 700 lseek (fd, offset, SEEK_SET);
584 pthread_mutex_unlock (&preadwritelock); 701 UNLOCK (preadwritelock);
585 702
586 return res; 703 return res;
587} 704}
588#endif 705#endif
589 706
590#if !HAVE_FDATASYNC 707#if !HAVE_FDATASYNC
591# define fdatasync fsync 708# define fdatasync fsync
592#endif 709#endif
593 710
594#if !HAVE_READAHEAD 711#if !HAVE_READAHEAD
595# define readahead aio_readahead 712# define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
596 713
597static ssize_t readahead (int fd, off_t offset, size_t count) 714static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
598{ 715{
599 char readahead_buf[4096]; 716 dBUF;
600 717
601 while (count > 0) 718 while (count > 0)
602 { 719 {
603 size_t len = count < sizeof (readahead_buf) ? count : sizeof (readahead_buf); 720 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
604 721
605 pread (fd, readahead_buf, len, offset); 722 pread (fd, aio_buf, len, offset);
606 offset += len; 723 offset += len;
607 count -= len; 724 count -= len;
608 } 725 }
609 726
610 errno = 0; 727 errno = 0;
611} 728}
729
612#endif 730#endif
613 731
614#if !HAVE_READDIR_R 732#if !HAVE_READDIR_R
615# define readdir_r aio_readdir_r 733# define readdir_r aio_readdir_r
616 734
619static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res) 737static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
620{ 738{
621 struct dirent *e; 739 struct dirent *e;
622 int errorno; 740 int errorno;
623 741
624 pthread_mutex_lock (&readdirlock); 742 LOCK (readdirlock);
625 743
626 e = readdir (dirp); 744 e = readdir (dirp);
627 errorno = errno; 745 errorno = errno;
628 746
629 if (e) 747 if (e)
632 strcpy (ent->d_name, e->d_name); 750 strcpy (ent->d_name, e->d_name);
633 } 751 }
634 else 752 else
635 *res = 0; 753 *res = 0;
636 754
637 pthread_mutex_unlock (&readdirlock); 755 UNLOCK (readdirlock);
638 756
639 errno = errorno; 757 errno = errorno;
640 return e ? 0 : -1; 758 return e ? 0 : -1;
641} 759}
642#endif 760#endif
643 761
644/* sendfile always needs emulation */ 762/* sendfile always needs emulation */
645static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count) 763static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
646{ 764{
647 ssize_t res; 765 ssize_t res;
648 766
649 if (!count) 767 if (!count)
650 return 0; 768 return 0;
699#endif 817#endif
700 ) 818 )
701 ) 819 )
702 { 820 {
703 /* emulate sendfile. this is a major pain in the ass */ 821 /* emulate sendfile. this is a major pain in the ass */
704 char buf[4096]; 822 dBUF;
823
705 res = 0; 824 res = 0;
706 825
707 while (count) 826 while (count)
708 { 827 {
709 ssize_t cnt; 828 ssize_t cnt;
710 829
711 cnt = pread (ifd, buf, count > 4096 ? 4096 : count, offset); 830 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
712 831
713 if (cnt <= 0) 832 if (cnt <= 0)
714 { 833 {
715 if (cnt && !res) res = -1; 834 if (cnt && !res) res = -1;
716 break; 835 break;
717 } 836 }
718 837
719 cnt = write (ofd, buf, cnt); 838 cnt = write (ofd, aio_buf, cnt);
720 839
721 if (cnt <= 0) 840 if (cnt <= 0)
722 { 841 {
723 if (cnt && !res) res = -1; 842 if (cnt && !res) res = -1;
724 break; 843 break;
732 851
733 return res; 852 return res;
734} 853}
735 854
736/* read a full directory */ 855/* read a full directory */
737static int scandir_ (const char *path, void **namesp) 856static void scandir_ (aio_req req, worker *self)
738{ 857{
739 DIR *dirp = opendir (path); 858 DIR *dirp;
740 union 859 union
741 { 860 {
742 struct dirent d; 861 struct dirent d;
743 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1]; 862 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
744 } u; 863 } *u;
745 struct dirent *entp; 864 struct dirent *entp;
746 char *name, *names; 865 char *name, *names;
747 int memlen = 4096; 866 int memlen = 4096;
748 int memofs = 0; 867 int memofs = 0;
749 int res = 0; 868 int res = 0;
750 int errorno; 869 int errorno;
751 870
752 if (!dirp) 871 LOCK (wrklock);
753 return -1; 872 self->dirp = dirp = opendir (req->dataptr);
873 self->dbuf = u = malloc (sizeof (*u));
874 UNLOCK (wrklock);
754 875
755 names = malloc (memlen); 876 req->data2ptr = names = malloc (memlen);
756 877
878 if (dirp && u && names)
757 for (;;) 879 for (;;)
758 { 880 {
881 errno = 0;
759 errno = 0, readdir_r (dirp, &u.d, &entp); 882 readdir_r (dirp, &u->d, &entp);
760 883
761 if (!entp) 884 if (!entp)
762 break; 885 break;
763 886
764 name = entp->d_name; 887 name = entp->d_name;
765 888
766 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2]))) 889 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
767 { 890 {
768 int len = strlen (name) + 1; 891 int len = strlen (name) + 1;
769 892
770 res++; 893 res++;
771 894
772 while (memofs + len > memlen) 895 while (memofs + len > memlen)
773 { 896 {
774 memlen *= 2; 897 memlen *= 2;
898 LOCK (wrklock);
775 names = realloc (names, memlen); 899 req->data2ptr = names = realloc (names, memlen);
900 UNLOCK (wrklock);
901
776 if (!names) 902 if (!names)
777 break; 903 break;
778 } 904 }
779 905
780 memcpy (names + memofs, name, len); 906 memcpy (names + memofs, name, len);
781 memofs += len; 907 memofs += len;
782 } 908 }
783 } 909 }
784 910
785 errorno = errno;
786 closedir (dirp);
787
788 if (errorno) 911 if (errno)
789 {
790 free (names);
791 errno = errorno;
792 res = -1; 912 res = -1;
793 } 913
794 914 req->result = res;
795 *namesp = (void *)names;
796 return res;
797} 915}
798 916
799/*****************************************************************************/ 917/*****************************************************************************/
800 918
801static void *aio_proc (void *thr_arg) 919static void *aio_proc (void *thr_arg)
802{ 920{
803 aio_req req; 921 aio_req req;
804 int type; 922 int type;
923 worker *self = (worker *)thr_arg;
805 924
806 do 925 do
807 { 926 {
808 pthread_mutex_lock (&reqlock); 927 LOCK (reqlock);
809 928
810 for (;;) 929 for (;;)
811 { 930 {
812 req = reqs; 931 self->req = req = reqq_shift (&req_queue);
813
814 if (reqs)
815 {
816 reqs = reqs->next;
817 if (!reqs) reqe = 0;
818 }
819 932
820 if (req) 933 if (req)
821 break; 934 break;
822 935
823 pthread_cond_wait (&reqwait, &reqlock); 936 pthread_cond_wait (&reqwait, &reqlock);
824 } 937 }
825 938
826 pthread_mutex_unlock (&reqlock); 939 UNLOCK (reqlock);
827 940
828 errno = 0; /* strictly unnecessary */ 941 errno = 0; /* strictly unnecessary */
829 type = req->type; /* remember type for QUIT check */ 942 type = req->type; /* remember type for QUIT check */
830 943
831 if (!(req->flags & FLAG_CANCELLED)) 944 if (!(req->flags & FLAG_CANCELLED))
833 { 946 {
834 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break; 947 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
835 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break; 948 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
836 949
837 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break; 950 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
838 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length); break; 951 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length, self); break;
839 952
840 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break; 953 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break;
841 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break; 954 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break;
842 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break; 955 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break;
843 956
849 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break; 962 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break;
850 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break; 963 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break;
851 964
852 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break; 965 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
853 case REQ_FSYNC: req->result = fsync (req->fd); break; 966 case REQ_FSYNC: req->result = fsync (req->fd); break;
854 case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break; 967 case REQ_READDIR: scandir_ (req, self); break;
855 968
856 case REQ_SLEEP: 969 case REQ_BUSY:
857 { 970 {
858 struct timeval tv; 971 struct timeval tv;
859 972
860 tv.tv_sec = req->fd; 973 tv.tv_sec = req->fd;
861 tv.tv_usec = req->fd2; 974 tv.tv_usec = req->fd2;
873 break; 986 break;
874 } 987 }
875 988
876 req->errorno = errno; 989 req->errorno = errno;
877 990
878 pthread_mutex_lock (&reslock); 991 LOCK (reslock);
879 992
880 req->next = 0; 993 if (!reqq_push (&res_queue, req))
881
882 if (rese)
883 {
884 rese->next = req;
885 rese = req;
886 }
887 else
888 {
889 rese = ress = req;
890
891 /* write a dummy byte to the pipe so fh becomes ready */ 994 /* write a dummy byte to the pipe so fh becomes ready */
892 write (respipe [1], &respipe, 1); 995 write (respipe [1], &respipe, 1);
893 }
894 996
895 pthread_mutex_unlock (&reslock); 997 self->req = 0;
998 worker_clear (self);
999
1000 UNLOCK (reslock);
896 } 1001 }
897 while (type != REQ_QUIT); 1002 while (type != REQ_QUIT);
898 1003
1004 LOCK (wrklock);
1005 worker_free (self);
1006 UNLOCK (wrklock);
1007
899 return 0; 1008 return 0;
900} 1009}
901 1010
902/*****************************************************************************/ 1011/*****************************************************************************/
903 1012
904static void atfork_prepare (void) 1013static void atfork_prepare (void)
905{ 1014{
906 pthread_mutex_lock (&reqlock); 1015 LOCK (wrklock);
907 pthread_mutex_lock (&reslock); 1016 LOCK (reqlock);
1017 LOCK (reslock);
908#if !HAVE_PREADWRITE 1018#if !HAVE_PREADWRITE
909 pthread_mutex_lock (&preadwritelock); 1019 LOCK (preadwritelock);
910#endif 1020#endif
911#if !HAVE_READDIR_R 1021#if !HAVE_READDIR_R
912 pthread_mutex_lock (&readdirlock); 1022 LOCK (readdirlock);
913#endif 1023#endif
914} 1024}
915 1025
916static void atfork_parent (void) 1026static void atfork_parent (void)
917{ 1027{
918#if !HAVE_READDIR_R 1028#if !HAVE_READDIR_R
919 pthread_mutex_unlock (&readdirlock); 1029 UNLOCK (readdirlock);
920#endif 1030#endif
921#if !HAVE_PREADWRITE 1031#if !HAVE_PREADWRITE
922 pthread_mutex_unlock (&preadwritelock); 1032 UNLOCK (preadwritelock);
923#endif 1033#endif
924 pthread_mutex_unlock (&reslock); 1034 UNLOCK (reslock);
925 pthread_mutex_unlock (&reqlock); 1035 UNLOCK (reqlock);
1036 UNLOCK (wrklock);
926} 1037}
927 1038
928static void atfork_child (void) 1039static void atfork_child (void)
929{ 1040{
930 aio_req prv; 1041 aio_req prv;
931 1042
1043 while (prv = reqq_shift (&req_queue))
1044 req_free (prv);
1045
1046 while (prv = reqq_shift (&res_queue))
1047 req_free (prv);
1048
1049 while (wrk_first.next != &wrk_first)
1050 {
1051 worker *wrk = wrk_first.next;
1052
1053 if (wrk->req)
1054 req_free (wrk->req);
1055
1056 worker_clear (wrk);
1057 worker_free (wrk);
1058 }
1059
932 started = 0; 1060 started = 0;
933 1061 nreqs = 0;
934 while (reqs)
935 {
936 prv = reqs;
937 reqs = prv->next;
938 req_free (prv);
939 }
940
941 reqs = reqe = 0;
942
943 while (ress)
944 {
945 prv = ress;
946 ress = prv->next;
947 req_free (prv);
948 }
949
950 ress = rese = 0;
951 1062
952 close (respipe [0]); 1063 close (respipe [0]);
953 close (respipe [1]); 1064 close (respipe [1]);
954 create_pipe (); 1065 create_pipe ();
955 1066
1000void 1111void
1001max_parallel (nthreads) 1112max_parallel (nthreads)
1002 int nthreads 1113 int nthreads
1003 PROTOTYPE: $ 1114 PROTOTYPE: $
1004 1115
1005int
1006max_outstanding (nreqs)
1007 int nreqs
1008 PROTOTYPE: $
1009 CODE:
1010 RETVAL = max_outstanding;
1011 max_outstanding = nreqs;
1012
1013void 1116void
1014aio_open (pathname,flags,mode,callback=&PL_sv_undef) 1117aio_open (pathname,flags,mode,callback=&PL_sv_undef)
1015 SV * pathname 1118 SV * pathname
1016 int flags 1119 int flags
1017 int mode 1120 int mode
1231 1334
1232 REQ_SEND; 1335 REQ_SEND;
1233} 1336}
1234 1337
1235void 1338void
1236aio_sleep (delay,callback=&PL_sv_undef) 1339aio_busy (delay,callback=&PL_sv_undef)
1237 double delay 1340 double delay
1238 SV * callback 1341 SV * callback
1239 PPCODE: 1342 PPCODE:
1240{ 1343{
1241 dREQ; 1344 dREQ;
1242 1345
1243 req->type = REQ_SLEEP; 1346 req->type = REQ_BUSY;
1244 req->fd = delay < 0. ? 0 : delay; 1347 req->fd = delay < 0. ? 0 : delay;
1245 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd); 1348 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd);
1246 1349
1247 REQ_SEND; 1350 REQ_SEND;
1248} 1351}
1271 req->type = REQ_NOP; 1374 req->type = REQ_NOP;
1272 1375
1273 REQ_SEND; 1376 REQ_SEND;
1274} 1377}
1275 1378
1276#if 0
1277
1278void 1379void
1279aio_pri (int pri = DEFAULT_PRI) 1380aioreq_pri (int pri = DEFAULT_PRI)
1280 CODE: 1381 CODE:
1281 if (pri < PRI_MIN) pri = PRI_MIN; 1382 if (pri < PRI_MIN) pri = PRI_MIN;
1282 if (pri > PRI_MAX) pri = PRI_MAX; 1383 if (pri > PRI_MAX) pri = PRI_MAX;
1283 next_pri = pri + PRI_BIAS; 1384 next_pri = pri + PRI_BIAS;
1284 1385
1285#endif 1386void
1387aioreq_nice (int nice = 0)
1388 CODE:
1389 nice = next_pri - nice;
1390 if (nice < PRI_MIN) nice = PRI_MIN;
1391 if (nice > PRI_MAX) nice = PRI_MAX;
1392 next_pri = nice + PRI_BIAS;
1286 1393
1287void 1394void
1288flush () 1395flush ()
1289 PROTOTYPE: 1396 PROTOTYPE:
1290 CODE: 1397 CODE:
1339 1446
1340MODULE = IO::AIO PACKAGE = IO::AIO::REQ 1447MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1341 1448
1342void 1449void
1343cancel (aio_req_ornot req) 1450cancel (aio_req_ornot req)
1344 PROTOTYPE:
1345 CODE: 1451 CODE:
1346 req_cancel (req); 1452 req_cancel (req);
1347 1453
1348void 1454void
1349cb (aio_req_ornot req, SV *callback=&PL_sv_undef) 1455cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1385 } 1491 }
1386 } 1492 }
1387} 1493}
1388 1494
1389void 1495void
1496cancel_subs (aio_req_ornot req)
1497 CODE:
1498 req_cancel_subs (req);
1499
1500void
1390result (aio_req grp, ...) 1501result (aio_req grp, ...)
1391 CODE: 1502 CODE:
1392{ 1503{
1393 int i; 1504 int i;
1394 AV *av = newAV (); 1505 AV *av = newAV ();
1399 SvREFCNT_dec (grp->data); 1510 SvREFCNT_dec (grp->data);
1400 grp->data = (SV *)av; 1511 grp->data = (SV *)av;
1401} 1512}
1402 1513
1403void 1514void
1404feed_limit (aio_req grp, int limit) 1515limit (aio_req grp, int limit)
1405 CODE: 1516 CODE:
1406 grp->fd2 = limit; 1517 grp->fd2 = limit;
1407 aio_grp_feed (grp); 1518 aio_grp_feed (grp);
1408 1519
1409void 1520void
