
Comparing IO-AIO/AIO.xs (file contents):
Revision 1.51 by root, Sun Oct 22 22:14:33 2006 UTC vs.
Revision 1.72 by root, Tue Oct 24 17:22:17 2006 UTC

@@ -1 +1 @@
+/* solaris */
+#define _POSIX_PTHREAD_SEMANTICS 1
+
+#if __linux
+# define _GNU_SOURCE
+#endif
+
 #define _REENTRANT 1
+
 #include <errno.h>
 
 #include "EXTERN.h"
 #include "perl.h"
 #include "XSUB.h"
@@ -41 +49 @@
 # define NAME_MAX 4096
 #endif
 
 #if __ia64
 # define STACKSIZE 65536
+#elif __i386 || __x86_64 /* 16k is unreasonably high :( */
+# define STACKSIZE PTHREAD_STACK_MIN
 #else
-# define STACKSIZE 8192
+# define STACKSIZE 16384
 #endif
+
+/* buffer size for various temporary buffers */
+#define AIO_BUFSIZE 65536
+
+#define dBUF \
+  char *aio_buf; \
+  LOCK (wrklock); \
+  self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
+  UNLOCK (wrklock); \
+  if (!aio_buf) \
+    return -1;
 
 enum {
   REQ_QUIT,
   REQ_OPEN, REQ_CLOSE,
   REQ_READ, REQ_WRITE, REQ_READAHEAD,
@@ -55 +76 @@
   REQ_STAT, REQ_LSTAT, REQ_FSTAT,
   REQ_FSYNC, REQ_FDATASYNC,
   REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
   REQ_READDIR,
   REQ_LINK, REQ_SYMLINK,
-  REQ_SLEEP,
-  REQ_GROUP,
+  REQ_GROUP, REQ_NOP,
+  REQ_BUSY,
 };
 
 #define AIO_REQ_KLASS "IO::AIO::REQ"
 #define AIO_GRP_KLASS "IO::AIO::GRP"
 
 typedef struct aio_cb
 {
   struct aio_cb *volatile next;
-
-  struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
-
-  SV *self; /* the perl counterpart of this request, if any */
 
   SV *data, *callback;
   SV *fh, *fh2;
   void *dataptr, *data2ptr;
   Stat_t *statdata;
   off_t offset;
   size_t length;
   ssize_t result;
 
+  STRLEN dataoffset;
   int type;
   int fd, fd2;
   int errorno;
-  STRLEN dataoffset;
   mode_t mode; /* open */
 
-  unsigned char cancelled;
+  unsigned char flags;
+  unsigned char pri;
+
+  SV *self; /* the perl counterpart of this request, if any */
+  struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
 } aio_cb;
+
+enum {
+  FLAG_CANCELLED = 0x01,
+};
 
 typedef aio_cb *aio_req;
 typedef aio_cb *aio_req_ornot;
+
+enum {
+  PRI_MIN = -4,
+  PRI_MAX = 4,
+
+  DEFAULT_PRI = 0,
+  PRI_BIAS = -PRI_MIN,
+  NUM_PRI = PRI_MAX + PRI_BIAS + 1,
+};
+
+static int next_pri = DEFAULT_PRI + PRI_BIAS;
 
 static int started, wanted;
 static volatile int nreqs;
 static int max_outstanding = 1<<30;
 static int respipe [2];
 
+#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
+# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
+#else
+# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
+#endif
+
+#define LOCK(mutex) pthread_mutex_lock (&(mutex))
+#define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
+
+/* worker threasd management */
+static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
+
+typedef struct worker {
+  /* locked by wrklock */
+  struct worker *prev, *next;
+
+  pthread_t tid;
+
+  /* locked by reslock, reqlock or wrklock */
+  aio_req req; /* currently processed request */
+  void *dbuf;
+  DIR *dirp;
+} worker;
+
+static worker wrk_first = { &wrk_first, &wrk_first, 0 };
+
+static void worker_clear (worker *wrk)
+{
+  if (wrk->dirp)
+    {
+      closedir (wrk->dirp);
+      wrk->dirp = 0;
+    }
+
+  if (wrk->dbuf)
+    {
+      free (wrk->dbuf);
+      wrk->dbuf = 0;
+    }
+}
+
+static void worker_free (worker *wrk)
+{
+  wrk->next->prev = wrk->prev;
+  wrk->prev->next = wrk->next;
+
+  free (wrk);
+}
+
-static pthread_mutex_t reslock = PTHREAD_MUTEX_INITIALIZER;
-static pthread_mutex_t reqlock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t reslock = AIO_MUTEX_INIT;
+static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
 static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
 
-static volatile aio_req reqs, reqe; /* queue start, queue end */
-static volatile aio_req ress, rese; /* queue start, queue end */
+/*
+ * a somewhat faster data structure might be nice, but
+ * with 8 priorities this actually needs <20 insns
+ * per shift, the most expensive operation.
+ */
+typedef struct {
+  aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
+  int size;
+} reqq;
+
+static reqq req_queue;
+static reqq res_queue;
+
+int reqq_push (reqq *q, aio_req req)
+{
+  int pri = req->pri;
+  req->next = 0;
+
+  if (q->qe[pri])
+    {
+      q->qe[pri]->next = req;
+      q->qe[pri] = req;
+    }
+  else
+    q->qe[pri] = q->qs[pri] = req;
+
+  return q->size++;
+}
+
+aio_req reqq_shift (reqq *q)
+{
+  int pri;
+
+  if (!q->size)
+    return 0;
+
+  --q->size;
+
+  for (pri = NUM_PRI; pri--; )
+    {
+      aio_req req = q->qs[pri];
+
+      if (req)
+        {
+          if (!(q->qs[pri] = req->next))
+            q->qe[pri] = 0;
+
+          return req;
+        }
+    }
+
+  abort ();
+}
 
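The comment above the new reqq type explains the trade-off: one singly linked FIFO per priority, so a push is O(1) and a shift only has to scan the handful of priority buckets. For illustration, here is a minimal standalone sketch of that bucketed-FIFO idea, assuming the same NUM_PRI-style bucket count; the names node, bucket_queue, bq_push and bq_shift are invented for this sketch and are not part of AIO.xs.

    /* standalone sketch of the bucketed priority FIFO used by reqq_push/reqq_shift */
    #include <stdio.h>
    #include <stdlib.h>

    #define NPRI 9 /* mirrors NUM_PRI = PRI_MAX + PRI_BIAS + 1 with PRI_MIN = -4, PRI_MAX = 4 */

    typedef struct node {
      struct node *next;
      int pri;     /* already biased into 0 .. NPRI-1 */
      int payload;
    } node;

    typedef struct {
      node *qs[NPRI], *qe[NPRI]; /* per-priority head and tail */
      int size;
    } bucket_queue;

    /* append at the tail of the request's priority bucket; O(1) */
    static int bq_push (bucket_queue *q, node *n)
    {
      int pri = n->pri;
      n->next = 0;

      if (q->qe[pri])
        {
          q->qe[pri]->next = n;
          q->qe[pri] = n;
        }
      else
        q->qe[pri] = q->qs[pri] = n;

      return q->size++;
    }

    /* pop from the highest non-empty bucket; at most NPRI probes */
    static node *bq_shift (bucket_queue *q)
    {
      int pri;

      if (!q->size)
        return 0;

      --q->size;

      for (pri = NPRI; pri--; )
        if (q->qs[pri])
          {
            node *n = q->qs[pri];

            if (!(q->qs[pri] = n->next))
              q->qe[pri] = 0;

            return n;
          }

      abort (); /* size said non-empty, so some bucket must have had an entry */
    }

    int main (void)
    {
      bucket_queue q = { { 0 }, { 0 }, 0 };
      node a = { 0, 2, 1 }, b = { 0, 7, 2 }, c = { 0, 2, 3 };

      bq_push (&q, &a);
      bq_push (&q, &b);
      bq_push (&q, &c);

      /* prints 2 1 3: higher priority first, FIFO order within one priority */
      while (q.size)
        printf ("%d ", bq_shift (&q)->payload);

      putchar ('\n');
      return 0;
    }
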
 static void req_invoke (aio_req req);
 static void req_free (aio_req req);
+static void req_cancel (aio_req req);
 
 /* must be called at most once */
 static SV *req_sv (aio_req req, const char *klass)
 {
   if (!req->self)
@@ -118 +256 @@
   return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
 }
 
 static aio_req SvAIO_REQ (SV *sv)
 {
+  MAGIC *mg;
+
   if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
     croak ("object of class " AIO_REQ_KLASS " expected");
 
-  MAGIC *mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
+  mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
 
   return mg ? (aio_req)mg->mg_ptr : 0;
 }
 
 static void aio_grp_feed (aio_req grp)
 {
-  while (grp->length < grp->fd2)
+  while (grp->length < grp->fd2 && !(grp->flags & FLAG_CANCELLED))
     {
       int old_len = grp->length;
 
       if (grp->fh2 && SvOK (grp->fh2))
         {
@@ -141 +281 @@
           ENTER;
           SAVETMPS;
           PUSHMARK (SP);
           XPUSHs (req_sv (grp, AIO_GRP_KLASS));
           PUTBACK;
-          call_sv (grp->fh2, G_VOID | G_EVAL);
+          call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
           SPAGAIN;
           FREETMPS;
           LEAVE;
         }
 
@@ -174 +314 @@
     }
 }
 
 static void poll_wait ()
 {
-  if (nreqs && !ress)
-    {
-      fd_set rfd;
+  fd_set rfd;
+
+  while (nreqs)
+    {
+      int size;
+#if !(__i386 || __x86_64) /* safe without sempahore on these archs */
+      LOCK (reslock);
+#endif
+      size = res_queue.size;
+#if !(__i386 || __x86_64) /* safe without sempahore on these archs */
+      UNLOCK (reslock);
+#endif
+
+      if (size)
+        return;
+
       FD_ZERO(&rfd);
       FD_SET(respipe [0], &rfd);
 
       select (respipe [0] + 1, &rfd, 0, 0, 0);
     }
 }
 
 static void req_invoke (aio_req req)
 {
   dSP;
-  int errorno = errno;
 
-  if (req->cancelled || !SvOK (req->callback))
-    return;
-
+  if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
+    {
       errno = req->errorno;
 
       ENTER;
       SAVETMPS;
       PUSHMARK (SP);
       EXTEND (SP, 1);
 
       switch (req->type)
-    {
-      case REQ_READDIR:
         {
-          SV *rv = &PL_sv_undef;
-
-          if (req->result >= 0)
+          case REQ_READDIR:
             {
-              char *buf = req->data2ptr;
-              AV *av = newAV ();
+              SV *rv = &PL_sv_undef;
 
-              while (req->result)
+              if (req->result >= 0)
                 {
+                  int i;
+                  char *buf = req->data2ptr;
+                  AV *av = newAV ();
+
+                  av_extend (av, req->result - 1);
+
+                  for (i = 0; i < req->result; ++i)
+                    {
                       SV *sv = newSVpv (buf, 0);
 
-                      av_push (av, sv);
+                      av_store (av, i, sv);
                       buf += SvCUR (sv) + 1;
-                      req->result--;
+                    }
+
+                  rv = sv_2mortal (newRV_noinc ((SV *)av));
                 }
 
-              rv = sv_2mortal (newRV_noinc ((SV *)av));
+              PUSHs (rv);
             }
+            break;
 
-          PUSHs (rv);
+          case REQ_OPEN:
+            {
+              /* convert fd to fh */
+              SV *fh;
+
+              PUSHs (sv_2mortal (newSViv (req->result)));
+              PUTBACK;
+              call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
+              SPAGAIN;
+
+              fh = SvREFCNT_inc (POPs);
+
+              PUSHMARK (SP);
+              XPUSHs (sv_2mortal (fh));
+            }
+            break;
+
+          case REQ_GROUP:
+            req->fd = 2; /* mark group as finished */
+
+            if (req->data)
+              {
+                int i;
+                AV *av = (AV *)req->data;
+
+                EXTEND (SP, AvFILL (av) + 1);
+                for (i = 0; i <= AvFILL (av); ++i)
+                  PUSHs (*av_fetch (av, i, 0));
+              }
+            break;
+
+          case REQ_NOP:
+          case REQ_BUSY:
+            break;
+
+          default:
+            PUSHs (sv_2mortal (newSViv (req->result)));
+            break;
         }
-      break;
 
-      case REQ_OPEN:
-        {
-          /* convert fd to fh */
-          SV *fh;
 
-          PUSHs (sv_2mortal (newSViv (req->result)));
       PUTBACK;
-          call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
-          SPAGAIN;
-
-          fh = SvREFCNT_inc (POPs);
-
-          PUSHMARK (SP);
-          XPUSHs (sv_2mortal (fh));
-        }
-      break;
-
-      case REQ_GROUP:
-        req->fd = 2; /* mark group as finished */
-
-        if (req->data)
-          {
-            int i;
-            AV *av = (AV *)req->data;
-
-            EXTEND (SP, AvFILL (av) + 1);
-            for (i = 0; i <= AvFILL (av); ++i)
-              PUSHs (*av_fetch (av, i, 0));
-          }
-        break;
-
-      case REQ_SLEEP:
-        break;
-
-      default:
-        PUSHs (sv_2mortal (newSViv (req->result)));
-        break;
-    }
-
-
-  PUTBACK;
       call_sv (req->callback, G_VOID | G_EVAL);
       SPAGAIN;
 
       FREETMPS;
       LEAVE;
-
-  errno = errorno;
-
-  if (SvTRUE (ERRSV))
-    {
-      req_free (req);
-      croak (0);
-    }
-}
+    }
 
-static void req_free (aio_req req)
-{
   if (req->grp)
     {
       aio_req grp = req->grp;
 
       /* unlink request */
@@ -298 +442 @@
         grp->grp_first = req->grp_next;
 
       aio_grp_dec (grp);
     }
 
+  if (SvTRUE (ERRSV))
+    {
+      req_free (req);
+      croak (0);
+    }
+}
+
+static void req_free (aio_req req)
+{
   if (req->self)
     {
       sv_unmagic (req->self, PERL_MAGIC_ext);
       SvREFCNT_dec (req->self);
     }
@@ -310 +463 @@
   SvREFCNT_dec (req->fh);
   SvREFCNT_dec (req->fh2);
   SvREFCNT_dec (req->callback);
   Safefree (req->statdata);
 
-  if (req->type == REQ_READDIR && req->result >= 0)
+  if (req->type == REQ_READDIR)
     free (req->data2ptr);
 
   Safefree (req);
 }
 
+static void req_cancel_subs (aio_req grp)
+{
+  aio_req sub;
+
+  if (grp->type != REQ_GROUP)
+    return;
+
+  SvREFCNT_dec (grp->fh2);
+  grp->fh2 = 0;
+
+  for (sub = grp->grp_first; sub; sub = sub->grp_next)
+    req_cancel (sub);
+}
+
 static void req_cancel (aio_req req)
 {
-  req->cancelled = 1;
+  req->flags |= FLAG_CANCELLED;
 
-  if (req->type == REQ_GROUP)
-    {
-      aio_req sub;
-
-      for (sub = req->grp_first; sub; sub = sub->grp_next)
-        req_cancel (sub);
-    }
+  req_cancel_subs (req);
 }
 
 static int poll_cb ()
 {
   dSP;
@@ -338 +499 @@
   int do_croak = 0;
   aio_req req;
 
   for (;;)
     {
-      pthread_mutex_lock (&reslock);
-      req = ress;
+      LOCK (reslock);
+      req = reqq_shift (&res_queue);
 
       if (req)
         {
-          ress = req->next;
-
-          if (!ress)
+          if (!res_queue.size)
            {
              /* read any signals sent by the worker threads */
              char buf [32];
              while (read (respipe [0], buf, 32) == 32)
                ;
-
-              rese = 0;
            }
         }
 
-      pthread_mutex_unlock (&reslock);
+      UNLOCK (reslock);
 
       if (!req)
         break;
 
       --nreqs;
@@ -400 +557 @@
 
 static void *aio_proc(void *arg);
 
 static void start_thread (void)
 {
+  worker *wrk = calloc (1, sizeof (worker));
+
+  if (!wrk)
+    croak ("unable to allocate worker thread data");
+
   sigset_t fullsigset, oldsigset;
-  pthread_t tid;
   pthread_attr_t attr;
 
   pthread_attr_init (&attr);
   pthread_attr_setstacksize (&attr, STACKSIZE);
   pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
 
   sigfillset (&fullsigset);
+
+  LOCK (wrklock);
   sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
 
-  if (pthread_create (&tid, &attr, aio_proc, 0) == 0)
+  if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
+    {
+      wrk->prev = &wrk_first;
+      wrk->next = wrk_first.next;
+      wrk_first.next->prev = wrk;
+      wrk_first.next = wrk;
       started++;
+    }
+  else
+    free (wrk);
 
   sigprocmask (SIG_SETMASK, &oldsigset, 0);
+  UNLOCK (wrklock);
 }
 
 static void req_send (aio_req req)
 {
   while (started < wanted && nreqs >= started)
     start_thread ();
 
   ++nreqs;
 
-  pthread_mutex_lock (&reqlock);
-
-  req->next = 0;
-
-  if (reqe)
-    {
-      reqe->next = req;
-      reqe = req;
-    }
-  else
-    reqe = reqs = req;
-
+  LOCK (reqlock);
+  reqq_push (&req_queue, req);
   pthread_cond_signal (&reqwait);
-  pthread_mutex_unlock (&reqlock);
+  UNLOCK (reqlock);
 
   if (nreqs > max_outstanding)
     for (;;)
       {
         poll_cb ();
@@ -454 +616 @@
 }
 
 static void end_thread (void)
 {
   aio_req req;
+
   Newz (0, req, 1, aio_cb);
+
   req->type = REQ_QUIT;
+  req->pri = PRI_MAX + PRI_BIAS;
 
   req_send (req);
 }
 
 static void min_parallel (int nthreads)
@@ -517 +682 @@
 static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
 {
   ssize_t res;
   off_t ooffset;
 
-  pthread_mutex_lock (&preadwritelock);
+  LOCK (preadwritelock);
   ooffset = lseek (fd, 0, SEEK_CUR);
   lseek (fd, offset, SEEK_SET);
   res = read (fd, buf, count);
   lseek (fd, ooffset, SEEK_SET);
-  pthread_mutex_unlock (&preadwritelock);
+  UNLOCK (preadwritelock);
 
   return res;
 }
 
 static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
 {
   ssize_t res;
   off_t ooffset;
 
-  pthread_mutex_lock (&preadwritelock);
+  LOCK (preadwritelock);
   ooffset = lseek (fd, 0, SEEK_CUR);
   lseek (fd, offset, SEEK_SET);
   res = write (fd, buf, count);
   lseek (fd, offset, SEEK_SET);
-  pthread_mutex_unlock (&preadwritelock);
+  UNLOCK (preadwritelock);
 
   return res;
 }
 #endif
 
@@ -552 +717 @@
 #if !HAVE_READAHEAD
 # define readahead aio_readahead
 
 static ssize_t readahead (int fd, off_t offset, size_t count)
 {
-  char readahead_buf[4096];
+  dBUF;
 
   while (count > 0)
     {
-      size_t len = count < sizeof (readahead_buf) ? count : sizeof (readahead_buf);
+      size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
 
-      pread (fd, readahead_buf, len, offset);
+      pread (fd, aio_buf, len, offset);
       offset += len;
       count -= len;
     }
 
   errno = 0;
@@ -577 +742 @@
 static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
 {
   struct dirent *e;
   int errorno;
 
-  pthread_mutex_lock (&readdirlock);
+  LOCK (readdirlock);
 
   e = readdir (dirp);
   errorno = errno;
 
   if (e)
@@ -590 +755 @@
       strcpy (ent->d_name, e->d_name);
     }
   else
     *res = 0;
 
-  pthread_mutex_unlock (&readdirlock);
+  UNLOCK (readdirlock);
 
   errno = errorno;
   return e ? 0 : -1;
 }
 #endif
 
 /* sendfile always needs emulation */
-static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count)
+static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
 {
   ssize_t res;
 
   if (!count)
     return 0;
@@ -657 +822 @@
 #endif
          )
       )
     {
       /* emulate sendfile. this is a major pain in the ass */
-      char buf[4096];
+      dBUF;
+
       res = 0;
 
       while (count)
         {
           ssize_t cnt;
 
-          cnt = pread (ifd, buf, count > 4096 ? 4096 : count, offset);
+          cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
 
           if (cnt <= 0)
             {
               if (cnt && !res) res = -1;
               break;
             }
 
-          cnt = write (ofd, buf, cnt);
+          cnt = write (ofd, aio_buf, cnt);
 
           if (cnt <= 0)
             {
               if (cnt && !res) res = -1;
               break;
@@ -690 +856 @@
 
   return res;
 }
 
 /* read a full directory */
-static int scandir_ (const char *path, void **namesp)
+static void scandir_ (aio_req req, worker *self)
 {
-  DIR *dirp = opendir (path);
+  DIR *dirp;
   union
   {
     struct dirent d;
     char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
-  } u;
+  } *u;
   struct dirent *entp;
   char *name, *names;
   int memlen = 4096;
   int memofs = 0;
   int res = 0;
   int errorno;
 
-  if (!dirp)
-    return -1;
+  LOCK (wrklock);
+  self->dirp = dirp = opendir (req->dataptr);
+  self->dbuf = u = malloc (sizeof (*u));
+  UNLOCK (wrklock);
 
-  names = malloc (memlen);
+  req->data2ptr = names = malloc (memlen);
 
+  if (dirp && u && names)
     for (;;)
       {
-        errno = 0, readdir_r (dirp, &u.d, &entp);
+        errno = 0;
+        readdir_r (dirp, &u->d, &entp);
 
         if (!entp)
           break;
 
         name = entp->d_name;
 
         if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
           {
             int len = strlen (name) + 1;
 
             res++;
 
             while (memofs + len > memlen)
               {
                 memlen *= 2;
-                names = realloc (names, memlen);
+                LOCK (wrklock);
+                req->data2ptr = names = realloc (names, memlen);
+                UNLOCK (wrklock);
+
                 if (!names)
                   break;
               }
 
             memcpy (names + memofs, name, len);
             memofs += len;
           }
       }
 
-  errorno = errno;
-  closedir (dirp);
-
-  if (errorno)
-    {
-      free (names);
-      errno = errorno;
+  if (errno)
     res = -1;
-    }
-
-  *namesp = (void *)names;
-  return res;
+
+  req->result = res;
 }
 
 /*****************************************************************************/
 
 static void *aio_proc (void *thr_arg)
 {
   aio_req req;
   int type;
+  worker *self = (worker *)thr_arg;
 
   do
     {
-      pthread_mutex_lock (&reqlock);
+      LOCK (reqlock);
 
       for (;;)
         {
-          req = reqs;
-
-          if (reqs)
-            {
-              reqs = reqs->next;
-              if (!reqs) reqe = 0;
-            }
+          self->req = req = reqq_shift (&req_queue);
 
           if (req)
             break;
 
           pthread_cond_wait (&reqwait, &reqlock);
         }
 
-      pthread_mutex_unlock (&reqlock);
+      UNLOCK (reqlock);
 
       errno = 0; /* strictly unnecessary */
-
-      if (!req->cancelled)
-        switch (type = req->type) /* remember type for QUIT check */
+      type = req->type; /* remember type for QUIT check */
+
+      if (!(req->flags & FLAG_CANCELLED))
+        switch (type)
          {
            case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
            case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
 
            case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
-           case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length); break;
+           case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length, self); break;
 
            case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break;
            case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break;
            case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break;
 
@@ -806 +967 @@
            case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break;
            case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break;
 
            case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
            case REQ_FSYNC: req->result = fsync (req->fd); break;
-           case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break;
+           case REQ_READDIR: scandir_ (req, self); break;
 
-           case REQ_SLEEP:
+           case REQ_BUSY:
              {
                struct timeval tv;
 
                tv.tv_sec = req->fd;
                tv.tv_usec = req->fd2;
 
                req->result = select (0, 0, 0, 0, &tv);
              }
 
+           case REQ_GROUP:
+           case REQ_NOP:
            case REQ_QUIT:
              break;
 
            default:
              req->result = ENOSYS;
              break;
          }
 
      req->errorno = errno;
 
-      pthread_mutex_lock (&reslock);
+      LOCK (reslock);
 
-      req->next = 0;
-
-      printf ("queue rese %p\n", rese);//D
-      if (rese)
-        {
-          rese->next = req;
-          rese = req;
-        }
-      else
-        {
-          rese = ress = req;
-
+      if (!reqq_push (&res_queue, req))
        /* write a dummy byte to the pipe so fh becomes ready */
        write (respipe [1], &respipe, 1);
-        }
 
-      pthread_mutex_unlock (&reslock);
+      self->req = 0;
+      worker_clear (self);
+
+      UNLOCK (reslock);
     }
   while (type != REQ_QUIT);
 
+  LOCK (wrklock);
+  worker_free (self);
+  UNLOCK (wrklock);
+
   return 0;
 }
 
 /*****************************************************************************/
 
 static void atfork_prepare (void)
 {
-  pthread_mutex_lock (&reqlock);
-  pthread_mutex_lock (&reslock);
+  LOCK (wrklock);
+  LOCK (reqlock);
+  LOCK (reslock);
 #if !HAVE_PREADWRITE
-  pthread_mutex_lock (&preadwritelock);
+  LOCK (preadwritelock);
 #endif
 #if !HAVE_READDIR_R
-  pthread_mutex_lock (&readdirlock);
+  LOCK (readdirlock);
 #endif
 }
 
 static void atfork_parent (void)
 {
 #if !HAVE_READDIR_R
-  pthread_mutex_unlock (&readdirlock);
+  UNLOCK (readdirlock);
 #endif
 #if !HAVE_PREADWRITE
-  pthread_mutex_unlock (&preadwritelock);
+  UNLOCK (preadwritelock);
 #endif
-  pthread_mutex_unlock (&reslock);
-  pthread_mutex_unlock (&reqlock);
+  UNLOCK (reslock);
+  UNLOCK (reqlock);
+  UNLOCK (wrklock);
 }
 
 static void atfork_child (void)
 {
   aio_req prv;
 
+  while (prv = reqq_shift (&req_queue))
+    req_free (prv);
+
+  while (prv = reqq_shift (&res_queue))
+    req_free (prv);
+
+  while (wrk_first.next != &wrk_first)
+    {
+      worker *wrk = wrk_first.next;
+
+      if (wrk->req)
+        req_free (wrk->req);
+
+      worker_clear (wrk);
+      worker_free (wrk);
+    }
+
   started = 0;
-
-  while (reqs)
-    {
-      prv = reqs;
-      reqs = prv->next;
-      req_free (prv);
-    }
-
-  reqs = reqe = 0;
-
-  while (ress)
-    {
-      prv = ress;
-      ress = prv->next;
-      req_free (prv);
-    }
-
-  ress = rese = 0;
+  nreqs = 0;
 
   close (respipe [0]);
   close (respipe [1]);
   create_pipe ();
 
   atfork_parent ();
 }
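The three handlers above follow the usual prepare/parent/child pattern for pthread_atfork(): take every module lock before the fork so no thread holds one across it, then release on both sides afterwards. AIO.xs registers its own handlers elsewhere in the file (not part of this diff); the following is only an illustrative sketch of how such handlers are wired up, with made-up handler and mutex names, compiled with -pthread.

    /* standalone sketch of the pthread_atfork prepare/parent/child pattern */
    #include <pthread.h>
    #include <stdio.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void demo_prepare (void) { pthread_mutex_lock (&lock); }   /* quiesce before fork */
    static void demo_parent  (void) { pthread_mutex_unlock (&lock); } /* resume in the parent */
    static void demo_child   (void) { pthread_mutex_unlock (&lock); } /* child starts with a usable lock */

    int main (void)
    {
      pthread_atfork (demo_prepare, demo_parent, demo_child);

      pid_t pid = fork ();

      if (pid == 0)
        {
          /* safe: demo_child released the lock in the child */
          pthread_mutex_lock (&lock);
          puts ("child: lock acquired");
          pthread_mutex_unlock (&lock);
          _exit (0);
        }

      waitpid (pid, 0, 0);
      pthread_mutex_lock (&lock);
      puts ("parent: lock acquired");
      pthread_mutex_unlock (&lock);
      return 0;
    }
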
 
 #define dREQ \
   aio_req req; \
+  int req_pri = next_pri; \
+  next_pri = DEFAULT_PRI + PRI_BIAS; \
   \
   if (SvOK (callback) && !SvROK (callback)) \
     croak ("callback must be undef or of reference type"); \
   \
   Newz (0, req, 1, aio_cb); \
   if (!req) \
     croak ("out of memory during aio_req allocation"); \
   \
-  req->callback = newSVsv (callback)
+  req->callback = newSVsv (callback); \
+  req->pri = req_pri
 
 #define REQ_SEND \
   req_send (req); \
   \
   if (GIMME_V != G_VOID) \
@@ -1184 +1347 @@
 
         REQ_SEND;
 }
 
 void
-aio_sleep (delay,callback=&PL_sv_undef)
+aio_busy (delay,callback=&PL_sv_undef)
         double delay
         SV * callback
         PPCODE:
 {
         dREQ;
 
-        req->type = REQ_SLEEP;
+        req->type = REQ_BUSY;
         req->fd = delay < 0. ? 0 : delay;
         req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd);
 
         REQ_SEND;
 }
@@ -1205 +1368 @@
         SV * callback
         PROTOTYPE: ;$
         PPCODE:
 {
         dREQ;
+
         req->type = REQ_GROUP;
         req_send (req);
+
         XPUSHs (req_sv (req, AIO_GRP_KLASS));
 }
+
+void
+aio_nop (callback=&PL_sv_undef)
+        SV * callback
+        PPCODE:
+{
+        dREQ;
+
+        req->type = REQ_NOP;
+
+        REQ_SEND;
+}
+
+void
+aioreq_pri (int pri = DEFAULT_PRI)
+        CODE:
+        if (pri < PRI_MIN) pri = PRI_MIN;
+        if (pri > PRI_MAX) pri = PRI_MAX;
+        next_pri = pri + PRI_BIAS;
+
+void
+aioreq_nice (int nice = 0)
+        CODE:
+        nice = next_pri - nice;
+        if (nice < PRI_MIN) nice = PRI_MIN;
+        if (nice > PRI_MAX) nice = PRI_MAX;
+        next_pri = nice + PRI_BIAS;
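aioreq_pri above clamps the requested priority into PRI_MIN..PRI_MAX and then adds PRI_BIAS, so the stored value can be used directly as an index into the NUM_PRI request-queue buckets (aioreq_nice applies the same clamp to a value derived from the current next_pri). A tiny standalone sketch of that clamp-and-bias arithmetic; pri_to_bucket is a made-up name for the example, not an AIO.xs function.

    /* sketch of the priority -> bucket-index mapping used by aioreq_pri */
    #include <stdio.h>

    enum { PRI_MIN = -4, PRI_MAX = 4, PRI_BIAS = -PRI_MIN, NUM_PRI = PRI_MAX + PRI_BIAS + 1 };

    static int pri_to_bucket (int pri)
    {
      if (pri < PRI_MIN) pri = PRI_MIN;
      if (pri > PRI_MAX) pri = PRI_MAX;
      return pri + PRI_BIAS; /* always lands in 0 .. NUM_PRI-1 */
    }

    int main (void)
    {
      int samples[] = { -6, -4, 0, 4, 9 };
      int i;

      /* prints -6->0, -4->0, 0->4, 4->8, 9->8 (NUM_PRI = 9) */
      for (i = 0; i < 5; ++i)
        printf ("%d -> bucket %d\n", samples[i], pri_to_bucket (samples[i]));

      return 0;
    }
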
 
 void
 flush ()
         PROTOTYPE:
         CODE:
@@ -1267 +1459 @@
 
 MODULE = IO::AIO PACKAGE = IO::AIO::REQ
 
 void
 cancel (aio_req_ornot req)
-        PROTOTYPE:
         CODE:
         req_cancel (req);
 
+void
+cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
+        CODE:
+        SvREFCNT_dec (req->callback);
+        req->callback = newSVsv (callback);
+
 MODULE = IO::AIO PACKAGE = IO::AIO::GRP
 
 void
 add (aio_req grp, ...)
         PPCODE:
 {
         int i;
+        aio_req req;
 
         if (grp->fd == 2)
           croak ("cannot add requests to IO::AIO::GRP after the group finished");
 
         for (i = 1; i < items; ++i )
           {
             if (GIMME_V != G_VOID)
               XPUSHs (sv_2mortal (newSVsv (ST (i))));
 
-            aio_req req = SvAIO_REQ (ST (i));
+            req = SvAIO_REQ (ST (i));
 
             if (req)
               {
                 ++grp->length;
                 req->grp = grp;
@@ -1306 +1504 @@
               }
           }
 }
 
 void
+cancel_subs (aio_req_ornot req)
+        CODE:
+        req_cancel_subs (req);
+
+void
 result (aio_req grp, ...)
         CODE:
 {
         int i;
         AV *av = newAV ();
@@ -1320 +1523 @@
         SvREFCNT_dec (grp->data);
         grp->data = (SV *)av;
 }
 
 void
-lock (aio_req grp)
-        CODE:
-        ++grp->length;
-
-void
-unlock (aio_req grp)
-        CODE:
-        aio_grp_dec (grp);
-
-void
-feeder_limit (aio_req grp, int limit)
+limit (aio_req grp, int limit)
         CODE:
         grp->fd2 = limit;
         aio_grp_feed (grp);
 
 void
-set_feeder (aio_req grp, SV *callback=&PL_sv_undef)
+feed (aio_req grp, SV *callback=&PL_sv_undef)
         CODE:
 {
         SvREFCNT_dec (grp->fh2);
         grp->fh2 = newSVsv (callback);
 

Diff legend: lines beginning with "-" exist only in revision 1.51 (removed), lines beginning with "+" exist only in revision 1.72 (added), unprefixed lines are unchanged context, and "@@ -N +M @@" markers give the line at which each hunk starts in 1.51 and 1.72 respectively.