/cvs/IO-AIO/AIO.xs

Comparing IO-AIO/AIO.xs (file contents):
Revision 1.49 by root, Sun Oct 22 13:33:28 2006 UTC vs.
Revision 1.71 by root, Tue Oct 24 16:35:04 2006 UTC

1/* solaris */
2#define _POSIX_PTHREAD_SEMANTICS 1
3
4#if __linux
5# define _GNU_SOURCE
6#endif
7
1#define _REENTRANT 1 8#define _REENTRANT 1
9
2#include <errno.h> 10#include <errno.h>
3 11
4#include "EXTERN.h" 12#include "EXTERN.h"
5#include "perl.h" 13#include "perl.h"
6#include "XSUB.h" 14#include "XSUB.h"
41# define NAME_MAX 4096 49# define NAME_MAX 4096
42#endif 50#endif
43 51
44#if __ia64 52#if __ia64
45# define STACKSIZE 65536 53# define STACKSIZE 65536
54#elif __i386 || __x86_64 /* 16k is unreasonably high :( */
55# define STACKSIZE PTHREAD_STACK_MIN
46#else 56#else
47# define STACKSIZE 8192 57# define STACKSIZE 16384
48#endif 58#endif
59
60/* buffer size for various temporary buffers */
61#define AIO_BUFSIZE 65536
62
63#define dBUF \
64 char *aio_buf; \
65 LOCK (wrklock); \
66 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
67 UNLOCK (wrklock); \
68 if (!aio_buf) \
69 return -1;
49 70
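The dBUF macro above replaces the fixed on-stack scratch buffers of the old code with a heap buffer that is published to the owning worker under wrklock, so worker_clear can always find and free it even if the request is torn down early. Below is a minimal standalone sketch of that pattern; the names (scratch_worker, do_request, cleanup) are illustrative and not part of AIO.xs.

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#define SCRATCH_SIZE 65536                       /* plays the role of AIO_BUFSIZE */

typedef struct {
    pthread_mutex_t *lock;                       /* plays the role of wrklock */
    char *scratch;                               /* plays the role of worker->dbuf */
} scratch_worker;

static int do_request (scratch_worker *self)
{
    char *buf;

    pthread_mutex_lock (self->lock);
    self->scratch = buf = malloc (SCRATCH_SIZE); /* what dBUF expands to */
    pthread_mutex_unlock (self->lock);

    if (!buf)
        return -1;                               /* dBUF bails out of the handler like this */

    memset (buf, 0, SCRATCH_SIZE);               /* ... use the buffer ... */
    return 0;
}

static void cleanup (scratch_worker *self)       /* mirrors worker_clear */
{
    free (self->scratch);
    self->scratch = 0;
}

int main (void)
{
    pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    scratch_worker w = { &lock, 0 };

    do_request (&w);
    cleanup (&w);
    return 0;
}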
50enum { 71enum {
51 REQ_QUIT, 72 REQ_QUIT,
52 REQ_OPEN, REQ_CLOSE, 73 REQ_OPEN, REQ_CLOSE,
53 REQ_READ, REQ_WRITE, REQ_READAHEAD, 74 REQ_READ, REQ_WRITE, REQ_READAHEAD,
55 REQ_STAT, REQ_LSTAT, REQ_FSTAT, 76 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
56 REQ_FSYNC, REQ_FDATASYNC, 77 REQ_FSYNC, REQ_FDATASYNC,
57 REQ_UNLINK, REQ_RMDIR, REQ_RENAME, 78 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
58 REQ_READDIR, 79 REQ_READDIR,
59 REQ_LINK, REQ_SYMLINK, 80 REQ_LINK, REQ_SYMLINK,
60 REQ_SLEEP, 81 REQ_GROUP, REQ_NOP,
61 REQ_GROUP, 82 REQ_BUSY,
62}; 83};
63 84
64#define AIO_REQ_KLASS "IO::AIO::REQ" 85#define AIO_REQ_KLASS "IO::AIO::REQ"
65#define AIO_GRP_KLASS "IO::AIO::GRP" 86#define AIO_GRP_KLASS "IO::AIO::GRP"
66 87
67typedef struct aio_cb 88typedef struct aio_cb
68{ 89{
69 struct aio_cb *volatile next; 90 struct aio_cb *volatile next;
70
71 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
72
73 SV *self; /* the perl counterpart of this request, if any */
74 91
75 SV *data, *callback; 92 SV *data, *callback;
76 SV *fh, *fh2; 93 SV *fh, *fh2;
77 void *dataptr, *data2ptr; 94 void *dataptr, *data2ptr;
78 Stat_t *statdata; 95 Stat_t *statdata;
79 off_t offset; 96 off_t offset;
80 size_t length; 97 size_t length;
81 ssize_t result; 98 ssize_t result;
82 99
100 STRLEN dataoffset;
83 int type; 101 int type;
84 int fd, fd2; 102 int fd, fd2;
85 int errorno; 103 int errorno;
86 STRLEN dataoffset;
87 mode_t mode; /* open */ 104 mode_t mode; /* open */
105
88 unsigned char cancelled; 106 unsigned char flags;
107 unsigned char pri;
108
109 SV *self; /* the perl counterpart of this request, if any */
110 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
89} aio_cb; 111} aio_cb;
112
113enum {
114 FLAG_CANCELLED = 0x01,
115};
90 116
91typedef aio_cb *aio_req; 117typedef aio_cb *aio_req;
92typedef aio_cb *aio_req_ornot; 118typedef aio_cb *aio_req_ornot;
119
120enum {
121 PRI_MIN = -4,
122 PRI_MAX = 4,
123
124 DEFAULT_PRI = 0,
125 PRI_BIAS = -PRI_MIN,
126 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
127};
128
129static int next_pri = DEFAULT_PRI + PRI_BIAS;
93 130
94static int started, wanted; 131static int started, wanted;
95static volatile int nreqs; 132static volatile int nreqs;
96static int max_outstanding = 1<<30; 133static int max_outstanding = 1<<30;
97static int respipe [2]; 134static int respipe [2];
98 135
136#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
137# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
138#else
139# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
140#endif
141
142#define LOCK(mutex) pthread_mutex_lock (&(mutex))
143#define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
144
 145/* worker thread management */
146static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
147
148typedef struct worker {
149 /* locked by wrklock */
150 struct worker *prev, *next;
151
152 pthread_t tid;
153
154 /* locked by reslock, reqlock or wrklock */
155 aio_req req; /* currently processed request */
156 void *dbuf;
157 DIR *dirp;
158} worker;
159
160static worker wrk_first = { &wrk_first, &wrk_first, 0 };
161
162static void worker_clear (worker *wrk)
163{
164 if (wrk->dirp)
165 {
166 closedir (wrk->dirp);
167 wrk->dirp = 0;
168 }
169
170 if (wrk->dbuf)
171 {
172 free (wrk->dbuf);
173 wrk->dbuf = 0;
174 }
175}
176
177static void worker_free (worker *wrk)
178{
179 wrk->next->prev = wrk->prev;
180 wrk->prev->next = wrk->next;
181
182 free (wrk);
183}
184
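The new worker bookkeeping keeps every thread on a circular doubly-linked list anchored at wrk_first, so start_thread can splice a worker in and worker_free can unlink it without any search. A standalone sketch of that list discipline follows; all names are illustrative.

#include <stdio.h>

typedef struct lnode {
    struct lnode *prev, *next;
    const char *name;
} lnode;

static lnode anchor = { &anchor, &anchor, "anchor" };   /* like wrk_first: points at itself when empty */

static void link_after_anchor (lnode *n)                /* what start_thread does on success */
{
    n->prev = &anchor;
    n->next = anchor.next;
    anchor.next->prev = n;
    anchor.next = n;
}

static void unlink_node (lnode *n)                      /* what worker_free does before free () */
{
    n->next->prev = n->prev;
    n->prev->next = n->next;
}

int main (void)
{
    lnode a = { 0, 0, "worker-a" }, b = { 0, 0, "worker-b" };
    lnode *p;

    link_after_anchor (&a);
    link_after_anchor (&b);
    unlink_node (&a);

    for (p = anchor.next; p != &anchor; p = p->next)
        printf ("%s\n", p->name);                       /* prints only worker-b */

    return 0;
}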
99static pthread_mutex_t reslock = PTHREAD_MUTEX_INITIALIZER; 185static pthread_mutex_t reslock = AIO_MUTEX_INIT;
100static pthread_mutex_t reqlock = PTHREAD_MUTEX_INITIALIZER; 186static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
101static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER; 187static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
102 188
103static volatile aio_req reqs, reqe; /* queue start, queue end */ 189/*
104static volatile aio_req ress, rese; /* queue start, queue end */ 190 * a somewhat faster data structure might be nice, but
191 * with 8 priorities this actually needs <20 insns
192 * per shift, the most expensive operation.
193 */
194typedef struct {
195 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
196 int size;
197} reqq;
105 198
199static reqq req_queue;
200static reqq res_queue;
201
202int reqq_push (reqq *q, aio_req req)
203{
204 int pri = req->pri;
205 req->next = 0;
206
207 if (q->qe[pri])
208 {
209 q->qe[pri]->next = req;
210 q->qe[pri] = req;
211 }
212 else
213 q->qe[pri] = q->qs[pri] = req;
214
215 return q->size++;
216}
217
218aio_req reqq_shift (reqq *q)
219{
220 int pri;
221
222 if (!q->size)
223 return 0;
224
225 --q->size;
226
227 for (pri = NUM_PRI; pri--; )
228 {
229 aio_req req = q->qs[pri];
230
231 if (req)
232 {
233 if (!(q->qs[pri] = req->next))
234 q->qe[pri] = 0;
235
236 return req;
237 }
238 }
239
240 abort ();
241}
242
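The single request and result queues of revision 1.49 become a reqq: one FIFO per biased priority level, so reqq_shift hands out the oldest request of the highest non-empty level and a REQ_QUIT queued at PRI_MAX overtakes ordinary work. A standalone sketch under illustrative names; the bucket count 9 matches NUM_PRI for PRI_MIN = -4 .. PRI_MAX = 4.

#include <stdio.h>
#include <stdlib.h>

enum { PRI_LEVELS = 9 };                     /* NUM_PRI for priorities -4 .. +4 */

typedef struct node {
    struct node *next;
    int pri;                                 /* already biased into 0 .. PRI_LEVELS-1 */
    const char *name;
} node;

typedef struct {
    node *qs[PRI_LEVELS], *qe[PRI_LEVELS];   /* queue start / queue end per level */
    int size;
} prio_queue;

static void push (prio_queue *q, node *n)
{
    n->next = 0;

    if (q->qe[n->pri])
    {
        q->qe[n->pri]->next = n;             /* append to this level's tail */
        q->qe[n->pri] = n;
    }
    else
        q->qe[n->pri] = q->qs[n->pri] = n;   /* first entry on this level */

    ++q->size;
}

static node *shift_highest (prio_queue *q)
{
    int pri;

    if (!q->size)
        return 0;

    --q->size;

    for (pri = PRI_LEVELS; pri--; )          /* scan from the highest level down */
    {
        node *n = q->qs[pri];

        if (n)
        {
            if (!(q->qs[pri] = n->next))
                q->qe[pri] = 0;              /* level drained, clear its tail */

            return n;
        }
    }

    abort ();                                /* size and buckets out of sync */
}

int main (void)
{
    prio_queue q = { { 0 }, { 0 }, 0 };
    node normal = { 0, 4, "normal" };        /* DEFAULT_PRI + PRI_BIAS */
    node urgent = { 0, 8, "urgent" };        /* PRI_MAX + PRI_BIAS, like REQ_QUIT */

    push (&q, &normal);
    push (&q, &urgent);

    printf ("%s\n", shift_highest (&q)->name);   /* urgent */
    printf ("%s\n", shift_highest (&q)->name);   /* normal */
    return 0;
}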
243static void req_invoke (aio_req req);
106static void req_free (aio_req req); 244static void req_free (aio_req req);
107 245
108/* must be called at most once */ 246/* must be called at most once */
109static SV *req_sv (aio_req req, const char *klass) 247static SV *req_sv (aio_req req, const char *klass)
110{ 248{
117 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1))); 255 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
118} 256}
119 257
120static aio_req SvAIO_REQ (SV *sv) 258static aio_req SvAIO_REQ (SV *sv)
121{ 259{
260 MAGIC *mg;
261
122 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv)) 262 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
123 croak ("object of class " AIO_REQ_KLASS " expected"); 263 croak ("object of class " AIO_REQ_KLASS " expected");
124 264
125 MAGIC *mg = mg_find (SvRV (sv), PERL_MAGIC_ext); 265 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
126 266
127 return mg ? (aio_req)mg->mg_ptr : 0; 267 return mg ? (aio_req)mg->mg_ptr : 0;
128} 268}
129 269
130static void aio_grp_feed (aio_req grp) 270static void aio_grp_feed (aio_req grp)
131{ 271{
132 while (grp->length < grp->fd2) 272 while (grp->length < grp->fd2 && !(grp->flags & FLAG_CANCELLED))
133 { 273 {
134 int old_len = grp->length; 274 int old_len = grp->length;
135 275
136 if (grp->fh2 && SvOK (grp->fh2)) 276 if (grp->fh2 && SvOK (grp->fh2))
137 { 277 {
140 ENTER; 280 ENTER;
141 SAVETMPS; 281 SAVETMPS;
142 PUSHMARK (SP); 282 PUSHMARK (SP);
143 XPUSHs (req_sv (grp, AIO_GRP_KLASS)); 283 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
144 PUTBACK; 284 PUTBACK;
145 call_sv (grp->fh2, G_VOID | G_EVAL); 285 call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
146 SPAGAIN; 286 SPAGAIN;
147 FREETMPS; 287 FREETMPS;
148 LEAVE; 288 LEAVE;
149 } 289 }
150 290
156 break; 296 break;
157 } 297 }
158 } 298 }
159} 299}
160 300
301static void aio_grp_dec (aio_req grp)
302{
303 --grp->length;
304
305 /* call feeder, if applicable */
306 aio_grp_feed (grp);
307
308 /* finish, if done */
309 if (!grp->length && grp->fd)
310 {
311 req_invoke (grp);
312 req_free (grp);
313 }
314}
315
161static void poll_wait () 316static void poll_wait ()
162{ 317{
163 if (nreqs && !ress)
164 {
165 fd_set rfd; 318 fd_set rfd;
319
320 while (nreqs)
321 {
322 int size;
 323#if !(__i386 || __x86_64) /* safe without semaphore on these archs */
324 LOCK (reslock);
325#endif
326 size = res_queue.size;
 327#if !(__i386 || __x86_64) /* safe without semaphore on these archs */
328 UNLOCK (reslock);
329#endif
330
331 if (size)
332 return;
333
166 FD_ZERO(&rfd); 334 FD_ZERO(&rfd);
167 FD_SET(respipe [0], &rfd); 335 FD_SET(respipe [0], &rfd);
168 336
169 select (respipe [0] + 1, &rfd, 0, 0, 0); 337 select (respipe [0] + 1, &rfd, 0, 0, 0);
170 } 338 }
171} 339}
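The rewritten poll_wait rechecks res_queue.size under reslock (the lock is skipped on i386/x86_64, where the comment deems the plain int read safe) and otherwise blocks in select() on the read end of respipe, which worker threads poke with a dummy byte whenever they queue a result. A standalone sketch of that self-pipe wakeup, with illustrative names, not the module's code:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/select.h>

static int notify_pipe[2];

static void *worker (void *arg)
{
    (void)arg;
    sleep (1);                          /* pretend to do some work */
    write (notify_pipe[1], "", 1);      /* wake the waiting thread */
    return 0;
}

int main (void)
{
    pthread_t tid;
    fd_set rfd;
    char buf[32];

    pipe (notify_pipe);
    pthread_create (&tid, 0, worker, 0);

    FD_ZERO (&rfd);
    FD_SET (notify_pipe[0], &rfd);
    select (notify_pipe[0] + 1, &rfd, 0, 0, 0);   /* blocks until the worker writes */

    read (notify_pipe[0], buf, sizeof buf);       /* drain the dummy byte(s) */
    puts ("result ready");

    pthread_join (tid, 0);
    return 0;
}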
172 340
173static void req_invoke (aio_req req) 341static void req_invoke (aio_req req)
174{ 342{
175 dSP; 343 dSP;
176 int errorno = errno;
177 344
178 if (req->cancelled || !SvOK (req->callback)) 345 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
179 return; 346 {
180
181 errno = req->errorno; 347 errno = req->errorno;
182 348
183 ENTER; 349 ENTER;
184 SAVETMPS; 350 SAVETMPS;
185 PUSHMARK (SP); 351 PUSHMARK (SP);
186 EXTEND (SP, 1); 352 EXTEND (SP, 1);
187 353
188 switch (req->type) 354 switch (req->type)
189 {
190 case REQ_READDIR:
191 { 355 {
192 SV *rv = &PL_sv_undef; 356 case REQ_READDIR:
193
194 if (req->result >= 0)
195 { 357 {
196 char *buf = req->data2ptr; 358 SV *rv = &PL_sv_undef;
197 AV *av = newAV ();
198 359
199 while (req->result) 360 if (req->result >= 0)
200 { 361 {
362 int i;
363 char *buf = req->data2ptr;
364 AV *av = newAV ();
365
366 av_extend (av, req->result - 1);
367
368 for (i = 0; i < req->result; ++i)
369 {
201 SV *sv = newSVpv (buf, 0); 370 SV *sv = newSVpv (buf, 0);
202 371
203 av_push (av, sv); 372 av_store (av, i, sv);
204 buf += SvCUR (sv) + 1; 373 buf += SvCUR (sv) + 1;
205 req->result--; 374 }
375
376 rv = sv_2mortal (newRV_noinc ((SV *)av));
206 } 377 }
207 378
208 rv = sv_2mortal (newRV_noinc ((SV *)av)); 379 PUSHs (rv);
209 } 380 }
381 break;
210 382
211 PUSHs (rv); 383 case REQ_OPEN:
384 {
385 /* convert fd to fh */
386 SV *fh;
387
388 PUSHs (sv_2mortal (newSViv (req->result)));
389 PUTBACK;
390 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
391 SPAGAIN;
392
393 fh = SvREFCNT_inc (POPs);
394
395 PUSHMARK (SP);
396 XPUSHs (sv_2mortal (fh));
397 }
398 break;
399
400 case REQ_GROUP:
401 req->fd = 2; /* mark group as finished */
402
403 if (req->data)
404 {
405 int i;
406 AV *av = (AV *)req->data;
407
408 EXTEND (SP, AvFILL (av) + 1);
409 for (i = 0; i <= AvFILL (av); ++i)
410 PUSHs (*av_fetch (av, i, 0));
411 }
412 break;
413
414 case REQ_NOP:
415 case REQ_BUSY:
416 break;
417
418 default:
419 PUSHs (sv_2mortal (newSViv (req->result)));
420 break;
212 } 421 }
213 break;
214 422
215 case REQ_OPEN:
216 {
217 /* convert fd to fh */
218 SV *fh;
219 423
220 PUSHs (sv_2mortal (newSViv (req->result)));
221 PUTBACK; 424 PUTBACK;
222 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
223 SPAGAIN;
224
225 fh = SvREFCNT_inc (POPs);
226
227 PUSHMARK (SP);
228 XPUSHs (sv_2mortal (fh));
229 }
230 break;
231
232 case REQ_GROUP:
233 req->fd = 2; /* mark group as finished */
234
235 if (req->data)
236 {
237 int i;
238 AV *av = (AV *)req->data;
239
240 EXTEND (SP, AvFILL (av) + 1);
241 for (i = 0; i <= AvFILL (av); ++i)
242 PUSHs (*av_fetch (av, i, 0));
243 }
244 break;
245
246 case REQ_SLEEP:
247 break;
248
249 default:
250 PUSHs (sv_2mortal (newSViv (req->result)));
251 break;
252 }
253
254
255 PUTBACK;
256 call_sv (req->callback, G_VOID | G_EVAL); 425 call_sv (req->callback, G_VOID | G_EVAL);
257 SPAGAIN; 426 SPAGAIN;
258 427
259 if (SvTRUE (ERRSV))
260 {
261 req_free (req);
262 croak (0);
263 }
264
265 FREETMPS; 428 FREETMPS;
266 LEAVE; 429 LEAVE;
430 }
267 431
268 errno = errorno;
269}
270
271static void req_free (aio_req req)
272{
273 if (req->grp) 432 if (req->grp)
274 { 433 {
275 aio_req grp = req->grp; 434 aio_req grp = req->grp;
276 435
277 /* unlink request */ 436 /* unlink request */
279 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next; 438 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
280 439
281 if (grp->grp_first == req) 440 if (grp->grp_first == req)
282 grp->grp_first = req->grp_next; 441 grp->grp_first = req->grp_next;
283 442
284 --grp->length;
285
286 /* call feeder, if applicable */
287 aio_grp_feed (grp); 443 aio_grp_dec (grp);
444 }
288 445
289 /* finish, if done */ 446 if (SvTRUE (ERRSV))
290 if (!grp->length && grp->fd) 447 {
291 {
292 req_invoke (grp);
293 req_free (grp); 448 req_free (req);
294 } 449 croak (0);
295 } 450 }
451}
296 452
453static void req_free (aio_req req)
454{
297 if (req->self) 455 if (req->self)
298 { 456 {
299 sv_unmagic (req->self, PERL_MAGIC_ext); 457 sv_unmagic (req->self, PERL_MAGIC_ext);
300 SvREFCNT_dec (req->self); 458 SvREFCNT_dec (req->self);
301 } 459 }
304 SvREFCNT_dec (req->fh); 462 SvREFCNT_dec (req->fh);
305 SvREFCNT_dec (req->fh2); 463 SvREFCNT_dec (req->fh2);
306 SvREFCNT_dec (req->callback); 464 SvREFCNT_dec (req->callback);
307 Safefree (req->statdata); 465 Safefree (req->statdata);
308 466
309 if (req->type == REQ_READDIR && req->result >= 0) 467 if (req->type == REQ_READDIR)
310 free (req->data2ptr); 468 free (req->data2ptr);
311 469
312 Safefree (req); 470 Safefree (req);
313} 471}
314 472
315static void req_cancel (aio_req req) 473static void req_cancel (aio_req req)
316{ 474{
317 req->cancelled = 1; 475 req->flags |= FLAG_CANCELLED;
318 476
319 if (req->type == REQ_GROUP) 477 if (req->type == REQ_GROUP)
320 { 478 {
321 aio_req sub; 479 aio_req sub;
322 480
332 int do_croak = 0; 490 int do_croak = 0;
333 aio_req req; 491 aio_req req;
334 492
335 for (;;) 493 for (;;)
336 { 494 {
337 pthread_mutex_lock (&reslock); 495 LOCK (reslock);
338 req = ress; 496 req = reqq_shift (&res_queue);
339 497
340 if (req) 498 if (req)
341 { 499 {
342 ress = req->next;
343
344 if (!ress) 500 if (!res_queue.size)
345 { 501 {
346 /* read any signals sent by the worker threads */ 502 /* read any signals sent by the worker threads */
347 char buf [32]; 503 char buf [32];
348 while (read (respipe [0], buf, 32) == 32) 504 while (read (respipe [0], buf, 32) == 32)
349 ; 505 ;
350
351 rese = 0;
352 } 506 }
353 } 507 }
354 508
355 pthread_mutex_unlock (&reslock); 509 UNLOCK (reslock);
356 510
357 if (!req) 511 if (!req)
358 break; 512 break;
359 513
360 nreqs--; 514 --nreqs;
361 515
362 if (req->type == REQ_QUIT) 516 if (req->type == REQ_QUIT)
363 started--; 517 started--;
364 else if (req->type == REQ_GROUP && req->length) 518 else if (req->type == REQ_GROUP && req->length)
365 { 519 {
394 548
395static void *aio_proc(void *arg); 549static void *aio_proc(void *arg);
396 550
397static void start_thread (void) 551static void start_thread (void)
398{ 552{
553 worker *wrk = calloc (1, sizeof (worker));
554
555 if (!wrk)
556 croak ("unable to allocate worker thread data");
557
399 sigset_t fullsigset, oldsigset; 558 sigset_t fullsigset, oldsigset;
400 pthread_t tid;
401 pthread_attr_t attr; 559 pthread_attr_t attr;
402 560
403 pthread_attr_init (&attr); 561 pthread_attr_init (&attr);
404 pthread_attr_setstacksize (&attr, STACKSIZE); 562 pthread_attr_setstacksize (&attr, STACKSIZE);
405 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED); 563 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
406 564
407 sigfillset (&fullsigset); 565 sigfillset (&fullsigset);
566
567 LOCK (wrklock);
408 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset); 568 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
409 569
410 if (pthread_create (&tid, &attr, aio_proc, 0) == 0) 570 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
571 {
572 wrk->prev = &wrk_first;
573 wrk->next = wrk_first.next;
574 wrk_first.next->prev = wrk;
575 wrk_first.next = wrk;
411 started++; 576 started++;
577 }
578 else
579 free (wrk);
412 580
413 sigprocmask (SIG_SETMASK, &oldsigset, 0); 581 sigprocmask (SIG_SETMASK, &oldsigset, 0);
582 UNLOCK (wrklock);
414} 583}
415 584
416static void req_send (aio_req req) 585static void req_send (aio_req req)
417{ 586{
418 while (started < wanted && nreqs >= started) 587 while (started < wanted && nreqs >= started)
419 start_thread (); 588 start_thread ();
420 589
421 nreqs++; 590 ++nreqs;
422 591
423 pthread_mutex_lock (&reqlock); 592 LOCK (reqlock);
424 593 reqq_push (&req_queue, req);
425 req->next = 0;
426
427 if (reqe)
428 {
429 reqe->next = req;
430 reqe = req;
431 }
432 else
433 reqe = reqs = req;
434
435 pthread_cond_signal (&reqwait); 594 pthread_cond_signal (&reqwait);
436 pthread_mutex_unlock (&reqlock); 595 UNLOCK (reqlock);
437 596
438 if (nreqs > max_outstanding) 597 if (nreqs > max_outstanding)
439 for (;;) 598 for (;;)
440 { 599 {
441 poll_cb (); 600 poll_cb ();
448} 607}
449 608
450static void end_thread (void) 609static void end_thread (void)
451{ 610{
452 aio_req req; 611 aio_req req;
612
453 Newz (0, req, 1, aio_cb); 613 Newz (0, req, 1, aio_cb);
614
454 req->type = REQ_QUIT; 615 req->type = REQ_QUIT;
616 req->pri = PRI_MAX + PRI_BIAS;
455 617
456 req_send (req); 618 req_send (req);
457} 619}
458 620
459static void min_parallel (int nthreads) 621static void min_parallel (int nthreads)
511static ssize_t pread (int fd, void *buf, size_t count, off_t offset) 673static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
512{ 674{
513 ssize_t res; 675 ssize_t res;
514 off_t ooffset; 676 off_t ooffset;
515 677
516 pthread_mutex_lock (&preadwritelock); 678 LOCK (preadwritelock);
517 ooffset = lseek (fd, 0, SEEK_CUR); 679 ooffset = lseek (fd, 0, SEEK_CUR);
518 lseek (fd, offset, SEEK_SET); 680 lseek (fd, offset, SEEK_SET);
519 res = read (fd, buf, count); 681 res = read (fd, buf, count);
520 lseek (fd, ooffset, SEEK_SET); 682 lseek (fd, ooffset, SEEK_SET);
521 pthread_mutex_unlock (&preadwritelock); 683 UNLOCK (preadwritelock);
522 684
523 return res; 685 return res;
524} 686}
525 687
526static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset) 688static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
527{ 689{
528 ssize_t res; 690 ssize_t res;
529 off_t ooffset; 691 off_t ooffset;
530 692
531 pthread_mutex_lock (&preadwritelock); 693 LOCK (preadwritelock);
532 ooffset = lseek (fd, 0, SEEK_CUR); 694 ooffset = lseek (fd, 0, SEEK_CUR);
533 lseek (fd, offset, SEEK_SET); 695 lseek (fd, offset, SEEK_SET);
534 res = write (fd, buf, count); 696 res = write (fd, buf, count);
535 lseek (fd, offset, SEEK_SET); 697 lseek (fd, offset, SEEK_SET);
536 pthread_mutex_unlock (&preadwritelock); 698 UNLOCK (preadwritelock);
537 699
538 return res; 700 return res;
539} 701}
540#endif 702#endif
541 703
546#if !HAVE_READAHEAD 708#if !HAVE_READAHEAD
547# define readahead aio_readahead 709# define readahead aio_readahead
548 710
549static ssize_t readahead (int fd, off_t offset, size_t count) 711static ssize_t readahead (int fd, off_t offset, size_t count)
550{ 712{
551 char readahead_buf[4096]; 713 dBUF;
552 714
553 while (count > 0) 715 while (count > 0)
554 { 716 {
555 size_t len = count < sizeof (readahead_buf) ? count : sizeof (readahead_buf); 717 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
556 718
557 pread (fd, readahead_buf, len, offset); 719 pread (fd, aio_buf, len, offset);
558 offset += len; 720 offset += len;
559 count -= len; 721 count -= len;
560 } 722 }
561 723
562 errno = 0; 724 errno = 0;
571static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res) 733static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
572{ 734{
573 struct dirent *e; 735 struct dirent *e;
574 int errorno; 736 int errorno;
575 737
576 pthread_mutex_lock (&readdirlock); 738 LOCK (readdirlock);
577 739
578 e = readdir (dirp); 740 e = readdir (dirp);
579 errorno = errno; 741 errorno = errno;
580 742
581 if (e) 743 if (e)
584 strcpy (ent->d_name, e->d_name); 746 strcpy (ent->d_name, e->d_name);
585 } 747 }
586 else 748 else
587 *res = 0; 749 *res = 0;
588 750
589 pthread_mutex_unlock (&readdirlock); 751 UNLOCK (readdirlock);
590 752
591 errno = errorno; 753 errno = errorno;
592 return e ? 0 : -1; 754 return e ? 0 : -1;
593} 755}
594#endif 756#endif
595 757
596/* sendfile always needs emulation */ 758/* sendfile always needs emulation */
597static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count) 759static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
598{ 760{
599 ssize_t res; 761 ssize_t res;
600 762
601 if (!count) 763 if (!count)
602 return 0; 764 return 0;
651#endif 813#endif
652 ) 814 )
653 ) 815 )
654 { 816 {
655 /* emulate sendfile. this is a major pain in the ass */ 817 /* emulate sendfile. this is a major pain in the ass */
656 char buf[4096]; 818 dBUF;
819
657 res = 0; 820 res = 0;
658 821
659 while (count) 822 while (count)
660 { 823 {
661 ssize_t cnt; 824 ssize_t cnt;
662 825
663 cnt = pread (ifd, buf, count > 4096 ? 4096 : count, offset); 826 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
664 827
665 if (cnt <= 0) 828 if (cnt <= 0)
666 { 829 {
667 if (cnt && !res) res = -1; 830 if (cnt && !res) res = -1;
668 break; 831 break;
669 } 832 }
670 833
671 cnt = write (ofd, buf, cnt); 834 cnt = write (ofd, aio_buf, cnt);
672 835
673 if (cnt <= 0) 836 if (cnt <= 0)
674 { 837 {
675 if (cnt && !res) res = -1; 838 if (cnt && !res) res = -1;
676 break; 839 break;
684 847
685 return res; 848 return res;
686} 849}
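When no native sendfile variant applies (or it fails without copying anything), sendfile_ falls back to a pread-and-write copy loop over the worker's scratch buffer. Below is a standalone sketch of that fallback using a private malloc'd buffer instead of dBUF; the function name and buffer size are illustrative.

#define _XOPEN_SOURCE 600   /* for pread */
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

#define COPY_BUFSIZE 65536

static ssize_t copy_range (int ofd, int ifd, off_t offset, size_t count)
{
    char *buf = malloc (COPY_BUFSIZE);
    ssize_t res = 0;

    if (!buf)
        return -1;

    while (count)
    {
        size_t len = count > COPY_BUFSIZE ? COPY_BUFSIZE : count;
        ssize_t cnt = pread (ifd, buf, len, offset);

        if (cnt <= 0)
        {
            if (cnt && !res) res = -1;   /* error before anything was copied */
            break;
        }

        cnt = write (ofd, buf, cnt);     /* a failed write also ends the loop */

        if (cnt <= 0)
        {
            if (cnt && !res) res = -1;
            break;
        }

        offset += cnt;
        count  -= cnt;
        res    += cnt;
    }

    free (buf);
    return res;
}

int main (int argc, char *argv[])
{
    if (argc > 1)
    {
        int fd = open (argv[1], O_RDONLY);

        if (fd >= 0)
            copy_range (1, fd, 0, 4096);  /* copy up to 4 KiB of the file to stdout */
    }

    return 0;
}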
687 850
688/* read a full directory */ 851/* read a full directory */
689static int scandir_ (const char *path, void **namesp) 852static void scandir_ (aio_req req, worker *self)
690{ 853{
691 DIR *dirp = opendir (path); 854 DIR *dirp;
692 union 855 union
693 { 856 {
694 struct dirent d; 857 struct dirent d;
695 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1]; 858 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
696 } u; 859 } *u;
697 struct dirent *entp; 860 struct dirent *entp;
698 char *name, *names; 861 char *name, *names;
699 int memlen = 4096; 862 int memlen = 4096;
700 int memofs = 0; 863 int memofs = 0;
701 int res = 0; 864 int res = 0;
702 int errorno; 865 int errorno;
703 866
704 if (!dirp) 867 LOCK (wrklock);
705 return -1; 868 self->dirp = dirp = opendir (req->dataptr);
869 self->dbuf = u = malloc (sizeof (*u));
870 UNLOCK (wrklock);
706 871
707 names = malloc (memlen); 872 req->data2ptr = names = malloc (memlen);
708 873
874 if (dirp && u && names)
709 for (;;) 875 for (;;)
710 { 876 {
877 errno = 0;
711 errno = 0, readdir_r (dirp, &u.d, &entp); 878 readdir_r (dirp, &u->d, &entp);
712 879
713 if (!entp) 880 if (!entp)
714 break; 881 break;
715 882
716 name = entp->d_name; 883 name = entp->d_name;
717 884
718 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2]))) 885 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
719 { 886 {
720 int len = strlen (name) + 1; 887 int len = strlen (name) + 1;
721 888
722 res++; 889 res++;
723 890
724 while (memofs + len > memlen) 891 while (memofs + len > memlen)
725 { 892 {
726 memlen *= 2; 893 memlen *= 2;
894 LOCK (wrklock);
727 names = realloc (names, memlen); 895 req->data2ptr = names = realloc (names, memlen);
896 UNLOCK (wrklock);
897
728 if (!names) 898 if (!names)
729 break; 899 break;
730 } 900 }
731 901
732 memcpy (names + memofs, name, len); 902 memcpy (names + memofs, name, len);
733 memofs += len; 903 memofs += len;
734 } 904 }
735 } 905 }
736 906
737 errorno = errno;
738 closedir (dirp);
739
740 if (errorno) 907 if (errno)
741 {
742 free (names);
743 errno = errorno;
744 res = -1; 908 res = -1;
745 } 909
746 910 req->result = res;
747 *namesp = (void *)names;
748 return res;
749} 911}
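scandir_ now writes straight into req->data2ptr: the directory entries (minus "." and "..") are packed back to back as NUL-terminated strings, req->result carries the entry count, and req_invoke unpacks them into a Perl array with the same pointer walk. A standalone sketch of producing and consuming such a packed buffer, with made-up entry names:

#include <stdio.h>
#include <string.h>

int main (void)
{
    char names[64];
    int  memofs = 0, count = 0, i;
    const char *entries[] = { "a.txt", "b.txt", "subdir" };

    /* producer side: append each name plus its terminating NUL */
    for (i = 0; i < 3; ++i)
    {
        int len = strlen (entries[i]) + 1;

        memcpy (names + memofs, entries[i], len);
        memofs += len;
        ++count;
    }

    /* consumer side: the same walk req_invoke does for REQ_READDIR */
    {
        char *p = names;

        for (i = 0; i < count; ++i)
        {
            printf ("%s\n", p);
            p += strlen (p) + 1;
        }
    }

    return 0;
}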
750 912
751/*****************************************************************************/ 913/*****************************************************************************/
752 914
753static void *aio_proc (void *thr_arg) 915static void *aio_proc (void *thr_arg)
754{ 916{
755 aio_req req; 917 aio_req req;
756 int type; 918 int type;
919 worker *self = (worker *)thr_arg;
757 920
758 do 921 do
759 { 922 {
760 pthread_mutex_lock (&reqlock); 923 LOCK (reqlock);
761 924
762 for (;;) 925 for (;;)
763 { 926 {
764 req = reqs; 927 self->req = req = reqq_shift (&req_queue);
765
766 if (reqs)
767 {
768 reqs = reqs->next;
769 if (!reqs) reqe = 0;
770 }
771 928
772 if (req) 929 if (req)
773 break; 930 break;
774 931
775 pthread_cond_wait (&reqwait, &reqlock); 932 pthread_cond_wait (&reqwait, &reqlock);
776 } 933 }
777 934
778 pthread_mutex_unlock (&reqlock); 935 UNLOCK (reqlock);
779 936
780 errno = 0; /* strictly unnecessary */ 937 errno = 0; /* strictly unnecessary */
938 type = req->type; /* remember type for QUIT check */
781 939
782 if (!req->cancelled) 940 if (!(req->flags & FLAG_CANCELLED))
783 switch (req->type) 941 switch (type)
784 { 942 {
785 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break; 943 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
786 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break; 944 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
787 945
788 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break; 946 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
789 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length); break; 947 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length, self); break;
790 948
791 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break; 949 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break;
792 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break; 950 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break;
793 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break; 951 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break;
794 952
800 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break; 958 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break;
801 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break; 959 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break;
802 960
803 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break; 961 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
804 case REQ_FSYNC: req->result = fsync (req->fd); break; 962 case REQ_FSYNC: req->result = fsync (req->fd); break;
805 case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break; 963 case REQ_READDIR: scandir_ (req, self); break;
806 964
807 case REQ_SLEEP: 965 case REQ_BUSY:
808 { 966 {
809 struct timeval tv; 967 struct timeval tv;
810 968
811 tv.tv_sec = req->fd; 969 tv.tv_sec = req->fd;
812 tv.tv_usec = req->fd2; 970 tv.tv_usec = req->fd2;
813 971
814 req->result = select (0, 0, 0, 0, &tv); 972 req->result = select (0, 0, 0, 0, &tv);
815 } 973 }
816 974
975 case REQ_GROUP:
976 case REQ_NOP:
817 case REQ_QUIT: 977 case REQ_QUIT:
818 break; 978 break;
819 979
820 default: 980 default:
821 req->result = ENOSYS; 981 req->result = ENOSYS;
822 break; 982 break;
823 } 983 }
824 984
825 req->errorno = errno; 985 req->errorno = errno;
826 986
827 pthread_mutex_lock (&reslock); 987 LOCK (reslock);
828 988
829 req->next = 0; 989 if (!reqq_push (&res_queue, req))
830
831 if (rese)
832 {
833 rese->next = req;
834 rese = req;
835 }
836 else
837 {
838 rese = ress = req;
839
840 /* write a dummy byte to the pipe so fh becomes ready */ 990 /* write a dummy byte to the pipe so fh becomes ready */
841 write (respipe [1], &respipe, 1); 991 write (respipe [1], &respipe, 1);
842 }
843 992
844 pthread_mutex_unlock (&reslock); 993 self->req = 0;
994 worker_clear (self);
995
996 UNLOCK (reslock);
845 } 997 }
846 while (type != REQ_QUIT); 998 while (type != REQ_QUIT);
847 999
1000 LOCK (wrklock);
1001 worker_free (self);
1002 UNLOCK (wrklock);
1003
848 return 0; 1004 return 0;
849} 1005}
850 1006
851/*****************************************************************************/ 1007/*****************************************************************************/
852 1008
853static void atfork_prepare (void) 1009static void atfork_prepare (void)
854{ 1010{
855 pthread_mutex_lock (&reqlock); 1011 LOCK (wrklock);
856 pthread_mutex_lock (&reslock); 1012 LOCK (reqlock);
1013 LOCK (reslock);
857#if !HAVE_PREADWRITE 1014#if !HAVE_PREADWRITE
858 pthread_mutex_lock (&preadwritelock); 1015 LOCK (preadwritelock);
859#endif 1016#endif
860#if !HAVE_READDIR_R 1017#if !HAVE_READDIR_R
861 pthread_mutex_lock (&readdirlock); 1018 LOCK (readdirlock);
862#endif 1019#endif
863} 1020}
864 1021
865static void atfork_parent (void) 1022static void atfork_parent (void)
866{ 1023{
867#if !HAVE_READDIR_R 1024#if !HAVE_READDIR_R
868 pthread_mutex_unlock (&readdirlock); 1025 UNLOCK (readdirlock);
869#endif 1026#endif
870#if !HAVE_PREADWRITE 1027#if !HAVE_PREADWRITE
871 pthread_mutex_unlock (&preadwritelock); 1028 UNLOCK (preadwritelock);
872#endif 1029#endif
873 pthread_mutex_unlock (&reslock); 1030 UNLOCK (reslock);
874 pthread_mutex_unlock (&reqlock); 1031 UNLOCK (reqlock);
1032 UNLOCK (wrklock);
875} 1033}
876 1034
877static void atfork_child (void) 1035static void atfork_child (void)
878{ 1036{
879 aio_req prv; 1037 aio_req prv;
880 1038
1039 while (prv = reqq_shift (&req_queue))
1040 req_free (prv);
1041
1042 while (prv = reqq_shift (&res_queue))
1043 req_free (prv);
1044
1045 while (wrk_first.next != &wrk_first)
1046 {
1047 worker *wrk = wrk_first.next;
1048
1049 if (wrk->req)
1050 req_free (wrk->req);
1051
1052 worker_clear (wrk);
1053 worker_free (wrk);
1054 }
1055
881 started = 0; 1056 started = 0;
882 1057 nreqs = 0;
883 while (reqs)
884 {
885 prv = reqs;
886 reqs = prv->next;
887 req_free (prv);
888 }
889
890 reqs = reqe = 0;
891
892 while (ress)
893 {
894 prv = ress;
895 ress = prv->next;
896 req_free (prv);
897 }
898
899 ress = rese = 0;
900 1058
901 close (respipe [0]); 1059 close (respipe [0]);
902 close (respipe [1]); 1060 close (respipe [1]);
903 create_pipe (); 1061 create_pipe ();
904 1062
905 atfork_parent (); 1063 atfork_parent ();
906} 1064}
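The three handlers follow the standard pthread_atfork discipline: take every module mutex before the fork so no lock is held mid-operation by a thread that will not exist in the child, release them again in the parent, and in the child additionally discard queued requests, worker records and the old notification pipe. A minimal standalone sketch of that discipline follows; the actual registration in AIO.xs happens outside this hunk, and all names below are illustrative.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void prepare (void) { pthread_mutex_lock   (&state_lock); }   /* before fork */
static void parent  (void) { pthread_mutex_unlock (&state_lock); }   /* after fork, in the parent */

static void child (void)
{
    pthread_mutex_unlock (&state_lock);   /* the lock was held across the fork */
    /* here the child would also reset its queues, workers and pipes */
}

int main (void)
{
    pthread_atfork (prepare, parent, child);

    if (fork () == 0)
    {
        puts ("child sees a consistent, unlocked state");
        _exit (0);
    }

    return 0;
}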
907 1065
908#define dREQ \ 1066#define dREQ \
909 aio_req req; \ 1067 aio_req req; \
1068 int req_pri = next_pri; \
1069 next_pri = DEFAULT_PRI + PRI_BIAS; \
910 \ 1070 \
911 if (SvOK (callback) && !SvROK (callback)) \ 1071 if (SvOK (callback) && !SvROK (callback)) \
912 croak ("callback must be undef or of reference type"); \ 1072 croak ("callback must be undef or of reference type"); \
913 \ 1073 \
914 Newz (0, req, 1, aio_cb); \ 1074 Newz (0, req, 1, aio_cb); \
915 if (!req) \ 1075 if (!req) \
916 croak ("out of memory during aio_req allocation"); \ 1076 croak ("out of memory during aio_req allocation"); \
917 \ 1077 \
918 req->callback = newSVsv (callback) 1078 req->callback = newSVsv (callback); \
1079 req->pri = req_pri
919 1080
920#define REQ_SEND \ 1081#define REQ_SEND \
921 req_send (req); \ 1082 req_send (req); \
922 \ 1083 \
923 if (GIMME_V != G_VOID) \ 1084 if (GIMME_V != G_VOID) \
1177 1338
1178 REQ_SEND; 1339 REQ_SEND;
1179} 1340}
1180 1341
1181void 1342void
1182aio_sleep (delay,callback=&PL_sv_undef) 1343aio_busy (delay,callback=&PL_sv_undef)
1183 double delay 1344 double delay
1184 SV * callback 1345 SV * callback
1185 PPCODE: 1346 PPCODE:
1186{ 1347{
1187 dREQ; 1348 dREQ;
1188 1349
1189 req->type = REQ_SLEEP; 1350 req->type = REQ_BUSY;
1190 req->fd = delay < 0. ? 0 : delay; 1351 req->fd = delay < 0. ? 0 : delay;
1191 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd); 1352 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd);
1192 1353
1193 REQ_SEND; 1354 REQ_SEND;
1194} 1355}
1198 SV * callback 1359 SV * callback
1199 PROTOTYPE: ;$ 1360 PROTOTYPE: ;$
1200 PPCODE: 1361 PPCODE:
1201{ 1362{
1202 dREQ; 1363 dREQ;
1364
1203 req->type = REQ_GROUP; 1365 req->type = REQ_GROUP;
1204 req_send (req); 1366 req_send (req);
1367
1205 XPUSHs (req_sv (req, AIO_GRP_KLASS)); 1368 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1206} 1369}
1370
1371void
1372aio_nop (callback=&PL_sv_undef)
1373 SV * callback
1374 PPCODE:
1375{
1376 dREQ;
1377
1378 req->type = REQ_NOP;
1379
1380 REQ_SEND;
1381}
1382
1383void
1384aioreq_pri (int pri = DEFAULT_PRI)
1385 CODE:
1386 if (pri < PRI_MIN) pri = PRI_MIN;
1387 if (pri > PRI_MAX) pri = PRI_MAX;
1388 next_pri = pri + PRI_BIAS;
1389
1390void
1391aioreq_nice (int nice = 0)
1392 CODE:
1393 nice = next_pri - nice;
1394 if (nice < PRI_MIN) nice = PRI_MIN;
1395 if (nice > PRI_MAX) nice = PRI_MAX;
1396 next_pri = nice + PRI_BIAS;
1207 1397
1208void 1398void
1209flush () 1399flush ()
1210 PROTOTYPE: 1400 PROTOTYPE:
1211 CODE: 1401 CODE:
1260 1450
1261MODULE = IO::AIO PACKAGE = IO::AIO::REQ 1451MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1262 1452
1263void 1453void
1264cancel (aio_req_ornot req) 1454cancel (aio_req_ornot req)
1265 PROTOTYPE:
1266 CODE: 1455 CODE:
1267 req_cancel (req); 1456 req_cancel (req);
1268 1457
1458void
1459cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1460 CODE:
1461 SvREFCNT_dec (req->callback);
1462 req->callback = newSVsv (callback);
1463
1269MODULE = IO::AIO PACKAGE = IO::AIO::GRP 1464MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1270 1465
1271void 1466void
1272add (aio_req grp, ...) 1467add (aio_req grp, ...)
1273 PPCODE: 1468 PPCODE:
1274{ 1469{
1275 int i; 1470 int i;
1471 aio_req req;
1276 1472
1277 if (grp->fd == 2) 1473 if (grp->fd == 2)
1278 croak ("cannot add requests to IO::AIO::GRP after the group finished"); 1474 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1279 1475
1280 for (i = 1; i < items; ++i ) 1476 for (i = 1; i < items; ++i )
1281 { 1477 {
1282 if (GIMME_V != G_VOID) 1478 if (GIMME_V != G_VOID)
1283 XPUSHs (sv_2mortal (newSVsv (ST (i)))); 1479 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1284 1480
1285 aio_req req = SvAIO_REQ (ST (i)); 1481 req = SvAIO_REQ (ST (i));
1286 1482
1287 if (req) 1483 if (req)
1288 { 1484 {
1289 ++grp->length; 1485 ++grp->length;
1290 req->grp = grp; 1486 req->grp = grp;
1300 } 1496 }
1301} 1497}
1302 1498
1303void 1499void
1304result (aio_req grp, ...) 1500result (aio_req grp, ...)
1305 CODE: 1501 CODE:
1306{ 1502{
1307 int i; 1503 int i;
1308 AV *av = newAV (); 1504 AV *av = newAV ();
1309 1505
1310 for (i = 1; i < items; ++i ) 1506 for (i = 1; i < items; ++i )
1311 av_push (av, newSVsv (ST (i))); 1507 av_push (av, newSVsv (ST (i)));
1312 1508
1313 SvREFCNT_dec (grp->data); 1509 SvREFCNT_dec (grp->data);
1314 grp->data = (SV *)av; 1510 grp->data = (SV *)av;
1315} 1511}
1316 1512
1317void 1513void
1318feeder_limit (aio_req grp, int limit) 1514limit (aio_req grp, int limit)
1319 CODE: 1515 CODE:
1320 grp->fd2 = limit; 1516 grp->fd2 = limit;
1321 aio_grp_feed (grp); 1517 aio_grp_feed (grp);
1322 1518
1323void 1519void
1324set_feeder (aio_req grp, SV *callback=&PL_sv_undef) 1520feed (aio_req grp, SV *callback=&PL_sv_undef)
1325 CODE: 1521 CODE:
1326{ 1522{
1327 SvREFCNT_dec (grp->fh2); 1523 SvREFCNT_dec (grp->fh2);
1328 grp->fh2 = newSVsv (callback); 1524 grp->fh2 = newSVsv (callback);
1329 1525
