/cvs/IO-AIO/AIO.xs

Comparing IO-AIO/AIO.xs (file contents):
Revision 1.48 by root, Sun Oct 22 10:33:19 2006 UTC vs.
Revision 1.74 by root, Wed Oct 25 17:57:30 2006 UTC

1/* solaris */
2#define _POSIX_PTHREAD_SEMANTICS 1
3
4#if __linux
5# define _GNU_SOURCE
6#endif
7
1#define _REENTRANT 1 8#define _REENTRANT 1
9
2#include <errno.h> 10#include <errno.h>
3 11
4#include "EXTERN.h" 12#include "EXTERN.h"
5#include "perl.h" 13#include "perl.h"
6#include "XSUB.h" 14#include "XSUB.h"
41# define NAME_MAX 4096 49# define NAME_MAX 4096
42#endif 50#endif
43 51
44#if __ia64 52#if __ia64
45# define STACKSIZE 65536 53# define STACKSIZE 65536
54#elif __i386 || __x86_64 /* 16k is unreasonably high :( */
55# define STACKSIZE PTHREAD_STACK_MIN
46#else 56#else
47# define STACKSIZE 8192 57# define STACKSIZE 16384
48#endif 58#endif
59
60/* buffer size for various temporary buffers */
61#define AIO_BUFSIZE 65536
62
63#define dBUF \
64 char *aio_buf; \
65 LOCK (wrklock); \
66 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
67 UNLOCK (wrklock); \
68 if (!aio_buf) \
69 return -1;
49 70
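The dBUF macro above hands each worker a temporary buffer: it allocates AIO_BUFSIZE bytes, records the pointer in self->dbuf under wrklock so another thread can find and free it during cleanup, and aborts the request with -1 if the allocation fails. A minimal standalone sketch of that pattern, not taken from AIO.xs; scratch_worker, scratch_lock and scratch_get are illustrative names:

#include <pthread.h>
#include <stdlib.h>

#define SCRATCH_SIZE 65536                     /* mirrors AIO_BUFSIZE */

typedef struct scratch_worker {
  void *dbuf;                                  /* scratch buffer owned by this worker */
} scratch_worker;

static pthread_mutex_t scratch_lock = PTHREAD_MUTEX_INITIALIZER;

/* allocate the worker's scratch buffer; publish it under the lock so a
 * cleanup path running in another thread can free it; returns 0 on ENOMEM */
static void *scratch_get (scratch_worker *self)
{
  void *buf;

  pthread_mutex_lock (&scratch_lock);
  buf = self->dbuf = malloc (SCRATCH_SIZE);
  pthread_mutex_unlock (&scratch_lock);

  return buf;
}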
50enum { 71enum {
51 REQ_QUIT, 72 REQ_QUIT,
52 REQ_OPEN, REQ_CLOSE, 73 REQ_OPEN, REQ_CLOSE,
53 REQ_READ, REQ_WRITE, REQ_READAHEAD, 74 REQ_READ, REQ_WRITE, REQ_READAHEAD,
55 REQ_STAT, REQ_LSTAT, REQ_FSTAT, 76 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
56 REQ_FSYNC, REQ_FDATASYNC, 77 REQ_FSYNC, REQ_FDATASYNC,
57 REQ_UNLINK, REQ_RMDIR, REQ_RENAME, 78 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
58 REQ_READDIR, 79 REQ_READDIR,
59 REQ_LINK, REQ_SYMLINK, 80 REQ_LINK, REQ_SYMLINK,
60 REQ_SLEEP, 81 REQ_GROUP, REQ_NOP,
61 REQ_GROUP, 82 REQ_BUSY,
62}; 83};
63 84
64#define AIO_REQ_KLASS "IO::AIO::REQ" 85#define AIO_REQ_KLASS "IO::AIO::REQ"
65#define AIO_GRP_KLASS "IO::AIO::GRP" 86#define AIO_GRP_KLASS "IO::AIO::GRP"
66 87
67typedef struct aio_cb 88typedef struct aio_cb
68{ 89{
69 struct aio_cb *grp, *grp_prev, *grp_next;
70
71 struct aio_cb *volatile next; 90 struct aio_cb *volatile next;
72
73 SV *self; /* the perl counterpart of this request, if any */
74 91
75 SV *data, *callback; 92 SV *data, *callback;
76 SV *fh, *fh2; 93 SV *fh, *fh2;
77 void *dataptr, *data2ptr; 94 void *dataptr, *data2ptr;
78 Stat_t *statdata; 95 Stat_t *statdata;
79 off_t offset; 96 off_t offset;
80 size_t length; 97 size_t length;
81 ssize_t result; 98 ssize_t result;
82 99
100 STRLEN dataoffset;
83 int type; 101 int type;
84 int fd, fd2; 102 int fd, fd2;
85 int errorno; 103 int errorno;
86 STRLEN dataoffset;
87 mode_t mode; /* open */ 104 mode_t mode; /* open */
105
88 unsigned char cancelled; 106 unsigned char flags;
107 unsigned char pri;
108
109 SV *self; /* the perl counterpart of this request, if any */
110 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
89} aio_cb; 111} aio_cb;
112
113enum {
114 FLAG_CANCELLED = 0x01,
115};
90 116
91typedef aio_cb *aio_req; 117typedef aio_cb *aio_req;
92typedef aio_cb *aio_req_ornot; 118typedef aio_cb *aio_req_ornot;
93 119
120enum {
121 PRI_MIN = -4,
122 PRI_MAX = 4,
123
124 DEFAULT_PRI = 0,
125 PRI_BIAS = -PRI_MIN,
126 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
127};
128
129static int next_pri = DEFAULT_PRI + PRI_BIAS;
130
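The PRI_* constants map a user-visible priority in the range PRI_MIN..PRI_MAX onto an array slot 0..NUM_PRI-1 by adding PRI_BIAS, which is how aioreq_pri and aioreq_nice later choose a queue. A small compilable sketch of that arithmetic; pri_to_slot is an illustrative helper, not a function in AIO.xs:

#include <stdio.h>

enum {
  PRI_MIN  = -4,
  PRI_MAX  = 4,
  PRI_BIAS = -PRI_MIN,                 /* 4 */
  NUM_PRI  = PRI_MAX + PRI_BIAS + 1    /* 9 priority queues */
};

/* clamp a user priority and convert it into a queue slot index */
static int pri_to_slot (int pri)
{
  if (pri < PRI_MIN) pri = PRI_MIN;
  if (pri > PRI_MAX) pri = PRI_MAX;

  return pri + PRI_BIAS;               /* 0 .. NUM_PRI - 1 */
}

int main (void)
{
  /* prints "0 4 8": lowest, default and highest priority slots */
  printf ("%d %d %d\n", pri_to_slot (-4), pri_to_slot (0), pri_to_slot (4));
  return 0;
}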
94static int started, wanted; 131static int started, wanted;
95static volatile int nreqs; 132static volatile int nreqs;
96static int max_outstanding = 1<<30;
97static int respipe [2]; 133static int respipe [2];
98 134
135#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
136# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
137#else
138# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
139#endif
140
141#define LOCK(mutex) pthread_mutex_lock (&(mutex))
142#define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
143
144/* worker thread management */
145static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
146
147typedef struct worker {
148 /* locked by wrklock */
149 struct worker *prev, *next;
150
151 pthread_t tid;
152
153 /* locked by reslock, reqlock or wrklock */
154 aio_req req; /* currently processed request */
155 void *dbuf;
156 DIR *dirp;
157} worker;
158
159static worker wrk_first = { &wrk_first, &wrk_first, 0 };
160
161static void worker_clear (worker *wrk)
162{
163 if (wrk->dirp)
164 {
165 closedir (wrk->dirp);
166 wrk->dirp = 0;
167 }
168
169 if (wrk->dbuf)
170 {
171 free (wrk->dbuf);
172 wrk->dbuf = 0;
173 }
174}
175
176static void worker_free (worker *wrk)
177{
178 wrk->next->prev = wrk->prev;
179 wrk->prev->next = wrk->next;
180
181 free (wrk);
182}
183
99static pthread_mutex_t reslock = PTHREAD_MUTEX_INITIALIZER; 184static pthread_mutex_t reslock = AIO_MUTEX_INIT;
100static pthread_mutex_t reqlock = PTHREAD_MUTEX_INITIALIZER; 185static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
101static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER; 186static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
102 187
103static volatile aio_req reqs, reqe; /* queue start, queue end */ 188/*
104static volatile aio_req ress, rese; /* queue start, queue end */ 189 * a somewhat faster data structure might be nice, but
190 * with 8 priorities this actually needs <20 insns
191 * per shift, the most expensive operation.
192 */
193typedef struct {
194 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
195 int size;
196} reqq;
105 197
198static reqq req_queue;
199static reqq res_queue;
200
201int reqq_push (reqq *q, aio_req req)
202{
203 int pri = req->pri;
204 req->next = 0;
205
206 if (q->qe[pri])
207 {
208 q->qe[pri]->next = req;
209 q->qe[pri] = req;
210 }
211 else
212 q->qe[pri] = q->qs[pri] = req;
213
214 return q->size++;
215}
216
217aio_req reqq_shift (reqq *q)
218{
219 int pri;
220
221 if (!q->size)
222 return 0;
223
224 --q->size;
225
226 for (pri = NUM_PRI; pri--; )
227 {
228 aio_req req = q->qs[pri];
229
230 if (req)
231 {
232 if (!(q->qs[pri] = req->next))
233 q->qe[pri] = 0;
234
235 return req;
236 }
237 }
238
239 abort ();
240}
241
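The reqq code above keeps one FIFO per priority: reqq_push appends a request at the tail of the queue for its priority, and reqq_shift scans from the highest priority downwards and pops the oldest entry it finds. The following standalone sketch reproduces that discipline with a plain node type so it can be compiled and run on its own; node, id and the main driver are illustrative only:

#include <stdio.h>
#include <stdlib.h>

enum { NUM_PRI = 9 };

typedef struct node {
  struct node *next;
  int pri;                             /* 0 (lowest) .. NUM_PRI - 1 (highest) */
  int id;
} node;

typedef struct {
  node *qs[NUM_PRI], *qe[NUM_PRI];     /* per-priority FIFO: start, end */
  int size;
} reqq;

/* append at the tail of the FIFO for the node's priority */
static int reqq_push (reqq *q, node *n)
{
  int pri = n->pri;
  n->next = 0;

  if (q->qe[pri])
    {
      q->qe[pri]->next = n;
      q->qe[pri] = n;
    }
  else
    q->qe[pri] = q->qs[pri] = n;

  return q->size++;
}

/* pop the oldest node of the highest non-empty priority */
static node *reqq_shift (reqq *q)
{
  int pri;

  if (!q->size)
    return 0;

  --q->size;

  for (pri = NUM_PRI; pri--; )
    if (q->qs[pri])
      {
        node *n = q->qs[pri];

        if (!(q->qs[pri] = n->next))
          q->qe[pri] = 0;

        return n;
      }

  abort ();
}

int main (void)
{
  static reqq q;                       /* zero-initialised queues */
  node a = { 0, 2, 1 }, b = { 0, 7, 2 }, c = { 0, 2, 3 };

  reqq_push (&q, &a);
  reqq_push (&q, &b);
  reqq_push (&q, &c);

  /* prints "2 1 3": priority 7 first, then FIFO order within priority 2 */
  while (q.size)
    printf ("%d ", reqq_shift (&q)->id);

  putchar ('\n');
  return 0;
}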
242static int poll_cb ();
243static void req_invoke (aio_req req);
106static void req_free (aio_req req); 244static void req_free (aio_req req);
245static void req_cancel (aio_req req);
107 246
108/* must be called at most once */ 247/* must be called at most once */
109static SV *req_sv (aio_req req, const char *klass) 248static SV *req_sv (aio_req req, const char *klass)
110{ 249{
250 if (!req->self)
251 {
111 req->self = (SV *)newHV (); 252 req->self = (SV *)newHV ();
112 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0); 253 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0);
254 }
113 255
114 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1))); 256 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
115} 257}
116 258
117static aio_req SvAIO_REQ (SV *sv) 259static aio_req SvAIO_REQ (SV *sv)
118{ 260{
261 MAGIC *mg;
262
119 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv)) 263 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
120 croak ("object of class " AIO_REQ_KLASS " expected"); 264 croak ("object of class " AIO_REQ_KLASS " expected");
121 265
122 MAGIC *mg = mg_find (SvRV (sv), PERL_MAGIC_ext); 266 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
123 267
124 return mg ? (aio_req)mg->mg_ptr : 0; 268 return mg ? (aio_req)mg->mg_ptr : 0;
125} 269}
126 270
271static void aio_grp_feed (aio_req grp)
272{
273 while (grp->length < grp->fd2 && !(grp->flags & FLAG_CANCELLED))
274 {
275 int old_len = grp->length;
276
277 if (grp->fh2 && SvOK (grp->fh2))
278 {
279 dSP;
280
281 ENTER;
282 SAVETMPS;
283 PUSHMARK (SP);
284 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
285 PUTBACK;
286 call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
287 SPAGAIN;
288 FREETMPS;
289 LEAVE;
290 }
291
292 /* stop if no progress has been made */
293 if (old_len == grp->length)
294 {
295 SvREFCNT_dec (grp->fh2);
296 grp->fh2 = 0;
297 break;
298 }
299 }
300}
301
302static void aio_grp_dec (aio_req grp)
303{
304 --grp->length;
305
306 /* call feeder, if applicable */
307 aio_grp_feed (grp);
308
309 /* finish, if done */
310 if (!grp->length && grp->fd)
311 {
312 req_invoke (grp);
313 req_free (grp);
314 }
315}
316
127static void poll_wait () 317static void poll_wait ()
128{ 318{
129 if (nreqs && !ress)
130 {
131 fd_set rfd; 319 fd_set rfd;
320
321 while (nreqs)
322 {
323 int size;
324#if !(__i386 || __x86_64) /* safe without semaphore on these archs */
325 LOCK (reslock);
326#endif
327 size = res_queue.size;
328#if !(__i386 || __x86_64) /* safe without semaphore on these archs */
329 UNLOCK (reslock);
330#endif
331
332 if (size)
333 return;
334
132 FD_ZERO(&rfd); 335 FD_ZERO(&rfd);
133 FD_SET(respipe [0], &rfd); 336 FD_SET(respipe [0], &rfd);
134 337
135 select (respipe [0] + 1, &rfd, 0, 0, 0); 338 select (respipe [0] + 1, &rfd, 0, 0, 0);
136 } 339 }
137} 340}
138 341
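poll_wait blocks in select on respipe [0] until a result is available; a worker writes a dummy byte into respipe [1] when it pushes onto an empty result queue, and poll_cb later drains the pipe. A self-contained sketch of that self-pipe wakeup with a single worker thread; wakeup_pipe and worker_main are illustrative names, not part of AIO.xs:

#include <pthread.h>
#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

static int wakeup_pipe [2];            /* [0] = read end, [1] = write end */

/* worker side: finish some work, then poke the pipe to wake the main thread */
static void *worker_main (void *arg)
{
  (void)arg;
  sleep (1);                           /* stand-in for completing a request */
  write (wakeup_pipe [1], "", 1);      /* dummy byte, the value is irrelevant */
  return 0;
}

int main (void)
{
  pthread_t tid;
  fd_set rfd;
  char buf [32];

  if (pipe (wakeup_pipe))
    return 1;

  pthread_create (&tid, 0, worker_main, 0);

  /* block until the worker signals via the pipe */
  FD_ZERO (&rfd);
  FD_SET (wakeup_pipe [0], &rfd);
  select (wakeup_pipe [0] + 1, &rfd, 0, 0, 0);

  /* drain the pipe so the descriptor does not stay readable */
  while (read (wakeup_pipe [0], buf, sizeof (buf)) == sizeof (buf))
    ;

  puts ("worker signalled completion");
  pthread_join (tid, 0);
  return 0;
}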
139static void req_invoke (aio_req req) 342static void req_invoke (aio_req req)
140{ 343{
141 dSP; 344 dSP;
142 int errorno = errno;
143 345
144 if (req->cancelled || !SvOK (req->callback)) 346 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
145 return; 347 {
146
147 errno = req->errorno; 348 errno = req->errorno;
148 349
149 ENTER; 350 ENTER;
351 SAVETMPS;
150 PUSHMARK (SP); 352 PUSHMARK (SP);
151 EXTEND (SP, 1); 353 EXTEND (SP, 1);
152 354
153 switch (req->type) 355 switch (req->type)
154 {
155 case REQ_READDIR:
156 { 356 {
157 SV *rv = &PL_sv_undef; 357 case REQ_READDIR:
158
159 if (req->result >= 0)
160 { 358 {
161 char *buf = req->data2ptr; 359 SV *rv = &PL_sv_undef;
162 AV *av = newAV ();
163 360
164 while (req->result) 361 if (req->result >= 0)
165 { 362 {
363 int i;
364 char *buf = req->data2ptr;
365 AV *av = newAV ();
366
367 av_extend (av, req->result - 1);
368
369 for (i = 0; i < req->result; ++i)
370 {
166 SV *sv = newSVpv (buf, 0); 371 SV *sv = newSVpv (buf, 0);
167 372
168 av_push (av, sv); 373 av_store (av, i, sv);
169 buf += SvCUR (sv) + 1; 374 buf += SvCUR (sv) + 1;
170 req->result--; 375 }
376
377 rv = sv_2mortal (newRV_noinc ((SV *)av));
171 } 378 }
172 379
173 rv = sv_2mortal (newRV_noinc ((SV *)av)); 380 PUSHs (rv);
174 } 381 }
382 break;
175 383
176 PUSHs (rv); 384 case REQ_OPEN:
385 {
386 /* convert fd to fh */
387 SV *fh;
388
389 PUSHs (sv_2mortal (newSViv (req->result)));
390 PUTBACK;
391 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
392 SPAGAIN;
393
394 fh = SvREFCNT_inc (POPs);
395
396 PUSHMARK (SP);
397 XPUSHs (sv_2mortal (fh));
398 }
399 break;
400
401 case REQ_GROUP:
402 req->fd = 2; /* mark group as finished */
403
404 if (req->data)
405 {
406 int i;
407 AV *av = (AV *)req->data;
408
409 EXTEND (SP, AvFILL (av) + 1);
410 for (i = 0; i <= AvFILL (av); ++i)
411 PUSHs (*av_fetch (av, i, 0));
412 }
413 break;
414
415 case REQ_NOP:
416 case REQ_BUSY:
417 break;
418
419 default:
420 PUSHs (sv_2mortal (newSViv (req->result)));
421 break;
177 } 422 }
178 break;
179 423
180 case REQ_OPEN:
181 {
182 /* convert fd to fh */
183 SV *fh;
184 424
185 PUSHs (sv_2mortal (newSViv (req->result)));
186 PUTBACK; 425 PUTBACK;
187 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
188 SPAGAIN;
189
190 fh = SvREFCNT_inc (POPs);
191
192 PUSHMARK (SP);
193 XPUSHs (sv_2mortal (fh));
194 }
195 break;
196
197 case REQ_GROUP:
198 if (req->data)
199 {
200 int i;
201 AV *av = (AV *)req->data;
202
203 EXTEND (SP, AvFILL (av) + 1);
204 for (i = 0; i <= AvFILL (av); ++i)
205 PUSHs (*av_fetch (av, i, 0));
206 }
207 break;
208
209 case REQ_SLEEP:
210 break;
211
212 default:
213 PUSHs (sv_2mortal (newSViv (req->result)));
214 break;
215 }
216
217
218 PUTBACK;
219 call_sv (req->callback, G_VOID | G_EVAL); 426 call_sv (req->callback, G_VOID | G_EVAL);
220 SPAGAIN; 427 SPAGAIN;
428
429 FREETMPS;
430 LEAVE;
431 }
432
433 if (req->grp)
434 {
435 aio_req grp = req->grp;
436
437 /* unlink request */
438 if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
439 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
440
441 if (grp->grp_first == req)
442 grp->grp_first = req->grp_next;
443
444 aio_grp_dec (grp);
445 }
221 446
222 if (SvTRUE (ERRSV)) 447 if (SvTRUE (ERRSV))
223 { 448 {
224 req_free (req); 449 req_free (req);
225 croak (0); 450 croak (0);
226 } 451 }
227
228 LEAVE;
229
230 errno = errorno;
231} 452}
232 453
233static void req_free (aio_req req) 454static void req_free (aio_req req)
234{ 455{
235 if (req->grp)
236 {
237 aio_req grp = req->grp;
238
239 /* unlink request */
240 req->grp_next->grp_prev = req->grp_prev;
241 req->grp_prev->grp_next = req->grp_next;
242
243 if (grp->grp_next == grp && grp->fd)
244 {
245 req_invoke (grp);
246 req_free (grp);
247 }
248 }
249
250 if (req->self) 456 if (req->self)
251 { 457 {
252 sv_unmagic (req->self, PERL_MAGIC_ext); 458 sv_unmagic (req->self, PERL_MAGIC_ext);
253 SvREFCNT_dec (req->self); 459 SvREFCNT_dec (req->self);
254 } 460 }
255 461
256 if (req->data)
257 SvREFCNT_dec (req->data); 462 SvREFCNT_dec (req->data);
258
259 if (req->fh)
260 SvREFCNT_dec (req->fh); 463 SvREFCNT_dec (req->fh);
261
262 if (req->fh2)
263 SvREFCNT_dec (req->fh2); 464 SvREFCNT_dec (req->fh2);
264
265 if (req->statdata)
266 Safefree (req->statdata);
267
268 if (req->callback)
269 SvREFCNT_dec (req->callback); 465 SvREFCNT_dec (req->callback);
466 Safefree (req->statdata);
270 467
271 if (req->type == REQ_READDIR && req->result >= 0) 468 if (req->type == REQ_READDIR)
272 free (req->data2ptr); 469 free (req->data2ptr);
273 470
274 Safefree (req); 471 Safefree (req);
275} 472}
276 473
474static void req_cancel_subs (aio_req grp)
475{
476 aio_req sub;
477
478 if (grp->type != REQ_GROUP)
479 return;
480
481 SvREFCNT_dec (grp->fh2);
482 grp->fh2 = 0;
483
484 for (sub = grp->grp_first; sub; sub = sub->grp_next)
485 req_cancel (sub);
486}
487
277static void req_cancel (aio_req req) 488static void req_cancel (aio_req req)
278{ 489{
279 req->cancelled = 1; 490 req->flags |= FLAG_CANCELLED;
280 491
281 if (req->type == REQ_GROUP) 492 req_cancel_subs (req);
282 {
283 aio_req sub;
284
285 for (sub = req->grp_next; sub != req; sub = sub->grp_next)
286 req_cancel (sub);
287 }
288} 493}
289 494
290static int poll_cb () 495static int poll_cb ()
291{ 496{
292 dSP; 497 dSP;
294 int do_croak = 0; 499 int do_croak = 0;
295 aio_req req; 500 aio_req req;
296 501
297 for (;;) 502 for (;;)
298 { 503 {
299 pthread_mutex_lock (&reslock); 504 LOCK (reslock);
300 req = ress; 505 req = reqq_shift (&res_queue);
301 506
302 if (req) 507 if (req)
303 { 508 {
304 ress = req->next;
305
306 if (!ress) 509 if (!res_queue.size)
307 { 510 {
308 /* read any signals sent by the worker threads */ 511 /* read any signals sent by the worker threads */
309 char buf [32]; 512 char buf [32];
310 while (read (respipe [0], buf, 32) == 32) 513 while (read (respipe [0], buf, 32) == 32)
311 ; 514 ;
312
313 rese = 0;
314 } 515 }
315 } 516 }
316 517
317 pthread_mutex_unlock (&reslock); 518 UNLOCK (reslock);
318 519
319 if (!req) 520 if (!req)
320 break; 521 break;
321 522
322 nreqs--; 523 --nreqs;
323 524
324 if (req->type == REQ_QUIT) 525 if (req->type == REQ_QUIT)
325 started--; 526 started--;
326 else if (req->type == REQ_GROUP && req->grp_next != req) 527 else if (req->type == REQ_GROUP && req->length)
327 { 528 {
328 req->fd = 1; /* mark request as delayed */ 529 req->fd = 1; /* mark request as delayed */
329 continue; 530 continue;
330 } 531 }
331 else 532 else
357static void *aio_proc(void *arg); 558static void *aio_proc(void *arg);
358 559
359static void start_thread (void) 560static void start_thread (void)
360{ 561{
361 sigset_t fullsigset, oldsigset; 562 sigset_t fullsigset, oldsigset;
362 pthread_t tid;
363 pthread_attr_t attr; 563 pthread_attr_t attr;
564
565 worker *wrk = calloc (1, sizeof (worker));
566
567 if (!wrk)
568 croak ("unable to allocate worker thread data");
364 569
365 pthread_attr_init (&attr); 570 pthread_attr_init (&attr);
366 pthread_attr_setstacksize (&attr, STACKSIZE); 571 pthread_attr_setstacksize (&attr, STACKSIZE);
367 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED); 572 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
368 573
369 sigfillset (&fullsigset); 574 sigfillset (&fullsigset);
575
576 LOCK (wrklock);
370 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset); 577 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
371 578
372 if (pthread_create (&tid, &attr, aio_proc, 0) == 0) 579 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
580 {
581 wrk->prev = &wrk_first;
582 wrk->next = wrk_first.next;
583 wrk_first.next->prev = wrk;
584 wrk_first.next = wrk;
373 started++; 585 started++;
586 }
587 else
588 free (wrk);
374 589
375 sigprocmask (SIG_SETMASK, &oldsigset, 0); 590 sigprocmask (SIG_SETMASK, &oldsigset, 0);
591 UNLOCK (wrklock);
376} 592}
377 593
378static void req_send (aio_req req) 594static void req_send (aio_req req)
379{ 595{
380 while (started < wanted && nreqs >= started) 596 while (started < wanted && nreqs >= started)
381 start_thread (); 597 start_thread ();
382 598
383 nreqs++; 599 ++nreqs;
384 600
385 pthread_mutex_lock (&reqlock); 601 LOCK (reqlock);
386 602 reqq_push (&req_queue, req);
387 req->next = 0;
388
389 if (reqe)
390 {
391 reqe->next = req;
392 reqe = req;
393 }
394 else
395 reqe = reqs = req;
396
397 pthread_cond_signal (&reqwait); 603 pthread_cond_signal (&reqwait);
398 pthread_mutex_unlock (&reqlock); 604 UNLOCK (reqlock);
399
400 if (nreqs > max_outstanding)
401 for (;;)
402 {
403 poll_cb ();
404
405 if (nreqs <= max_outstanding)
406 break;
407
408 poll_wait ();
409 }
410} 605}
411 606
412static void end_thread (void) 607static void end_thread (void)
413{ 608{
414 aio_req req; 609 aio_req req;
610
415 Newz (0, req, 1, aio_cb); 611 Newz (0, req, 1, aio_cb);
612
416 req->type = REQ_QUIT; 613 req->type = REQ_QUIT;
614 req->pri = PRI_MAX + PRI_BIAS;
417 615
418 req_send (req); 616 req_send (req);
419} 617}
420 618
421static void min_parallel (int nthreads) 619static void min_parallel (int nthreads)
473static ssize_t pread (int fd, void *buf, size_t count, off_t offset) 671static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
474{ 672{
475 ssize_t res; 673 ssize_t res;
476 off_t ooffset; 674 off_t ooffset;
477 675
478 pthread_mutex_lock (&preadwritelock); 676 LOCK (preadwritelock);
479 ooffset = lseek (fd, 0, SEEK_CUR); 677 ooffset = lseek (fd, 0, SEEK_CUR);
480 lseek (fd, offset, SEEK_SET); 678 lseek (fd, offset, SEEK_SET);
481 res = read (fd, buf, count); 679 res = read (fd, buf, count);
482 lseek (fd, ooffset, SEEK_SET); 680 lseek (fd, ooffset, SEEK_SET);
483 pthread_mutex_unlock (&preadwritelock); 681 UNLOCK (preadwritelock);
484 682
485 return res; 683 return res;
486} 684}
487 685
488static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset) 686static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
489{ 687{
490 ssize_t res; 688 ssize_t res;
491 off_t ooffset; 689 off_t ooffset;
492 690
493 pthread_mutex_lock (&preadwritelock); 691 LOCK (preadwritelock);
494 ooffset = lseek (fd, 0, SEEK_CUR); 692 ooffset = lseek (fd, 0, SEEK_CUR);
495 lseek (fd, offset, SEEK_SET); 693 lseek (fd, offset, SEEK_SET);
496 res = write (fd, buf, count); 694 res = write (fd, buf, count);
497 lseek (fd, offset, SEEK_SET); 695 lseek (fd, offset, SEEK_SET);
498 pthread_mutex_unlock (&preadwritelock); 696 UNLOCK (preadwritelock);
499 697
500 return res; 698 return res;
501} 699}
502#endif 700#endif
503 701
508#if !HAVE_READAHEAD 706#if !HAVE_READAHEAD
509# define readahead aio_readahead 707# define readahead aio_readahead
510 708
511static ssize_t readahead (int fd, off_t offset, size_t count) 709static ssize_t readahead (int fd, off_t offset, size_t count)
512{ 710{
513 char readahead_buf[4096]; 711 dBUF;
514 712
515 while (count > 0) 713 while (count > 0)
516 { 714 {
517 size_t len = count < sizeof (readahead_buf) ? count : sizeof (readahead_buf); 715 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
518 716
519 pread (fd, readahead_buf, len, offset); 717 pread (fd, aio_buf, len, offset);
520 offset += len; 718 offset += len;
521 count -= len; 719 count -= len;
522 } 720 }
523 721
524 errno = 0; 722 errno = 0;
533static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res) 731static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
534{ 732{
535 struct dirent *e; 733 struct dirent *e;
536 int errorno; 734 int errorno;
537 735
538 pthread_mutex_lock (&readdirlock); 736 LOCK (readdirlock);
539 737
540 e = readdir (dirp); 738 e = readdir (dirp);
541 errorno = errno; 739 errorno = errno;
542 740
543 if (e) 741 if (e)
546 strcpy (ent->d_name, e->d_name); 744 strcpy (ent->d_name, e->d_name);
547 } 745 }
548 else 746 else
549 *res = 0; 747 *res = 0;
550 748
551 pthread_mutex_unlock (&readdirlock); 749 UNLOCK (readdirlock);
552 750
553 errno = errorno; 751 errno = errorno;
554 return e ? 0 : -1; 752 return e ? 0 : -1;
555} 753}
556#endif 754#endif
557 755
558/* sendfile always needs emulation */ 756/* sendfile always needs emulation */
559static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count) 757static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
560{ 758{
561 ssize_t res; 759 ssize_t res;
562 760
563 if (!count) 761 if (!count)
564 return 0; 762 return 0;
613#endif 811#endif
614 ) 812 )
615 ) 813 )
616 { 814 {
617 /* emulate sendfile. this is a major pain in the ass */ 815 /* emulate sendfile. this is a major pain in the ass */
618 char buf[4096]; 816 dBUF;
817
619 res = 0; 818 res = 0;
620 819
621 while (count) 820 while (count)
622 { 821 {
623 ssize_t cnt; 822 ssize_t cnt;
624 823
625 cnt = pread (ifd, buf, count > 4096 ? 4096 : count, offset); 824 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
626 825
627 if (cnt <= 0) 826 if (cnt <= 0)
628 { 827 {
629 if (cnt && !res) res = -1; 828 if (cnt && !res) res = -1;
630 break; 829 break;
631 } 830 }
632 831
633 cnt = write (ofd, buf, cnt); 832 cnt = write (ofd, aio_buf, cnt);
634 833
635 if (cnt <= 0) 834 if (cnt <= 0)
636 { 835 {
637 if (cnt && !res) res = -1; 836 if (cnt && !res) res = -1;
638 break; 837 break;
646 845
647 return res; 846 return res;
648} 847}
649 848
650/* read a full directory */ 849/* read a full directory */
651static int scandir_ (const char *path, void **namesp) 850static void scandir_ (aio_req req, worker *self)
652{ 851{
653 DIR *dirp = opendir (path); 852 DIR *dirp;
654 union 853 union
655 { 854 {
656 struct dirent d; 855 struct dirent d;
657 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1]; 856 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
658 } u; 857 } *u;
659 struct dirent *entp; 858 struct dirent *entp;
660 char *name, *names; 859 char *name, *names;
661 int memlen = 4096; 860 int memlen = 4096;
662 int memofs = 0; 861 int memofs = 0;
663 int res = 0; 862 int res = 0;
664 int errorno; 863 int errorno;
665 864
666 if (!dirp) 865 LOCK (wrklock);
667 return -1; 866 self->dirp = dirp = opendir (req->dataptr);
867 self->dbuf = u = malloc (sizeof (*u));
868 UNLOCK (wrklock);
668 869
669 names = malloc (memlen); 870 req->data2ptr = names = malloc (memlen);
670 871
872 if (dirp && u && names)
671 for (;;) 873 for (;;)
672 { 874 {
875 errno = 0;
673 errno = 0, readdir_r (dirp, &u.d, &entp); 876 readdir_r (dirp, &u->d, &entp);
674 877
675 if (!entp) 878 if (!entp)
676 break; 879 break;
677 880
678 name = entp->d_name; 881 name = entp->d_name;
679 882
680 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2]))) 883 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
681 { 884 {
682 int len = strlen (name) + 1; 885 int len = strlen (name) + 1;
683 886
684 res++; 887 res++;
685 888
686 while (memofs + len > memlen) 889 while (memofs + len > memlen)
687 { 890 {
688 memlen *= 2; 891 memlen *= 2;
892 LOCK (wrklock);
689 names = realloc (names, memlen); 893 req->data2ptr = names = realloc (names, memlen);
894 UNLOCK (wrklock);
895
690 if (!names) 896 if (!names)
691 break; 897 break;
692 } 898 }
693 899
694 memcpy (names + memofs, name, len); 900 memcpy (names + memofs, name, len);
695 memofs += len; 901 memofs += len;
696 } 902 }
697 } 903 }
698 904
699 errorno = errno;
700 closedir (dirp);
701
702 if (errorno) 905 if (errno)
703 {
704 free (names);
705 errno = errorno;
706 res = -1; 906 res = -1;
707 } 907
708 908 req->result = res;
709 *namesp = (void *)names;
710 return res;
711} 909}
712 910
713/*****************************************************************************/ 911/*****************************************************************************/
714 912
715static void *aio_proc (void *thr_arg) 913static void *aio_proc (void *thr_arg)
716{ 914{
717 aio_req req; 915 aio_req req;
718 int type; 916 int type;
917 worker *self = (worker *)thr_arg;
719 918
720 do 919 do
721 { 920 {
722 pthread_mutex_lock (&reqlock); 921 LOCK (reqlock);
723 922
724 for (;;) 923 for (;;)
725 { 924 {
726 req = reqs; 925 self->req = req = reqq_shift (&req_queue);
727
728 if (reqs)
729 {
730 reqs = reqs->next;
731 if (!reqs) reqe = 0;
732 }
733 926
734 if (req) 927 if (req)
735 break; 928 break;
736 929
737 pthread_cond_wait (&reqwait, &reqlock); 930 pthread_cond_wait (&reqwait, &reqlock);
738 } 931 }
739 932
740 pthread_mutex_unlock (&reqlock); 933 UNLOCK (reqlock);
741 934
742 errno = 0; /* strictly unnecessary */ 935 errno = 0; /* strictly unnecessary */
936 type = req->type; /* remember type for QUIT check */
743 937
744 if (!req->cancelled) 938 if (!(req->flags & FLAG_CANCELLED))
745 switch (req->type) 939 switch (type)
746 { 940 {
747 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break; 941 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
748 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break; 942 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
749 943
750 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break; 944 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
751 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length); break; 945 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length, self); break;
752 946
753 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break; 947 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break;
754 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break; 948 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break;
755 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break; 949 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break;
756 950
762 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break; 956 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break;
763 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break; 957 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break;
764 958
765 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break; 959 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
766 case REQ_FSYNC: req->result = fsync (req->fd); break; 960 case REQ_FSYNC: req->result = fsync (req->fd); break;
767 case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break; 961 case REQ_READDIR: scandir_ (req, self); break;
768 962
769 case REQ_SLEEP: 963 case REQ_BUSY:
770 { 964 {
771 struct timeval tv; 965 struct timeval tv;
772 966
773 tv.tv_sec = req->fd; 967 tv.tv_sec = req->fd;
774 tv.tv_usec = req->fd2; 968 tv.tv_usec = req->fd2;
775 969
776 req->result = select (0, 0, 0, 0, &tv); 970 req->result = select (0, 0, 0, 0, &tv);
777 } 971 }
778 972
973 case REQ_GROUP:
974 case REQ_NOP:
779 case REQ_QUIT: 975 case REQ_QUIT:
780 break; 976 break;
781 977
782 default: 978 default:
783 req->result = ENOSYS; 979 req->result = ENOSYS;
784 break; 980 break;
785 } 981 }
786 982
787 req->errorno = errno; 983 req->errorno = errno;
788 984
789 pthread_mutex_lock (&reslock); 985 LOCK (reslock);
790 986
791 req->next = 0; 987 if (!reqq_push (&res_queue, req))
792
793 if (rese)
794 {
795 rese->next = req;
796 rese = req;
797 }
798 else
799 {
800 rese = ress = req;
801
802 /* write a dummy byte to the pipe so fh becomes ready */ 988 /* write a dummy byte to the pipe so fh becomes ready */
803 write (respipe [1], &respipe, 1); 989 write (respipe [1], &respipe, 1);
804 }
805 990
806 pthread_mutex_unlock (&reslock); 991 self->req = 0;
992 worker_clear (self);
993
994 UNLOCK (reslock);
807 } 995 }
808 while (type != REQ_QUIT); 996 while (type != REQ_QUIT);
809 997
998 LOCK (wrklock);
999 worker_free (self);
1000 UNLOCK (wrklock);
1001
810 return 0; 1002 return 0;
811} 1003}
812 1004
813/*****************************************************************************/ 1005/*****************************************************************************/
814 1006
815static void atfork_prepare (void) 1007static void atfork_prepare (void)
816{ 1008{
817 pthread_mutex_lock (&reqlock); 1009 LOCK (wrklock);
818 pthread_mutex_lock (&reslock); 1010 LOCK (reqlock);
1011 LOCK (reslock);
819#if !HAVE_PREADWRITE 1012#if !HAVE_PREADWRITE
820 pthread_mutex_lock (&preadwritelock); 1013 LOCK (preadwritelock);
821#endif 1014#endif
822#if !HAVE_READDIR_R 1015#if !HAVE_READDIR_R
823 pthread_mutex_lock (&readdirlock); 1016 LOCK (readdirlock);
824#endif 1017#endif
825} 1018}
826 1019
827static void atfork_parent (void) 1020static void atfork_parent (void)
828{ 1021{
829#if !HAVE_READDIR_R 1022#if !HAVE_READDIR_R
830 pthread_mutex_unlock (&readdirlock); 1023 UNLOCK (readdirlock);
831#endif 1024#endif
832#if !HAVE_PREADWRITE 1025#if !HAVE_PREADWRITE
833 pthread_mutex_unlock (&preadwritelock); 1026 UNLOCK (preadwritelock);
834#endif 1027#endif
835 pthread_mutex_unlock (&reslock); 1028 UNLOCK (reslock);
836 pthread_mutex_unlock (&reqlock); 1029 UNLOCK (reqlock);
1030 UNLOCK (wrklock);
837} 1031}
838 1032
839static void atfork_child (void) 1033static void atfork_child (void)
840{ 1034{
841 aio_req prv; 1035 aio_req prv;
842 1036
1037 while (prv = reqq_shift (&req_queue))
1038 req_free (prv);
1039
1040 while (prv = reqq_shift (&res_queue))
1041 req_free (prv);
1042
1043 while (wrk_first.next != &wrk_first)
1044 {
1045 worker *wrk = wrk_first.next;
1046
1047 if (wrk->req)
1048 req_free (wrk->req);
1049
1050 worker_clear (wrk);
1051 worker_free (wrk);
1052 }
1053
843 started = 0; 1054 started = 0;
844 1055 nreqs = 0;
845 while (reqs)
846 {
847 prv = reqs;
848 reqs = prv->next;
849 req_free (prv);
850 }
851
852 reqs = reqe = 0;
853
854 while (ress)
855 {
856 prv = ress;
857 ress = prv->next;
858 req_free (prv);
859 }
860
861 ress = rese = 0;
862 1056
863 close (respipe [0]); 1057 close (respipe [0]);
864 close (respipe [1]); 1058 close (respipe [1]);
865 create_pipe (); 1059 create_pipe ();
866 1060
867 atfork_parent (); 1061 atfork_parent ();
868} 1062}
869 1063
870#define dREQ \ 1064#define dREQ \
871 aio_req req; \ 1065 aio_req req; \
1066 int req_pri = next_pri; \
1067 next_pri = DEFAULT_PRI + PRI_BIAS; \
872 \ 1068 \
873 if (SvOK (callback) && !SvROK (callback)) \ 1069 if (SvOK (callback) && !SvROK (callback)) \
874 croak ("callback must be undef or of reference type"); \ 1070 croak ("callback must be undef or of reference type"); \
875 \ 1071 \
876 Newz (0, req, 1, aio_cb); \ 1072 Newz (0, req, 1, aio_cb); \
877 if (!req) \ 1073 if (!req) \
878 croak ("out of memory during aio_req allocation"); \ 1074 croak ("out of memory during aio_req allocation"); \
879 \ 1075 \
880 req->callback = newSVsv (callback) 1076 req->callback = newSVsv (callback); \
1077 req->pri = req_pri
881 1078
882#define REQ_SEND \ 1079#define REQ_SEND \
883 req_send (req); \ 1080 req_send (req); \
884 \ 1081 \
885 if (GIMME_V != G_VOID) \ 1082 if (GIMME_V != G_VOID) \
907 1104
908void 1105void
909max_parallel (nthreads) 1106max_parallel (nthreads)
910 int nthreads 1107 int nthreads
911 PROTOTYPE: $ 1108 PROTOTYPE: $
912
913int
914max_outstanding (nreqs)
915 int nreqs
916 PROTOTYPE: $
917 CODE:
918 RETVAL = max_outstanding;
919 max_outstanding = nreqs;
920 1109
921void 1110void
922aio_open (pathname,flags,mode,callback=&PL_sv_undef) 1111aio_open (pathname,flags,mode,callback=&PL_sv_undef)
923 SV * pathname 1112 SV * pathname
924 int flags 1113 int flags
1139 1328
1140 REQ_SEND; 1329 REQ_SEND;
1141} 1330}
1142 1331
1143void 1332void
1144aio_sleep (delay,callback=&PL_sv_undef) 1333aio_busy (delay,callback=&PL_sv_undef)
1145 double delay 1334 double delay
1146 SV * callback 1335 SV * callback
1147 PPCODE: 1336 PPCODE:
1148{ 1337{
1149 dREQ; 1338 dREQ;
1150 1339
1151 req->type = REQ_SLEEP; 1340 req->type = REQ_BUSY;
1152 req->fd = delay < 0. ? 0 : delay; 1341 req->fd = delay < 0. ? 0 : delay;
1153 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd); 1342 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd);
1154 1343
1155 REQ_SEND; 1344 REQ_SEND;
1156} 1345}
1160 SV * callback 1349 SV * callback
1161 PROTOTYPE: ;$ 1350 PROTOTYPE: ;$
1162 PPCODE: 1351 PPCODE:
1163{ 1352{
1164 dREQ; 1353 dREQ;
1354
1165 req->type = REQ_GROUP; 1355 req->type = REQ_GROUP;
1166 req->grp_next = req;
1167 req->grp_prev = req;
1168
1169 req_send (req); 1356 req_send (req);
1357
1170 XPUSHs (req_sv (req, AIO_GRP_KLASS)); 1358 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1171} 1359}
1360
1361void
1362aio_nop (callback=&PL_sv_undef)
1363 SV * callback
1364 PPCODE:
1365{
1366 dREQ;
1367
1368 req->type = REQ_NOP;
1369
1370 REQ_SEND;
1371}
1372
1373void
1374aioreq_pri (int pri = DEFAULT_PRI)
1375 CODE:
1376 if (pri < PRI_MIN) pri = PRI_MIN;
1377 if (pri > PRI_MAX) pri = PRI_MAX;
1378 next_pri = pri + PRI_BIAS;
1379
1380void
1381aioreq_nice (int nice = 0)
1382 CODE:
1383 nice = next_pri - nice;
1384 if (nice < PRI_MIN) nice = PRI_MIN;
1385 if (nice > PRI_MAX) nice = PRI_MAX;
1386 next_pri = nice + PRI_BIAS;
1172 1387
1173void 1388void
1174flush () 1389flush ()
1175 PROTOTYPE: 1390 PROTOTYPE:
1176 CODE: 1391 CODE:
1225 1440
1226MODULE = IO::AIO PACKAGE = IO::AIO::REQ 1441MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1227 1442
1228void 1443void
1229cancel (aio_req_ornot req) 1444cancel (aio_req_ornot req)
1230 PROTOTYPE:
1231 CODE: 1445 CODE:
1232 req_cancel (req); 1446 req_cancel (req);
1233 1447
1448void
1449cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1450 CODE:
1451 SvREFCNT_dec (req->callback);
1452 req->callback = newSVsv (callback);
1453
1234MODULE = IO::AIO PACKAGE = IO::AIO::GRP 1454MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1235 1455
1236void 1456void
1237add (aio_req grp, ...) 1457add (aio_req grp, ...)
1238 PPCODE: 1458 PPCODE:
1239{ 1459{
1240 int i; 1460 int i;
1461 aio_req req;
1462
1463 if (grp->fd == 2)
1464 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1241 1465
1242 for (i = 1; i < items; ++i ) 1466 for (i = 1; i < items; ++i )
1243 { 1467 {
1244 if (GIMME_V != G_VOID) 1468 if (GIMME_V != G_VOID)
1245 XPUSHs (sv_2mortal (newSVsv (ST (i)))); 1469 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1246 1470
1247 aio_req req = SvAIO_REQ (ST (i)); 1471 req = SvAIO_REQ (ST (i));
1248 1472
1249 if (req) 1473 if (req)
1250 { 1474 {
1251 req->grp_prev = grp; 1475 ++grp->length;
1252 req->grp_next = grp->grp_next;
1253 grp->grp_next->grp_prev = req;
1254 grp->grp_next = req;
1255
1256 req->grp = grp; 1476 req->grp = grp;
1477
1478 req->grp_prev = 0;
1479 req->grp_next = grp->grp_first;
1480
1481 if (grp->grp_first)
1482 grp->grp_first->grp_prev = req;
1483
1484 grp->grp_first = req;
1257 } 1485 }
1258 } 1486 }
1259} 1487}
1260 1488
1261void 1489void
1490cancel_subs (aio_req_ornot req)
1491 CODE:
1492 req_cancel_subs (req);
1493
1494void
1262result (aio_req grp, ...) 1495result (aio_req grp, ...)
1496 CODE:
1497{
1498 int i;
1499 AV *av = newAV ();
1500
1501 for (i = 1; i < items; ++i )
1502 av_push (av, newSVsv (ST (i)));
1503
1504 SvREFCNT_dec (grp->data);
1505 grp->data = (SV *)av;
1506}
1507
1508void
1509limit (aio_req grp, int limit)
1263 CODE: 1510 CODE:
1264{ 1511 grp->fd2 = limit;
1265 int i; 1512 aio_grp_feed (grp);
1266 AV *av = newAV ();
1267 1513
1268 if (grp->data) 1514void
1515feed (aio_req grp, SV *callback=&PL_sv_undef)
1516 CODE:
1517{
1269 SvREFCNT_dec (grp->data); 1518 SvREFCNT_dec (grp->fh2);
1519 grp->fh2 = newSVsv (callback);
1270 1520
1271 for (i = 1; i < items; ++i ) 1521 if (grp->fd2 <= 0)
1272 av_push (av, newSVsv (ST (i))); 1522 grp->fd2 = 2;
1273 1523
1274 grp->data = (SV *)av; 1524 aio_grp_feed (grp);
1275} 1525}
1276 1526
