
Comparing IO-AIO/AIO.xs (file contents):
Revision 1.49 by root, Sun Oct 22 13:33:28 2006 UTC vs.
Revision 1.85 by root, Sat Oct 28 23:32:29 2006 UTC

1/* solaris */
2#define _POSIX_PTHREAD_SEMANTICS 1
3
4#if __linux && !defined(_GNU_SOURCE)
5# define _GNU_SOURCE
6#endif
7
8/* just in case */
1#define _REENTRANT 1 9#define _REENTRANT 1
10
2#include <errno.h> 11#include <errno.h>
3 12
4#include "EXTERN.h" 13#include "EXTERN.h"
5#include "perl.h" 14#include "perl.h"
6#include "XSUB.h" 15#include "XSUB.h"
34# else 43# else
35# error sendfile support requested but not available 44# error sendfile support requested but not available
36# endif 45# endif
37#endif 46#endif
38 47
48/* number of seconds after which idle threads exit */
49#define IDLE_TIMEOUT 10
50
39/* used for struct dirent, AIX doesn't provide it */ 51/* used for struct dirent, AIX doesn't provide it */
40#ifndef NAME_MAX 52#ifndef NAME_MAX
41# define NAME_MAX 4096 53# define NAME_MAX 4096
42#endif 54#endif
43 55
56#ifndef PTHREAD_STACK_MIN
57/* care for broken platforms, e.g. windows */
58# define PTHREAD_STACK_MIN 16384
59#endif
60
44#if __ia64 61#if __ia64
45# define STACKSIZE 65536 62# define STACKSIZE 65536
63#elif __i386 || __x86_64 /* 16k is unreasonably high :( */
64# define STACKSIZE PTHREAD_STACK_MIN
46#else 65#else
47# define STACKSIZE 8192 66# define STACKSIZE 16384
67#endif
68
69/* whether word reads are potentially non-atomic.
70 * this is conservative; most arches this runs
71 * on likely have atomic word reads/writes.
72 */
73#ifndef WORDACCESS_UNSAFE
74# if __i386 || __x86_64
75# define WORDACCESS_UNSAFE 0
76# else
77# define WORDACCESS_UNSAFE 1
48#endif 78# endif
79#endif
80
81/* buffer size for various temporary buffers */
82#define AIO_BUFSIZE 65536
83
84#define dBUF \
85 char *aio_buf; \
86 LOCK (wrklock); \
87 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
88 UNLOCK (wrklock); \
89 if (!aio_buf) \
90 return -1;
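
The dBUF macro above allocates a per-request scratch buffer and records the pointer in self->dbuf under wrklock, so the cleanup path (worker_clear further down) can free it even if the thread is torn down mid-request; on allocation failure the enclosing handler returns -1. A minimal stand-alone sketch of that pattern, using a simplified worker type with only a dbuf member rather than the real struct from this file:

#include <stdlib.h>
#include <pthread.h>

#define AIO_BUFSIZE 65536

static pthread_mutex_t wrklock = PTHREAD_MUTEX_INITIALIZER;

typedef struct { void *dbuf; } worker;   /* simplified stand-in */

static int fill_buffer (worker *self)
{
  char *aio_buf;

  /* the dBUF pattern: allocate, publish under wrklock, bail out on failure */
  pthread_mutex_lock (&wrklock);
  self->dbuf = aio_buf = malloc (AIO_BUFSIZE);
  pthread_mutex_unlock (&wrklock);

  if (!aio_buf)
    return -1;

  /* ... use aio_buf as a temporary I/O buffer here ... */
  return 0;
}

int main (void)
{
  worker w = { 0 };
  int res = fill_buffer (&w);

  free (w.dbuf);            /* in AIO.xs this is worker_clear's job */
  return res ? 1 : 0;
}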
49 91
50enum { 92enum {
51 REQ_QUIT, 93 REQ_QUIT,
52 REQ_OPEN, REQ_CLOSE, 94 REQ_OPEN, REQ_CLOSE,
53 REQ_READ, REQ_WRITE, REQ_READAHEAD, 95 REQ_READ, REQ_WRITE, REQ_READAHEAD,
54 REQ_SENDFILE, 96 REQ_SENDFILE,
55 REQ_STAT, REQ_LSTAT, REQ_FSTAT, 97 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
56 REQ_FSYNC, REQ_FDATASYNC, 98 REQ_FSYNC, REQ_FDATASYNC,
57 REQ_UNLINK, REQ_RMDIR, REQ_RENAME, 99 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
58 REQ_READDIR, 100 REQ_MKNOD, REQ_READDIR,
59 REQ_LINK, REQ_SYMLINK, 101 REQ_LINK, REQ_SYMLINK,
60 REQ_SLEEP, 102 REQ_GROUP, REQ_NOP,
61 REQ_GROUP, 103 REQ_BUSY,
62}; 104};
63 105
64#define AIO_REQ_KLASS "IO::AIO::REQ" 106#define AIO_REQ_KLASS "IO::AIO::REQ"
65#define AIO_GRP_KLASS "IO::AIO::GRP" 107#define AIO_GRP_KLASS "IO::AIO::GRP"
66 108
67typedef struct aio_cb 109typedef struct aio_cb
68{ 110{
69 struct aio_cb *volatile next; 111 struct aio_cb *volatile next;
70
71 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
72
73 SV *self; /* the perl counterpart of this request, if any */
74 112
75 SV *data, *callback; 113 SV *data, *callback;
76 SV *fh, *fh2; 114 SV *fh, *fh2;
77 void *dataptr, *data2ptr; 115 void *dataptr, *data2ptr;
78 Stat_t *statdata; 116 Stat_t *statdata;
79 off_t offset; 117 off_t offset;
80 size_t length; 118 size_t length;
81 ssize_t result; 119 ssize_t result;
82 120
121 STRLEN dataoffset;
83 int type; 122 int type;
84 int fd, fd2; 123 int fd, fd2;
85 int errorno; 124 int errorno;
86 STRLEN dataoffset;
87 mode_t mode; /* open */ 125 mode_t mode; /* open */
126
88 unsigned char cancelled; 127 unsigned char flags;
128 unsigned char pri;
129
130 SV *self; /* the perl counterpart of this request, if any */
131 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
89} aio_cb; 132} aio_cb;
133
134enum {
135 FLAG_CANCELLED = 0x01,
136};
90 137
91typedef aio_cb *aio_req; 138typedef aio_cb *aio_req;
92typedef aio_cb *aio_req_ornot; 139typedef aio_cb *aio_req_ornot;
93 140
141enum {
142 PRI_MIN = -4,
143 PRI_MAX = 4,
144
145 DEFAULT_PRI = 0,
146 PRI_BIAS = -PRI_MIN,
147 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
148};
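
These constants bias the user-visible priority range [-4, +4] into array indices [0, NUM_PRI). An illustrative program (not part of this file) showing the mapping; reqq_shift below scans buckets from the highest index downwards, so a larger pri is served first:

#include <stdio.h>

enum {
  PRI_MIN     = -4,
  PRI_MAX     = 4,
  DEFAULT_PRI = 0,
  PRI_BIAS    = -PRI_MIN,
  NUM_PRI     = PRI_MAX + PRI_BIAS + 1,
};

int main (void)
{
  int pri;

  /* -4 maps to bucket 0, 0 (the default) to bucket 4, +4 to bucket 8 */
  for (pri = PRI_MIN; pri <= PRI_MAX; ++pri)
    printf ("pri %+d -> bucket %d of %d\n", pri, pri + PRI_BIAS, NUM_PRI);

  return 0;
}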
149
150#define AIO_TICKS ((1000000 + 1023) >> 10)
151
152static unsigned int max_poll_time = 0;
153static unsigned int max_poll_reqs = 0;
154
155/* calculate time difference in units of ~1/AIO_TICKS of a second */
156static int tvdiff (struct timeval *tv1, struct timeval *tv2)
157{
158 return (tv2->tv_sec - tv1->tv_sec ) * AIO_TICKS
159 + ((tv2->tv_usec - tv1->tv_usec) >> 10);
160}
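
tvdiff returns elapsed time in roughly millisecond-sized ticks (the microsecond difference is shifted by 10, i.e. divided by 1024); AIO_TICKS is the number of such ticks per second (about 977), and the max_poll_time() XS function further down stores its seconds argument in the same unit. A small self-contained check of the arithmetic, illustrative only:

#include <stdio.h>
#include <sys/time.h>

#define AIO_TICKS ((1000000 + 1023) >> 10)   /* ticks per second, ~977 */

static int tvdiff (struct timeval *tv1, struct timeval *tv2)
{
  return (tv2->tv_sec - tv1->tv_sec ) * AIO_TICKS
       + ((tv2->tv_usec - tv1->tv_usec) >> 10);
}

int main (void)
{
  struct timeval start = { 10, 0 }, now = { 10, 2500 };

  /* 2500 microseconds is two full 1024-microsecond ticks */
  printf ("%d\n", tvdiff (&start, &now));    /* prints 2 */

  /* what max_poll_time (0.005) would store: 0.005 * AIO_TICKS, ~4 ticks */
  printf ("%d\n", (int)(0.005 * AIO_TICKS));
  return 0;
}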
161
162static int next_pri = DEFAULT_PRI + PRI_BIAS;
163
94static int started, wanted; 164static unsigned int started, idle, wanted;
95static volatile int nreqs; 165
96static int max_outstanding = 1<<30; 166#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
167# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
168#else
169# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
170#endif
171
172#define LOCK(mutex) pthread_mutex_lock (&(mutex))
173#define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
174
175/* worker threads management */
176static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
177
178typedef struct worker {
179 /* locked by wrklock */
180 struct worker *prev, *next;
181
182 pthread_t tid;
183
184 /* locked by reslock, reqlock or wrklock */
185 aio_req req; /* currently processed request */
186 void *dbuf;
187 DIR *dirp;
188} worker;
189
190static worker wrk_first = { &wrk_first, &wrk_first, 0 };
191
192static void worker_clear (worker *wrk)
193{
194 if (wrk->dirp)
195 {
196 closedir (wrk->dirp);
197 wrk->dirp = 0;
198 }
199
200 if (wrk->dbuf)
201 {
202 free (wrk->dbuf);
203 wrk->dbuf = 0;
204 }
205}
206
207static void worker_free (worker *wrk)
208{
209 wrk->next->prev = wrk->prev;
210 wrk->prev->next = wrk->next;
211
212 free (wrk);
213}
214
215static volatile unsigned int nreqs, nready, npending;
216static volatile unsigned int max_idle = 4;
217static volatile unsigned int max_outstanding = 0xffffffff;
97static int respipe [2]; 218static int respipe [2];
98 219
99static pthread_mutex_t reslock = PTHREAD_MUTEX_INITIALIZER; 220static pthread_mutex_t reslock = AIO_MUTEX_INIT;
100static pthread_mutex_t reqlock = PTHREAD_MUTEX_INITIALIZER; 221static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
101static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER; 222static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
102 223
103static volatile aio_req reqs, reqe; /* queue start, queue end */ 224#if WORDACCESS_UNSAFE
104static volatile aio_req ress, rese; /* queue start, queue end */
105 225
226static unsigned int get_nready ()
227{
228 unsigned int retval;
229
230 LOCK (reqlock);
231 retval = nready;
232 UNLOCK (reqlock);
233
234 return retval;
235}
236
237static unsigned int get_npending ()
238{
239 unsigned int retval;
240
241 LOCK (reslock);
242 retval = npending;
243 UNLOCK (reslock);
244
245 return retval;
246}
247
248static unsigned int get_nthreads ()
249{
250 unsigned int retval;
251
252 LOCK (wrklock);
253 retval = started;
254 UNLOCK (wrklock);
255
256 return retval;
257}
258
259#else
260
261# define get_nready() nready
262# define get_npending() npending
263# define get_nthreads() started
264
265#endif
266
267/*
268 * a somewhat faster data structure might be nice, but
269 * with 8 priorities this actually needs <20 insns
270 * per shift, the most expensive operation.
271 */
272typedef struct {
273 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
274 int size;
275} reqq;
276
277static reqq req_queue;
278static reqq res_queue;
279
280int reqq_push (reqq *q, aio_req req)
281{
282 int pri = req->pri;
283 req->next = 0;
284
285 if (q->qe[pri])
286 {
287 q->qe[pri]->next = req;
288 q->qe[pri] = req;
289 }
290 else
291 q->qe[pri] = q->qs[pri] = req;
292
293 return q->size++;
294}
295
296aio_req reqq_shift (reqq *q)
297{
298 int pri;
299
300 if (!q->size)
301 return 0;
302
303 --q->size;
304
305 for (pri = NUM_PRI; pri--; )
306 {
307 aio_req req = q->qs[pri];
308
309 if (req)
310 {
311 if (!(q->qs[pri] = req->next))
312 q->qe[pri] = 0;
313
314 return req;
315 }
316 }
317
318 abort ();
319}
320
321static int poll_cb ();
322static void req_invoke (aio_req req);
106static void req_free (aio_req req); 323static void req_free (aio_req req);
324static void req_cancel (aio_req req);
107 325
108/* must be called at most once */ 326/* must be called at most once */
109static SV *req_sv (aio_req req, const char *klass) 327static SV *req_sv (aio_req req, const char *klass)
110{ 328{
111 if (!req->self) 329 if (!req->self)
117 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1))); 335 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
118} 336}
119 337
120static aio_req SvAIO_REQ (SV *sv) 338static aio_req SvAIO_REQ (SV *sv)
121{ 339{
340 MAGIC *mg;
341
122 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv)) 342 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
123 croak ("object of class " AIO_REQ_KLASS " expected"); 343 croak ("object of class " AIO_REQ_KLASS " expected");
124 344
125 MAGIC *mg = mg_find (SvRV (sv), PERL_MAGIC_ext); 345 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
126 346
127 return mg ? (aio_req)mg->mg_ptr : 0; 347 return mg ? (aio_req)mg->mg_ptr : 0;
128} 348}
129 349
130static void aio_grp_feed (aio_req grp) 350static void aio_grp_feed (aio_req grp)
131{ 351{
132 while (grp->length < grp->fd2) 352 while (grp->length < grp->fd2 && !(grp->flags & FLAG_CANCELLED))
133 { 353 {
134 int old_len = grp->length; 354 int old_len = grp->length;
135 355
136 if (grp->fh2 && SvOK (grp->fh2)) 356 if (grp->fh2 && SvOK (grp->fh2))
137 { 357 {
140 ENTER; 360 ENTER;
141 SAVETMPS; 361 SAVETMPS;
142 PUSHMARK (SP); 362 PUSHMARK (SP);
143 XPUSHs (req_sv (grp, AIO_GRP_KLASS)); 363 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
144 PUTBACK; 364 PUTBACK;
145 call_sv (grp->fh2, G_VOID | G_EVAL); 365 call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
146 SPAGAIN; 366 SPAGAIN;
147 FREETMPS; 367 FREETMPS;
148 LEAVE; 368 LEAVE;
149 } 369 }
150 370
156 break; 376 break;
157 } 377 }
158 } 378 }
159} 379}
160 380
161static void poll_wait () 381static void aio_grp_dec (aio_req grp)
162{ 382{
163 if (nreqs && !ress) 383 --grp->length;
164 {
165 fd_set rfd;
166 FD_ZERO(&rfd);
167 FD_SET(respipe [0], &rfd);
168 384
169 select (respipe [0] + 1, &rfd, 0, 0, 0); 385 /* call feeder, if applicable */
386 aio_grp_feed (grp);
387
388 /* finish, if done */
389 if (!grp->length && grp->fd)
390 {
391 req_invoke (grp);
392 req_free (grp);
170 } 393 }
171} 394}
172 395
173static void req_invoke (aio_req req) 396static void req_invoke (aio_req req)
174{ 397{
175 dSP; 398 dSP;
176 int errorno = errno;
177 399
178 if (req->cancelled || !SvOK (req->callback)) 400 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
179 return; 401 {
180
181 errno = req->errorno;
182
183 ENTER; 402 ENTER;
184 SAVETMPS; 403 SAVETMPS;
185 PUSHMARK (SP); 404 PUSHMARK (SP);
186 EXTEND (SP, 1); 405 EXTEND (SP, 1);
187 406
188 switch (req->type) 407 switch (req->type)
189 {
190 case REQ_READDIR:
191 { 408 {
192 SV *rv = &PL_sv_undef; 409 case REQ_READDIR:
193
194 if (req->result >= 0)
195 { 410 {
196 char *buf = req->data2ptr; 411 SV *rv = &PL_sv_undef;
197 AV *av = newAV ();
198 412
199 while (req->result) 413 if (req->result >= 0)
200 { 414 {
415 int i;
416 char *buf = req->data2ptr;
417 AV *av = newAV ();
418
419 av_extend (av, req->result - 1);
420
421 for (i = 0; i < req->result; ++i)
422 {
201 SV *sv = newSVpv (buf, 0); 423 SV *sv = newSVpv (buf, 0);
202 424
203 av_push (av, sv); 425 av_store (av, i, sv);
204 buf += SvCUR (sv) + 1; 426 buf += SvCUR (sv) + 1;
205 req->result--; 427 }
428
429 rv = sv_2mortal (newRV_noinc ((SV *)av));
206 } 430 }
207 431
208 rv = sv_2mortal (newRV_noinc ((SV *)av)); 432 PUSHs (rv);
209 } 433 }
434 break;
210 435
211 PUSHs (rv); 436 case REQ_OPEN:
437 {
438 /* convert fd to fh */
439 SV *fh;
440
441 PUSHs (sv_2mortal (newSViv (req->result)));
442 PUTBACK;
443 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
444 SPAGAIN;
445
446 fh = SvREFCNT_inc (POPs);
447
448 PUSHMARK (SP);
449 XPUSHs (sv_2mortal (fh));
450 }
451 break;
452
453 case REQ_GROUP:
454 req->fd = 2; /* mark group as finished */
455
456 if (req->data)
457 {
458 int i;
459 AV *av = (AV *)req->data;
460
461 EXTEND (SP, AvFILL (av) + 1);
462 for (i = 0; i <= AvFILL (av); ++i)
463 PUSHs (*av_fetch (av, i, 0));
464 }
465 break;
466
467 case REQ_NOP:
468 case REQ_BUSY:
469 break;
470
471 default:
472 PUSHs (sv_2mortal (newSViv (req->result)));
473 break;
212 } 474 }
213 break;
214 475
215 case REQ_OPEN: 476 errno = req->errorno;
216 {
217 /* convert fd to fh */
218 SV *fh;
219 477
220 PUSHs (sv_2mortal (newSViv (req->result)));
221 PUTBACK; 478 PUTBACK;
222 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
223 SPAGAIN;
224
225 fh = SvREFCNT_inc (POPs);
226
227 PUSHMARK (SP);
228 XPUSHs (sv_2mortal (fh));
229 }
230 break;
231
232 case REQ_GROUP:
233 req->fd = 2; /* mark group as finished */
234
235 if (req->data)
236 {
237 int i;
238 AV *av = (AV *)req->data;
239
240 EXTEND (SP, AvFILL (av) + 1);
241 for (i = 0; i <= AvFILL (av); ++i)
242 PUSHs (*av_fetch (av, i, 0));
243 }
244 break;
245
246 case REQ_SLEEP:
247 break;
248
249 default:
250 PUSHs (sv_2mortal (newSViv (req->result)));
251 break;
252 }
253
254
255 PUTBACK;
256 call_sv (req->callback, G_VOID | G_EVAL); 479 call_sv (req->callback, G_VOID | G_EVAL);
257 SPAGAIN; 480 SPAGAIN;
258 481
259 if (SvTRUE (ERRSV))
260 {
261 req_free (req);
262 croak (0);
263 }
264
265 FREETMPS; 482 FREETMPS;
266 LEAVE; 483 LEAVE;
484 }
267 485
268 errno = errorno;
269}
270
271static void req_free (aio_req req)
272{
273 if (req->grp) 486 if (req->grp)
274 { 487 {
275 aio_req grp = req->grp; 488 aio_req grp = req->grp;
276 489
277 /* unlink request */ 490 /* unlink request */
279 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next; 492 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
280 493
281 if (grp->grp_first == req) 494 if (grp->grp_first == req)
282 grp->grp_first = req->grp_next; 495 grp->grp_first = req->grp_next;
283 496
284 --grp->length;
285
286 /* call feeder, if applicable */
287 aio_grp_feed (grp); 497 aio_grp_dec (grp);
498 }
288 499
289 /* finish, if done */ 500 if (SvTRUE (ERRSV))
290 if (!grp->length && grp->fd) 501 {
291 {
292 req_invoke (grp);
293 req_free (grp); 502 req_free (req);
294 } 503 croak (0);
295 } 504 }
505}
296 506
507static void req_free (aio_req req)
508{
297 if (req->self) 509 if (req->self)
298 { 510 {
299 sv_unmagic (req->self, PERL_MAGIC_ext); 511 sv_unmagic (req->self, PERL_MAGIC_ext);
300 SvREFCNT_dec (req->self); 512 SvREFCNT_dec (req->self);
301 } 513 }
304 SvREFCNT_dec (req->fh); 516 SvREFCNT_dec (req->fh);
305 SvREFCNT_dec (req->fh2); 517 SvREFCNT_dec (req->fh2);
306 SvREFCNT_dec (req->callback); 518 SvREFCNT_dec (req->callback);
307 Safefree (req->statdata); 519 Safefree (req->statdata);
308 520
309 if (req->type == REQ_READDIR && req->result >= 0) 521 if (req->type == REQ_READDIR)
310 free (req->data2ptr); 522 free (req->data2ptr);
311 523
312 Safefree (req); 524 Safefree (req);
313} 525}
314 526
527static void req_cancel_subs (aio_req grp)
528{
529 aio_req sub;
530
531 if (grp->type != REQ_GROUP)
532 return;
533
534 SvREFCNT_dec (grp->fh2);
535 grp->fh2 = 0;
536
537 for (sub = grp->grp_first; sub; sub = sub->grp_next)
538 req_cancel (sub);
539}
540
315static void req_cancel (aio_req req) 541static void req_cancel (aio_req req)
316{ 542{
317 req->cancelled = 1; 543 req->flags |= FLAG_CANCELLED;
318 544
319 if (req->type == REQ_GROUP) 545 req_cancel_subs (req);
320 {
321 aio_req sub;
322
323 for (sub = req->grp_first; sub; sub = sub->grp_next)
324 req_cancel (sub);
325 }
326}
327
328static int poll_cb ()
329{
330 dSP;
331 int count = 0;
332 int do_croak = 0;
333 aio_req req;
334
335 for (;;)
336 {
337 pthread_mutex_lock (&reslock);
338 req = ress;
339
340 if (req)
341 {
342 ress = req->next;
343
344 if (!ress)
345 {
346 /* read any signals sent by the worker threads */
347 char buf [32];
348 while (read (respipe [0], buf, 32) == 32)
349 ;
350
351 rese = 0;
352 }
353 }
354
355 pthread_mutex_unlock (&reslock);
356
357 if (!req)
358 break;
359
360 nreqs--;
361
362 if (req->type == REQ_QUIT)
363 started--;
364 else if (req->type == REQ_GROUP && req->length)
365 {
366 req->fd = 1; /* mark request as delayed */
367 continue;
368 }
369 else
370 {
371 if (req->type == REQ_READ)
372 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
373
374 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
375 SvREADONLY_off (req->data);
376
377 if (req->statdata)
378 {
379 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
380 PL_laststatval = req->result;
381 PL_statcache = *(req->statdata);
382 }
383
384 req_invoke (req);
385
386 count++;
387 }
388
389 req_free (req);
390 }
391
392 return count;
393} 546}
394 547
395static void *aio_proc(void *arg); 548static void *aio_proc(void *arg);
396 549
397static void start_thread (void) 550static void start_thread (void)
398{ 551{
399 sigset_t fullsigset, oldsigset; 552 sigset_t fullsigset, oldsigset;
400 pthread_t tid;
401 pthread_attr_t attr; 553 pthread_attr_t attr;
554
555 worker *wrk = calloc (1, sizeof (worker));
556
557 if (!wrk)
558 croak ("unable to allocate worker thread data");
402 559
403 pthread_attr_init (&attr); 560 pthread_attr_init (&attr);
404 pthread_attr_setstacksize (&attr, STACKSIZE); 561 pthread_attr_setstacksize (&attr, STACKSIZE);
405 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED); 562 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
563#ifdef PTHREAD_SCOPE_PROCESS
564 pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
565#endif
406 566
407 sigfillset (&fullsigset); 567 sigfillset (&fullsigset);
568
569 LOCK (wrklock);
408 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset); 570 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
409 571
410 if (pthread_create (&tid, &attr, aio_proc, 0) == 0) 572 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
573 {
574 wrk->prev = &wrk_first;
575 wrk->next = wrk_first.next;
576 wrk_first.next->prev = wrk;
577 wrk_first.next = wrk;
411 started++; 578 ++started;
579 }
580 else
581 free (wrk);
412 582
413 sigprocmask (SIG_SETMASK, &oldsigset, 0); 583 sigprocmask (SIG_SETMASK, &oldsigset, 0);
584 UNLOCK (wrklock);
585}
586
587static void maybe_start_thread ()
588{
589 if (get_nthreads () >= wanted)
590 return;
591
592 /* todo: maybe use idle here, but might be less exact */
593 if (0 <= (int)get_nthreads () + (int)get_npending () - (int)nreqs)
594 return;
595
596 start_thread ();
414} 597}
415 598
416static void req_send (aio_req req) 599static void req_send (aio_req req)
417{ 600{
418 while (started < wanted && nreqs >= started)
419 start_thread ();
420
421 nreqs++; 601 ++nreqs;
422 602
423 pthread_mutex_lock (&reqlock); 603 LOCK (reqlock);
424 604 ++nready;
425 req->next = 0; 605 reqq_push (&req_queue, req);
426
427 if (reqe)
428 {
429 reqe->next = req;
430 reqe = req;
431 }
432 else
433 reqe = reqs = req;
434
435 pthread_cond_signal (&reqwait); 606 pthread_cond_signal (&reqwait);
436 pthread_mutex_unlock (&reqlock); 607 UNLOCK (reqlock);
437 608
438 if (nreqs > max_outstanding) 609 maybe_start_thread ();
439 for (;;)
440 {
441 poll_cb ();
442
443 if (nreqs <= max_outstanding)
444 break;
445
446 poll_wait ();
447 }
448} 610}
449 611
450static void end_thread (void) 612static void end_thread (void)
451{ 613{
452 aio_req req; 614 aio_req req;
615
453 Newz (0, req, 1, aio_cb); 616 Newz (0, req, 1, aio_cb);
617
454 req->type = REQ_QUIT; 618 req->type = REQ_QUIT;
619 req->pri = PRI_MAX + PRI_BIAS;
455 620
456 req_send (req); 621 LOCK (reqlock);
622 reqq_push (&req_queue, req);
623 pthread_cond_signal (&reqwait);
624 UNLOCK (reqlock);
625
626 LOCK (wrklock);
627 --started;
628 UNLOCK (wrklock);
629}
630
631static void set_max_idle (int nthreads)
632{
633 if (WORDACCESS_UNSAFE) LOCK (reqlock);
634 max_idle = nthreads <= 0 ? 1 : nthreads;
635 if (WORDACCESS_UNSAFE) UNLOCK (reqlock);
457} 636}
458 637
459static void min_parallel (int nthreads) 638static void min_parallel (int nthreads)
460{ 639{
461 if (wanted < nthreads) 640 if (wanted < nthreads)
462 wanted = nthreads; 641 wanted = nthreads;
463} 642}
464 643
465static void max_parallel (int nthreads) 644static void max_parallel (int nthreads)
466{ 645{
467 int cur = started;
468
469 if (wanted > nthreads) 646 if (wanted > nthreads)
470 wanted = nthreads; 647 wanted = nthreads;
471 648
472 while (cur > wanted)
473 {
474 end_thread ();
475 cur--;
476 }
477
478 while (started > wanted) 649 while (started > wanted)
650 end_thread ();
651}
652
653static void poll_wait ()
654{
655 fd_set rfd;
656
657 while (nreqs)
658 {
659 int size;
660 if (WORDACCESS_UNSAFE) LOCK (reslock);
661 size = res_queue.size;
662 if (WORDACCESS_UNSAFE) UNLOCK (reslock);
663
664 if (size)
665 return;
666
667 maybe_start_thread ();
668
669 FD_ZERO(&rfd);
670 FD_SET(respipe [0], &rfd);
671
672 select (respipe [0] + 1, &rfd, 0, 0, 0);
479 { 673 }
674}
675
676static int poll_cb ()
677{
678 dSP;
679 int count = 0;
680 int maxreqs = max_poll_reqs;
681 int do_croak = 0;
682 struct timeval tv_start, tv_now;
683 aio_req req;
684
685 if (max_poll_time)
686 gettimeofday (&tv_start, 0);
687
688 for (;;)
689 {
690 for (;;)
691 {
692 maybe_start_thread ();
693
694 LOCK (reslock);
695 req = reqq_shift (&res_queue);
696
697 if (req)
698 {
699 --npending;
700
701 if (!res_queue.size)
702 {
703 /* read any signals sent by the worker threads */
704 char buf [32];
705 while (read (respipe [0], buf, 32) == 32)
706 ;
707 }
708 }
709
710 UNLOCK (reslock);
711
712 if (!req)
713 break;
714
715 --nreqs;
716
717 if (req->type == REQ_GROUP && req->length)
718 {
719 req->fd = 1; /* mark request as delayed */
720 continue;
721 }
722 else
723 {
724 if (req->type == REQ_READ)
725 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
726
727 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
728 SvREADONLY_off (req->data);
729
730 if (req->statdata)
731 {
732 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
733 PL_laststatval = req->result;
734 PL_statcache = *(req->statdata);
735 }
736
737 req_invoke (req);
738
739 count++;
740 }
741
742 req_free (req);
743
744 if (maxreqs && !--maxreqs)
745 break;
746
747 if (max_poll_time)
748 {
749 gettimeofday (&tv_now, 0);
750
751 if (tvdiff (&tv_start, &tv_now) >= max_poll_time)
752 break;
753 }
754 }
755
756 if (nreqs <= max_outstanding)
757 break;
758
480 poll_wait (); 759 poll_wait ();
481 poll_cb (); 760
761 ++maxreqs;
482 } 762 }
763
764 return count;
483} 765}
484 766
485static void create_pipe () 767static void create_pipe ()
486{ 768{
487 if (pipe (respipe)) 769 if (pipe (respipe))
511static ssize_t pread (int fd, void *buf, size_t count, off_t offset) 793static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
512{ 794{
513 ssize_t res; 795 ssize_t res;
514 off_t ooffset; 796 off_t ooffset;
515 797
516 pthread_mutex_lock (&preadwritelock); 798 LOCK (preadwritelock);
517 ooffset = lseek (fd, 0, SEEK_CUR); 799 ooffset = lseek (fd, 0, SEEK_CUR);
518 lseek (fd, offset, SEEK_SET); 800 lseek (fd, offset, SEEK_SET);
519 res = read (fd, buf, count); 801 res = read (fd, buf, count);
520 lseek (fd, ooffset, SEEK_SET); 802 lseek (fd, ooffset, SEEK_SET);
521 pthread_mutex_unlock (&preadwritelock); 803 UNLOCK (preadwritelock);
522 804
523 return res; 805 return res;
524} 806}
525 807
526static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset) 808static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
527{ 809{
528 ssize_t res; 810 ssize_t res;
529 off_t ooffset; 811 off_t ooffset;
530 812
531 pthread_mutex_lock (&preadwritelock); 813 LOCK (preadwritelock);
532 ooffset = lseek (fd, 0, SEEK_CUR); 814 ooffset = lseek (fd, 0, SEEK_CUR);
533 lseek (fd, offset, SEEK_SET); 815 lseek (fd, offset, SEEK_SET);
534 res = write (fd, buf, count); 816 res = write (fd, buf, count);
535 lseek (fd, offset, SEEK_SET); 817 lseek (fd, offset, SEEK_SET);
536 pthread_mutex_unlock (&preadwritelock); 818 UNLOCK (preadwritelock);
537 819
538 return res; 820 return res;
539} 821}
540#endif 822#endif
541 823
542#if !HAVE_FDATASYNC 824#if !HAVE_FDATASYNC
543# define fdatasync fsync 825# define fdatasync fsync
544#endif 826#endif
545 827
546#if !HAVE_READAHEAD 828#if !HAVE_READAHEAD
547# define readahead aio_readahead 829# define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
548 830
549static ssize_t readahead (int fd, off_t offset, size_t count) 831static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
550{ 832{
551 char readahead_buf[4096]; 833 dBUF;
552 834
553 while (count > 0) 835 while (count > 0)
554 { 836 {
555 size_t len = count < sizeof (readahead_buf) ? count : sizeof (readahead_buf); 837 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
556 838
557 pread (fd, readahead_buf, len, offset); 839 pread (fd, aio_buf, len, offset);
558 offset += len; 840 offset += len;
559 count -= len; 841 count -= len;
560 } 842 }
561 843
562 errno = 0; 844 errno = 0;
563} 845}
846
564#endif 847#endif
565 848
566#if !HAVE_READDIR_R 849#if !HAVE_READDIR_R
567# define readdir_r aio_readdir_r 850# define readdir_r aio_readdir_r
568 851
571static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res) 854static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
572{ 855{
573 struct dirent *e; 856 struct dirent *e;
574 int errorno; 857 int errorno;
575 858
576 pthread_mutex_lock (&readdirlock); 859 LOCK (readdirlock);
577 860
578 e = readdir (dirp); 861 e = readdir (dirp);
579 errorno = errno; 862 errorno = errno;
580 863
581 if (e) 864 if (e)
584 strcpy (ent->d_name, e->d_name); 867 strcpy (ent->d_name, e->d_name);
585 } 868 }
586 else 869 else
587 *res = 0; 870 *res = 0;
588 871
589 pthread_mutex_unlock (&readdirlock); 872 UNLOCK (readdirlock);
590 873
591 errno = errorno; 874 errno = errorno;
592 return e ? 0 : -1; 875 return e ? 0 : -1;
593} 876}
594#endif 877#endif
595 878
596/* sendfile always needs emulation */ 879/* sendfile always needs emulation */
597static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count) 880static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
598{ 881{
599 ssize_t res; 882 ssize_t res;
600 883
601 if (!count) 884 if (!count)
602 return 0; 885 return 0;
613 { 896 {
614 off_t sbytes; 897 off_t sbytes;
615 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0); 898 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
616 899
617 if (res < 0 && sbytes) 900 if (res < 0 && sbytes)
618 /* maybe only on EAGAIN only: as usual, the manpage leaves you guessing */ 901 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
619 res = sbytes; 902 res = sbytes;
620 } 903 }
621 904
622# elif __hpux 905# elif __hpux
623 res = sendfile (ofd, ifd, offset, count, 0, 0); 906 res = sendfile (ofd, ifd, offset, count, 0, 0);
651#endif 934#endif
652 ) 935 )
653 ) 936 )
654 { 937 {
655 /* emulate sendfile. this is a major pain in the ass */ 938 /* emulate sendfile. this is a major pain in the ass */
656 char buf[4096]; 939 dBUF;
940
657 res = 0; 941 res = 0;
658 942
659 while (count) 943 while (count)
660 { 944 {
661 ssize_t cnt; 945 ssize_t cnt;
662 946
663 cnt = pread (ifd, buf, count > 4096 ? 4096 : count, offset); 947 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
664 948
665 if (cnt <= 0) 949 if (cnt <= 0)
666 { 950 {
667 if (cnt && !res) res = -1; 951 if (cnt && !res) res = -1;
668 break; 952 break;
669 } 953 }
670 954
671 cnt = write (ofd, buf, cnt); 955 cnt = write (ofd, aio_buf, cnt);
672 956
673 if (cnt <= 0) 957 if (cnt <= 0)
674 { 958 {
675 if (cnt && !res) res = -1; 959 if (cnt && !res) res = -1;
676 break; 960 break;
684 968
685 return res; 969 return res;
686} 970}
687 971
688/* read a full directory */ 972/* read a full directory */
689static int scandir_ (const char *path, void **namesp) 973static void scandir_ (aio_req req, worker *self)
690{ 974{
691 DIR *dirp = opendir (path); 975 DIR *dirp;
692 union 976 union
693 { 977 {
694 struct dirent d; 978 struct dirent d;
695 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1]; 979 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
696 } u; 980 } *u;
697 struct dirent *entp; 981 struct dirent *entp;
698 char *name, *names; 982 char *name, *names;
699 int memlen = 4096; 983 int memlen = 4096;
700 int memofs = 0; 984 int memofs = 0;
701 int res = 0; 985 int res = 0;
702 int errorno; 986 int errorno;
703 987
704 if (!dirp) 988 LOCK (wrklock);
705 return -1; 989 self->dirp = dirp = opendir (req->dataptr);
706 990 self->dbuf = u = malloc (sizeof (*u));
707 names = malloc (memlen); 991 req->data2ptr = names = malloc (memlen);
992 UNLOCK (wrklock);
993
994 if (dirp && u && names)
995 for (;;)
996 {
997 errno = 0;
998 readdir_r (dirp, &u->d, &entp);
999
1000 if (!entp)
1001 break;
1002
1003 name = entp->d_name;
1004
1005 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
1006 {
1007 int len = strlen (name) + 1;
1008
1009 res++;
1010
1011 while (memofs + len > memlen)
1012 {
1013 memlen *= 2;
1014 LOCK (wrklock);
1015 req->data2ptr = names = realloc (names, memlen);
1016 UNLOCK (wrklock);
1017
1018 if (!names)
1019 break;
1020 }
1021
1022 memcpy (names + memofs, name, len);
1023 memofs += len;
1024 }
1025 }
1026
1027 if (errno)
1028 res = -1;
1029
1030 req->result = res;
1031}
1032
1033/*****************************************************************************/
1034
1035static void *aio_proc (void *thr_arg)
1036{
1037 aio_req req;
1038 struct timespec ts;
1039 worker *self = (worker *)thr_arg;
1040
1041 /* try to distribute timeouts somewhat evenly */
1042 ts.tv_nsec = (((unsigned long)self + (unsigned long)ts.tv_sec) & 1023UL)
1043 * (1000000000UL / 1024UL);
708 1044
709 for (;;) 1045 for (;;)
710 { 1046 {
711 errno = 0, readdir_r (dirp, &u.d, &entp); 1047 ts.tv_sec = time (0) + IDLE_TIMEOUT;
712 1048
713 if (!entp) 1049 LOCK (reqlock);
714 break;
715
716 name = entp->d_name;
717
718 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
719 {
720 int len = strlen (name) + 1;
721
722 res++;
723
724 while (memofs + len > memlen)
725 {
726 memlen *= 2;
727 names = realloc (names, memlen);
728 if (!names)
729 break;
730 }
731
732 memcpy (names + memofs, name, len);
733 memofs += len;
734 }
735 }
736
737 errorno = errno;
738 closedir (dirp);
739
740 if (errorno)
741 {
742 free (names);
743 errno = errorno;
744 res = -1;
745 }
746
747 *namesp = (void *)names;
748 return res;
749}
750
751/*****************************************************************************/
752
753static void *aio_proc (void *thr_arg)
754{
755 aio_req req;
756 int type;
757
758 do
759 {
760 pthread_mutex_lock (&reqlock);
761 1050
762 for (;;) 1051 for (;;)
763 { 1052 {
764 req = reqs; 1053 self->req = req = reqq_shift (&req_queue);
765
766 if (reqs)
767 {
768 reqs = reqs->next;
769 if (!reqs) reqe = 0;
770 }
771 1054
772 if (req) 1055 if (req)
773 break; 1056 break;
774 1057
1058 ++idle;
1059
1060 if (pthread_cond_timedwait (&reqwait, &reqlock, &ts)
1061 == ETIMEDOUT)
1062 {
1063 if (idle > max_idle)
1064 {
1065 --idle;
1066 UNLOCK (reqlock);
1067 LOCK (wrklock);
1068 --started;
1069 UNLOCK (wrklock);
1070 goto quit;
1071 }
1072
1073 /* we are allowed to idle, so do so without any timeout */
775 pthread_cond_wait (&reqwait, &reqlock); 1074 pthread_cond_wait (&reqwait, &reqlock);
1075 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1076 }
1077
1078 --idle;
776 } 1079 }
777 1080
778 pthread_mutex_unlock (&reqlock); 1081 --nready;
1082
1083 UNLOCK (reqlock);
779 1084
780 errno = 0; /* strictly unnecessary */ 1085 errno = 0; /* strictly unnecessary */
781 1086
782 if (!req->cancelled) 1087 if (!(req->flags & FLAG_CANCELLED))
783 switch (req->type) 1088 switch (req->type)
784 { 1089 {
785 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break; 1090 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
786 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break; 1091 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
787 1092
788 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break; 1093 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
789 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length); break; 1094 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length, self); break;
790 1095
791 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break; 1096 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break;
792 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break; 1097 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break;
793 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break; 1098 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break;
794 1099
797 case REQ_UNLINK: req->result = unlink (req->dataptr); break; 1102 case REQ_UNLINK: req->result = unlink (req->dataptr); break;
798 case REQ_RMDIR: req->result = rmdir (req->dataptr); break; 1103 case REQ_RMDIR: req->result = rmdir (req->dataptr); break;
799 case REQ_RENAME: req->result = rename (req->data2ptr, req->dataptr); break; 1104 case REQ_RENAME: req->result = rename (req->data2ptr, req->dataptr); break;
800 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break; 1105 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break;
801 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break; 1106 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break;
1107 case REQ_MKNOD: req->result = mknod (req->data2ptr, req->mode, (dev_t)req->offset); break;
802 1108
803 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break; 1109 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
804 case REQ_FSYNC: req->result = fsync (req->fd); break; 1110 case REQ_FSYNC: req->result = fsync (req->fd); break;
805 case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break; 1111 case REQ_READDIR: scandir_ (req, self); break;
806 1112
807 case REQ_SLEEP: 1113 case REQ_BUSY:
808 { 1114 {
809 struct timeval tv; 1115 struct timeval tv;
810 1116
811 tv.tv_sec = req->fd; 1117 tv.tv_sec = req->fd;
812 tv.tv_usec = req->fd2; 1118 tv.tv_usec = req->fd2;
813 1119
814 req->result = select (0, 0, 0, 0, &tv); 1120 req->result = select (0, 0, 0, 0, &tv);
815 } 1121 }
816 1122
1123 case REQ_GROUP:
1124 case REQ_NOP:
1125 break;
1126
817 case REQ_QUIT: 1127 case REQ_QUIT:
818 break; 1128 goto quit;
819 1129
820 default: 1130 default:
821 req->result = ENOSYS; 1131 req->result = ENOSYS;
822 break; 1132 break;
823 } 1133 }
824 1134
825 req->errorno = errno; 1135 req->errorno = errno;
826 1136
827 pthread_mutex_lock (&reslock); 1137 LOCK (reslock);
828 1138
829 req->next = 0; 1139 ++npending;
830 1140
831 if (rese) 1141 if (!reqq_push (&res_queue, req))
832 {
833 rese->next = req;
834 rese = req;
835 }
836 else
837 {
838 rese = ress = req;
839
840 /* write a dummy byte to the pipe so fh becomes ready */ 1142 /* write a dummy byte to the pipe so fh becomes ready */
841 write (respipe [1], &respipe, 1); 1143 write (respipe [1], &respipe, 1);
842 }
843 1144
844 pthread_mutex_unlock (&reslock); 1145 self->req = 0;
1146 worker_clear (self);
1147
1148 UNLOCK (reslock);
845 } 1149 }
846 while (type != REQ_QUIT); 1150
1151quit:
1152 LOCK (wrklock);
1153 worker_free (self);
1154 UNLOCK (wrklock);
847 1155
848 return 0; 1156 return 0;
849} 1157}
850 1158
851/*****************************************************************************/ 1159/*****************************************************************************/
852 1160
853static void atfork_prepare (void) 1161static void atfork_prepare (void)
854{ 1162{
855 pthread_mutex_lock (&reqlock); 1163 LOCK (wrklock);
856 pthread_mutex_lock (&reslock); 1164 LOCK (reqlock);
1165 LOCK (reslock);
857#if !HAVE_PREADWRITE 1166#if !HAVE_PREADWRITE
858 pthread_mutex_lock (&preadwritelock); 1167 LOCK (preadwritelock);
859#endif 1168#endif
860#if !HAVE_READDIR_R 1169#if !HAVE_READDIR_R
861 pthread_mutex_lock (&readdirlock); 1170 LOCK (readdirlock);
862#endif 1171#endif
863} 1172}
864 1173
865static void atfork_parent (void) 1174static void atfork_parent (void)
866{ 1175{
867#if !HAVE_READDIR_R 1176#if !HAVE_READDIR_R
868 pthread_mutex_unlock (&readdirlock); 1177 UNLOCK (readdirlock);
869#endif 1178#endif
870#if !HAVE_PREADWRITE 1179#if !HAVE_PREADWRITE
871 pthread_mutex_unlock (&preadwritelock); 1180 UNLOCK (preadwritelock);
872#endif 1181#endif
873 pthread_mutex_unlock (&reslock); 1182 UNLOCK (reslock);
874 pthread_mutex_unlock (&reqlock); 1183 UNLOCK (reqlock);
1184 UNLOCK (wrklock);
875} 1185}
876 1186
877static void atfork_child (void) 1187static void atfork_child (void)
878{ 1188{
879 aio_req prv; 1189 aio_req prv;
880 1190
1191 while (prv = reqq_shift (&req_queue))
1192 req_free (prv);
1193
1194 while (prv = reqq_shift (&res_queue))
1195 req_free (prv);
1196
1197 while (wrk_first.next != &wrk_first)
1198 {
1199 worker *wrk = wrk_first.next;
1200
1201 if (wrk->req)
1202 req_free (wrk->req);
1203
1204 worker_clear (wrk);
1205 worker_free (wrk);
1206 }
1207
881 started = 0; 1208 started = 0;
882 1209 idle = 0;
883 while (reqs) 1210 nreqs = 0;
884 { 1211 nready = 0;
885 prv = reqs; 1212 npending = 0;
886 reqs = prv->next;
887 req_free (prv);
888 }
889
890 reqs = reqe = 0;
891
892 while (ress)
893 {
894 prv = ress;
895 ress = prv->next;
896 req_free (prv);
897 }
898
899 ress = rese = 0;
900 1213
901 close (respipe [0]); 1214 close (respipe [0]);
902 close (respipe [1]); 1215 close (respipe [1]);
903 create_pipe (); 1216 create_pipe ();
904 1217
905 atfork_parent (); 1218 atfork_parent ();
906} 1219}
907 1220
908#define dREQ \ 1221#define dREQ \
909 aio_req req; \ 1222 aio_req req; \
1223 int req_pri = next_pri; \
1224 next_pri = DEFAULT_PRI + PRI_BIAS; \
910 \ 1225 \
911 if (SvOK (callback) && !SvROK (callback)) \ 1226 if (SvOK (callback) && !SvROK (callback)) \
912 croak ("callback must be undef or of reference type"); \ 1227 croak ("callback must be undef or of reference type"); \
913 \ 1228 \
914 Newz (0, req, 1, aio_cb); \ 1229 Newz (0, req, 1, aio_cb); \
915 if (!req) \ 1230 if (!req) \
916 croak ("out of memory during aio_req allocation"); \ 1231 croak ("out of memory during aio_req allocation"); \
917 \ 1232 \
918 req->callback = newSVsv (callback) 1233 req->callback = newSVsv (callback); \
1234 req->pri = req_pri
919 1235
920#define REQ_SEND \ 1236#define REQ_SEND \
921 req_send (req); \ 1237 req_send (req); \
922 \ 1238 \
923 if (GIMME_V != G_VOID) \ 1239 if (GIMME_V != G_VOID) \
928PROTOTYPES: ENABLE 1244PROTOTYPES: ENABLE
929 1245
930BOOT: 1246BOOT:
931{ 1247{
932 HV *stash = gv_stashpv ("IO::AIO", 1); 1248 HV *stash = gv_stashpv ("IO::AIO", 1);
1249
933 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV)); 1250 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV));
934 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY)); 1251 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
935 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY)); 1252 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
1253 newCONSTSUB (stash, "O_CREAT", newSViv (O_CREAT));
1254 newCONSTSUB (stash, "O_TRUNC", newSViv (O_TRUNC));
1255 newCONSTSUB (stash, "S_IFIFO", newSViv (S_IFIFO));
936 1256
937 create_pipe (); 1257 create_pipe ();
938 pthread_atfork (atfork_prepare, atfork_parent, atfork_child); 1258 pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
939} 1259}
940 1260
941void 1261void
942min_parallel (nthreads) 1262max_poll_reqs (int nreqs)
943 int nthreads
944 PROTOTYPE: $ 1263 PROTOTYPE: $
1264 CODE:
1265 max_poll_reqs = nreqs;
945 1266
946void 1267void
947max_parallel (nthreads) 1268max_poll_time (double nseconds)
948 int nthreads
949 PROTOTYPE: $ 1269 PROTOTYPE: $
1270 CODE:
1271 max_poll_time = nseconds * AIO_TICKS;
1272
1273void
1274min_parallel (int nthreads)
1275 PROTOTYPE: $
1276
1277void
1278max_parallel (int nthreads)
1279 PROTOTYPE: $
1280
1281void
1282max_idle (int nthreads)
1283 PROTOTYPE: $
1284 CODE:
1285 set_max_idle (nthreads);
950 1286
951int 1287int
952max_outstanding (nreqs) 1288max_outstanding (int maxreqs)
953 int nreqs 1289 PROTOTYPE: $
954 PROTOTYPE: $
955 CODE: 1290 CODE:
956 RETVAL = max_outstanding; 1291 RETVAL = max_outstanding;
957 max_outstanding = nreqs; 1292 max_outstanding = maxreqs;
1293 OUTPUT:
1294 RETVAL
958 1295
959void 1296void
960aio_open (pathname,flags,mode,callback=&PL_sv_undef) 1297aio_open (pathname,flags,mode,callback=&PL_sv_undef)
961 SV * pathname 1298 SV * pathname
962 int flags 1299 int flags
1177 1514
1178 REQ_SEND; 1515 REQ_SEND;
1179} 1516}
1180 1517
1181void 1518void
1519aio_mknod (pathname,mode,dev,callback=&PL_sv_undef)
1520 SV * pathname
1521 SV * callback
1522 UV mode
1523 UV dev
1524 PPCODE:
1525{
1526 dREQ;
1527
1528 req->type = REQ_MKNOD;
1529 req->data = newSVsv (pathname);
1530 req->dataptr = SvPVbyte_nolen (req->data);
1531 req->mode = (mode_t)mode;
1532 req->offset = dev;
1533
1534 REQ_SEND;
1535}
1536
1537void
1182aio_sleep (delay,callback=&PL_sv_undef) 1538aio_busy (delay,callback=&PL_sv_undef)
1183 double delay 1539 double delay
1184 SV * callback 1540 SV * callback
1185 PPCODE: 1541 PPCODE:
1186{ 1542{
1187 dREQ; 1543 dREQ;
1188 1544
1189 req->type = REQ_SLEEP; 1545 req->type = REQ_BUSY;
1190 req->fd = delay < 0. ? 0 : delay; 1546 req->fd = delay < 0. ? 0 : delay;
1191 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd); 1547 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd);
1192 1548
1193 REQ_SEND; 1549 REQ_SEND;
1194} 1550}
1198 SV * callback 1554 SV * callback
1199 PROTOTYPE: ;$ 1555 PROTOTYPE: ;$
1200 PPCODE: 1556 PPCODE:
1201{ 1557{
1202 dREQ; 1558 dREQ;
1559
1203 req->type = REQ_GROUP; 1560 req->type = REQ_GROUP;
1204 req_send (req); 1561 req_send (req);
1562
1205 XPUSHs (req_sv (req, AIO_GRP_KLASS)); 1563 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1206} 1564}
1565
1566void
1567aio_nop (callback=&PL_sv_undef)
1568 SV * callback
1569 PPCODE:
1570{
1571 dREQ;
1572
1573 req->type = REQ_NOP;
1574
1575 REQ_SEND;
1576}
1577
1578int
1579aioreq_pri (int pri = 0)
1580 PROTOTYPE: ;$
1581 CODE:
1582 RETVAL = next_pri - PRI_BIAS;
1583 if (items > 0)
1584 {
1585 if (pri < PRI_MIN) pri = PRI_MIN;
1586 if (pri > PRI_MAX) pri = PRI_MAX;
1587 next_pri = pri + PRI_BIAS;
1588 }
1589 OUTPUT:
1590 RETVAL
1591
1592void
1593aioreq_nice (int nice = 0)
1594 CODE:
1595 nice = next_pri - nice;
1596 if (nice < PRI_MIN) nice = PRI_MIN;
1597 if (nice > PRI_MAX) nice = PRI_MAX;
1598 next_pri = nice + PRI_BIAS;
1207 1599
1208void 1600void
1209flush () 1601flush ()
1210 PROTOTYPE: 1602 PROTOTYPE:
1211 CODE: 1603 CODE:
1212 while (nreqs) 1604 while (nreqs)
1213 { 1605 {
1214 poll_wait (); 1606 poll_wait ();
1215 poll_cb (); 1607 poll_cb (0);
1216 } 1608 }
1217 1609
1218void 1610void
1219poll() 1611poll()
1220 PROTOTYPE: 1612 PROTOTYPE:
1221 CODE: 1613 CODE:
1222 if (nreqs) 1614 if (nreqs)
1223 { 1615 {
1224 poll_wait (); 1616 poll_wait ();
1225 poll_cb (); 1617 poll_cb (0);
1226 } 1618 }
1227 1619
1228int 1620int
1229poll_fileno() 1621poll_fileno()
1230 PROTOTYPE: 1622 PROTOTYPE:
1254 CODE: 1646 CODE:
1255 RETVAL = nreqs; 1647 RETVAL = nreqs;
1256 OUTPUT: 1648 OUTPUT:
1257 RETVAL 1649 RETVAL
1258 1650
1259PROTOTYPES: DISABLE 1651int
1260 1652nready()
1261MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1262
1263void
1264cancel (aio_req_ornot req)
1265 PROTOTYPE: 1653 PROTOTYPE:
1266 CODE: 1654 CODE:
1655 RETVAL = get_nready ();
1656 OUTPUT:
1657 RETVAL
1658
1659int
1660npending()
1661 PROTOTYPE:
1662 CODE:
1663 RETVAL = get_npending ();
1664 OUTPUT:
1665 RETVAL
1666
1667int
1668nthreads()
1669 PROTOTYPE:
1670 CODE:
1671 if (WORDACCESS_UNSAFE) LOCK (wrklock);
1672 RETVAL = started;
1673 if (WORDACCESS_UNSAFE) UNLOCK (wrklock);
1674 OUTPUT:
1675 RETVAL
1676
1677PROTOTYPES: DISABLE
1678
1679MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1680
1681void
1682cancel (aio_req_ornot req)
1683 CODE:
1267 req_cancel (req); 1684 req_cancel (req);
1685
1686void
1687cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1688 CODE:
1689 SvREFCNT_dec (req->callback);
1690 req->callback = newSVsv (callback);
1268 1691
1269MODULE = IO::AIO PACKAGE = IO::AIO::GRP 1692MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1270 1693
1271void 1694void
1272add (aio_req grp, ...) 1695add (aio_req grp, ...)
1273 PPCODE: 1696 PPCODE:
1274{ 1697{
1275 int i; 1698 int i;
1699 aio_req req;
1276 1700
1277 if (grp->fd == 2) 1701 if (grp->fd == 2)
1278 croak ("cannot add requests to IO::AIO::GRP after the group finished"); 1702 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1279 1703
1280 for (i = 1; i < items; ++i ) 1704 for (i = 1; i < items; ++i )
1281 { 1705 {
1282 if (GIMME_V != G_VOID) 1706 if (GIMME_V != G_VOID)
1283 XPUSHs (sv_2mortal (newSVsv (ST (i)))); 1707 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1284 1708
1285 aio_req req = SvAIO_REQ (ST (i)); 1709 req = SvAIO_REQ (ST (i));
1286 1710
1287 if (req) 1711 if (req)
1288 { 1712 {
1289 ++grp->length; 1713 ++grp->length;
1290 req->grp = grp; 1714 req->grp = grp;
1299 } 1723 }
1300 } 1724 }
1301} 1725}
1302 1726
1303void 1727void
1728cancel_subs (aio_req_ornot req)
1729 CODE:
1730 req_cancel_subs (req);
1731
1732void
1304result (aio_req grp, ...) 1733result (aio_req grp, ...)
1305 CODE: 1734 CODE:
1306{ 1735{
1307 int i; 1736 int i;
1308 AV *av = newAV (); 1737 AV *av;
1309 1738
1739 grp->errorno = errno;
1740
1741 av = newAV ();
1742
1310 for (i = 1; i < items; ++i ) 1743 for (i = 1; i < items; ++i )
1311 av_push (av, newSVsv (ST (i))); 1744 av_push (av, newSVsv (ST (i)));
1312 1745
1313 SvREFCNT_dec (grp->data); 1746 SvREFCNT_dec (grp->data);
1314 grp->data = (SV *)av; 1747 grp->data = (SV *)av;
1315} 1748}
1316 1749
1317void 1750void
1751errno (aio_req grp, int errorno = errno)
1752 CODE:
1753 grp->errorno = errorno;
1754
1755void
1318feeder_limit (aio_req grp, int limit) 1756limit (aio_req grp, int limit)
1319 CODE: 1757 CODE:
1320 grp->fd2 = limit; 1758 grp->fd2 = limit;
1321 aio_grp_feed (grp); 1759 aio_grp_feed (grp);
1322 1760
1323void 1761void
1324set_feeder (aio_req grp, SV *callback=&PL_sv_undef) 1762feed (aio_req grp, SV *callback=&PL_sv_undef)
1325 CODE: 1763 CODE:
1326{ 1764{
1327 SvREFCNT_dec (grp->fh2); 1765 SvREFCNT_dec (grp->fh2);
1328 grp->fh2 = newSVsv (callback); 1766 grp->fh2 = newSVsv (callback);
1329 1767
