|
|
1 | #if __linux |
|
|
2 | # define _GNU_SOURCE |
|
|
3 | #endif |
|
|
4 | |
1 | #define _REENTRANT 1 |
5 | #define _REENTRANT 1 |
|
|
6 | |
2 | #include <errno.h> |
7 | #include <errno.h> |
3 | |
8 | |
4 | #include "EXTERN.h" |
9 | #include "EXTERN.h" |
5 | #include "perl.h" |
10 | #include "perl.h" |
6 | #include "XSUB.h" |
11 | #include "XSUB.h" |
… | |
… | |
41 | # define NAME_MAX 4096 |
46 | # define NAME_MAX 4096 |
42 | #endif |
47 | #endif |
43 | |
48 | |
44 | #if __ia64 |
49 | #if __ia64 |
45 | # define STACKSIZE 65536 |
50 | # define STACKSIZE 65536 |
|
|
51 | #elif __i386 || __x86_64 /* 16k is unreasonably high :( */ |
|
|
52 | # define STACKSIZE PTHREAD_STACK_MIN |
46 | #else |
53 | #else |
47 | # define STACKSIZE 8192 |
54 | # define STACKSIZE 16384 |
48 | #endif |
55 | #endif |
|
|
56 | |
|
|
57 | /* buffer size for various temporary buffers */ |
|
|
58 | #define AIO_BUFSIZE 65536 |
|
|
59 | |
|
|
60 | #define dBUF \ |
|
|
61 | char *aio_buf = malloc (AIO_BUFSIZE); \ |
|
|
62 | if (!aio_buf) \ |
|
|
63 | return -1; |
|
|
64 | |
|
|
65 | #define fBUF free (aio_buf) |
49 | |
66 | |
50 | enum { |
67 | enum { |
51 | REQ_QUIT, |
68 | REQ_QUIT, |
52 | REQ_OPEN, REQ_CLOSE, |
69 | REQ_OPEN, REQ_CLOSE, |
53 | REQ_READ, REQ_WRITE, REQ_READAHEAD, |
70 | REQ_READ, REQ_WRITE, REQ_READAHEAD, |
… | |
… | |
55 | REQ_STAT, REQ_LSTAT, REQ_FSTAT, |
72 | REQ_STAT, REQ_LSTAT, REQ_FSTAT, |
56 | REQ_FSYNC, REQ_FDATASYNC, |
73 | REQ_FSYNC, REQ_FDATASYNC, |
57 | REQ_UNLINK, REQ_RMDIR, REQ_RENAME, |
74 | REQ_UNLINK, REQ_RMDIR, REQ_RENAME, |
58 | REQ_READDIR, |
75 | REQ_READDIR, |
59 | REQ_LINK, REQ_SYMLINK, |
76 | REQ_LINK, REQ_SYMLINK, |
60 | REQ_SLEEP, |
77 | REQ_GROUP, REQ_NOP, |
61 | REQ_GROUP, |
78 | REQ_BUSY, |
62 | }; |
79 | }; |
63 | |
80 | |
64 | #define AIO_REQ_KLASS "IO::AIO::REQ" |
81 | #define AIO_REQ_KLASS "IO::AIO::REQ" |
65 | #define AIO_GRP_KLASS "IO::AIO::GRP" |
82 | #define AIO_GRP_KLASS "IO::AIO::GRP" |
66 | |
83 | |
67 | typedef struct aio_cb |
84 | typedef struct aio_cb |
68 | { |
85 | { |
69 | struct aio_cb *volatile next; |
86 | struct aio_cb *volatile next; |
70 | |
|
|
71 | struct aio_cb *grp, *grp_prev, *grp_next, *grp_first; |
|
|
72 | |
|
|
73 | SV *self; /* the perl counterpart of this request, if any */ |
|
|
74 | |
87 | |
75 | SV *data, *callback; |
88 | SV *data, *callback; |
76 | SV *fh, *fh2; |
89 | SV *fh, *fh2; |
77 | void *dataptr, *data2ptr; |
90 | void *dataptr, *data2ptr; |
78 | Stat_t *statdata; |
91 | Stat_t *statdata; |
79 | off_t offset; |
92 | off_t offset; |
80 | size_t length; |
93 | size_t length; |
81 | ssize_t result; |
94 | ssize_t result; |
82 | |
95 | |
|
|
96 | STRLEN dataoffset; |
83 | int type; |
97 | int type; |
84 | int fd, fd2; |
98 | int fd, fd2; |
85 | int errorno; |
99 | int errorno; |
86 | STRLEN dataoffset; |
|
|
87 | mode_t mode; /* open */ |
100 | mode_t mode; /* open */ |
|
|
101 | |
88 | unsigned char cancelled; |
102 | unsigned char flags; |
|
|
103 | unsigned char pri; |
|
|
104 | |
|
|
105 | SV *self; /* the perl counterpart of this request, if any */ |
|
|
106 | struct aio_cb *grp, *grp_prev, *grp_next, *grp_first; |
89 | } aio_cb; |
107 | } aio_cb; |
|
|
108 | |
|
|
109 | enum { |
|
|
110 | FLAG_CANCELLED = 0x01, |
|
|
111 | }; |
90 | |
112 | |
91 | typedef aio_cb *aio_req; |
113 | typedef aio_cb *aio_req; |
92 | typedef aio_cb *aio_req_ornot; |
114 | typedef aio_cb *aio_req_ornot; |
|
|
115 | |
|
|
116 | enum { |
|
|
117 | PRI_MIN = -4, |
|
|
118 | PRI_MAX = 4, |
|
|
119 | |
|
|
120 | DEFAULT_PRI = 0, |
|
|
121 | PRI_BIAS = -PRI_MIN, |
|
|
122 | NUM_PRI = PRI_MAX + PRI_BIAS + 1, |
|
|
123 | }; |
|
|
124 | |
|
|
125 | static int next_pri = DEFAULT_PRI + PRI_BIAS; |
93 | |
126 | |
94 | static int started, wanted; |
127 | static int started, wanted; |
95 | static volatile int nreqs; |
128 | static volatile int nreqs; |
96 | static int max_outstanding = 1<<30; |
129 | static int max_outstanding = 1<<30; |
97 | static int respipe [2]; |
130 | static int respipe [2]; |
98 | |
131 | |
|
|
132 | #if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP) |
|
|
133 | # define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP |
|
|
134 | #else |
|
|
135 | # define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER |
|
|
136 | #endif |
|
|
137 | |
99 | static pthread_mutex_t reslock = PTHREAD_MUTEX_INITIALIZER; |
138 | static pthread_mutex_t reslock = AIO_MUTEX_INIT; |
100 | static pthread_mutex_t reqlock = PTHREAD_MUTEX_INITIALIZER; |
139 | static pthread_mutex_t reqlock = AIO_MUTEX_INIT; |
101 | static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER; |
140 | static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER; |
102 | |
141 | |
103 | static volatile aio_req reqs, reqe; /* queue start, queue end */ |
142 | /* |
104 | static volatile aio_req ress, rese; /* queue start, queue end */ |
143 | * a somewhat faster data structure might be nice, but |
|
|
144 | * with 8 priorities this actually needs <20 insns |
|
|
145 | * per shift, the most expensive operation. |
|
|
146 | */ |
|
|
147 | typedef struct { |
|
|
148 | aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */ |
|
|
149 | int size; |
|
|
150 | } reqq; |
|
|
151 | |
|
|
152 | static reqq req_queue; |
|
|
153 | static reqq res_queue; |
|
|
154 | |
|
|
155 | int reqq_push (reqq *q, aio_req req) |
|
|
156 | { |
|
|
157 | int pri = req->pri; |
|
|
158 | req->next = 0; |
|
|
159 | |
|
|
160 | if (q->qe[pri]) |
|
|
161 | { |
|
|
162 | q->qe[pri]->next = req; |
|
|
163 | q->qe[pri] = req; |
|
|
164 | } |
|
|
165 | else |
|
|
166 | q->qe[pri] = q->qs[pri] = req; |
|
|
167 | |
|
|
168 | return q->size++; |
|
|
169 | } |
|
|
170 | |
|
|
171 | aio_req reqq_shift (reqq *q) |
|
|
172 | { |
|
|
173 | int pri; |
|
|
174 | |
|
|
175 | if (!q->size) |
|
|
176 | return 0; |
|
|
177 | |
|
|
178 | --q->size; |
|
|
179 | |
|
|
180 | for (pri = NUM_PRI; pri--; ) |
|
|
181 | { |
|
|
182 | aio_req req = q->qs[pri]; |
|
|
183 | |
|
|
184 | if (req) |
|
|
185 | { |
|
|
186 | if (!(q->qs[pri] = req->next)) |
|
|
187 | q->qe[pri] = 0; |
|
|
188 | |
|
|
189 | return req; |
|
|
190 | } |
|
|
191 | } |
|
|
192 | |
|
|
193 | abort (); |
|
|
194 | } |
105 | |
195 | |
106 | static void req_invoke (aio_req req); |
196 | static void req_invoke (aio_req req); |
107 | static void req_free (aio_req req); |
197 | static void req_free (aio_req req); |
108 | |
198 | |
109 | /* must be called at most once */ |
199 | /* must be called at most once */ |
… | |
… | |
118 | return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1))); |
208 | return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1))); |
119 | } |
209 | } |
120 | |
210 | |
121 | static aio_req SvAIO_REQ (SV *sv) |
211 | static aio_req SvAIO_REQ (SV *sv) |
122 | { |
212 | { |
|
|
213 | MAGIC *mg; |
|
|
214 | |
123 | if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv)) |
215 | if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv)) |
124 | croak ("object of class " AIO_REQ_KLASS " expected"); |
216 | croak ("object of class " AIO_REQ_KLASS " expected"); |
125 | |
217 | |
126 | MAGIC *mg = mg_find (SvRV (sv), PERL_MAGIC_ext); |
218 | mg = mg_find (SvRV (sv), PERL_MAGIC_ext); |
127 | |
219 | |
128 | return mg ? (aio_req)mg->mg_ptr : 0; |
220 | return mg ? (aio_req)mg->mg_ptr : 0; |
129 | } |
221 | } |
130 | |
222 | |
131 | static void aio_grp_feed (aio_req grp) |
223 | static void aio_grp_feed (aio_req grp) |
132 | { |
224 | { |
133 | while (grp->length < grp->fd2) |
225 | while (grp->length < grp->fd2 && !(grp->flags & FLAG_CANCELLED)) |
134 | { |
226 | { |
135 | int old_len = grp->length; |
227 | int old_len = grp->length; |
136 | |
228 | |
137 | if (grp->fh2 && SvOK (grp->fh2)) |
229 | if (grp->fh2 && SvOK (grp->fh2)) |
138 | { |
230 | { |
… | |
… | |
141 | ENTER; |
233 | ENTER; |
142 | SAVETMPS; |
234 | SAVETMPS; |
143 | PUSHMARK (SP); |
235 | PUSHMARK (SP); |
144 | XPUSHs (req_sv (grp, AIO_GRP_KLASS)); |
236 | XPUSHs (req_sv (grp, AIO_GRP_KLASS)); |
145 | PUTBACK; |
237 | PUTBACK; |
146 | call_sv (grp->fh2, G_VOID | G_EVAL); |
238 | call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR); |
147 | SPAGAIN; |
239 | SPAGAIN; |
148 | FREETMPS; |
240 | FREETMPS; |
149 | LEAVE; |
241 | LEAVE; |
150 | } |
242 | } |
151 | |
243 | |
… | |
… | |
174 | } |
266 | } |
175 | } |
267 | } |
176 | |
268 | |
177 | static void poll_wait () |
269 | static void poll_wait () |
178 | { |
270 | { |
179 | if (nreqs && !ress) |
|
|
180 | { |
|
|
181 | fd_set rfd; |
271 | fd_set rfd; |
|
|
272 | |
|
|
273 | while (nreqs) |
|
|
274 | { |
|
|
275 | int size; |
|
|
276 | #if !(__i386 || __x86_64) /* safe without sempahore on this archs */ |
|
|
277 | pthread_mutex_lock (&reslock); |
|
|
278 | #endif |
|
|
279 | size = res_queue.size; |
|
|
280 | #if !(__i386 || __x86_64) /* safe without sempahore on this archs */ |
|
|
281 | pthread_mutex_unlock (&reslock); |
|
|
282 | #endif |
|
|
283 | |
|
|
284 | if (size) |
|
|
285 | return; |
|
|
286 | |
182 | FD_ZERO(&rfd); |
287 | FD_ZERO(&rfd); |
183 | FD_SET(respipe [0], &rfd); |
288 | FD_SET(respipe [0], &rfd); |
184 | |
289 | |
185 | select (respipe [0] + 1, &rfd, 0, 0, 0); |
290 | select (respipe [0] + 1, &rfd, 0, 0, 0); |
186 | } |
291 | } |
187 | } |
292 | } |
188 | |
293 | |
189 | static void req_invoke (aio_req req) |
294 | static void req_invoke (aio_req req) |
190 | { |
295 | { |
191 | dSP; |
296 | dSP; |
192 | int errorno = errno; |
|
|
193 | |
297 | |
194 | if (req->cancelled || !SvOK (req->callback)) |
298 | if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback)) |
195 | return; |
299 | { |
196 | |
|
|
197 | errno = req->errorno; |
300 | errno = req->errorno; |
198 | |
301 | |
199 | ENTER; |
302 | ENTER; |
200 | SAVETMPS; |
303 | SAVETMPS; |
201 | PUSHMARK (SP); |
304 | PUSHMARK (SP); |
202 | EXTEND (SP, 1); |
305 | EXTEND (SP, 1); |
203 | |
306 | |
204 | switch (req->type) |
307 | switch (req->type) |
205 | { |
|
|
206 | case REQ_READDIR: |
|
|
207 | { |
308 | { |
208 | SV *rv = &PL_sv_undef; |
309 | case REQ_READDIR: |
209 | |
|
|
210 | if (req->result >= 0) |
|
|
211 | { |
310 | { |
212 | char *buf = req->data2ptr; |
311 | SV *rv = &PL_sv_undef; |
213 | AV *av = newAV (); |
|
|
214 | |
312 | |
215 | while (req->result) |
313 | if (req->result >= 0) |
216 | { |
314 | { |
|
|
315 | char *buf = req->data2ptr; |
|
|
316 | AV *av = newAV (); |
|
|
317 | |
|
|
318 | while (req->result) |
|
|
319 | { |
217 | SV *sv = newSVpv (buf, 0); |
320 | SV *sv = newSVpv (buf, 0); |
218 | |
321 | |
219 | av_push (av, sv); |
322 | av_push (av, sv); |
220 | buf += SvCUR (sv) + 1; |
323 | buf += SvCUR (sv) + 1; |
221 | req->result--; |
324 | req->result--; |
|
|
325 | } |
|
|
326 | |
|
|
327 | rv = sv_2mortal (newRV_noinc ((SV *)av)); |
222 | } |
328 | } |
223 | |
329 | |
224 | rv = sv_2mortal (newRV_noinc ((SV *)av)); |
330 | PUSHs (rv); |
225 | } |
331 | } |
|
|
332 | break; |
226 | |
333 | |
227 | PUSHs (rv); |
334 | case REQ_OPEN: |
|
|
335 | { |
|
|
336 | /* convert fd to fh */ |
|
|
337 | SV *fh; |
|
|
338 | |
|
|
339 | PUSHs (sv_2mortal (newSViv (req->result))); |
|
|
340 | PUTBACK; |
|
|
341 | call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL); |
|
|
342 | SPAGAIN; |
|
|
343 | |
|
|
344 | fh = SvREFCNT_inc (POPs); |
|
|
345 | |
|
|
346 | PUSHMARK (SP); |
|
|
347 | XPUSHs (sv_2mortal (fh)); |
|
|
348 | } |
|
|
349 | break; |
|
|
350 | |
|
|
351 | case REQ_GROUP: |
|
|
352 | req->fd = 2; /* mark group as finished */ |
|
|
353 | |
|
|
354 | if (req->data) |
|
|
355 | { |
|
|
356 | int i; |
|
|
357 | AV *av = (AV *)req->data; |
|
|
358 | |
|
|
359 | EXTEND (SP, AvFILL (av) + 1); |
|
|
360 | for (i = 0; i <= AvFILL (av); ++i) |
|
|
361 | PUSHs (*av_fetch (av, i, 0)); |
|
|
362 | } |
|
|
363 | break; |
|
|
364 | |
|
|
365 | case REQ_NOP: |
|
|
366 | case REQ_BUSY: |
|
|
367 | break; |
|
|
368 | |
|
|
369 | default: |
|
|
370 | PUSHs (sv_2mortal (newSViv (req->result))); |
|
|
371 | break; |
228 | } |
372 | } |
229 | break; |
|
|
230 | |
373 | |
231 | case REQ_OPEN: |
|
|
232 | { |
|
|
233 | /* convert fd to fh */ |
|
|
234 | SV *fh; |
|
|
235 | |
374 | |
236 | PUSHs (sv_2mortal (newSViv (req->result))); |
|
|
237 | PUTBACK; |
375 | PUTBACK; |
238 | call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL); |
|
|
239 | SPAGAIN; |
|
|
240 | |
|
|
241 | fh = SvREFCNT_inc (POPs); |
|
|
242 | |
|
|
243 | PUSHMARK (SP); |
|
|
244 | XPUSHs (sv_2mortal (fh)); |
|
|
245 | } |
|
|
246 | break; |
|
|
247 | |
|
|
248 | case REQ_GROUP: |
|
|
249 | req->fd = 2; /* mark group as finished */ |
|
|
250 | |
|
|
251 | if (req->data) |
|
|
252 | { |
|
|
253 | int i; |
|
|
254 | AV *av = (AV *)req->data; |
|
|
255 | |
|
|
256 | EXTEND (SP, AvFILL (av) + 1); |
|
|
257 | for (i = 0; i <= AvFILL (av); ++i) |
|
|
258 | PUSHs (*av_fetch (av, i, 0)); |
|
|
259 | } |
|
|
260 | break; |
|
|
261 | |
|
|
262 | case REQ_SLEEP: |
|
|
263 | break; |
|
|
264 | |
|
|
265 | default: |
|
|
266 | PUSHs (sv_2mortal (newSViv (req->result))); |
|
|
267 | break; |
|
|
268 | } |
|
|
269 | |
|
|
270 | |
|
|
271 | PUTBACK; |
|
|
272 | call_sv (req->callback, G_VOID | G_EVAL); |
376 | call_sv (req->callback, G_VOID | G_EVAL); |
273 | SPAGAIN; |
377 | SPAGAIN; |
274 | |
378 | |
275 | FREETMPS; |
379 | FREETMPS; |
276 | LEAVE; |
380 | LEAVE; |
277 | |
|
|
278 | errno = errorno; |
|
|
279 | |
|
|
280 | if (SvTRUE (ERRSV)) |
|
|
281 | { |
381 | } |
282 | req_free (req); |
|
|
283 | croak (0); |
|
|
284 | } |
|
|
285 | } |
|
|
286 | |
382 | |
287 | static void req_free (aio_req req) |
|
|
288 | { |
|
|
289 | if (req->grp) |
383 | if (req->grp) |
290 | { |
384 | { |
291 | aio_req grp = req->grp; |
385 | aio_req grp = req->grp; |
292 | |
386 | |
293 | /* unlink request */ |
387 | /* unlink request */ |
… | |
… | |
298 | grp->grp_first = req->grp_next; |
392 | grp->grp_first = req->grp_next; |
299 | |
393 | |
300 | aio_grp_dec (grp); |
394 | aio_grp_dec (grp); |
301 | } |
395 | } |
302 | |
396 | |
|
|
397 | if (SvTRUE (ERRSV)) |
|
|
398 | { |
|
|
399 | req_free (req); |
|
|
400 | croak (0); |
|
|
401 | } |
|
|
402 | } |
|
|
403 | |
|
|
404 | static void req_free (aio_req req) |
|
|
405 | { |
303 | if (req->self) |
406 | if (req->self) |
304 | { |
407 | { |
305 | sv_unmagic (req->self, PERL_MAGIC_ext); |
408 | sv_unmagic (req->self, PERL_MAGIC_ext); |
306 | SvREFCNT_dec (req->self); |
409 | SvREFCNT_dec (req->self); |
307 | } |
410 | } |
… | |
… | |
318 | Safefree (req); |
421 | Safefree (req); |
319 | } |
422 | } |
320 | |
423 | |
321 | static void req_cancel (aio_req req) |
424 | static void req_cancel (aio_req req) |
322 | { |
425 | { |
323 | req->cancelled = 1; |
426 | req->flags |= FLAG_CANCELLED; |
324 | |
427 | |
325 | if (req->type == REQ_GROUP) |
428 | if (req->type == REQ_GROUP) |
326 | { |
429 | { |
327 | aio_req sub; |
430 | aio_req sub; |
328 | |
431 | |
… | |
… | |
339 | aio_req req; |
442 | aio_req req; |
340 | |
443 | |
341 | for (;;) |
444 | for (;;) |
342 | { |
445 | { |
343 | pthread_mutex_lock (&reslock); |
446 | pthread_mutex_lock (&reslock); |
344 | req = ress; |
447 | req = reqq_shift (&res_queue); |
345 | |
448 | |
346 | if (req) |
449 | if (req) |
347 | { |
450 | { |
348 | ress = req->next; |
|
|
349 | |
|
|
350 | if (!ress) |
451 | if (!res_queue.size) |
351 | { |
452 | { |
352 | /* read any signals sent by the worker threads */ |
453 | /* read any signals sent by the worker threads */ |
353 | char buf [32]; |
454 | char buf [32]; |
354 | while (read (respipe [0], buf, 32) == 32) |
455 | while (read (respipe [0], buf, 32) == 32) |
355 | ; |
456 | ; |
356 | |
|
|
357 | rese = 0; |
|
|
358 | } |
457 | } |
359 | } |
458 | } |
360 | |
459 | |
361 | pthread_mutex_unlock (&reslock); |
460 | pthread_mutex_unlock (&reslock); |
362 | |
461 | |
… | |
… | |
425 | start_thread (); |
524 | start_thread (); |
426 | |
525 | |
427 | ++nreqs; |
526 | ++nreqs; |
428 | |
527 | |
429 | pthread_mutex_lock (&reqlock); |
528 | pthread_mutex_lock (&reqlock); |
430 | |
529 | reqq_push (&req_queue, req); |
431 | req->next = 0; |
|
|
432 | |
|
|
433 | if (reqe) |
|
|
434 | { |
|
|
435 | reqe->next = req; |
|
|
436 | reqe = req; |
|
|
437 | } |
|
|
438 | else |
|
|
439 | reqe = reqs = req; |
|
|
440 | |
|
|
441 | pthread_cond_signal (&reqwait); |
530 | pthread_cond_signal (&reqwait); |
442 | pthread_mutex_unlock (&reqlock); |
531 | pthread_mutex_unlock (&reqlock); |
443 | |
532 | |
444 | if (nreqs > max_outstanding) |
533 | if (nreqs > max_outstanding) |
445 | for (;;) |
534 | for (;;) |
… | |
… | |
454 | } |
543 | } |
455 | |
544 | |
456 | static void end_thread (void) |
545 | static void end_thread (void) |
457 | { |
546 | { |
458 | aio_req req; |
547 | aio_req req; |
|
|
548 | |
459 | Newz (0, req, 1, aio_cb); |
549 | Newz (0, req, 1, aio_cb); |
|
|
550 | |
460 | req->type = REQ_QUIT; |
551 | req->type = REQ_QUIT; |
|
|
552 | req->pri = PRI_MAX + PRI_BIAS; |
461 | |
553 | |
462 | req_send (req); |
554 | req_send (req); |
463 | } |
555 | } |
464 | |
556 | |
465 | static void min_parallel (int nthreads) |
557 | static void min_parallel (int nthreads) |
… | |
… | |
552 | #if !HAVE_READAHEAD |
644 | #if !HAVE_READAHEAD |
553 | # define readahead aio_readahead |
645 | # define readahead aio_readahead |
554 | |
646 | |
555 | static ssize_t readahead (int fd, off_t offset, size_t count) |
647 | static ssize_t readahead (int fd, off_t offset, size_t count) |
556 | { |
648 | { |
557 | char readahead_buf[4096]; |
649 | dBUF; |
558 | |
650 | |
559 | while (count > 0) |
651 | while (count > 0) |
560 | { |
652 | { |
561 | size_t len = count < sizeof (readahead_buf) ? count : sizeof (readahead_buf); |
653 | size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE; |
562 | |
654 | |
563 | pread (fd, readahead_buf, len, offset); |
655 | pread (fd, aio_buf, len, offset); |
564 | offset += len; |
656 | offset += len; |
565 | count -= len; |
657 | count -= len; |
566 | } |
658 | } |
|
|
659 | |
|
|
660 | fBUF; |
567 | |
661 | |
568 | errno = 0; |
662 | errno = 0; |
569 | } |
663 | } |
570 | #endif |
664 | #endif |
571 | |
665 | |
… | |
… | |
657 | #endif |
751 | #endif |
658 | ) |
752 | ) |
659 | ) |
753 | ) |
660 | { |
754 | { |
661 | /* emulate sendfile. this is a major pain in the ass */ |
755 | /* emulate sendfile. this is a major pain in the ass */ |
662 | char buf[4096]; |
756 | dBUF; |
|
|
757 | |
663 | res = 0; |
758 | res = 0; |
664 | |
759 | |
665 | while (count) |
760 | while (count) |
666 | { |
761 | { |
667 | ssize_t cnt; |
762 | ssize_t cnt; |
668 | |
763 | |
669 | cnt = pread (ifd, buf, count > 4096 ? 4096 : count, offset); |
764 | cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset); |
670 | |
765 | |
671 | if (cnt <= 0) |
766 | if (cnt <= 0) |
672 | { |
767 | { |
673 | if (cnt && !res) res = -1; |
768 | if (cnt && !res) res = -1; |
674 | break; |
769 | break; |
675 | } |
770 | } |
676 | |
771 | |
677 | cnt = write (ofd, buf, cnt); |
772 | cnt = write (ofd, aio_buf, cnt); |
678 | |
773 | |
679 | if (cnt <= 0) |
774 | if (cnt <= 0) |
680 | { |
775 | { |
681 | if (cnt && !res) res = -1; |
776 | if (cnt && !res) res = -1; |
682 | break; |
777 | break; |
… | |
… | |
684 | |
779 | |
685 | offset += cnt; |
780 | offset += cnt; |
686 | res += cnt; |
781 | res += cnt; |
687 | count -= cnt; |
782 | count -= cnt; |
688 | } |
783 | } |
|
|
784 | |
|
|
785 | fBUF; |
689 | } |
786 | } |
690 | |
787 | |
691 | return res; |
788 | return res; |
692 | } |
789 | } |
693 | |
790 | |
694 | /* read a full directory */ |
791 | /* read a full directory */ |
695 | static int scandir_ (const char *path, void **namesp) |
792 | static int scandir_ (const char *path, void **namesp) |
696 | { |
793 | { |
697 | DIR *dirp = opendir (path); |
794 | DIR *dirp; |
698 | union |
795 | union |
699 | { |
796 | { |
700 | struct dirent d; |
797 | struct dirent d; |
701 | char b [offsetof (struct dirent, d_name) + NAME_MAX + 1]; |
798 | char b [offsetof (struct dirent, d_name) + NAME_MAX + 1]; |
702 | } u; |
799 | } *u; |
703 | struct dirent *entp; |
800 | struct dirent *entp; |
704 | char *name, *names; |
801 | char *name, *names; |
705 | int memlen = 4096; |
802 | int memlen = 4096; |
706 | int memofs = 0; |
803 | int memofs = 0; |
707 | int res = 0; |
804 | int res = 0; |
708 | int errorno; |
805 | int errorno; |
709 | |
806 | |
|
|
807 | dirp = opendir (path); |
710 | if (!dirp) |
808 | if (!dirp) |
711 | return -1; |
809 | return -1; |
712 | |
810 | |
|
|
811 | u = malloc (sizeof (*u)); |
713 | names = malloc (memlen); |
812 | names = malloc (memlen); |
714 | |
813 | |
|
|
814 | if (u && names) |
715 | for (;;) |
815 | for (;;) |
716 | { |
816 | { |
|
|
817 | errno = 0; |
717 | errno = 0, readdir_r (dirp, &u.d, &entp); |
818 | readdir_r (dirp, &u->d, &entp); |
718 | |
819 | |
719 | if (!entp) |
820 | if (!entp) |
720 | break; |
821 | break; |
721 | |
822 | |
722 | name = entp->d_name; |
823 | name = entp->d_name; |
723 | |
824 | |
724 | if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2]))) |
825 | if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2]))) |
725 | { |
826 | { |
726 | int len = strlen (name) + 1; |
827 | int len = strlen (name) + 1; |
727 | |
828 | |
728 | res++; |
829 | res++; |
729 | |
830 | |
730 | while (memofs + len > memlen) |
831 | while (memofs + len > memlen) |
731 | { |
832 | { |
732 | memlen *= 2; |
833 | memlen *= 2; |
733 | names = realloc (names, memlen); |
834 | names = realloc (names, memlen); |
734 | if (!names) |
835 | if (!names) |
735 | break; |
836 | break; |
736 | } |
837 | } |
737 | |
838 | |
738 | memcpy (names + memofs, name, len); |
839 | memcpy (names + memofs, name, len); |
739 | memofs += len; |
840 | memofs += len; |
740 | } |
841 | } |
741 | } |
842 | } |
742 | |
843 | |
743 | errorno = errno; |
844 | errorno = errno; |
|
|
845 | free (u); |
744 | closedir (dirp); |
846 | closedir (dirp); |
745 | |
847 | |
746 | if (errorno) |
848 | if (errorno) |
747 | { |
849 | { |
748 | free (names); |
850 | free (names); |
… | |
… | |
765 | { |
867 | { |
766 | pthread_mutex_lock (&reqlock); |
868 | pthread_mutex_lock (&reqlock); |
767 | |
869 | |
768 | for (;;) |
870 | for (;;) |
769 | { |
871 | { |
770 | req = reqs; |
872 | req = reqq_shift (&req_queue); |
771 | |
|
|
772 | if (reqs) |
|
|
773 | { |
|
|
774 | reqs = reqs->next; |
|
|
775 | if (!reqs) reqe = 0; |
|
|
776 | } |
|
|
777 | |
873 | |
778 | if (req) |
874 | if (req) |
779 | break; |
875 | break; |
780 | |
876 | |
781 | pthread_cond_wait (&reqwait, &reqlock); |
877 | pthread_cond_wait (&reqwait, &reqlock); |
782 | } |
878 | } |
783 | |
879 | |
784 | pthread_mutex_unlock (&reqlock); |
880 | pthread_mutex_unlock (&reqlock); |
785 | |
881 | |
786 | errno = 0; /* strictly unnecessary */ |
882 | errno = 0; /* strictly unnecessary */ |
787 | |
|
|
788 | if (!req->cancelled) |
|
|
789 | switch (type = req->type) /* remember type for QUIT check */ |
883 | type = req->type; /* remember type for QUIT check */ |
|
|
884 | |
|
|
885 | if (!(req->flags & FLAG_CANCELLED)) |
|
|
886 | switch (type) |
790 | { |
887 | { |
791 | case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break; |
888 | case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break; |
792 | case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break; |
889 | case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break; |
793 | |
890 | |
794 | case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break; |
891 | case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break; |
… | |
… | |
808 | |
905 | |
809 | case REQ_FDATASYNC: req->result = fdatasync (req->fd); break; |
906 | case REQ_FDATASYNC: req->result = fdatasync (req->fd); break; |
810 | case REQ_FSYNC: req->result = fsync (req->fd); break; |
907 | case REQ_FSYNC: req->result = fsync (req->fd); break; |
811 | case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break; |
908 | case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break; |
812 | |
909 | |
813 | case REQ_SLEEP: |
910 | case REQ_BUSY: |
814 | { |
911 | { |
815 | struct timeval tv; |
912 | struct timeval tv; |
816 | |
913 | |
817 | tv.tv_sec = req->fd; |
914 | tv.tv_sec = req->fd; |
818 | tv.tv_usec = req->fd2; |
915 | tv.tv_usec = req->fd2; |
819 | |
916 | |
820 | req->result = select (0, 0, 0, 0, &tv); |
917 | req->result = select (0, 0, 0, 0, &tv); |
821 | } |
918 | } |
822 | |
919 | |
|
|
920 | case REQ_GROUP: |
|
|
921 | case REQ_NOP: |
823 | case REQ_QUIT: |
922 | case REQ_QUIT: |
824 | break; |
923 | break; |
825 | |
924 | |
826 | default: |
925 | default: |
827 | req->result = ENOSYS; |
926 | req->result = ENOSYS; |
… | |
… | |
830 | |
929 | |
831 | req->errorno = errno; |
930 | req->errorno = errno; |
832 | |
931 | |
833 | pthread_mutex_lock (&reslock); |
932 | pthread_mutex_lock (&reslock); |
834 | |
933 | |
835 | req->next = 0; |
934 | if (!reqq_push (&res_queue, req)) |
836 | |
|
|
837 | if (rese) |
|
|
838 | { |
|
|
839 | rese->next = req; |
|
|
840 | rese = req; |
|
|
841 | } |
|
|
842 | else |
|
|
843 | { |
|
|
844 | rese = ress = req; |
|
|
845 | |
|
|
846 | /* write a dummy byte to the pipe so fh becomes ready */ |
935 | /* write a dummy byte to the pipe so fh becomes ready */ |
847 | write (respipe [1], &respipe, 1); |
936 | write (respipe [1], &respipe, 1); |
848 | } |
|
|
849 | |
937 | |
850 | pthread_mutex_unlock (&reslock); |
938 | pthread_mutex_unlock (&reslock); |
851 | } |
939 | } |
852 | while (type != REQ_QUIT); |
940 | while (type != REQ_QUIT); |
853 | |
941 | |
… | |
… | |
884 | { |
972 | { |
885 | aio_req prv; |
973 | aio_req prv; |
886 | |
974 | |
887 | started = 0; |
975 | started = 0; |
888 | |
976 | |
889 | while (reqs) |
977 | while (prv = reqq_shift (&req_queue)) |
890 | { |
|
|
891 | prv = reqs; |
|
|
892 | reqs = prv->next; |
|
|
893 | req_free (prv); |
978 | req_free (prv); |
894 | } |
|
|
895 | |
979 | |
896 | reqs = reqe = 0; |
980 | while (prv = reqq_shift (&res_queue)) |
897 | |
|
|
898 | while (ress) |
|
|
899 | { |
|
|
900 | prv = ress; |
|
|
901 | ress = prv->next; |
|
|
902 | req_free (prv); |
981 | req_free (prv); |
903 | } |
982 | |
904 | |
|
|
905 | ress = rese = 0; |
|
|
906 | |
|
|
907 | close (respipe [0]); |
983 | close (respipe [0]); |
908 | close (respipe [1]); |
984 | close (respipe [1]); |
909 | create_pipe (); |
985 | create_pipe (); |
910 | |
986 | |
911 | atfork_parent (); |
987 | atfork_parent (); |
912 | } |
988 | } |
913 | |
989 | |
914 | #define dREQ \ |
990 | #define dREQ \ |
915 | aio_req req; \ |
991 | aio_req req; \ |
|
|
992 | int req_pri = next_pri; \ |
|
|
993 | next_pri = DEFAULT_PRI + PRI_BIAS; \ |
916 | \ |
994 | \ |
917 | if (SvOK (callback) && !SvROK (callback)) \ |
995 | if (SvOK (callback) && !SvROK (callback)) \ |
918 | croak ("callback must be undef or of reference type"); \ |
996 | croak ("callback must be undef or of reference type"); \ |
919 | \ |
997 | \ |
920 | Newz (0, req, 1, aio_cb); \ |
998 | Newz (0, req, 1, aio_cb); \ |
921 | if (!req) \ |
999 | if (!req) \ |
922 | croak ("out of memory during aio_req allocation"); \ |
1000 | croak ("out of memory during aio_req allocation"); \ |
923 | \ |
1001 | \ |
924 | req->callback = newSVsv (callback) |
1002 | req->callback = newSVsv (callback); \ |
|
|
1003 | req->pri = req_pri |
925 | |
1004 | |
926 | #define REQ_SEND \ |
1005 | #define REQ_SEND \ |
927 | req_send (req); \ |
1006 | req_send (req); \ |
928 | \ |
1007 | \ |
929 | if (GIMME_V != G_VOID) \ |
1008 | if (GIMME_V != G_VOID) \ |
… | |
… | |
1183 | |
1262 | |
1184 | REQ_SEND; |
1263 | REQ_SEND; |
1185 | } |
1264 | } |
1186 | |
1265 | |
1187 | void |
1266 | void |
1188 | aio_sleep (delay,callback=&PL_sv_undef) |
1267 | aio_busy (delay,callback=&PL_sv_undef) |
1189 | double delay |
1268 | double delay |
1190 | SV * callback |
1269 | SV * callback |
1191 | PPCODE: |
1270 | PPCODE: |
1192 | { |
1271 | { |
1193 | dREQ; |
1272 | dREQ; |
1194 | |
1273 | |
1195 | req->type = REQ_SLEEP; |
1274 | req->type = REQ_BUSY; |
1196 | req->fd = delay < 0. ? 0 : delay; |
1275 | req->fd = delay < 0. ? 0 : delay; |
1197 | req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd); |
1276 | req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd); |
1198 | |
1277 | |
1199 | REQ_SEND; |
1278 | REQ_SEND; |
1200 | } |
1279 | } |
… | |
… | |
1204 | SV * callback |
1283 | SV * callback |
1205 | PROTOTYPE: ;$ |
1284 | PROTOTYPE: ;$ |
1206 | PPCODE: |
1285 | PPCODE: |
1207 | { |
1286 | { |
1208 | dREQ; |
1287 | dREQ; |
|
|
1288 | |
1209 | req->type = REQ_GROUP; |
1289 | req->type = REQ_GROUP; |
1210 | req_send (req); |
1290 | req_send (req); |
|
|
1291 | |
1211 | XPUSHs (req_sv (req, AIO_GRP_KLASS)); |
1292 | XPUSHs (req_sv (req, AIO_GRP_KLASS)); |
1212 | } |
1293 | } |
|
|
1294 | |
|
|
void
aio_nop (callback=&PL_sv_undef)
	SV *	callback
	PPCODE:
{
	/* queue a request that performs no work in the worker thread
	 * (REQ_NOP is a no-op in the request switch); its only effect
	 * is to invoke the callback when the request completes */
	dREQ;

	req->type = REQ_NOP;

	REQ_SEND;
}
|
|
1306 | |
|
|
void
aioreq_pri (int pri = DEFAULT_PRI)
	CODE:
	/* set the priority for the next request: clamp to
	 * [PRI_MIN, PRI_MAX], then store with PRI_BIAS applied so the
	 * value can index the per-priority queue arrays directly */
	if (pri < PRI_MIN) pri = PRI_MIN;
	if (pri > PRI_MAX) pri = PRI_MAX;
	next_pri = pri + PRI_BIAS;
|
|
1313 | |
|
|
void
aioreq_nice (int nice = 0)
	CODE:
	/* adjust the next request's priority relative to the current one.
	 * NOTE(review): next_pri already has PRI_BIAS applied, yet the
	 * intermediate value is clamped against the UNBIASED PRI_MIN/PRI_MAX
	 * before the bias is added again — this looks like a double-bias;
	 * confirm the intended semantics against aioreq_pri */
	nice = next_pri - nice;
	if (nice < PRI_MIN) nice = PRI_MIN;
	if (nice > PRI_MAX) nice = PRI_MAX;
	next_pri = nice + PRI_BIAS;
1213 | |
1321 | |
1214 | void |
1322 | void |
1215 | flush () |
1323 | flush () |
1216 | PROTOTYPE: |
1324 | PROTOTYPE: |
1217 | CODE: |
1325 | CODE: |
… | |
… | |
1266 | |
1374 | |
1267 | MODULE = IO::AIO PACKAGE = IO::AIO::REQ |
1375 | MODULE = IO::AIO PACKAGE = IO::AIO::REQ |
1268 | |
1376 | |
1269 | void |
1377 | void |
1270 | cancel (aio_req_ornot req) |
1378 | cancel (aio_req_ornot req) |
1271 | PROTOTYPE: |
|
|
1272 | CODE: |
1379 | CODE: |
1273 | req_cancel (req); |
1380 | req_cancel (req); |
1274 | |
1381 | |
|
|
void
cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
	CODE:
	/* replace the request's callback: drop the reference to the old
	 * callback SV and store a fresh copy of the new one */
	SvREFCNT_dec (req->callback);
	req->callback = newSVsv (callback);
|
|
1387 | |
1275 | MODULE = IO::AIO PACKAGE = IO::AIO::GRP |
1388 | MODULE = IO::AIO PACKAGE = IO::AIO::GRP |
1276 | |
1389 | |
1277 | void |
1390 | void |
1278 | add (aio_req grp, ...) |
1391 | add (aio_req grp, ...) |
1279 | PPCODE: |
1392 | PPCODE: |
1280 | { |
1393 | { |
1281 | int i; |
1394 | int i; |
|
|
1395 | aio_req req; |
1282 | |
1396 | |
1283 | if (grp->fd == 2) |
1397 | if (grp->fd == 2) |
1284 | croak ("cannot add requests to IO::AIO::GRP after the group finished"); |
1398 | croak ("cannot add requests to IO::AIO::GRP after the group finished"); |
1285 | |
1399 | |
1286 | for (i = 1; i < items; ++i ) |
1400 | for (i = 1; i < items; ++i ) |
1287 | { |
1401 | { |
1288 | if (GIMME_V != G_VOID) |
1402 | if (GIMME_V != G_VOID) |
1289 | XPUSHs (sv_2mortal (newSVsv (ST (i)))); |
1403 | XPUSHs (sv_2mortal (newSVsv (ST (i)))); |
1290 | |
1404 | |
1291 | aio_req req = SvAIO_REQ (ST (i)); |
1405 | req = SvAIO_REQ (ST (i)); |
1292 | |
1406 | |
1293 | if (req) |
1407 | if (req) |
1294 | { |
1408 | { |
1295 | ++grp->length; |
1409 | ++grp->length; |
1296 | req->grp = grp; |
1410 | req->grp = grp; |
… | |
… | |
1319 | SvREFCNT_dec (grp->data); |
1433 | SvREFCNT_dec (grp->data); |
1320 | grp->data = (SV *)av; |
1434 | grp->data = (SV *)av; |
1321 | } |
1435 | } |
1322 | |
1436 | |
1323 | void |
1437 | void |
1324 | lock (aio_req grp) |
|
|
1325 | CODE: |
|
|
1326 | ++grp->length; |
|
|
1327 | |
|
|
1328 | void |
|
|
1329 | unlock (aio_req grp) |
|
|
1330 | CODE: |
|
|
1331 | aio_grp_dec (grp); |
|
|
1332 | |
|
|
1333 | void |
|
|
1334 | feeder_limit (aio_req grp, int limit) |
1438 | limit (aio_req grp, int limit) |
1335 | CODE: |
1439 | CODE: |
1336 | grp->fd2 = limit; |
1440 | grp->fd2 = limit; |
1337 | aio_grp_feed (grp); |
1441 | aio_grp_feed (grp); |
1338 | |
1442 | |
1339 | void |
1443 | void |
1340 | set_feeder (aio_req grp, SV *callback=&PL_sv_undef) |
1444 | feed (aio_req grp, SV *callback=&PL_sv_undef) |
1341 | CODE: |
1445 | CODE: |
1342 | { |
1446 | { |
1343 | SvREFCNT_dec (grp->fh2); |
1447 | SvREFCNT_dec (grp->fh2); |
1344 | grp->fh2 = newSVsv (callback); |
1448 | grp->fh2 = newSVsv (callback); |
1345 | |
1449 | |