… | |
… | |
40 | #ifndef _WIN32 |
40 | #ifndef _WIN32 |
41 | # include "config.h" |
41 | # include "config.h" |
42 | #endif |
42 | #endif |
43 | |
43 | |
44 | #include "eio.h" |
44 | #include "eio.h" |
|
|
45 | #include "ecb.h" |
45 | |
46 | |
46 | #ifdef EIO_STACKSIZE |
47 | #ifdef EIO_STACKSIZE |
47 | # define XTHREAD_STACKSIZE EIO_STACKSIZE |
48 | # define XTHREAD_STACKSIZE EIO_STACKSIZE |
48 | #endif |
49 | #endif |
49 | #include "xthread.h" |
50 | #include "xthread.h" |
… | |
… | |
53 | #include <stdlib.h> |
54 | #include <stdlib.h> |
54 | #include <string.h> |
55 | #include <string.h> |
55 | #include <errno.h> |
56 | #include <errno.h> |
56 | #include <sys/types.h> |
57 | #include <sys/types.h> |
57 | #include <sys/stat.h> |
58 | #include <sys/stat.h> |
58 | #include <sys/statvfs.h> |
|
|
59 | #include <limits.h> |
59 | #include <limits.h> |
60 | #include <fcntl.h> |
60 | #include <fcntl.h> |
61 | #include <assert.h> |
61 | #include <assert.h> |
62 | |
62 | |
|
|
63 | #include <sys/statvfs.h> |
63 | /* intptr_t comes from unistd.h, says POSIX/UNIX/tradition */ |
64 | /* intptr_t comes from unistd.h, says POSIX/UNIX/tradition */ |
64 | /* intptr_t only comes form stdint.h, says idiot openbsd coder */ |
65 | /* intptr_t only comes from stdint.h, says idiot openbsd coder */ |
65 | #if HAVE_STDINT_H |
66 | #if HAVE_STDINT_H |
66 | # include <stdint.h> |
67 | # include <stdint.h> |
67 | #endif |
68 | #endif |
68 | |
69 | |
|
|
70 | #ifndef ECANCELED |
|
|
71 | # define ECANCELED EDOM |
|
|
72 | #endif |
|
|
73 | |
|
|
74 | static void eio_destroy (eio_req *req); |
|
|
75 | |
69 | #ifndef EIO_FINISH |
76 | #ifndef EIO_FINISH |
70 | # define EIO_FINISH(req) ((req)->finish) && !EIO_CANCELLED (req) ? (req)->finish (req) : 0 |
77 | # define EIO_FINISH(req) ((req)->finish) && !EIO_CANCELLED (req) ? (req)->finish (req) : 0 |
71 | #endif |
78 | #endif |
72 | |
79 | |
73 | #ifndef EIO_DESTROY |
80 | #ifndef EIO_DESTROY |
… | |
… | |
76 | |
83 | |
77 | #ifndef EIO_FEED |
84 | #ifndef EIO_FEED |
78 | # define EIO_FEED(req) do { if ((req)->feed ) (req)->feed (req); } while (0) |
85 | # define EIO_FEED(req) do { if ((req)->feed ) (req)->feed (req); } while (0) |
79 | #endif |
86 | #endif |
80 | |
87 | |
|
|
88 | #ifndef EIO_FD_TO_WIN32_HANDLE |
|
|
89 | # define EIO_FD_TO_WIN32_HANDLE(fd) _get_osfhandle (fd) |
|
|
90 | #endif |
|
|
91 | #ifndef EIO_WIN32_HANDLE_TO_FD |
|
|
92 | # define EIO_WIN32_HANDLE_TO_FD(handle) _open_osfhandle (handle, 0) |
|
|
93 | #endif |
|
|
94 | |
|
|
95 | #define EIO_ERRNO(errval,retval) ((errno = errval), retval) |
|
|
96 | |
|
|
97 | #define EIO_ENOSYS() EIO_ERRNO (ENOSYS, -1) |
|
|
98 | |
81 | #ifdef _WIN32 |
99 | #ifdef _WIN32 |
82 | |
100 | |
83 | /*doh*/ |
101 | #define PAGESIZE 4096 /* GetSystemInfo? */ |
|
|
102 | |
|
|
103 | #define stat(path,buf) _stati64 (path,buf) |
|
|
104 | #define lstat(path,buf) stat (path,buf) |
|
|
105 | #define fstat(fd,buf) _fstati64 (path,buf) |
|
|
106 | #define fsync(fd) (FlushFileBuffers (EIO_FD_TO_WIN32_HANDLE (fd)) ? 0 : EIO_ERRNO (EBADF, -1)) |
|
|
107 | #define mkdir(path,mode) _mkdir (path) |
|
|
108 | #define link(old,neu) (CreateHardLink (neu, old, 0) ? 0 : EIO_ERRNO (ENOENT, -1)) |
|
|
109 | |
|
|
110 | #define chown(path,uid,gid) EIO_ENOSYS () |
|
|
111 | #define fchown(fd,uid,gid) EIO_ENOSYS () |
|
|
112 | #define truncate(path,offs) EIO_ENOSYS () /* far-miss: SetEndOfFile */ |
|
|
113 | #define ftruncate(fd,offs) EIO_ENOSYS () /* near-miss: SetEndOfFile */ |
|
|
114 | #define mknod(path,mode,dev) EIO_ENOSYS () |
|
|
115 | #define sync() EIO_ENOSYS () |
|
|
116 | |
|
|
117 | /* we could even stat and see if it exists */ |
|
|
118 | static int |
|
|
119 | symlink (const char *old, const char *neu) |
|
|
120 | { |
|
|
121 | if (CreateSymbolicLink (neu, old, 1)) |
|
|
122 | return 0; |
|
|
123 | |
|
|
124 | if (CreateSymbolicLink (neu, old, 0)) |
|
|
125 | return 0; |
|
|
126 | |
|
|
127 | return EIO_ERRNO (ENOENT, -1); |
|
|
128 | } |
|
|
129 | |
84 | #else |
130 | #else |
85 | |
131 | |
86 | # include <sys/time.h> |
132 | #include <sys/time.h> |
87 | # include <sys/select.h> |
133 | #include <sys/select.h> |
|
|
134 | #include <sys/statvfs.h> |
88 | # include <unistd.h> |
135 | #include <unistd.h> |
89 | # include <utime.h> |
136 | #include <utime.h> |
90 | # include <signal.h> |
137 | #include <signal.h> |
91 | # include <dirent.h> |
138 | #include <dirent.h> |
92 | |
139 | |
93 | #if _POSIX_MEMLOCK || _POSIX_MEMLOCK_RANGE || _POSIX_MAPPED_FILES |
140 | #if _POSIX_MEMLOCK || _POSIX_MEMLOCK_RANGE || _POSIX_MAPPED_FILES |
94 | # include <sys/mman.h> |
141 | #include <sys/mman.h> |
95 | #endif |
142 | #endif |
96 | |
143 | |
97 | /* POSIX_SOURCE is useless on bsd's, and XOPEN_SOURCE is unreliable there, too */ |
144 | /* POSIX_SOURCE is useless on bsd's, and XOPEN_SOURCE is unreliable there, too */ |
98 | # if __FreeBSD__ || defined __NetBSD__ || defined __OpenBSD__ |
145 | #if __FreeBSD__ || defined __NetBSD__ || defined __OpenBSD__ |
99 | # define _DIRENT_HAVE_D_TYPE /* sigh */ |
146 | #define _DIRENT_HAVE_D_TYPE /* sigh */ |
100 | # define D_INO(de) (de)->d_fileno |
147 | #define D_INO(de) (de)->d_fileno |
101 | # define D_NAMLEN(de) (de)->d_namlen |
148 | #define D_NAMLEN(de) (de)->d_namlen |
102 | # elif __linux || defined d_ino || _XOPEN_SOURCE >= 600 |
149 | #elif __linux || defined d_ino || _XOPEN_SOURCE >= 600 |
103 | # define D_INO(de) (de)->d_ino |
150 | #define D_INO(de) (de)->d_ino |
104 | # endif |
151 | #endif |
105 | |
152 | |
106 | #ifdef _D_EXACT_NAMLEN |
153 | #ifdef _D_EXACT_NAMLEN |
107 | # undef D_NAMLEN |
154 | #undef D_NAMLEN |
108 | # define D_NAMLEN(de) _D_EXACT_NAMLEN (de) |
155 | #define D_NAMLEN(de) _D_EXACT_NAMLEN (de) |
109 | #endif |
156 | #endif |
110 | |
157 | |
111 | # ifdef _DIRENT_HAVE_D_TYPE |
158 | #ifdef _DIRENT_HAVE_D_TYPE |
112 | # define D_TYPE(de) (de)->d_type |
159 | #define D_TYPE(de) (de)->d_type |
113 | # endif |
160 | #endif |
114 | |
161 | |
115 | # ifndef EIO_STRUCT_DIRENT |
162 | #ifndef EIO_STRUCT_DIRENT |
116 | # define EIO_STRUCT_DIRENT struct dirent |
163 | #define EIO_STRUCT_DIRENT struct dirent |
117 | # endif |
164 | #endif |
118 | |
165 | |
119 | #endif |
166 | #endif |
120 | |
167 | |
121 | #if HAVE_SENDFILE |
168 | #if HAVE_SENDFILE |
122 | # if __linux |
169 | # if __linux |
… | |
… | |
165 | if (!eio_buf) \ |
212 | if (!eio_buf) \ |
166 | return -1; |
213 | return -1; |
167 | |
214 | |
168 | #define EIO_TICKS ((1000000 + 1023) >> 10) |
215 | #define EIO_TICKS ((1000000 + 1023) >> 10) |
169 | |
216 | |
170 | /*****************************************************************************/ |
|
|
171 | |
|
|
172 | #if __GNUC__ >= 3 |
|
|
173 | # define expect(expr,value) __builtin_expect ((expr),(value)) |
|
|
174 | #else |
|
|
175 | # define expect(expr,value) (expr) |
|
|
176 | #endif |
|
|
177 | |
|
|
178 | #define expect_false(expr) expect ((expr) != 0, 0) |
|
|
179 | #define expect_true(expr) expect ((expr) != 0, 1) |
|
|
180 | |
|
|
181 | /*****************************************************************************/ |
|
|
182 | |
|
|
183 | #define ETP_PRI_MIN EIO_PRI_MIN |
217 | #define ETP_PRI_MIN EIO_PRI_MIN |
184 | #define ETP_PRI_MAX EIO_PRI_MAX |
218 | #define ETP_PRI_MAX EIO_PRI_MAX |
185 | |
219 | |
186 | struct etp_worker; |
220 | struct etp_worker; |
187 | |
221 | |
… | |
… | |
212 | /*****************************************************************************/ |
246 | /*****************************************************************************/ |
213 | |
247 | |
214 | #define ETP_NUM_PRI (ETP_PRI_MAX - ETP_PRI_MIN + 1) |
248 | #define ETP_NUM_PRI (ETP_PRI_MAX - ETP_PRI_MIN + 1) |
215 | |
249 | |
216 | /* calculate time difference in ~1/EIO_TICKS of a second */ |
250 | /* calculate time difference in ~1/EIO_TICKS of a second */ |
|
|
251 | ecb_inline int |
217 | static int tvdiff (struct timeval *tv1, struct timeval *tv2) |
252 | tvdiff (struct timeval *tv1, struct timeval *tv2) |
218 | { |
253 | { |
219 | return (tv2->tv_sec - tv1->tv_sec ) * EIO_TICKS |
254 | return (tv2->tv_sec - tv1->tv_sec ) * EIO_TICKS |
220 | + ((tv2->tv_usec - tv1->tv_usec) >> 10); |
255 | + ((tv2->tv_usec - tv1->tv_usec) >> 10); |
221 | } |
256 | } |
222 | |
257 | |
… | |
… | |
266 | #define ETP_WORKER_LOCK(wrk) X_LOCK (wrklock) |
301 | #define ETP_WORKER_LOCK(wrk) X_LOCK (wrklock) |
267 | #define ETP_WORKER_UNLOCK(wrk) X_UNLOCK (wrklock) |
302 | #define ETP_WORKER_UNLOCK(wrk) X_UNLOCK (wrklock) |
268 | |
303 | |
269 | /* worker threads management */ |
304 | /* worker threads management */ |
270 | |
305 | |
|
|
306 | static void ecb_cold |
271 | static void etp_worker_clear (etp_worker *wrk) |
307 | etp_worker_clear (etp_worker *wrk) |
272 | { |
308 | { |
273 | ETP_WORKER_CLEAR (wrk); |
309 | ETP_WORKER_CLEAR (wrk); |
274 | } |
310 | } |
275 | |
311 | |
|
|
312 | static void ecb_cold |
276 | static void etp_worker_free (etp_worker *wrk) |
313 | etp_worker_free (etp_worker *wrk) |
277 | { |
314 | { |
278 | wrk->next->prev = wrk->prev; |
315 | wrk->next->prev = wrk->prev; |
279 | wrk->prev->next = wrk->next; |
316 | wrk->prev->next = wrk->next; |
280 | |
317 | |
281 | free (wrk); |
318 | free (wrk); |
282 | } |
319 | } |
283 | |
320 | |
284 | static unsigned int etp_nreqs (void) |
321 | static unsigned int |
|
|
322 | etp_nreqs (void) |
285 | { |
323 | { |
286 | int retval; |
324 | int retval; |
287 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
325 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
288 | retval = nreqs; |
326 | retval = nreqs; |
289 | if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); |
327 | if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); |
290 | return retval; |
328 | return retval; |
291 | } |
329 | } |
292 | |
330 | |
293 | static unsigned int etp_nready (void) |
331 | static unsigned int |
|
|
332 | etp_nready (void) |
294 | { |
333 | { |
295 | unsigned int retval; |
334 | unsigned int retval; |
296 | |
335 | |
297 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
336 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
298 | retval = nready; |
337 | retval = nready; |
299 | if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); |
338 | if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); |
300 | |
339 | |
301 | return retval; |
340 | return retval; |
302 | } |
341 | } |
303 | |
342 | |
304 | static unsigned int etp_npending (void) |
343 | static unsigned int |
|
|
344 | etp_npending (void) |
305 | { |
345 | { |
306 | unsigned int retval; |
346 | unsigned int retval; |
307 | |
347 | |
308 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
348 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
309 | retval = npending; |
349 | retval = npending; |
310 | if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); |
350 | if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); |
311 | |
351 | |
312 | return retval; |
352 | return retval; |
313 | } |
353 | } |
314 | |
354 | |
315 | static unsigned int etp_nthreads (void) |
355 | static unsigned int |
|
|
356 | etp_nthreads (void) |
316 | { |
357 | { |
317 | unsigned int retval; |
358 | unsigned int retval; |
318 | |
359 | |
319 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
360 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
320 | retval = started; |
361 | retval = started; |
… | |
… | |
334 | } etp_reqq; |
375 | } etp_reqq; |
335 | |
376 | |
336 | static etp_reqq req_queue; |
377 | static etp_reqq req_queue; |
337 | static etp_reqq res_queue; |
378 | static etp_reqq res_queue; |
338 | |
379 | |
|
|
380 | static int ecb_noinline |
339 | static int reqq_push (etp_reqq *q, ETP_REQ *req) |
381 | reqq_push (etp_reqq *q, ETP_REQ *req) |
340 | { |
382 | { |
341 | int pri = req->pri; |
383 | int pri = req->pri; |
342 | req->next = 0; |
384 | req->next = 0; |
343 | |
385 | |
344 | if (q->qe[pri]) |
386 | if (q->qe[pri]) |
… | |
… | |
350 | q->qe[pri] = q->qs[pri] = req; |
392 | q->qe[pri] = q->qs[pri] = req; |
351 | |
393 | |
352 | return q->size++; |
394 | return q->size++; |
353 | } |
395 | } |
354 | |
396 | |
|
|
397 | static ETP_REQ * ecb_noinline |
355 | static ETP_REQ *reqq_shift (etp_reqq *q) |
398 | reqq_shift (etp_reqq *q) |
356 | { |
399 | { |
357 | int pri; |
400 | int pri; |
358 | |
401 | |
359 | if (!q->size) |
402 | if (!q->size) |
360 | return 0; |
403 | return 0; |
… | |
… | |
375 | } |
418 | } |
376 | |
419 | |
377 | abort (); |
420 | abort (); |
378 | } |
421 | } |
379 | |
422 | |
|
|
423 | static void ecb_cold |
380 | static void etp_thread_init (void) |
424 | etp_thread_init (void) |
381 | { |
425 | { |
|
|
426 | #if !HAVE_PREADWRITE |
|
|
427 | X_MUTEX_CREATE (preadwritelock); |
|
|
428 | #endif |
382 | X_MUTEX_CREATE (wrklock); |
429 | X_MUTEX_CREATE (wrklock); |
383 | X_MUTEX_CREATE (reslock); |
430 | X_MUTEX_CREATE (reslock); |
384 | X_MUTEX_CREATE (reqlock); |
431 | X_MUTEX_CREATE (reqlock); |
385 | X_COND_CREATE (reqwait); |
432 | X_COND_CREATE (reqwait); |
386 | } |
433 | } |
387 | |
434 | |
|
|
435 | static void ecb_cold |
388 | static void etp_atfork_prepare (void) |
436 | etp_atfork_prepare (void) |
389 | { |
437 | { |
390 | X_LOCK (wrklock); |
|
|
391 | X_LOCK (reqlock); |
|
|
392 | X_LOCK (reslock); |
|
|
393 | #if !HAVE_PREADWRITE |
|
|
394 | X_LOCK (preadwritelock); |
|
|
395 | #endif |
|
|
396 | } |
438 | } |
397 | |
439 | |
|
|
440 | static void ecb_cold |
398 | static void etp_atfork_parent (void) |
441 | etp_atfork_parent (void) |
399 | { |
442 | { |
400 | #if !HAVE_PREADWRITE |
|
|
401 | X_UNLOCK (preadwritelock); |
|
|
402 | #endif |
|
|
403 | X_UNLOCK (reslock); |
|
|
404 | X_UNLOCK (reqlock); |
|
|
405 | X_UNLOCK (wrklock); |
|
|
406 | } |
443 | } |
407 | |
444 | |
|
|
445 | static void ecb_cold |
408 | static void etp_atfork_child (void) |
446 | etp_atfork_child (void) |
409 | { |
447 | { |
410 | ETP_REQ *prv; |
448 | ETP_REQ *prv; |
411 | |
449 | |
412 | while ((prv = reqq_shift (&req_queue))) |
450 | while ((prv = reqq_shift (&req_queue))) |
413 | ETP_DESTROY (prv); |
451 | ETP_DESTROY (prv); |
… | |
… | |
433 | npending = 0; |
471 | npending = 0; |
434 | |
472 | |
435 | etp_thread_init (); |
473 | etp_thread_init (); |
436 | } |
474 | } |
437 | |
475 | |
438 | static void |
476 | static void ecb_cold |
439 | etp_once_init (void) |
477 | etp_once_init (void) |
440 | { |
478 | { |
441 | etp_thread_init (); |
479 | etp_thread_init (); |
442 | X_THREAD_ATFORK (etp_atfork_prepare, etp_atfork_parent, etp_atfork_child); |
480 | X_THREAD_ATFORK (etp_atfork_prepare, etp_atfork_parent, etp_atfork_child); |
443 | } |
481 | } |
444 | |
482 | |
445 | static int |
483 | static int ecb_cold |
446 | etp_init (void (*want_poll)(void), void (*done_poll)(void)) |
484 | etp_init (void (*want_poll)(void), void (*done_poll)(void)) |
447 | { |
485 | { |
448 | static pthread_once_t doinit = PTHREAD_ONCE_INIT; |
486 | static pthread_once_t doinit = PTHREAD_ONCE_INIT; |
449 | |
487 | |
450 | pthread_once (&doinit, etp_once_init); |
488 | pthread_once (&doinit, etp_once_init); |
… | |
… | |
455 | return 0; |
493 | return 0; |
456 | } |
494 | } |
457 | |
495 | |
458 | X_THREAD_PROC (etp_proc); |
496 | X_THREAD_PROC (etp_proc); |
459 | |
497 | |
|
|
498 | static void ecb_cold |
460 | static void etp_start_thread (void) |
499 | etp_start_thread (void) |
461 | { |
500 | { |
462 | etp_worker *wrk = calloc (1, sizeof (etp_worker)); |
501 | etp_worker *wrk = calloc (1, sizeof (etp_worker)); |
463 | |
502 | |
464 | /*TODO*/ |
503 | /*TODO*/ |
465 | assert (("unable to allocate worker thread data", wrk)); |
504 | assert (("unable to allocate worker thread data", wrk)); |
… | |
… | |
478 | free (wrk); |
517 | free (wrk); |
479 | |
518 | |
480 | X_UNLOCK (wrklock); |
519 | X_UNLOCK (wrklock); |
481 | } |
520 | } |
482 | |
521 | |
|
|
522 | static void |
483 | static void etp_maybe_start_thread (void) |
523 | etp_maybe_start_thread (void) |
484 | { |
524 | { |
485 | if (expect_true (etp_nthreads () >= wanted)) |
525 | if (ecb_expect_true (etp_nthreads () >= wanted)) |
486 | return; |
526 | return; |
487 | |
527 | |
488 | /* todo: maybe use idle here, but might be less exact */ |
528 | /* todo: maybe use idle here, but might be less exact */ |
489 | if (expect_true (0 <= (int)etp_nthreads () + (int)etp_npending () - (int)etp_nreqs ())) |
529 | if (ecb_expect_true (0 <= (int)etp_nthreads () + (int)etp_npending () - (int)etp_nreqs ())) |
490 | return; |
530 | return; |
491 | |
531 | |
492 | etp_start_thread (); |
532 | etp_start_thread (); |
493 | } |
533 | } |
494 | |
534 | |
|
|
535 | static void ecb_cold |
495 | static void etp_end_thread (void) |
536 | etp_end_thread (void) |
496 | { |
537 | { |
497 | eio_req *req = calloc (1, sizeof (eio_req)); |
538 | eio_req *req = calloc (1, sizeof (eio_req)); |
498 | |
539 | |
499 | req->type = -1; |
540 | req->type = -1; |
500 | req->pri = ETP_PRI_MAX - ETP_PRI_MIN; |
541 | req->pri = ETP_PRI_MAX - ETP_PRI_MIN; |
… | |
… | |
507 | X_LOCK (wrklock); |
548 | X_LOCK (wrklock); |
508 | --started; |
549 | --started; |
509 | X_UNLOCK (wrklock); |
550 | X_UNLOCK (wrklock); |
510 | } |
551 | } |
511 | |
552 | |
512 | static int etp_poll (void) |
553 | static int |
|
|
554 | etp_poll (void) |
513 | { |
555 | { |
514 | unsigned int maxreqs; |
556 | unsigned int maxreqs; |
515 | unsigned int maxtime; |
557 | unsigned int maxtime; |
516 | struct timeval tv_start, tv_now; |
558 | struct timeval tv_start, tv_now; |
517 | |
559 | |
… | |
… | |
547 | |
589 | |
548 | X_LOCK (reqlock); |
590 | X_LOCK (reqlock); |
549 | --nreqs; |
591 | --nreqs; |
550 | X_UNLOCK (reqlock); |
592 | X_UNLOCK (reqlock); |
551 | |
593 | |
552 | if (expect_false (req->type == EIO_GROUP && req->size)) |
594 | if (ecb_expect_false (req->type == EIO_GROUP && req->size)) |
553 | { |
595 | { |
554 | req->int1 = 1; /* mark request as delayed */ |
596 | req->int1 = 1; /* mark request as delayed */ |
555 | continue; |
597 | continue; |
556 | } |
598 | } |
557 | else |
599 | else |
558 | { |
600 | { |
559 | int res = ETP_FINISH (req); |
601 | int res = ETP_FINISH (req); |
560 | if (expect_false (res)) |
602 | if (ecb_expect_false (res)) |
561 | return res; |
603 | return res; |
562 | } |
604 | } |
563 | |
605 | |
564 | if (expect_false (maxreqs && !--maxreqs)) |
606 | if (ecb_expect_false (maxreqs && !--maxreqs)) |
565 | break; |
607 | break; |
566 | |
608 | |
567 | if (maxtime) |
609 | if (maxtime) |
568 | { |
610 | { |
569 | gettimeofday (&tv_now, 0); |
611 | gettimeofday (&tv_now, 0); |
… | |
… | |
575 | |
617 | |
576 | errno = EAGAIN; |
618 | errno = EAGAIN; |
577 | return -1; |
619 | return -1; |
578 | } |
620 | } |
579 | |
621 | |
|
|
622 | static void |
580 | static void etp_cancel (ETP_REQ *req) |
623 | etp_cancel (ETP_REQ *req) |
581 | { |
624 | { |
582 | X_LOCK (wrklock); |
625 | req->cancelled = 1; |
583 | req->flags |= EIO_FLAG_CANCELLED; |
|
|
584 | X_UNLOCK (wrklock); |
|
|
585 | |
626 | |
586 | eio_grp_cancel (req); |
627 | eio_grp_cancel (req); |
587 | } |
628 | } |
588 | |
629 | |
|
|
630 | static void |
589 | static void etp_submit (ETP_REQ *req) |
631 | etp_submit (ETP_REQ *req) |
590 | { |
632 | { |
591 | req->pri -= ETP_PRI_MIN; |
633 | req->pri -= ETP_PRI_MIN; |
592 | |
634 | |
593 | if (expect_false (req->pri < ETP_PRI_MIN - ETP_PRI_MIN)) req->pri = ETP_PRI_MIN - ETP_PRI_MIN; |
635 | if (ecb_expect_false (req->pri < ETP_PRI_MIN - ETP_PRI_MIN)) req->pri = ETP_PRI_MIN - ETP_PRI_MIN; |
594 | if (expect_false (req->pri > ETP_PRI_MAX - ETP_PRI_MIN)) req->pri = ETP_PRI_MAX - ETP_PRI_MIN; |
636 | if (ecb_expect_false (req->pri > ETP_PRI_MAX - ETP_PRI_MIN)) req->pri = ETP_PRI_MAX - ETP_PRI_MIN; |
595 | |
637 | |
596 | if (expect_false (req->type == EIO_GROUP)) |
638 | if (ecb_expect_false (req->type == EIO_GROUP)) |
597 | { |
639 | { |
598 | /* I hope this is worth it :/ */ |
640 | /* I hope this is worth it :/ */ |
599 | X_LOCK (reqlock); |
641 | X_LOCK (reqlock); |
600 | ++nreqs; |
642 | ++nreqs; |
601 | X_UNLOCK (reqlock); |
643 | X_UNLOCK (reqlock); |
… | |
… | |
620 | |
662 | |
621 | etp_maybe_start_thread (); |
663 | etp_maybe_start_thread (); |
622 | } |
664 | } |
623 | } |
665 | } |
624 | |
666 | |
|
|
667 | static void ecb_cold |
625 | static void etp_set_max_poll_time (double nseconds) |
668 | etp_set_max_poll_time (double nseconds) |
626 | { |
669 | { |
627 | if (WORDACCESS_UNSAFE) X_LOCK (reslock); |
670 | if (WORDACCESS_UNSAFE) X_LOCK (reslock); |
628 | max_poll_time = nseconds * EIO_TICKS; |
671 | max_poll_time = nseconds * EIO_TICKS; |
629 | if (WORDACCESS_UNSAFE) X_UNLOCK (reslock); |
672 | if (WORDACCESS_UNSAFE) X_UNLOCK (reslock); |
630 | } |
673 | } |
631 | |
674 | |
|
|
675 | static void ecb_cold |
632 | static void etp_set_max_poll_reqs (unsigned int maxreqs) |
676 | etp_set_max_poll_reqs (unsigned int maxreqs) |
633 | { |
677 | { |
634 | if (WORDACCESS_UNSAFE) X_LOCK (reslock); |
678 | if (WORDACCESS_UNSAFE) X_LOCK (reslock); |
635 | max_poll_reqs = maxreqs; |
679 | max_poll_reqs = maxreqs; |
636 | if (WORDACCESS_UNSAFE) X_UNLOCK (reslock); |
680 | if (WORDACCESS_UNSAFE) X_UNLOCK (reslock); |
637 | } |
681 | } |
638 | |
682 | |
|
|
683 | static void ecb_cold |
639 | static void etp_set_max_idle (unsigned int nthreads) |
684 | etp_set_max_idle (unsigned int nthreads) |
640 | { |
685 | { |
641 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
686 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
642 | max_idle = nthreads; |
687 | max_idle = nthreads; |
643 | if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); |
688 | if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); |
644 | } |
689 | } |
645 | |
690 | |
|
|
691 | static void ecb_cold |
646 | static void etp_set_idle_timeout (unsigned int seconds) |
692 | etp_set_idle_timeout (unsigned int seconds) |
647 | { |
693 | { |
648 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
694 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
649 | idle_timeout = seconds; |
695 | idle_timeout = seconds; |
650 | if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); |
696 | if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); |
651 | } |
697 | } |
652 | |
698 | |
|
|
699 | static void ecb_cold |
653 | static void etp_set_min_parallel (unsigned int nthreads) |
700 | etp_set_min_parallel (unsigned int nthreads) |
654 | { |
701 | { |
655 | if (wanted < nthreads) |
702 | if (wanted < nthreads) |
656 | wanted = nthreads; |
703 | wanted = nthreads; |
657 | } |
704 | } |
658 | |
705 | |
|
|
706 | static void ecb_cold |
659 | static void etp_set_max_parallel (unsigned int nthreads) |
707 | etp_set_max_parallel (unsigned int nthreads) |
660 | { |
708 | { |
661 | if (wanted > nthreads) |
709 | if (wanted > nthreads) |
662 | wanted = nthreads; |
710 | wanted = nthreads; |
663 | |
711 | |
664 | while (started > wanted) |
712 | while (started > wanted) |
665 | etp_end_thread (); |
713 | etp_end_thread (); |
666 | } |
714 | } |
667 | |
715 | |
668 | /*****************************************************************************/ |
716 | /*****************************************************************************/ |
669 | |
717 | |
|
|
718 | static void |
670 | static void grp_try_feed (eio_req *grp) |
719 | grp_try_feed (eio_req *grp) |
671 | { |
720 | { |
672 | while (grp->size < grp->int2 && !EIO_CANCELLED (grp)) |
721 | while (grp->size < grp->int2 && !EIO_CANCELLED (grp)) |
673 | { |
722 | { |
674 | grp->flags &= ~EIO_FLAG_GROUPADD; |
723 | grp->flags &= ~EIO_FLAG_GROUPADD; |
675 | |
724 | |
… | |
… | |
682 | break; |
731 | break; |
683 | } |
732 | } |
684 | } |
733 | } |
685 | } |
734 | } |
686 | |
735 | |
|
|
736 | static int |
687 | static int grp_dec (eio_req *grp) |
737 | grp_dec (eio_req *grp) |
688 | { |
738 | { |
689 | --grp->size; |
739 | --grp->size; |
690 | |
740 | |
691 | /* call feeder, if applicable */ |
741 | /* call feeder, if applicable */ |
692 | grp_try_feed (grp); |
742 | grp_try_feed (grp); |
… | |
… | |
696 | return eio_finish (grp); |
746 | return eio_finish (grp); |
697 | else |
747 | else |
698 | return 0; |
748 | return 0; |
699 | } |
749 | } |
700 | |
750 | |
|
|
751 | static void |
701 | void eio_destroy (eio_req *req) |
752 | eio_destroy (eio_req *req) |
702 | { |
753 | { |
703 | if ((req)->flags & EIO_FLAG_PTR1_FREE) free (req->ptr1); |
754 | if ((req)->flags & EIO_FLAG_PTR1_FREE) free (req->ptr1); |
704 | if ((req)->flags & EIO_FLAG_PTR2_FREE) free (req->ptr2); |
755 | if ((req)->flags & EIO_FLAG_PTR2_FREE) free (req->ptr2); |
705 | |
756 | |
706 | EIO_DESTROY (req); |
757 | EIO_DESTROY (req); |
707 | } |
758 | } |
708 | |
759 | |
|
|
760 | static int |
709 | static int eio_finish (eio_req *req) |
761 | eio_finish (eio_req *req) |
710 | { |
762 | { |
711 | int res = EIO_FINISH (req); |
763 | int res = EIO_FINISH (req); |
712 | |
764 | |
713 | if (req->grp) |
765 | if (req->grp) |
714 | { |
766 | { |
… | |
… | |
722 | if (grp->grp_first == req) |
774 | if (grp->grp_first == req) |
723 | grp->grp_first = req->grp_next; |
775 | grp->grp_first = req->grp_next; |
724 | |
776 | |
725 | res2 = grp_dec (grp); |
777 | res2 = grp_dec (grp); |
726 | |
778 | |
727 | if (!res && res2) |
779 | if (!res) |
728 | res = res2; |
780 | res = res2; |
729 | } |
781 | } |
730 | |
782 | |
731 | eio_destroy (req); |
783 | eio_destroy (req); |
732 | |
784 | |
733 | return res; |
785 | return res; |
734 | } |
786 | } |
735 | |
787 | |
|
|
788 | void |
736 | void eio_grp_cancel (eio_req *grp) |
789 | eio_grp_cancel (eio_req *grp) |
737 | { |
790 | { |
738 | for (grp = grp->grp_first; grp; grp = grp->grp_next) |
791 | for (grp = grp->grp_first; grp; grp = grp->grp_next) |
739 | eio_cancel (grp); |
792 | eio_cancel (grp); |
740 | } |
793 | } |
741 | |
794 | |
|
|
795 | void |
742 | void eio_cancel (eio_req *req) |
796 | eio_cancel (eio_req *req) |
743 | { |
797 | { |
744 | etp_cancel (req); |
798 | etp_cancel (req); |
745 | } |
799 | } |
746 | |
800 | |
|
|
801 | void |
747 | void eio_submit (eio_req *req) |
802 | eio_submit (eio_req *req) |
748 | { |
803 | { |
749 | etp_submit (req); |
804 | etp_submit (req); |
750 | } |
805 | } |
751 | |
806 | |
752 | unsigned int eio_nreqs (void) |
807 | unsigned int |
|
|
808 | eio_nreqs (void) |
753 | { |
809 | { |
754 | return etp_nreqs (); |
810 | return etp_nreqs (); |
755 | } |
811 | } |
756 | |
812 | |
757 | unsigned int eio_nready (void) |
813 | unsigned int |
|
|
814 | eio_nready (void) |
758 | { |
815 | { |
759 | return etp_nready (); |
816 | return etp_nready (); |
760 | } |
817 | } |
761 | |
818 | |
762 | unsigned int eio_npending (void) |
819 | unsigned int |
|
|
820 | eio_npending (void) |
763 | { |
821 | { |
764 | return etp_npending (); |
822 | return etp_npending (); |
765 | } |
823 | } |
766 | |
824 | |
767 | unsigned int eio_nthreads (void) |
825 | unsigned int ecb_cold |
|
|
826 | eio_nthreads (void) |
768 | { |
827 | { |
769 | return etp_nthreads (); |
828 | return etp_nthreads (); |
770 | } |
829 | } |
771 | |
830 | |
|
|
831 | void ecb_cold |
772 | void eio_set_max_poll_time (double nseconds) |
832 | eio_set_max_poll_time (double nseconds) |
773 | { |
833 | { |
774 | etp_set_max_poll_time (nseconds); |
834 | etp_set_max_poll_time (nseconds); |
775 | } |
835 | } |
776 | |
836 | |
|
|
837 | void ecb_cold |
777 | void eio_set_max_poll_reqs (unsigned int maxreqs) |
838 | eio_set_max_poll_reqs (unsigned int maxreqs) |
778 | { |
839 | { |
779 | etp_set_max_poll_reqs (maxreqs); |
840 | etp_set_max_poll_reqs (maxreqs); |
780 | } |
841 | } |
781 | |
842 | |
|
|
843 | void ecb_cold |
782 | void eio_set_max_idle (unsigned int nthreads) |
844 | eio_set_max_idle (unsigned int nthreads) |
783 | { |
845 | { |
784 | etp_set_max_idle (nthreads); |
846 | etp_set_max_idle (nthreads); |
785 | } |
847 | } |
786 | |
848 | |
|
|
849 | void ecb_cold |
787 | void eio_set_idle_timeout (unsigned int seconds) |
850 | eio_set_idle_timeout (unsigned int seconds) |
788 | { |
851 | { |
789 | etp_set_idle_timeout (seconds); |
852 | etp_set_idle_timeout (seconds); |
790 | } |
853 | } |
791 | |
854 | |
|
|
855 | void ecb_cold |
792 | void eio_set_min_parallel (unsigned int nthreads) |
856 | eio_set_min_parallel (unsigned int nthreads) |
793 | { |
857 | { |
794 | etp_set_min_parallel (nthreads); |
858 | etp_set_min_parallel (nthreads); |
795 | } |
859 | } |
796 | |
860 | |
|
|
861 | void ecb_cold |
797 | void eio_set_max_parallel (unsigned int nthreads) |
862 | eio_set_max_parallel (unsigned int nthreads) |
798 | { |
863 | { |
799 | etp_set_max_parallel (nthreads); |
864 | etp_set_max_parallel (nthreads); |
800 | } |
865 | } |
801 | |
866 | |
802 | int eio_poll (void) |
867 | int eio_poll (void) |
… | |
… | |
872 | #ifndef HAVE_FUTIMES |
937 | #ifndef HAVE_FUTIMES |
873 | |
938 | |
874 | # undef futimes |
939 | # undef futimes |
875 | # define futimes(fd,times) eio__futimes (fd, times) |
940 | # define futimes(fd,times) eio__futimes (fd, times) |
876 | |
941 | |
|
|
/* fallback when the platform lacks futimes: always fails with ENOSYS */
static int
eio__futimes (int fd, const struct timeval tv[2])
{
  errno = ENOSYS;
  return -1;
}
882 | |
948 | |
… | |
… | |
886 | # undef fdatasync |
952 | # undef fdatasync |
887 | # define fdatasync(fd) fsync (fd) |
953 | # define fdatasync(fd) fsync (fd) |
888 | #endif |
954 | #endif |
889 | |
955 | |
890 | /* sync_file_range always needs emulation */ |
956 | /* sync_file_range always needs emulation */ |
891 | int |
957 | static int |
892 | eio__sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags) |
958 | eio__sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags) |
893 | { |
959 | { |
894 | #if HAVE_SYNC_FILE_RANGE |
960 | #if HAVE_SYNC_FILE_RANGE |
895 | int res; |
961 | int res; |
896 | |
962 | |
… | |
… | |
913 | /* even though we could play tricks with the flags, it's better to always |
979 | /* even though we could play tricks with the flags, it's better to always |
914 | * call fdatasync, as that matches the expectation of its users best */ |
980 | * call fdatasync, as that matches the expectation of its users best */ |
915 | return fdatasync (fd); |
981 | return fdatasync (fd); |
916 | } |
982 | } |
917 | |
983 | |
|
|
/* preallocate file space; forwards to fallocate(2) where available,
 * otherwise fails with ENOSYS */
static int
eio__fallocate (int fd, int mode, off_t offset, size_t len)
{
#if HAVE_FALLOCATE
  return fallocate (fd, mode, offset, len);
#else
  errno = ENOSYS;
  return -1;
#endif
}
|
|
994 | |
918 | #if !HAVE_READAHEAD |
995 | #if !HAVE_READAHEAD |
919 | # undef readahead |
996 | # undef readahead |
920 | # define readahead(fd,offset,count) eio__readahead (fd, offset, count, self) |
997 | # define readahead(fd,offset,count) eio__readahead (fd, offset, count, self) |
921 | |
998 | |
922 | static ssize_t |
999 | static ssize_t |
… | |
… | |
950 | if (!count) |
1027 | if (!count) |
951 | return 0; |
1028 | return 0; |
952 | |
1029 | |
953 | for (;;) |
1030 | for (;;) |
954 | { |
1031 | { |
|
|
1032 | #ifdef __APPLE__ |
|
|
1033 | # undef HAVE_SENDFILE /* broken, as everything on os x */ |
|
|
1034 | #endif |
955 | #if HAVE_SENDFILE |
1035 | #if HAVE_SENDFILE |
956 | # if __linux |
1036 | # if __linux |
957 | off_t soffset = offset; |
1037 | off_t soffset = offset; |
958 | res = sendfile (ofd, ifd, &soffset, count); |
1038 | res = sendfile (ofd, ifd, &soffset, count); |
959 | |
1039 | |
… | |
… | |
1002 | if (res < 0 && sbytes) |
1082 | if (res < 0 && sbytes) |
1003 | res = sbytes; |
1083 | res = sbytes; |
1004 | |
1084 | |
1005 | # endif |
1085 | # endif |
1006 | |
1086 | |
1007 | #elif defined (_WIN32) |
1087 | #elif defined (_WIN32) && 0 |
1008 | /* does not work, just for documentation of what would need to be done */ |
1088 | /* does not work, just for documentation of what would need to be done */ |
|
|
1089 | /* actually, cannot be done like this, as TransmitFile changes the file offset, */ |
|
|
1090 | /* libeio guarantees that the file offset does not change, and windows */ |
|
|
1091 | /* has no way to get an independent handle to the same file description */ |
1009 | HANDLE h = TO_SOCKET (ifd); |
1092 | HANDLE h = TO_SOCKET (ifd); |
1010 | SetFilePointer (h, offset, 0, FILE_BEGIN); |
1093 | SetFilePointer (h, offset, 0, FILE_BEGIN); |
1011 | res = TransmitFile (TO_SOCKET (ofd), h, count, 0, 0, 0, 0); |
1094 | res = TransmitFile (TO_SOCKET (ofd), h, count, 0, 0, 0, 0); |
1012 | |
1095 | |
1013 | #else |
1096 | #else |
… | |
… | |
1081 | count -= cnt; |
1164 | count -= cnt; |
1082 | } |
1165 | } |
1083 | } |
1166 | } |
1084 | |
1167 | |
1085 | return res; |
1168 | return res; |
|
|
1169 | } |
|
|
1170 | |
|
|
#ifdef PAGESIZE
# define eio_pagesize() PAGESIZE
#else
/* return the system page size; the sysconf result is cached in a
 * static after the first call, so later calls are cheap */
static intptr_t
eio_pagesize (void)
{
  static intptr_t cached_size;

  if (!cached_size)
    cached_size = sysconf (_SC_PAGESIZE);

  return cached_size;
}
#endif
|
|
1185 | |
|
|
/* widen the region (*addr, *length) to whole pages: the address is
 * rounded down to a page boundary and the length grown to compensate,
 * then the length is rounded up to a multiple of the page size */
static void
eio_page_align (void **addr, size_t *length)
{
  intptr_t mask = eio_pagesize () - 1;
  intptr_t adj  = (intptr_t)*addr & mask;

  /* round the address down, growing the length by what we cut off */
  *addr    = (void *)((intptr_t)*addr - adj);
  *length += adj;

  /* round the length up to a whole number of pages */
  *length = (*length + mask) & ~mask;
}
|
|
1200 | |
|
|
#if !_POSIX_MEMLOCK
/* fix: the stub previously expanded to the undefined eio_nosyscall();
 * use EIO_ENOSYS () like the eio__mlock/eio__msync stubs do */
# define eio__mlockall(a) EIO_ENOSYS ()
#else

/* wrapper around mlockall that translates EIO_MCL_* flags into the
 * system's MCL_* values when the two sets of constants differ */
static int
eio__mlockall (int flags)
{
#if __GLIBC__ == 2 && __GLIBC_MINOR__ <= 7
  /* work around old-glibc malloc/mlockall interaction,
   * see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=473812 */
  extern int mallopt (int, int);
  mallopt (-6, 238);
#endif

  if (EIO_MCL_CURRENT != MCL_CURRENT
      || EIO_MCL_FUTURE != MCL_FUTURE)
    {
      flags = 0
         | (flags & EIO_MCL_CURRENT ? MCL_CURRENT : 0)
         | (flags & EIO_MCL_FUTURE  ? MCL_FUTURE  : 0);
    }

  return mlockall (flags);
}
#endif
|
|
1224 | |
|
|
#if !_POSIX_MEMLOCK_RANGE
# define eio__mlock(a,b) EIO_ENOSYS ()
#else

/* lock the given region into RAM; mlock requires page-aligned
 * arguments, so the region is widened to whole pages first */
static int
eio__mlock (void *addr, size_t length)
{
  eio_page_align (&addr, &length);

  return mlock (addr, length);
}

#endif
|
|
1238 | |
|
|
#if !(_POSIX_MAPPED_FILES && _POSIX_SYNCHRONIZED_IO)
# define eio__msync(a,b,c) EIO_ENOSYS ()
#else

/* wrapper around msync that page-aligns the region and translates
 * EIO_MS_* flags into the system's MS_* values when they differ */
static int
eio__msync (void *mem, size_t len, int flags)
{
  eio_page_align (&mem, &len);

  /* fix: the first mismatch test compared EIO_MS_ASYNC against
   * MS_SYNC, which could only cause a redundant (but harmless)
   * translation; compare against MS_ASYNC as intended */
  if (EIO_MS_ASYNC != MS_ASYNC
      || EIO_MS_INVALIDATE != MS_INVALIDATE
      || EIO_MS_SYNC != MS_SYNC)
    {
      flags = 0
         | (flags & EIO_MS_ASYNC      ? MS_ASYNC      : 0)
         | (flags & EIO_MS_INVALIDATE ? MS_INVALIDATE : 0)
         | (flags & EIO_MS_SYNC       ? MS_SYNC       : 0);
    }

  return msync (mem, len, flags);
}

#endif
|
|
1262 | |
|
|
1263 | static int |
|
|
1264 | eio__mtouch (eio_req *req) |
|
|
1265 | { |
|
|
1266 | void *mem = req->ptr2; |
|
|
1267 | size_t len = req->size; |
|
|
1268 | int flags = req->int1; |
|
|
1269 | |
|
|
1270 | eio_page_align (&mem, &len); |
|
|
1271 | |
|
|
1272 | { |
|
|
1273 | intptr_t addr = (intptr_t)mem; |
|
|
1274 | intptr_t end = addr + len; |
|
|
1275 | intptr_t page = eio_pagesize (); |
|
|
1276 | |
|
|
1277 | if (addr < end) |
|
|
1278 | if (flags & EIO_MT_MODIFY) /* modify */ |
|
|
1279 | do { *((volatile sig_atomic_t *)addr) |= 0; } while ((addr += page) < len && !EIO_CANCELLED (req)); |
|
|
1280 | else |
|
|
1281 | do { *((volatile sig_atomic_t *)addr) ; } while ((addr += page) < len && !EIO_CANCELLED (req)); |
|
|
1282 | } |
|
|
1283 | |
|
|
1284 | return 0; |
|
|
1285 | } |
|
|
1286 | |
|
|
1287 | /*****************************************************************************/ |
|
|
1288 | /* requests implemented outside eio_execute, because they are so large */ |
|
|
1289 | |
|
|
1290 | static void |
|
|
1291 | eio__realpath (eio_req *req, etp_worker *self) |
|
|
1292 | { |
|
|
1293 | char *rel = req->ptr1; |
|
|
1294 | char *res; |
|
|
1295 | char *tmp1, *tmp2; |
|
|
1296 | #if SYMLOOP_MAX > 32 |
|
|
1297 | int symlinks = SYMLOOP_MAX; |
|
|
1298 | #else |
|
|
1299 | int symlinks = 32; |
|
|
1300 | #endif |
|
|
1301 | |
|
|
1302 | req->result = -1; |
|
|
1303 | |
|
|
1304 | errno = EINVAL; |
|
|
1305 | if (!rel) |
|
|
1306 | return; |
|
|
1307 | |
|
|
1308 | errno = ENOENT; |
|
|
1309 | if (!*rel) |
|
|
1310 | return; |
|
|
1311 | |
|
|
1312 | if (!req->ptr2) |
|
|
1313 | { |
|
|
1314 | X_LOCK (wrklock); |
|
|
1315 | req->flags |= EIO_FLAG_PTR2_FREE; |
|
|
1316 | X_UNLOCK (wrklock); |
|
|
1317 | req->ptr2 = malloc (PATH_MAX * 3); |
|
|
1318 | |
|
|
1319 | errno = ENOMEM; |
|
|
1320 | if (!req->ptr2) |
|
|
1321 | return; |
|
|
1322 | } |
|
|
1323 | |
|
|
1324 | res = req->ptr2; |
|
|
1325 | tmp1 = res + PATH_MAX; |
|
|
1326 | tmp2 = tmp1 + PATH_MAX; |
|
|
1327 | |
|
|
1328 | #if 0 /* disabled, the musl way to do things is just too racy */ |
|
|
1329 | #if __linux && defined(O_NONBLOCK) && defined(O_NOATIME) |
|
|
1330 | /* on linux we may be able to ask the kernel */ |
|
|
1331 | { |
|
|
1332 | int fd = open (rel, O_RDONLY | O_NONBLOCK | O_NOCTTY | O_NOATIME); |
|
|
1333 | |
|
|
1334 | if (fd >= 0) |
|
|
1335 | { |
|
|
1336 | sprintf (tmp1, "/proc/self/fd/%d", fd); |
|
|
1337 | req->result = readlink (tmp1, res, PATH_MAX); |
|
|
1338 | close (fd); |
|
|
1339 | |
|
|
1340 | /* here we should probably stat the open file and the disk file, to make sure they still match */ |
|
|
1341 | |
|
|
1342 | if (req->result > 0) |
|
|
1343 | goto done; |
|
|
1344 | } |
|
|
1345 | else if (errno == ELOOP || errno == ENAMETOOLONG || errno == ENOENT || errno == ENOTDIR || errno == EIO) |
|
|
1346 | return; |
|
|
1347 | } |
|
|
1348 | #endif |
|
|
1349 | #endif |
|
|
1350 | |
|
|
1351 | if (*rel != '/') |
|
|
1352 | { |
|
|
1353 | if (!getcwd (res, PATH_MAX)) |
|
|
1354 | return; |
|
|
1355 | |
|
|
1356 | if (res [1]) /* only use if not / */ |
|
|
1357 | res += strlen (res); |
|
|
1358 | } |
|
|
1359 | |
|
|
1360 | while (*rel) |
|
|
1361 | { |
|
|
1362 | ssize_t len, linklen; |
|
|
1363 | char *beg = rel; |
|
|
1364 | |
|
|
1365 | while (*rel && *rel != '/') |
|
|
1366 | ++rel; |
|
|
1367 | |
|
|
1368 | len = rel - beg; |
|
|
1369 | |
|
|
1370 | if (!len) /* skip slashes */ |
|
|
1371 | { |
|
|
1372 | ++rel; |
|
|
1373 | continue; |
|
|
1374 | } |
|
|
1375 | |
|
|
1376 | if (beg [0] == '.') |
|
|
1377 | { |
|
|
1378 | if (len == 1) |
|
|
1379 | continue; /* . - nop */ |
|
|
1380 | |
|
|
1381 | if (beg [1] == '.' && len == 2) |
|
|
1382 | { |
|
|
1383 | /* .. - back up one component, if possible */ |
|
|
1384 | |
|
|
1385 | while (res != req->ptr2) |
|
|
1386 | if (*--res == '/') |
|
|
1387 | break; |
|
|
1388 | |
|
|
1389 | continue; |
|
|
1390 | } |
|
|
1391 | } |
|
|
1392 | |
|
|
1393 | errno = ENAMETOOLONG; |
|
|
1394 | if (res + 1 + len + 1 >= tmp1) |
|
|
1395 | return; |
|
|
1396 | |
|
|
1397 | /* copy one component */ |
|
|
1398 | *res = '/'; |
|
|
1399 | memcpy (res + 1, beg, len); |
|
|
1400 | |
|
|
1401 | /* zero-terminate, for readlink */ |
|
|
1402 | res [len + 1] = 0; |
|
|
1403 | |
|
|
1404 | /* now check if it's a symlink */ |
|
|
1405 | linklen = readlink (req->ptr2, tmp1, PATH_MAX); |
|
|
1406 | |
|
|
1407 | if (linklen < 0) |
|
|
1408 | { |
|
|
1409 | if (errno != EINVAL) |
|
|
1410 | return; |
|
|
1411 | |
|
|
1412 | /* it's a normal directory. hopefully */ |
|
|
1413 | res += len + 1; |
|
|
1414 | } |
|
|
1415 | else |
|
|
1416 | { |
|
|
1417 | /* yay, it was a symlink - build new path in tmp2 */ |
|
|
1418 | int rellen = strlen (rel); |
|
|
1419 | |
|
|
1420 | errno = ENAMETOOLONG; |
|
|
1421 | if (linklen + 1 + rellen >= PATH_MAX) |
|
|
1422 | return; |
|
|
1423 | |
|
|
1424 | errno = ELOOP; |
|
|
1425 | if (!--symlinks) |
|
|
1426 | return; |
|
|
1427 | |
|
|
1428 | if (*tmp1 == '/') |
|
|
1429 | res = req->ptr2; /* symlink resolves to an absolute path */ |
|
|
1430 | |
|
|
1431 | /* we need to be careful, as rel might point into tmp2 already */ |
|
|
1432 | memmove (tmp2 + linklen + 1, rel, rellen + 1); |
|
|
1433 | tmp2 [linklen] = '/'; |
|
|
1434 | memcpy (tmp2, tmp1, linklen); |
|
|
1435 | |
|
|
1436 | rel = tmp2; |
|
|
1437 | } |
|
|
1438 | } |
|
|
1439 | |
|
|
1440 | /* special case for the lone root path */ |
|
|
1441 | if (res == req->ptr2) |
|
|
1442 | *res++ = '/'; |
|
|
1443 | |
|
|
1444 | req->result = res - (char *)req->ptr2; |
|
|
1445 | |
|
|
1446 | done: |
|
|
1447 | req->ptr2 = realloc (req->ptr2, req->result); /* trade time for space savings */ |
1086 | } |
1448 | } |
1087 | |
1449 | |
1088 | static signed char |
1450 | static signed char |
1089 | eio_dent_cmp (const eio_dirent *a, const eio_dirent *b) |
1451 | eio_dent_cmp (const eio_dirent *a, const eio_dirent *b) |
1090 | { |
1452 | { |
… | |
… | |
1272 | |
1634 | |
1273 | X_LOCK (wrklock); |
1635 | X_LOCK (wrklock); |
1274 | /* the corresponding closedir is in ETP_WORKER_CLEAR */ |
1636 | /* the corresponding closedir is in ETP_WORKER_CLEAR */ |
1275 | self->dirp = dirp = opendir (req->ptr1); |
1637 | self->dirp = dirp = opendir (req->ptr1); |
1276 | |
1638 | |
|
|
1639 | if (req->flags & EIO_FLAG_PTR1_FREE) |
|
|
1640 | free (req->ptr1); |
|
|
1641 | |
1277 | req->flags |= EIO_FLAG_PTR1_FREE | EIO_FLAG_PTR2_FREE; |
1642 | req->flags |= EIO_FLAG_PTR1_FREE | EIO_FLAG_PTR2_FREE; |
1278 | req->ptr1 = dents = flags ? malloc (dentalloc * sizeof (eio_dirent)) : 0; |
1643 | req->ptr1 = dents = flags ? malloc (dentalloc * sizeof (eio_dirent)) : 0; |
1279 | req->ptr2 = names = malloc (namesalloc); |
1644 | req->ptr2 = names = malloc (namesalloc); |
1280 | X_UNLOCK (wrklock); |
1645 | X_UNLOCK (wrklock); |
1281 | |
1646 | |
… | |
… | |
1332 | /* skip . and .. entries */ |
1697 | /* skip . and .. entries */ |
1333 | if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2]))) |
1698 | if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2]))) |
1334 | { |
1699 | { |
1335 | int len = D_NAMLEN (entp) + 1; |
1700 | int len = D_NAMLEN (entp) + 1; |
1336 | |
1701 | |
1337 | while (expect_false (namesoffs + len > namesalloc)) |
1702 | while (ecb_expect_false (namesoffs + len > namesalloc)) |
1338 | { |
1703 | { |
1339 | namesalloc *= 2; |
1704 | namesalloc *= 2; |
1340 | X_LOCK (wrklock); |
1705 | X_LOCK (wrklock); |
1341 | req->ptr2 = names = realloc (names, namesalloc); |
1706 | req->ptr2 = names = realloc (names, namesalloc); |
1342 | X_UNLOCK (wrklock); |
1707 | X_UNLOCK (wrklock); |
… | |
… | |
1349 | |
1714 | |
1350 | if (dents) |
1715 | if (dents) |
1351 | { |
1716 | { |
1352 | struct eio_dirent *ent; |
1717 | struct eio_dirent *ent; |
1353 | |
1718 | |
1354 | if (expect_false (dentoffs == dentalloc)) |
1719 | if (ecb_expect_false (dentoffs == dentalloc)) |
1355 | { |
1720 | { |
1356 | dentalloc *= 2; |
1721 | dentalloc *= 2; |
1357 | X_LOCK (wrklock); |
1722 | X_LOCK (wrklock); |
1358 | req->ptr1 = dents = realloc (dents, dentalloc * sizeof (eio_dirent)); |
1723 | req->ptr1 = dents = realloc (dents, dentalloc * sizeof (eio_dirent)); |
1359 | X_UNLOCK (wrklock); |
1724 | X_UNLOCK (wrklock); |
… | |
… | |
1447 | break; |
1812 | break; |
1448 | } |
1813 | } |
1449 | } |
1814 | } |
1450 | } |
1815 | } |
1451 | |
1816 | |
#ifdef PAGESIZE
# define eio_pagesize() PAGESIZE
#else
/* query (once) and return the system page size */
static intptr_t
eio_pagesize (void)
{
  static intptr_t size;

  if (!size)
    size = sysconf (_SC_PAGESIZE);

  return size;
}
#endif
|
|
1466 | |
|
|
/* expand (*addr, *length) so that both start and end of the region
 * fall on page boundaries, as required by mlock/msync and friends */
static void
eio_page_align (void **addr, size_t *length)
{
  intptr_t mask = eio_pagesize () - 1;
  intptr_t slop = (intptr_t)*addr & mask;

  /* shift the start back to the enclosing page boundary */
  *addr    = (void *)((intptr_t)*addr - slop);
  *length += slop;

  /* and extend the length to cover whole pages */
  *length = (*length + mask) & ~mask;
}
|
|
1481 | |
|
|
#if !_POSIX_MEMLOCK
# define eio__mlockall(a) ((errno = ENOSYS), -1)
#else

/* call mlockall, converting EIO_MCL_* flags to the native MCL_*
 * values when the constants do not match */
static int
eio__mlockall (int flags)
{
#if __GLIBC__ == 2 && __GLIBC_MINOR__ <= 7
  /* old-glibc workaround, see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=473812 */
  extern int mallopt (int, int);
  mallopt (-6, 238);
#endif

  if (EIO_MCL_CURRENT != MCL_CURRENT
      || EIO_MCL_FUTURE != MCL_FUTURE)
    {
      flags = 0
         | (flags & EIO_MCL_CURRENT ? MCL_CURRENT : 0)
         | (flags & EIO_MCL_FUTURE  ? MCL_FUTURE  : 0);
    }

  return mlockall (flags);
}
#endif
|
|
1505 | |
|
|
#if !_POSIX_MEMLOCK_RANGE
# define eio__mlock(a,b) ((errno = ENOSYS), -1)
#else

/* page-align the region, then pin it into RAM via mlock */
static int
eio__mlock (void *addr, size_t length)
{
  eio_page_align (&addr, &length);

  return mlock (addr, length);
}

#endif
|
|
1519 | |
|
|
#if !(_POSIX_MAPPED_FILES && _POSIX_SYNCHRONIZED_IO)
# define eio__msync(a,b,c) ((errno = ENOSYS), -1)
#else

/* page-align the region and call msync, mapping EIO_MS_* flags to
 * the native MS_* values when the constants differ */
int
eio__msync (void *mem, size_t len, int flags)
{
  eio_page_align (&mem, &len);

  /* fix: the first comparison mistakenly tested EIO_MS_ASYNC against
   * MS_SYNC; the translation branch is always correct, so the old
   * test was merely redundant, never wrong in effect */
  if (EIO_MS_ASYNC != MS_ASYNC
      || EIO_MS_INVALIDATE != MS_INVALIDATE
      || EIO_MS_SYNC != MS_SYNC)
    {
      flags = 0
         | (flags & EIO_MS_ASYNC      ? MS_ASYNC      : 0)
         | (flags & EIO_MS_INVALIDATE ? MS_INVALIDATE : 0)
         | (flags & EIO_MS_SYNC       ? MS_SYNC       : 0);
    }

  return msync (mem, len, flags);
}

#endif
|
|
1543 | |
|
|
1544 | int |
|
|
1545 | eio__mtouch (eio_req *req) |
|
|
1546 | { |
|
|
1547 | void *mem = req->ptr2; |
|
|
1548 | size_t len = req->size; |
|
|
1549 | int flags = req->int1; |
|
|
1550 | |
|
|
1551 | eio_page_align (&mem, &len); |
|
|
1552 | |
|
|
1553 | { |
|
|
1554 | intptr_t addr = (intptr_t)mem; |
|
|
1555 | intptr_t end = addr + len; |
|
|
1556 | intptr_t page = eio_pagesize (); |
|
|
1557 | |
|
|
1558 | if (addr < end) |
|
|
1559 | if (flags & EIO_MT_MODIFY) /* modify */ |
|
|
1560 | do { *((volatile sig_atomic_t *)addr) |= 0; } while ((addr += page) < len && !EIO_CANCELLED (req)); |
|
|
1561 | else |
|
|
1562 | do { *((volatile sig_atomic_t *)addr) ; } while ((addr += page) < len && !EIO_CANCELLED (req)); |
|
|
1563 | } |
|
|
1564 | |
|
|
1565 | return 0; |
|
|
1566 | } |
|
|
1567 | |
|
|
1568 | /*****************************************************************************/ |
1817 | /*****************************************************************************/ |
1569 | |
1818 | |
1570 | #define ALLOC(len) \ |
1819 | #define ALLOC(len) \ |
1571 | if (!req->ptr2) \ |
1820 | if (!req->ptr2) \ |
1572 | { \ |
1821 | { \ |
… | |
… | |
1585 | X_THREAD_PROC (etp_proc) |
1834 | X_THREAD_PROC (etp_proc) |
1586 | { |
1835 | { |
1587 | ETP_REQ *req; |
1836 | ETP_REQ *req; |
1588 | struct timespec ts; |
1837 | struct timespec ts; |
1589 | etp_worker *self = (etp_worker *)thr_arg; |
1838 | etp_worker *self = (etp_worker *)thr_arg; |
|
|
1839 | int timeout; |
1590 | |
1840 | |
1591 | /* try to distribute timeouts somewhat randomly */ |
1841 | /* try to distribute timeouts somewhat evenly */ |
1592 | ts.tv_nsec = ((unsigned long)self & 1023UL) * (1000000000UL / 1024UL); |
1842 | ts.tv_nsec = ((unsigned long)self & 1023UL) * (1000000000UL / 1024UL); |
1593 | |
1843 | |
1594 | for (;;) |
1844 | for (;;) |
1595 | { |
1845 | { |
|
|
1846 | ts.tv_sec = 0; |
|
|
1847 | |
1596 | X_LOCK (reqlock); |
1848 | X_LOCK (reqlock); |
1597 | |
1849 | |
1598 | for (;;) |
1850 | for (;;) |
1599 | { |
1851 | { |
1600 | self->req = req = reqq_shift (&req_queue); |
1852 | self->req = req = reqq_shift (&req_queue); |
1601 | |
1853 | |
1602 | if (req) |
1854 | if (req) |
1603 | break; |
1855 | break; |
1604 | |
1856 | |
|
|
1857 | if (ts.tv_sec == 1) /* no request, but timeout detected, let's quit */ |
|
|
1858 | { |
|
|
1859 | X_UNLOCK (reqlock); |
|
|
1860 | X_LOCK (wrklock); |
|
|
1861 | --started; |
|
|
1862 | X_UNLOCK (wrklock); |
|
|
1863 | goto quit; |
|
|
1864 | } |
|
|
1865 | |
1605 | ++idle; |
1866 | ++idle; |
1606 | |
1867 | |
1607 | ts.tv_sec = time (0) + idle_timeout; |
1868 | if (idle <= max_idle) |
1608 | if (X_COND_TIMEDWAIT (reqwait, reqlock, ts) == ETIMEDOUT) |
1869 | /* we are allowed to idle, so do so without any timeout */ |
|
|
1870 | X_COND_WAIT (reqwait, reqlock); |
|
|
1871 | else |
1609 | { |
1872 | { |
1610 | if (idle > max_idle) |
1873 | /* initialise timeout once */ |
1611 | { |
1874 | if (!ts.tv_sec) |
1612 | --idle; |
1875 | ts.tv_sec = time (0) + idle_timeout; |
1613 | X_UNLOCK (reqlock); |
|
|
1614 | X_LOCK (wrklock); |
|
|
1615 | --started; |
|
|
1616 | X_UNLOCK (wrklock); |
|
|
1617 | goto quit; |
|
|
1618 | } |
|
|
1619 | |
1876 | |
1620 | /* we are allowed to idle, so do so without any timeout */ |
|
|
1621 | X_COND_WAIT (reqwait, reqlock); |
1877 | if (X_COND_TIMEDWAIT (reqwait, reqlock, ts) == ETIMEDOUT) |
|
|
1878 | ts.tv_sec = 1; /* assuming this is not a value computed above.,.. */ |
1622 | } |
1879 | } |
1623 | |
1880 | |
1624 | --idle; |
1881 | --idle; |
1625 | } |
1882 | } |
1626 | |
1883 | |
… | |
… | |
1629 | X_UNLOCK (reqlock); |
1886 | X_UNLOCK (reqlock); |
1630 | |
1887 | |
1631 | if (req->type < 0) |
1888 | if (req->type < 0) |
1632 | goto quit; |
1889 | goto quit; |
1633 | |
1890 | |
1634 | if (!EIO_CANCELLED (req)) |
|
|
1635 | ETP_EXECUTE (self, req); |
1891 | ETP_EXECUTE (self, req); |
1636 | |
1892 | |
1637 | X_LOCK (reslock); |
1893 | X_LOCK (reslock); |
1638 | |
1894 | |
1639 | ++npending; |
1895 | ++npending; |
1640 | |
1896 | |
… | |
… | |
1655 | return 0; |
1911 | return 0; |
1656 | } |
1912 | } |
1657 | |
1913 | |
1658 | /*****************************************************************************/ |
1914 | /*****************************************************************************/ |
1659 | |
1915 | |
|
|
1916 | int ecb_cold |
1660 | int eio_init (void (*want_poll)(void), void (*done_poll)(void)) |
1917 | eio_init (void (*want_poll)(void), void (*done_poll)(void)) |
1661 | { |
1918 | { |
1662 | return etp_init (want_poll, done_poll); |
1919 | return etp_init (want_poll, done_poll); |
1663 | } |
1920 | } |
1664 | |
1921 | |
|
|
1922 | ecb_inline void |
1665 | static void eio_api_destroy (eio_req *req) |
1923 | eio_api_destroy (eio_req *req) |
1666 | { |
1924 | { |
1667 | free (req); |
1925 | free (req); |
1668 | } |
1926 | } |
1669 | |
1927 | |
1670 | #define REQ(rtype) \ |
1928 | #define REQ(rtype) \ |
… | |
… | |
1689 | { \ |
1947 | { \ |
1690 | eio_api_destroy (req); \ |
1948 | eio_api_destroy (req); \ |
1691 | return 0; \ |
1949 | return 0; \ |
1692 | } |
1950 | } |
1693 | |
1951 | |
|
|
1952 | static void |
1694 | static void eio_execute (etp_worker *self, eio_req *req) |
1953 | eio_execute (etp_worker *self, eio_req *req) |
1695 | { |
1954 | { |
|
|
1955 | if (ecb_expect_false (EIO_CANCELLED (req))) |
|
|
1956 | { |
|
|
1957 | req->result = -1; |
|
|
1958 | req->errorno = ECANCELED; |
|
|
1959 | return; |
|
|
1960 | } |
|
|
1961 | |
1696 | switch (req->type) |
1962 | switch (req->type) |
1697 | { |
1963 | { |
1698 | case EIO_READ: ALLOC (req->size); |
1964 | case EIO_READ: ALLOC (req->size); |
1699 | req->result = req->offs >= 0 |
1965 | req->result = req->offs >= 0 |
1700 | ? pread (req->int1, req->ptr2, req->size, req->offs) |
1966 | ? pread (req->int1, req->ptr2, req->size, req->offs) |
… | |
… | |
1734 | case EIO_RENAME: req->result = rename (req->ptr1, req->ptr2); break; |
2000 | case EIO_RENAME: req->result = rename (req->ptr1, req->ptr2); break; |
1735 | case EIO_LINK: req->result = link (req->ptr1, req->ptr2); break; |
2001 | case EIO_LINK: req->result = link (req->ptr1, req->ptr2); break; |
1736 | case EIO_SYMLINK: req->result = symlink (req->ptr1, req->ptr2); break; |
2002 | case EIO_SYMLINK: req->result = symlink (req->ptr1, req->ptr2); break; |
1737 | case EIO_MKNOD: req->result = mknod (req->ptr1, (mode_t)req->int2, (dev_t)req->offs); break; |
2003 | case EIO_MKNOD: req->result = mknod (req->ptr1, (mode_t)req->int2, (dev_t)req->offs); break; |
1738 | |
2004 | |
|
|
2005 | case EIO_REALPATH: eio__realpath (req, self); break; |
|
|
2006 | |
1739 | case EIO_READLINK: ALLOC (PATH_MAX); |
2007 | case EIO_READLINK: ALLOC (PATH_MAX); |
1740 | req->result = readlink (req->ptr1, req->ptr2, PATH_MAX); break; |
2008 | req->result = readlink (req->ptr1, req->ptr2, PATH_MAX); break; |
1741 | |
2009 | |
1742 | case EIO_SYNC: req->result = 0; sync (); break; |
2010 | case EIO_SYNC: req->result = 0; sync (); break; |
1743 | case EIO_FSYNC: req->result = fsync (req->int1); break; |
2011 | case EIO_FSYNC: req->result = fsync (req->int1); break; |
… | |
… | |
1745 | case EIO_MSYNC: req->result = eio__msync (req->ptr2, req->size, req->int1); break; |
2013 | case EIO_MSYNC: req->result = eio__msync (req->ptr2, req->size, req->int1); break; |
1746 | case EIO_MTOUCH: req->result = eio__mtouch (req); break; |
2014 | case EIO_MTOUCH: req->result = eio__mtouch (req); break; |
1747 | case EIO_MLOCK: req->result = eio__mlock (req->ptr2, req->size); break; |
2015 | case EIO_MLOCK: req->result = eio__mlock (req->ptr2, req->size); break; |
1748 | case EIO_MLOCKALL: req->result = eio__mlockall (req->int1); break; |
2016 | case EIO_MLOCKALL: req->result = eio__mlockall (req->int1); break; |
1749 | case EIO_SYNC_FILE_RANGE: req->result = eio__sync_file_range (req->int1, req->offs, req->size, req->int2); break; |
2017 | case EIO_SYNC_FILE_RANGE: req->result = eio__sync_file_range (req->int1, req->offs, req->size, req->int2); break; |
|
|
2018 | case EIO_FALLOCATE: req->result = eio__fallocate (req->int1, req->int2, req->offs, req->size); break; |
1750 | |
2019 | |
1751 | case EIO_READDIR: eio__scandir (req, self); break; |
2020 | case EIO_READDIR: eio__scandir (req, self); break; |
1752 | |
2021 | |
1753 | case EIO_BUSY: |
2022 | case EIO_BUSY: |
1754 | #ifdef _WIN32 |
2023 | #ifdef _WIN32 |
… | |
… | |
1854 | eio_req *eio_sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags, int pri, eio_cb cb, void *data) |
2123 | eio_req *eio_sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags, int pri, eio_cb cb, void *data) |
1855 | { |
2124 | { |
1856 | REQ (EIO_SYNC_FILE_RANGE); req->int1 = fd; req->offs = offset; req->size = nbytes; req->int2 = flags; SEND; |
2125 | REQ (EIO_SYNC_FILE_RANGE); req->int1 = fd; req->offs = offset; req->size = nbytes; req->int2 = flags; SEND; |
1857 | } |
2126 | } |
1858 | |
2127 | |
|
|
2128 | eio_req *eio_fallocate (int fd, int mode, off_t offset, size_t len, int pri, eio_cb cb, void *data) |
|
|
2129 | { |
|
|
2130 | REQ (EIO_FALLOCATE); req->int1 = fd; req->int2 = mode; req->offs = offset; req->size = len; SEND; |
|
|
2131 | } |
|
|
2132 | |
1859 | eio_req *eio_fdatasync (int fd, int pri, eio_cb cb, void *data) |
2133 | eio_req *eio_fdatasync (int fd, int pri, eio_cb cb, void *data) |
1860 | { |
2134 | { |
1861 | REQ (EIO_FDATASYNC); req->int1 = fd; SEND; |
2135 | REQ (EIO_FDATASYNC); req->int1 = fd; SEND; |
1862 | } |
2136 | } |
1863 | |
2137 | |
… | |
… | |
1958 | } |
2232 | } |
1959 | |
2233 | |
1960 | eio_req *eio_readlink (const char *path, int pri, eio_cb cb, void *data) |
2234 | eio_req *eio_readlink (const char *path, int pri, eio_cb cb, void *data) |
1961 | { |
2235 | { |
1962 | return eio__1path (EIO_READLINK, path, pri, cb, data); |
2236 | return eio__1path (EIO_READLINK, path, pri, cb, data); |
|
|
2237 | } |
|
|
2238 | |
|
|
2239 | eio_req *eio_realpath (const char *path, int pri, eio_cb cb, void *data) |
|
|
2240 | { |
|
|
2241 | return eio__1path (EIO_REALPATH, path, pri, cb, data); |
1963 | } |
2242 | } |
1964 | |
2243 | |
1965 | eio_req *eio_stat (const char *path, int pri, eio_cb cb, void *data) |
2244 | eio_req *eio_stat (const char *path, int pri, eio_cb cb, void *data) |
1966 | { |
2245 | { |
1967 | return eio__1path (EIO_STAT, path, pri, cb, data); |
2246 | return eio__1path (EIO_STAT, path, pri, cb, data); |
… | |
… | |
2026 | eio_req *eio_rename (const char *path, const char *new_path, int pri, eio_cb cb, void *data) |
2305 | eio_req *eio_rename (const char *path, const char *new_path, int pri, eio_cb cb, void *data) |
2027 | { |
2306 | { |
2028 | return eio__2path (EIO_RENAME, path, new_path, pri, cb, data); |
2307 | return eio__2path (EIO_RENAME, path, new_path, pri, cb, data); |
2029 | } |
2308 | } |
2030 | |
2309 | |
2031 | eio_req *eio_custom (void (*)(eio_req *) execute, int pri, eio_cb cb, void *data); |
2310 | eio_req *eio_custom (void (*execute)(eio_req *), int pri, eio_cb cb, void *data) |
2032 | { |
2311 | { |
2033 | REQ (EIO_CUSTOM); req->feed = execute; SEND; |
2312 | REQ (EIO_CUSTOM); req->feed = execute; SEND; |
2034 | } |
2313 | } |
2035 | |
2314 | |
2036 | #endif |
2315 | #endif |
… | |
… | |
2047 | #undef SEND |
2326 | #undef SEND |
2048 | |
2327 | |
2049 | /*****************************************************************************/ |
2328 | /*****************************************************************************/ |
2050 | /* grp functions */ |
2329 | /* grp functions */ |
2051 | |
2330 | |
|
|
2331 | void |
2052 | void eio_grp_feed (eio_req *grp, void (*feed)(eio_req *req), int limit) |
2332 | eio_grp_feed (eio_req *grp, void (*feed)(eio_req *req), int limit) |
2053 | { |
2333 | { |
2054 | grp->int2 = limit; |
2334 | grp->int2 = limit; |
2055 | grp->feed = feed; |
2335 | grp->feed = feed; |
2056 | |
2336 | |
2057 | grp_try_feed (grp); |
2337 | grp_try_feed (grp); |
2058 | } |
2338 | } |
2059 | |
2339 | |
|
|
2340 | void |
2060 | void eio_grp_limit (eio_req *grp, int limit) |
2341 | eio_grp_limit (eio_req *grp, int limit) |
2061 | { |
2342 | { |
2062 | grp->int2 = limit; |
2343 | grp->int2 = limit; |
2063 | |
2344 | |
2064 | grp_try_feed (grp); |
2345 | grp_try_feed (grp); |
2065 | } |
2346 | } |
2066 | |
2347 | |
|
|
2348 | void |
2067 | void eio_grp_add (eio_req *grp, eio_req *req) |
2349 | eio_grp_add (eio_req *grp, eio_req *req) |
2068 | { |
2350 | { |
2069 | assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2)); |
2351 | assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2)); |
2070 | |
2352 | |
2071 | grp->flags |= EIO_FLAG_GROUPADD; |
2353 | grp->flags |= EIO_FLAG_GROUPADD; |
2072 | |
2354 | |
… | |
… | |
2083 | } |
2365 | } |
2084 | |
2366 | |
2085 | /*****************************************************************************/ |
2367 | /*****************************************************************************/ |
2086 | /* misc garbage */ |
2368 | /* misc garbage */ |
2087 | |
2369 | |
|
|
2370 | ssize_t |
2088 | ssize_t eio_sendfile_sync (int ofd, int ifd, off_t offset, size_t count) |
2371 | eio_sendfile_sync (int ofd, int ifd, off_t offset, size_t count) |
2089 | { |
2372 | { |
2090 | etp_worker wrk; |
2373 | etp_worker wrk; |
2091 | ssize_t ret; |
2374 | ssize_t ret; |
2092 | |
2375 | |
2093 | wrk.dbuf = 0; |
2376 | wrk.dbuf = 0; |