… | |
… | |
132 | #endif |
132 | #endif |
133 | #ifndef D_NAMLEN |
133 | #ifndef D_NAMLEN |
134 | # define D_NAMLEN(de) strlen ((de)->d_name) |
134 | # define D_NAMLEN(de) strlen ((de)->d_name) |
135 | #endif |
135 | #endif |
136 | |
136 | |
137 | /* number of seconds after which idle threads exit */
|
|
138 | #define IDLE_TIMEOUT 10 |
|
|
139 | |
|
|
140 | /* used for struct dirent, AIX doesn't provide it */ |
137 | /* used for struct dirent, AIX doesn't provide it */ |
141 | #ifndef NAME_MAX |
138 | #ifndef NAME_MAX |
142 | # define NAME_MAX 4096 |
139 | # define NAME_MAX 4096 |
143 | #endif |
140 | #endif |
144 | |
141 | |
… | |
… | |
223 | static unsigned int max_poll_reqs; /* reslock */ |
220 | static unsigned int max_poll_reqs; /* reslock */ |
224 | |
221 | |
225 | static volatile unsigned int nreqs; /* reqlock */ |
222 | static volatile unsigned int nreqs; /* reqlock */ |
226 | static volatile unsigned int nready; /* reqlock */ |
223 | static volatile unsigned int nready; /* reqlock */ |
227 | static volatile unsigned int npending; /* reqlock */ |
224 | static volatile unsigned int npending; /* reqlock */ |
228 | static volatile unsigned int max_idle = 4; |
225 | static volatile unsigned int max_idle = 4; /* maximum number of threads that can idle indefinitely */ |
|
|
226 | static volatile unsigned int idle_timeout = 10; /* number of seconds after which idle threads exit */
229 | |
227 | |
230 | static xmutex_t wrklock = X_MUTEX_INIT; |
228 | static xmutex_t wrklock; |
231 | static xmutex_t reslock = X_MUTEX_INIT; |
229 | static xmutex_t reslock; |
232 | static xmutex_t reqlock = X_MUTEX_INIT; |
230 | static xmutex_t reqlock; |
233 | static xcond_t reqwait = X_COND_INIT; |
231 | static xcond_t reqwait; |
234 | |
232 | |
235 | #if !HAVE_PREADWRITE |
233 | #if !HAVE_PREADWRITE |
236 | /* |
234 | /* |
237 | * make our pread/pwrite emulation safe against themselves, but not against |
235 | * make our pread/pwrite emulation safe against themselves, but not against |
238 | * normal read/write by using a mutex. slows down execution a lot, |
236 | * normal read/write by using a mutex. slows down execution a lot, |
… | |
… | |
368 | } |
366 | } |
369 | |
367 | |
370 | abort (); |
368 | abort (); |
371 | } |
369 | } |
372 | |
370 | |
|
|
371 | static void etp_thread_init (void) |
|
|
372 | { |
|
|
373 | X_MUTEX_CREATE (wrklock); |
|
|
374 | X_MUTEX_CREATE (reslock); |
|
|
375 | X_MUTEX_CREATE (reqlock); |
|
|
376 | X_COND_CREATE (reqwait); |
|
|
377 | } |
|
|
378 | |
373 | static void etp_atfork_prepare (void) |
379 | static void etp_atfork_prepare (void) |
374 | { |
380 | { |
375 | X_LOCK (wrklock); |
381 | X_LOCK (wrklock); |
376 | X_LOCK (reqlock); |
382 | X_LOCK (reqlock); |
377 | X_LOCK (reslock); |
383 | X_LOCK (reslock); |
… | |
… | |
415 | idle = 0; |
421 | idle = 0; |
416 | nreqs = 0; |
422 | nreqs = 0; |
417 | nready = 0; |
423 | nready = 0; |
418 | npending = 0; |
424 | npending = 0; |
419 | |
425 | |
420 | etp_atfork_parent (); |
426 | etp_thread_init (); |
421 | } |
427 | } |
422 | |
428 | |
423 | static void |
429 | static void |
424 | etp_once_init (void) |
430 | etp_once_init (void) |
425 | { |
431 | { |
|
|
432 | etp_thread_init (); |
426 | X_THREAD_ATFORK (etp_atfork_prepare, etp_atfork_parent, etp_atfork_child); |
433 | X_THREAD_ATFORK (etp_atfork_prepare, etp_atfork_parent, etp_atfork_child); |
427 | } |
434 | } |
428 | |
435 | |
429 | static int |
436 | static int |
430 | etp_init (void (*want_poll)(void), void (*done_poll)(void)) |
437 | etp_init (void (*want_poll)(void), void (*done_poll)(void)) |
… | |
… | |
621 | } |
628 | } |
622 | |
629 | |
623 | static void etp_set_max_idle (unsigned int nthreads) |
630 | static void etp_set_max_idle (unsigned int nthreads) |
624 | { |
631 | { |
625 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
632 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
626 | max_idle = nthreads <= 0 ? 1 : nthreads; |
633 | max_idle = nthreads; |
|
|
634 | if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); |
|
|
635 | } |
|
|
636 | |
|
|
637 | static void etp_set_idle_timeout (unsigned int seconds) |
|
|
638 | { |
|
|
639 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
|
|
640 | idle_timeout = seconds; |
627 | if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); |
641 | if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); |
628 | } |
642 | } |
629 | |
643 | |
630 | static void etp_set_min_parallel (unsigned int nthreads) |
644 | static void etp_set_min_parallel (unsigned int nthreads) |
631 | { |
645 | { |
… | |
… | |
757 | } |
771 | } |
758 | |
772 | |
759 | void eio_set_max_idle (unsigned int nthreads) |
773 | void eio_set_max_idle (unsigned int nthreads) |
760 | { |
774 | { |
761 | etp_set_max_idle (nthreads); |
775 | etp_set_max_idle (nthreads); |
|
|
776 | } |
|
|
777 | |
|
|
778 | void eio_set_idle_timeout (unsigned int seconds) |
|
|
779 | { |
|
|
780 | etp_set_idle_timeout (seconds); |
762 | } |
781 | } |
763 | |
782 | |
764 | void eio_set_min_parallel (unsigned int nthreads) |
783 | void eio_set_min_parallel (unsigned int nthreads) |
765 | { |
784 | { |
766 | etp_set_min_parallel (nthreads); |
785 | etp_set_min_parallel (nthreads); |
… | |
… | |
1041 | } |
1060 | } |
1042 | |
1061 | |
1043 | static signed char |
1062 | static signed char |
1044 | eio_dent_cmp (const eio_dirent *a, const eio_dirent *b) |
1063 | eio_dent_cmp (const eio_dirent *a, const eio_dirent *b) |
1045 | { |
1064 | { |
1046 | return a->score - b->score ? a->score - b->score /* works because our signed char is always 0..100 */ |
1065 | return a->score - b->score ? a->score - b->score /* works because our signed char is always 0..100 */ |
1047 | : a->inode < b->inode ? -1 : a->inode > b->inode ? 1 : 0; |
1066 | : a->inode < b->inode ? -1 |
|
|
1067 | : a->inode > b->inode ? 1 |
|
|
1068 | : 0; |
1048 | } |
1069 | } |
1049 | |
1070 | |
1050 | #define EIO_DENT_CMP(i,op,j) eio_dent_cmp (&i, &j) op 0 |
1071 | #define EIO_DENT_CMP(i,op,j) eio_dent_cmp (&i, &j) op 0 |
1051 | |
1072 | |
1052 | #define EIO_SORT_CUTOFF 30 /* quite high, but performs well on many filesystems */ |
1073 | #define EIO_SORT_CUTOFF 30 /* quite high, but performs well on many filesystems */ |
… | |
… | |
1058 | unsigned char bits [9 + sizeof (ino_t) * 8]; |
1079 | unsigned char bits [9 + sizeof (ino_t) * 8]; |
1059 | unsigned char *bit = bits; |
1080 | unsigned char *bit = bits; |
1060 | |
1081 | |
1061 | assert (CHAR_BIT == 8); |
1082 | assert (CHAR_BIT == 8); |
1062 | assert (sizeof (eio_dirent) * 8 < 256); |
1083 | assert (sizeof (eio_dirent) * 8 < 256); |
1063 | assert (offsetof (eio_dirent, inode)); /* we use 0 as sentinel */ |
1084 | assert (offsetof (eio_dirent, inode)); /* we use bit #0 as sentinel */ |
1064 | assert (offsetof (eio_dirent, score)); /* we use 0 as sentinel */ |
1085 | assert (offsetof (eio_dirent, score)); /* we use bit #0 as sentinel */ |
1065 | |
1086 | |
1066 | if (size <= EIO_SORT_FAST) |
1087 | if (size <= EIO_SORT_FAST) |
1067 | return; |
1088 | return; |
1068 | |
1089 | |
1069 | /* first prepare an array of bits to test in our radix sort */ |
1090 | /* first prepare an array of bits to test in our radix sort */ |
… | |
… | |
1224 | flags &= ~(EIO_READDIR_DIRS_FIRST | EIO_READDIR_STAT_ORDER); |
1245 | flags &= ~(EIO_READDIR_DIRS_FIRST | EIO_READDIR_STAT_ORDER); |
1225 | |
1246 | |
1226 | X_LOCK (wrklock); |
1247 | X_LOCK (wrklock); |
1227 | /* the corresponding closedir is in ETP_WORKER_CLEAR */ |
1248 | /* the corresponding closedir is in ETP_WORKER_CLEAR */ |
1228 | self->dirp = dirp = opendir (req->ptr1); |
1249 | self->dirp = dirp = opendir (req->ptr1); |
|
|
1250 | |
1229 | req->flags |= EIO_FLAG_PTR1_FREE | EIO_FLAG_PTR2_FREE; |
1251 | req->flags |= EIO_FLAG_PTR1_FREE | EIO_FLAG_PTR2_FREE; |
1230 | req->ptr1 = dents = flags ? malloc (dentalloc * sizeof (eio_dirent)) : 0; |
1252 | req->ptr1 = dents = flags ? malloc (dentalloc * sizeof (eio_dirent)) : 0; |
1231 | req->ptr2 = names = malloc (namesalloc); |
1253 | req->ptr2 = names = malloc (namesalloc); |
1232 | X_UNLOCK (wrklock); |
1254 | X_UNLOCK (wrklock); |
1233 | |
1255 | |
… | |
… | |
1245 | /* sort etc. */ |
1267 | /* sort etc. */ |
1246 | req->int1 = flags; |
1268 | req->int1 = flags; |
1247 | req->result = dentoffs; |
1269 | req->result = dentoffs; |
1248 | |
1270 | |
1249 | if (flags & EIO_READDIR_STAT_ORDER) |
1271 | if (flags & EIO_READDIR_STAT_ORDER) |
1250 | eio_dent_sort (dents, dentoffs, 0, inode_bits); /* sort by inode exclusively */ |
1272 | eio_dent_sort (dents, dentoffs, flags & EIO_READDIR_DIRS_FIRST ? 7 : 0, inode_bits); |
1251 | else if (flags & EIO_READDIR_DIRS_FIRST) |
1273 | else if (flags & EIO_READDIR_DIRS_FIRST) |
1252 | if (flags & EIO_READDIR_FOUND_UNKNOWN) |
1274 | if (flags & EIO_READDIR_FOUND_UNKNOWN) |
1253 | eio_dent_sort (dents, dentoffs, 7, inode_bits); /* sort by score and inode */ |
1275 | eio_dent_sort (dents, dentoffs, 7, inode_bits); /* sort by score and inode */ |
1254 | else |
1276 | else |
1255 | { |
1277 | { |
… | |
… | |
1551 | if (req) |
1573 | if (req) |
1552 | break; |
1574 | break; |
1553 | |
1575 | |
1554 | ++idle; |
1576 | ++idle; |
1555 | |
1577 | |
1556 | ts.tv_sec = time (0) + IDLE_TIMEOUT; |
1578 | ts.tv_sec = time (0) + idle_timeout; |
1557 | if (X_COND_TIMEDWAIT (reqwait, reqlock, ts) == ETIMEDOUT) |
1579 | if (X_COND_TIMEDWAIT (reqwait, reqlock, ts) == ETIMEDOUT) |
1558 | { |
1580 | { |
1559 | if (idle > max_idle) |
1581 | if (idle > max_idle) |
1560 | { |
1582 | { |
1561 | --idle; |
1583 | --idle; |