… | |
… | |
52 | |
52 | |
53 | #ifndef ETP_TYPE_GROUP |
53 | #ifndef ETP_TYPE_GROUP |
54 | # define ETP_TYPE_GROUP 1 |
54 | # define ETP_TYPE_GROUP 1 |
55 | #endif |
55 | #endif |
56 | |
56 | |
|
|
57 | #ifndef ETP_WANT_POLL |
|
|
58 | # define ETP_WANT_POLL(pool) pool->want_poll_cb (pool->userdata) |
|
|
59 | #endif |
|
|
60 | #ifndef ETP_DONE_POLL |
|
|
61 | # define ETP_DONE_POLL(pool) pool->done_poll_cb (pool->userdata) |
|
|
62 | #endif |
|
|
63 | |
57 | #define ETP_NUM_PRI (ETP_PRI_MAX - ETP_PRI_MIN + 1) |
64 | #define ETP_NUM_PRI (ETP_PRI_MAX - ETP_PRI_MIN + 1) |
58 | |
65 | |
59 | #define ETP_TICKS ((1000000 + 1023) >> 10) |
66 | #define ETP_TICKS ((1000000 + 1023) >> 10) |
60 | |
67 | |
61 | enum { |
68 | enum { |
… | |
… | |
68 | etp_tvdiff (struct timeval *tv1, struct timeval *tv2) |
75 | etp_tvdiff (struct timeval *tv1, struct timeval *tv2) |
69 | { |
76 | { |
70 | return (tv2->tv_sec - tv1->tv_sec ) * ETP_TICKS |
77 | return (tv2->tv_sec - tv1->tv_sec ) * ETP_TICKS |
71 | + ((tv2->tv_usec - tv1->tv_usec) >> 10); |
78 | + ((tv2->tv_usec - tv1->tv_usec) >> 10); |
72 | } |
79 | } |
73 | |
|
|
74 | static unsigned int started, idle, wanted = 4; |
|
|
75 | |
|
|
76 | static void (*want_poll_cb) (void); |
|
|
77 | static void (*done_poll_cb) (void); |
|
|
78 | |
|
|
79 | static unsigned int max_poll_time; /* reslock */ |
|
|
80 | static unsigned int max_poll_reqs; /* reslock */ |
|
|
81 | |
|
|
82 | static unsigned int nreqs; /* reqlock */ |
|
|
83 | static unsigned int nready; /* reqlock */ |
|
|
84 | static unsigned int npending; /* reqlock */ |
|
|
85 | static unsigned int max_idle = 4; /* maximum number of threads that can idle indefinitely */ |
|
|
86 | static unsigned int idle_timeout = 10; /* number of seconds after which an idle threads exit */ |
|
|
87 | |
|
|
88 | static xmutex_t wrklock; |
|
|
89 | static xmutex_t reslock; |
|
|
90 | static xmutex_t reqlock; |
|
|
91 | static xcond_t reqwait; |
|
|
92 | |
80 | |
93 | struct etp_tmpbuf |
81 | struct etp_tmpbuf |
94 | { |
82 | { |
95 | void *ptr; |
83 | void *ptr; |
96 | int len; |
84 | int len; |
… | |
… | |
119 | int size; |
107 | int size; |
120 | } etp_reqq; |
108 | } etp_reqq; |
121 | |
109 | |
122 | struct etp_pool |
110 | struct etp_pool |
123 | { |
111 | { |
|
|
112 | void *userdata; |
|
|
113 | |
124 | etp_reqq req_queue; |
114 | etp_reqq req_queue; |
125 | etp_reqq res_queue; |
115 | etp_reqq res_queue; |
|
|
116 | |
|
|
117 | unsigned int started, idle, wanted; |
|
|
118 | |
|
|
119 | unsigned int max_poll_time; /* pool->reslock */ |
|
|
120 | unsigned int max_poll_reqs; /* pool->reslock */ |
|
|
121 | |
|
|
122 | unsigned int nreqs; /* pool->reqlock */ |
|
|
123 | unsigned int nready; /* pool->reqlock */ |
|
|
124 | unsigned int npending; /* pool->reqlock */ |
|
|
125 |   unsigned int max_idle;     /* maximum number of threads that can idle indefinitely */
|
|
126 |   unsigned int idle_timeout; /* number of seconds after which an idle thread exits */
|
|
127 | |
|
|
128 | void (*want_poll_cb) (void *userdata); |
|
|
129 | void (*done_poll_cb) (void *userdata); |
|
|
130 | |
|
|
131 | xmutex_t wrklock; |
|
|
132 | xmutex_t reslock; |
|
|
133 | xmutex_t reqlock; |
|
|
134 | xcond_t reqwait; |
|
|
135 | |
|
|
136 | etp_worker wrk_first; |
126 | }; |
137 | }; |
127 | |
138 | |
128 | typedef struct etp_pool *etp_pool; |
139 | typedef struct etp_pool *etp_pool; |
129 | |
140 | |
130 | typedef struct etp_worker |
141 | typedef struct etp_worker |
131 | { |
142 | { |
132 | etp_pool pool; |
143 | etp_pool pool; |
133 | |
144 | |
134 | struct etp_tmpbuf tmpbuf; |
145 | struct etp_tmpbuf tmpbuf; |
135 | |
146 | |
136 | /* locked by wrklock */ |
147 | /* locked by pool->wrklock */ |
137 | struct etp_worker *prev, *next; |
148 | struct etp_worker *prev, *next; |
138 | |
149 | |
139 | xthread_t tid; |
150 | xthread_t tid; |
140 | |
151 | |
141 | #ifdef ETP_WORKER_COMMON |
152 | #ifdef ETP_WORKER_COMMON |
142 | ETP_WORKER_COMMON |
153 | ETP_WORKER_COMMON |
143 | #endif |
154 | #endif |
144 | } etp_worker; |
155 | } etp_worker; |
145 | |
156 | |
146 | static etp_worker wrk_first; /* NOT etp */ |
|
|
147 | |
|
|
148 | #define ETP_WORKER_LOCK(wrk) X_LOCK (wrklock) |
157 | #define ETP_WORKER_LOCK(wrk) X_LOCK (pool->wrklock) |
149 | #define ETP_WORKER_UNLOCK(wrk) X_UNLOCK (wrklock) |
158 | #define ETP_WORKER_UNLOCK(wrk) X_UNLOCK (pool->wrklock) |
150 | |
159 | |
151 | /* worker threads management */ |
160 | /* worker threads management */ |
152 | |
161 | |
153 | static void |
162 | static void |
154 | etp_worker_clear (etp_worker *wrk) |
163 | etp_worker_clear (etp_worker *wrk) |
… | |
… | |
168 | |
177 | |
169 | ETP_API_DECL unsigned int |
178 | ETP_API_DECL unsigned int |
170 | etp_nreqs (etp_pool pool) |
179 | etp_nreqs (etp_pool pool) |
171 | { |
180 | { |
172 | int retval; |
181 | int retval; |
173 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
182 | if (WORDACCESS_UNSAFE) X_LOCK (pool->reqlock); |
174 | retval = nreqs; |
183 | retval = pool->nreqs; |
175 | if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); |
184 | if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reqlock); |
176 | return retval; |
185 | return retval; |
177 | } |
186 | } |
178 | |
187 | |
179 | ETP_API_DECL unsigned int |
188 | ETP_API_DECL unsigned int |
180 | etp_nready (etp_pool pool) |
189 | etp_nready (etp_pool pool) |
181 | { |
190 | { |
182 | unsigned int retval; |
191 | unsigned int retval; |
183 | |
192 | |
184 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
193 | if (WORDACCESS_UNSAFE) X_LOCK (pool->reqlock); |
185 | retval = nready; |
194 | retval = pool->nready; |
186 | if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); |
195 | if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reqlock); |
187 | |
196 | |
188 | return retval; |
197 | return retval; |
189 | } |
198 | } |
190 | |
199 | |
191 | ETP_API_DECL unsigned int |
200 | ETP_API_DECL unsigned int |
192 | etp_npending (etp_pool pool) |
201 | etp_npending (etp_pool pool) |
193 | { |
202 | { |
194 | unsigned int retval; |
203 | unsigned int retval; |
195 | |
204 | |
196 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
205 | if (WORDACCESS_UNSAFE) X_LOCK (pool->reqlock); |
197 | retval = npending; |
206 | retval = pool->npending; |
198 | if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); |
207 | if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reqlock); |
199 | |
208 | |
200 | return retval; |
209 | return retval; |
201 | } |
210 | } |
202 | |
211 | |
203 | ETP_API_DECL unsigned int |
212 | ETP_API_DECL unsigned int |
204 | etp_nthreads (etp_pool pool) |
213 | etp_nthreads (etp_pool pool) |
205 | { |
214 | { |
206 | unsigned int retval; |
215 | unsigned int retval; |
207 | |
216 | |
208 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
217 | if (WORDACCESS_UNSAFE) X_LOCK (pool->reqlock); |
209 | retval = started; |
218 | retval = pool->started; |
210 | if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); |
219 | if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reqlock); |
211 | |
220 | |
212 | return retval; |
221 | return retval; |
213 | } |
222 | } |
214 | |
223 | |
215 | static void ecb_noinline ecb_cold |
224 | static void ecb_noinline ecb_cold |
… | |
… | |
265 | |
274 | |
266 | abort (); |
275 | abort (); |
267 | } |
276 | } |
268 | |
277 | |
269 | ETP_API_DECL int ecb_cold |
278 | ETP_API_DECL int ecb_cold |
270 | etp_init (etp_pool pool, void (*want_poll)(void), void (*done_poll)(void)) |
279 | etp_init (etp_pool pool, void *userdata, void (*want_poll)(void *userdata), void (*done_poll)(void *userdata)) |
271 | { |
280 | { |
272 | X_MUTEX_CREATE (wrklock); |
281 | X_MUTEX_CREATE (pool->wrklock); |
273 | X_MUTEX_CREATE (reslock); |
282 | X_MUTEX_CREATE (pool->reslock); |
274 | X_MUTEX_CREATE (reqlock); |
283 | X_MUTEX_CREATE (pool->reqlock); |
275 | X_COND_CREATE (reqwait); |
284 | X_COND_CREATE (pool->reqwait); |
276 | |
285 | |
277 | reqq_init (&pool->req_queue); |
286 | reqq_init (&pool->req_queue); |
278 | reqq_init (&pool->res_queue); |
287 | reqq_init (&pool->res_queue); |
279 | |
288 | |
280 | wrk_first.next = |
289 | pool->wrk_first.next = |
281 | wrk_first.prev = &wrk_first; |
290 | pool->wrk_first.prev = &pool->wrk_first; |
282 | |
291 | |
283 | started = 0; |
292 | pool->started = 0; |
284 | idle = 0; |
293 | pool->idle = 0; |
285 | nreqs = 0; |
294 | pool->nreqs = 0; |
286 | nready = 0; |
295 | pool->nready = 0; |
287 | npending = 0; |
296 | pool->npending = 0; |
|
|
297 | pool->wanted = 4; |
288 | |
298 | |
|
|
299 |   pool->max_idle     = 4;  /* maximum number of threads that can idle indefinitely */
|
|
300 |   pool->idle_timeout = 10; /* number of seconds after which an idle thread exits */
|
|
301 | |
|
|
302 | pool->userdata = userdata; |
289 | want_poll_cb = want_poll; |
303 | pool->want_poll_cb = want_poll; |
290 | done_poll_cb = done_poll; |
304 | pool->done_poll_cb = done_poll; |
291 | |
305 | |
292 | return 0; |
306 | return 0; |
293 | } |
307 | } |
294 | |
308 | |
295 | static void ecb_noinline ecb_cold |
309 | static void ecb_noinline ecb_cold |
… | |
… | |
323 | |
337 | |
324 | for (;;) |
338 | for (;;) |
325 | { |
339 | { |
326 | ts.tv_sec = 0; |
340 | ts.tv_sec = 0; |
327 | |
341 | |
328 | X_LOCK (reqlock); |
342 | X_LOCK (pool->reqlock); |
329 | |
343 | |
330 | for (;;) |
344 | for (;;) |
331 | { |
345 | { |
332 | req = reqq_shift (&pool->req_queue); |
346 | req = reqq_shift (&pool->req_queue); |
333 | |
347 | |
334 | if (req) |
348 | if (req) |
335 | break; |
349 | break; |
336 | |
350 | |
337 | if (ts.tv_sec == 1) /* no request, but timeout detected, let's quit */ |
351 | if (ts.tv_sec == 1) /* no request, but timeout detected, let's quit */ |
338 | { |
352 | { |
339 | X_UNLOCK (reqlock); |
353 | X_UNLOCK (pool->reqlock); |
340 | X_LOCK (wrklock); |
354 | X_LOCK (pool->wrklock); |
341 | --started; |
355 | --pool->started; |
342 | X_UNLOCK (wrklock); |
356 | X_UNLOCK (pool->wrklock); |
343 | goto quit; |
357 | goto quit; |
344 | } |
358 | } |
345 | |
359 | |
346 | ++idle; |
360 | ++pool->idle; |
347 | |
361 | |
348 | if (idle <= max_idle) |
362 | if (pool->idle <= pool->max_idle) |
349 | /* we are allowed to idle, so do so without any timeout */ |
363 |             /* we are allowed to idle, so do so without any timeout */
350 | X_COND_WAIT (reqwait, reqlock); |
364 | X_COND_WAIT (pool->reqwait, pool->reqlock); |
351 | else |
365 | else |
352 | { |
366 | { |
353 | /* initialise timeout once */ |
367 | /* initialise timeout once */ |
354 | if (!ts.tv_sec) |
368 | if (!ts.tv_sec) |
355 | ts.tv_sec = time (0) + idle_timeout; |
369 | ts.tv_sec = time (0) + pool->idle_timeout; |
356 | |
370 | |
357 | if (X_COND_TIMEDWAIT (reqwait, reqlock, ts) == ETIMEDOUT) |
371 | if (X_COND_TIMEDWAIT (pool->reqwait, pool->reqlock, ts) == ETIMEDOUT) |
358 | ts.tv_sec = 1; /* assuming this is not a value computed above.,.. */ |
372 |                 ts.tv_sec = 1; /* assuming this is not a value computed above... */
359 | } |
373 | } |
360 | |
374 | |
361 | --idle; |
375 | --pool->idle; |
362 | } |
376 | } |
363 | |
377 | |
364 | --nready; |
378 | --pool->nready; |
365 | |
379 | |
366 | X_UNLOCK (reqlock); |
380 | X_UNLOCK (pool->reqlock); |
367 | |
381 | |
368 | if (req->type == ETP_TYPE_QUIT) |
382 | if (req->type == ETP_TYPE_QUIT) |
369 | goto quit; |
383 | goto quit; |
370 | |
384 | |
371 | ETP_EXECUTE (self, req); |
385 | ETP_EXECUTE (self, req); |
372 | |
386 | |
373 | X_LOCK (reslock); |
387 | X_LOCK (pool->reslock); |
374 | |
388 | |
375 | ++npending; |
389 | ++pool->npending; |
376 | |
390 | |
377 | if (!reqq_push (&pool->res_queue, req) && want_poll_cb) |
391 | if (!reqq_push (&pool->res_queue, req)) |
378 | want_poll_cb (); |
392 |         ETP_WANT_POLL (pool); /* macro expects the pool, not the libc poll symbol */
379 | |
393 | |
380 | etp_worker_clear (self); |
394 | etp_worker_clear (self); |
381 | |
395 | |
382 | X_UNLOCK (reslock); |
396 | X_UNLOCK (pool->reslock); |
383 | } |
397 | } |
384 | |
398 | |
385 | quit: |
399 | quit: |
386 | free (req); |
400 | free (req); |
387 | |
401 | |
388 | X_LOCK (wrklock); |
402 | X_LOCK (pool->wrklock); |
389 | etp_worker_free (self); |
403 | etp_worker_free (self); |
390 | X_UNLOCK (wrklock); |
404 | X_UNLOCK (pool->wrklock); |
391 | |
405 | |
392 | return 0; |
406 | return 0; |
393 | } |
407 | } |
394 | |
408 | |
395 | static void ecb_cold |
409 | static void ecb_cold |
… | |
… | |
400 | /*TODO*/ |
414 | /*TODO*/ |
401 | assert (("unable to allocate worker thread data", wrk)); |
415 | assert (("unable to allocate worker thread data", wrk)); |
402 | |
416 | |
403 | wrk->pool = pool; |
417 | wrk->pool = pool; |
404 | |
418 | |
405 | X_LOCK (wrklock); |
419 | X_LOCK (pool->wrklock); |
406 | |
420 | |
407 | if (xthread_create (&wrk->tid, etp_proc, (void *)wrk)) |
421 | if (xthread_create (&wrk->tid, etp_proc, (void *)wrk)) |
408 | { |
422 | { |
409 | wrk->prev = &wrk_first; |
423 |       wrk->prev = &pool->wrk_first; /* fix transposed typo &wpool->rk_first */
410 | wrk->next = wrk_first.next; |
424 | wrk->next = pool->wrk_first.next; |
411 | wrk_first.next->prev = wrk; |
425 | pool->wrk_first.next->prev = wrk; |
412 | wrk_first.next = wrk; |
426 | pool->wrk_first.next = wrk; |
413 | ++started; |
427 | ++pool->started; |
414 | } |
428 | } |
415 | else |
429 | else |
416 | free (wrk); |
430 | free (wrk); |
417 | |
431 | |
418 | X_UNLOCK (wrklock); |
432 | X_UNLOCK (pool->wrklock); |
419 | } |
433 | } |
420 | |
434 | |
421 | static void |
435 | static void |
422 | etp_maybe_start_thread (etp_pool pool) |
436 | etp_maybe_start_thread (etp_pool pool) |
423 | { |
437 | { |
424 | if (ecb_expect_true (etp_nthreads (pool) >= wanted)) |
438 | if (ecb_expect_true (etp_nthreads (pool) >= pool->wanted)) |
425 | return; |
439 | return; |
426 | |
440 | |
427 | /* todo: maybe use idle here, but might be less exact */ |
441 | /* todo: maybe use pool->idle here, but might be less exact */ |
428 | if (ecb_expect_true (0 <= (int)etp_nthreads (pool) + (int)etp_npending (pool) - (int)etp_nreqs (pool))) |
442 | if (ecb_expect_true (0 <= (int)etp_nthreads (pool) + (int)etp_npending (pool) - (int)etp_nreqs (pool))) |
429 | return; |
443 | return; |
430 | |
444 | |
431 | etp_start_thread (pool); |
445 | etp_start_thread (pool); |
432 | } |
446 | } |
… | |
… | |
437 | ETP_REQ *req = calloc (1, sizeof (ETP_REQ)); /* will be freed by worker */ |
451 | ETP_REQ *req = calloc (1, sizeof (ETP_REQ)); /* will be freed by worker */ |
438 | |
452 | |
439 | req->type = ETP_TYPE_QUIT; |
453 | req->type = ETP_TYPE_QUIT; |
440 | req->pri = ETP_PRI_MAX - ETP_PRI_MIN; |
454 | req->pri = ETP_PRI_MAX - ETP_PRI_MIN; |
441 | |
455 | |
442 | X_LOCK (reqlock); |
456 | X_LOCK (pool->reqlock); |
443 | reqq_push (&pool->req_queue, req); |
457 | reqq_push (&pool->req_queue, req); |
444 | X_COND_SIGNAL (reqwait); |
458 | X_COND_SIGNAL (pool->reqwait); |
445 | X_UNLOCK (reqlock); |
459 | X_UNLOCK (pool->reqlock); |
446 | |
460 | |
447 | X_LOCK (wrklock); |
461 | X_LOCK (pool->wrklock); |
448 | --started; |
462 | --pool->started; |
449 | X_UNLOCK (wrklock); |
463 | X_UNLOCK (pool->wrklock); |
450 | } |
464 | } |
451 | |
465 | |
452 | ETP_API_DECL int |
466 | ETP_API_DECL int |
453 | etp_poll (etp_pool pool) |
467 | etp_poll (etp_pool pool) |
454 | { |
468 | { |
455 | unsigned int maxreqs; |
469 | unsigned int maxreqs; |
456 | unsigned int maxtime; |
470 | unsigned int maxtime; |
457 | struct timeval tv_start, tv_now; |
471 | struct timeval tv_start, tv_now; |
458 | |
472 | |
459 | X_LOCK (reslock); |
473 | X_LOCK (pool->reslock); |
460 | maxreqs = max_poll_reqs; |
474 | maxreqs = pool->max_poll_reqs; |
461 | maxtime = max_poll_time; |
475 | maxtime = pool->max_poll_time; |
462 | X_UNLOCK (reslock); |
476 | X_UNLOCK (pool->reslock); |
463 | |
477 | |
464 | if (maxtime) |
478 | if (maxtime) |
465 | gettimeofday (&tv_start, 0); |
479 | gettimeofday (&tv_start, 0); |
466 | |
480 | |
467 | for (;;) |
481 | for (;;) |
468 | { |
482 | { |
469 | ETP_REQ *req; |
483 | ETP_REQ *req; |
470 | |
484 | |
471 | etp_maybe_start_thread (pool); |
485 | etp_maybe_start_thread (pool); |
472 | |
486 | |
473 | X_LOCK (reslock); |
487 | X_LOCK (pool->reslock); |
474 | req = reqq_shift (&pool->res_queue); |
488 | req = reqq_shift (&pool->res_queue); |
475 | |
489 | |
476 | if (req) |
490 | if (req) |
477 | { |
491 | { |
478 | --npending; |
492 | --pool->npending; |
479 | |
493 | |
480 | if (!pool->res_queue.size && done_poll_cb) |
494 | if (!pool->res_queue.size) |
481 | done_poll_cb (); |
495 |             ETP_DONE_POLL (pool); /* macro takes the pool and itself passes pool->userdata */
482 | } |
496 | } |
483 | |
497 | |
484 | X_UNLOCK (reslock); |
498 | X_UNLOCK (pool->reslock); |
485 | |
499 | |
486 | if (!req) |
500 | if (!req) |
487 | return 0; |
501 | return 0; |
488 | |
502 | |
489 | X_LOCK (reqlock); |
503 | X_LOCK (pool->reqlock); |
490 | --nreqs; |
504 | --pool->nreqs; |
491 | X_UNLOCK (reqlock); |
505 | X_UNLOCK (pool->reqlock); |
492 | |
506 | |
493 | if (ecb_expect_false (req->type == ETP_TYPE_GROUP && req->size)) |
507 | if (ecb_expect_false (req->type == ETP_TYPE_GROUP && req->size)) |
494 | { |
508 | { |
495 | req->flags |= ETP_FLAG_DELAYED; /* mark request as delayed */ |
509 | req->flags |= ETP_FLAG_DELAYED; /* mark request as delayed */ |
496 | continue; |
510 | continue; |
… | |
… | |
545 | if (ecb_expect_false (req->pri > ETP_PRI_MAX - ETP_PRI_MIN)) req->pri = ETP_PRI_MAX - ETP_PRI_MIN; |
559 | if (ecb_expect_false (req->pri > ETP_PRI_MAX - ETP_PRI_MIN)) req->pri = ETP_PRI_MAX - ETP_PRI_MIN; |
546 | |
560 | |
547 | if (ecb_expect_false (req->type == ETP_TYPE_GROUP)) |
561 | if (ecb_expect_false (req->type == ETP_TYPE_GROUP)) |
548 | { |
562 | { |
549 | /* I hope this is worth it :/ */ |
563 | /* I hope this is worth it :/ */ |
550 | X_LOCK (reqlock); |
564 | X_LOCK (pool->reqlock); |
551 | ++nreqs; |
565 | ++pool->nreqs; |
552 | X_UNLOCK (reqlock); |
566 | X_UNLOCK (pool->reqlock); |
553 | |
567 | |
554 | X_LOCK (reslock); |
568 | X_LOCK (pool->reslock); |
555 | |
569 | |
556 | ++npending; |
570 | ++pool->npending; |
557 | |
571 | |
558 | if (!reqq_push (&pool->res_queue, req) && want_poll_cb) |
572 | if (!reqq_push (&pool->res_queue, req)) |
559 | want_poll_cb (); |
573 | ETP_WANT_POLL (pool); |
560 | |
574 | |
561 | X_UNLOCK (reslock); |
575 | X_UNLOCK (pool->reslock); |
562 | } |
576 | } |
563 | else |
577 | else |
564 | { |
578 | { |
565 | X_LOCK (reqlock); |
579 | X_LOCK (pool->reqlock); |
566 | ++nreqs; |
580 | ++pool->nreqs; |
567 | ++nready; |
581 | ++pool->nready; |
568 | reqq_push (&pool->req_queue, req); |
582 | reqq_push (&pool->req_queue, req); |
569 | X_COND_SIGNAL (reqwait); |
583 | X_COND_SIGNAL (pool->reqwait); |
570 | X_UNLOCK (reqlock); |
584 | X_UNLOCK (pool->reqlock); |
571 | |
585 | |
572 | etp_maybe_start_thread (pool); |
586 | etp_maybe_start_thread (pool); |
573 | } |
587 | } |
574 | } |
588 | } |
575 | |
589 | |
576 | ETP_API_DECL void ecb_cold |
590 | ETP_API_DECL void ecb_cold |
577 | etp_set_max_poll_time (etp_pool pool, double nseconds) |
591 | etp_set_max_poll_time (etp_pool pool, double nseconds) |
578 | { |
592 | { |
579 | if (WORDACCESS_UNSAFE) X_LOCK (reslock); |
593 | if (WORDACCESS_UNSAFE) X_LOCK (pool->reslock); |
580 | max_poll_time = nseconds * ETP_TICKS; |
594 | pool->max_poll_time = nseconds * ETP_TICKS; |
581 | if (WORDACCESS_UNSAFE) X_UNLOCK (reslock); |
595 | if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reslock); |
582 | } |
596 | } |
583 | |
597 | |
584 | ETP_API_DECL void ecb_cold |
598 | ETP_API_DECL void ecb_cold |
585 | etp_set_max_poll_reqs (etp_pool pool, unsigned int maxreqs) |
599 | etp_set_max_poll_reqs (etp_pool pool, unsigned int maxreqs) |
586 | { |
600 | { |
587 | if (WORDACCESS_UNSAFE) X_LOCK (reslock); |
601 | if (WORDACCESS_UNSAFE) X_LOCK (pool->reslock); |
588 | max_poll_reqs = maxreqs; |
602 | pool->max_poll_reqs = maxreqs; |
589 | if (WORDACCESS_UNSAFE) X_UNLOCK (reslock); |
603 | if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reslock); |
590 | } |
604 | } |
591 | |
605 | |
592 | ETP_API_DECL void ecb_cold |
606 | ETP_API_DECL void ecb_cold |
593 | etp_set_max_idle (etp_pool pool, unsigned int nthreads) |
607 | etp_set_max_idle (etp_pool pool, unsigned int nthreads) |
594 | { |
608 | { |
595 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
609 | if (WORDACCESS_UNSAFE) X_LOCK (pool->reqlock); |
596 | max_idle = nthreads; |
610 | pool->max_idle = nthreads; |
597 | if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); |
611 | if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reqlock); |
598 | } |
612 | } |
599 | |
613 | |
600 | ETP_API_DECL void ecb_cold |
614 | ETP_API_DECL void ecb_cold |
601 | etp_set_idle_timeout (etp_pool pool, unsigned int seconds) |
615 | etp_set_idle_timeout (etp_pool pool, unsigned int seconds) |
602 | { |
616 | { |
603 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
617 | if (WORDACCESS_UNSAFE) X_LOCK (pool->reqlock); |
604 | idle_timeout = seconds; |
618 | pool->idle_timeout = seconds; |
605 | if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); |
619 | if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reqlock); |
606 | } |
620 | } |
607 | |
621 | |
608 | ETP_API_DECL void ecb_cold |
622 | ETP_API_DECL void ecb_cold |
609 | etp_set_min_parallel (etp_pool pool, unsigned int nthreads) |
623 | etp_set_min_parallel (etp_pool pool, unsigned int nthreads) |
610 | { |
624 | { |
611 | if (wanted < nthreads) |
625 | if (pool->wanted < nthreads) |
612 | wanted = nthreads; |
626 | pool->wanted = nthreads; |
613 | } |
627 | } |
614 | |
628 | |
615 | ETP_API_DECL void ecb_cold |
629 | ETP_API_DECL void ecb_cold |
616 | etp_set_max_parallel (etp_pool pool, unsigned int nthreads) |
630 | etp_set_max_parallel (etp_pool pool, unsigned int nthreads) |
617 | { |
631 | { |
618 | if (wanted > nthreads) |
632 | if (pool->wanted > nthreads) |
619 | wanted = nthreads; |
633 | pool->wanted = nthreads; |
620 | |
634 | |
621 | while (started > wanted) |
635 | while (pool->started > pool->wanted) |
622 | etp_end_thread (pool); |
636 | etp_end_thread (pool); |
623 | } |
637 | } |
624 | |
638 | |