/*
 * libetp implementation
 *
 * Copyright (c) 2007,2008,2009,2010,2011,2012,2013,2015 Marc Alexander Lehmann <libetp@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
… | |
… | |
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */
39 | |
39 | |
|
|
40 | #if HAVE_SYS_PRCTL_H |
|
|
41 | # include <sys/prctl.h> |
|
|
42 | #endif |
|
|
43 | |
|
|
44 | #ifdef EIO_STACKSIZE |
|
|
45 | # define X_STACKSIZE EIO_STACKSIZE |
|
|
46 | #endif |
|
|
47 | #include "xthread.h" |
|
|
48 | |
/* ETP_API_DECL may be pre-defined by the including file to control linkage
 * of the etp_* API; default is internal linkage */
#ifndef ETP_API_DECL
# define ETP_API_DECL static
#endif
43 | |
52 | |
44 | #ifndef ETP_PRI_MIN |
53 | #ifndef ETP_PRI_MIN |
… | |
… | |
52 | |
61 | |
/* request type used to mark group requests; overridable by the includer */
#ifndef ETP_TYPE_GROUP
# define ETP_TYPE_GROUP 1
#endif

/* hooks invoked when the result queue becomes non-empty/empty */
#ifndef ETP_WANT_POLL
# define ETP_WANT_POLL(pool) pool->want_poll_cb (pool->userdata)
#endif
#ifndef ETP_DONE_POLL
# define ETP_DONE_POLL(pool) pool->done_poll_cb (pool->userdata)
#endif

#define ETP_NUM_PRI (ETP_PRI_MAX - ETP_PRI_MIN + 1)

/* ticks per second used by etp_tvdiff: ~1/1024s, rounded up */
#define ETP_TICKS ((1000000 + 1023) >> 10)

enum {
  ETP_FLAG_GROUPADD = 0x04, /* some request was added to the group */
  ETP_FLAG_DELAYED  = 0x08, /* group request has been delayed */
};
60 | |
81 | |
61 | /* calculate time difference in ~1/ETP_TICKS of a second */ |
82 | /* calculate time difference in ~1/ETP_TICKS of a second */ |
62 | ecb_inline int |
83 | ecb_inline int |
63 | etp_tvdiff (struct timeval *tv1, struct timeval *tv2) |
84 | etp_tvdiff (struct timeval *tv1, struct timeval *tv2) |
64 | { |
85 | { |
65 | return (tv2->tv_sec - tv1->tv_sec ) * ETP_TICKS |
86 | return (tv2->tv_sec - tv1->tv_sec ) * ETP_TICKS |
66 | + ((tv2->tv_usec - tv1->tv_usec) >> 10); |
87 | + ((tv2->tv_usec - tv1->tv_usec) >> 10); |
67 | } |
88 | } |
68 | |
89 | |
/* a temporary per-worker buffer that is reused between requests; it only
 * ever grows, and its contents are not preserved across a grow */
struct etp_tmpbuf
{
  void *ptr;
  int len;
};

/* return a buffer of at least len bytes, growing (and discarding the old
 * contents) when the current buffer is too small.
 * NOTE(review): on malloc failure this returns NULL but leaves buf->len
 * already set to the new size — callers must check the return value */
static void *
etp_tmpbuf_get (struct etp_tmpbuf *buf, int len)
{
  if (buf->len < len)
    {
      free (buf->ptr);
      buf->ptr = malloc (buf->len = len);
    }

  return buf->ptr;
}
170 | |
107 | |
171 | /* |
108 | /* |
172 | * a somewhat faster data structure might be nice, but |
109 | * a somewhat faster data structure might be nice, but |
173 | * with 8 priorities this actually needs <20 insns |
110 | * with 8 priorities this actually needs <20 insns |
174 | * per shift, the most expensive operation. |
111 | * per shift, the most expensive operation. |
175 | */ |
112 | */ |
176 | typedef struct { |
113 | typedef struct |
|
|
114 | { |
177 | ETP_REQ *qs[ETP_NUM_PRI], *qe[ETP_NUM_PRI]; /* qstart, qend */ |
115 | ETP_REQ *qs[ETP_NUM_PRI], *qe[ETP_NUM_PRI]; /* qstart, qend */ |
178 | int size; |
116 | int size; |
179 | } etp_reqq; |
117 | } etp_reqq; |
180 | |
118 | |
|
|
119 | typedef struct etp_pool *etp_pool; |
|
|
120 | |
|
|
121 | typedef struct etp_worker |
|
|
122 | { |
|
|
123 | etp_pool pool; |
|
|
124 | |
|
|
125 | struct etp_tmpbuf tmpbuf; |
|
|
126 | |
|
|
127 | /* locked by pool->wrklock */ |
|
|
128 | struct etp_worker *prev, *next; |
|
|
129 | |
|
|
130 | xthread_t tid; |
|
|
131 | |
|
|
132 | #ifdef ETP_WORKER_COMMON |
|
|
133 | ETP_WORKER_COMMON |
|
|
134 | #endif |
|
|
135 | } etp_worker; |
|
|
136 | |
|
|
137 | struct etp_pool |
|
|
138 | { |
|
|
139 | void *userdata; |
|
|
140 | |
181 | static etp_reqq req_queue; |
141 | etp_reqq req_queue; |
182 | static etp_reqq res_queue; |
142 | etp_reqq res_queue; |
|
|
143 | |
|
|
144 | unsigned int started, idle, wanted; |
|
|
145 | |
|
|
146 | unsigned int max_poll_time; /* pool->reslock */ |
|
|
147 | unsigned int max_poll_reqs; /* pool->reslock */ |
|
|
148 | |
|
|
149 | unsigned int nreqs; /* pool->reqlock */ |
|
|
150 | unsigned int nready; /* pool->reqlock */ |
|
|
151 | unsigned int npending; /* pool->reqlock */ |
|
|
152 | unsigned int max_idle; /* maximum number of threads that can pool->idle indefinitely */ |
|
|
153 | unsigned int idle_timeout; /* number of seconds after which an pool->idle threads exit */ |
|
|
154 | |
|
|
155 | void (*want_poll_cb) (void *userdata); |
|
|
156 | void (*done_poll_cb) (void *userdata); |
|
|
157 | |
|
|
158 | xmutex_t wrklock; |
|
|
159 | xmutex_t reslock; |
|
|
160 | xmutex_t reqlock; |
|
|
161 | xcond_t reqwait; |
|
|
162 | |
|
|
163 | etp_worker wrk_first; |
|
|
164 | }; |
|
|
165 | |
|
|
/* these expand against a variable named "pool" in the enclosing scope */
#define ETP_WORKER_LOCK(wrk)   X_LOCK   (pool->wrklock)
#define ETP_WORKER_UNLOCK(wrk) X_UNLOCK (pool->wrklock)

/* worker threads management */
|
|
170 | |
|
|
171 | static void |
|
|
172 | etp_worker_clear (etp_worker *wrk) |
|
|
173 | { |
|
|
174 | } |
|
|
175 | |
|
|
176 | static void ecb_cold |
|
|
177 | etp_worker_free (etp_worker *wrk) |
|
|
178 | { |
|
|
179 | free (wrk->tmpbuf.ptr); |
|
|
180 | |
|
|
181 | wrk->next->prev = wrk->prev; |
|
|
182 | wrk->prev->next = wrk->next; |
|
|
183 | |
|
|
184 | free (wrk); |
|
|
185 | } |
|
|
186 | |
|
|
187 | ETP_API_DECL unsigned int |
|
|
188 | etp_nreqs (etp_pool pool) |
|
|
189 | { |
|
|
190 | int retval; |
|
|
191 | if (WORDACCESS_UNSAFE) X_LOCK (pool->reqlock); |
|
|
192 | retval = pool->nreqs; |
|
|
193 | if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reqlock); |
|
|
194 | return retval; |
|
|
195 | } |
|
|
196 | |
|
|
197 | ETP_API_DECL unsigned int |
|
|
198 | etp_nready (etp_pool pool) |
|
|
199 | { |
|
|
200 | unsigned int retval; |
|
|
201 | |
|
|
202 | if (WORDACCESS_UNSAFE) X_LOCK (pool->reqlock); |
|
|
203 | retval = pool->nready; |
|
|
204 | if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reqlock); |
|
|
205 | |
|
|
206 | return retval; |
|
|
207 | } |
|
|
208 | |
|
|
209 | ETP_API_DECL unsigned int |
|
|
210 | etp_npending (etp_pool pool) |
|
|
211 | { |
|
|
212 | unsigned int retval; |
|
|
213 | |
|
|
214 | if (WORDACCESS_UNSAFE) X_LOCK (pool->reqlock); |
|
|
215 | retval = pool->npending; |
|
|
216 | if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reqlock); |
|
|
217 | |
|
|
218 | return retval; |
|
|
219 | } |
|
|
220 | |
|
|
221 | ETP_API_DECL unsigned int |
|
|
222 | etp_nthreads (etp_pool pool) |
|
|
223 | { |
|
|
224 | unsigned int retval; |
|
|
225 | |
|
|
226 | if (WORDACCESS_UNSAFE) X_LOCK (pool->reqlock); |
|
|
227 | retval = pool->started; |
|
|
228 | if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reqlock); |
|
|
229 | |
|
|
230 | return retval; |
|
|
231 | } |
183 | |
232 | |
184 | static void ecb_noinline ecb_cold |
233 | static void ecb_noinline ecb_cold |
185 | reqq_init (etp_reqq *q) |
234 | reqq_init (etp_reqq *q) |
186 | { |
235 | { |
187 | int pri; |
236 | int pri; |
… | |
… | |
234 | |
283 | |
235 | abort (); |
284 | abort (); |
236 | } |
285 | } |
237 | |
286 | |
238 | ETP_API_DECL int ecb_cold |
287 | ETP_API_DECL int ecb_cold |
239 | etp_init (void (*want_poll)(void), void (*done_poll)(void)) |
288 | etp_init (etp_pool pool, void *userdata, void (*want_poll)(void *userdata), void (*done_poll)(void *userdata)) |
240 | { |
289 | { |
241 | X_MUTEX_CREATE (wrklock); |
290 | X_MUTEX_CREATE (pool->wrklock); |
242 | X_MUTEX_CREATE (reslock); |
291 | X_MUTEX_CREATE (pool->reslock); |
243 | X_MUTEX_CREATE (reqlock); |
292 | X_MUTEX_CREATE (pool->reqlock); |
244 | X_COND_CREATE (reqwait); |
293 | X_COND_CREATE (pool->reqwait); |
245 | |
294 | |
246 | reqq_init (&req_queue); |
295 | reqq_init (&pool->req_queue); |
247 | reqq_init (&res_queue); |
296 | reqq_init (&pool->res_queue); |
248 | |
297 | |
249 | wrk_first.next = |
298 | pool->wrk_first.next = |
250 | wrk_first.prev = &wrk_first; |
299 | pool->wrk_first.prev = &pool->wrk_first; |
251 | |
300 | |
252 | started = 0; |
301 | pool->started = 0; |
253 | idle = 0; |
302 | pool->idle = 0; |
254 | nreqs = 0; |
303 | pool->nreqs = 0; |
255 | nready = 0; |
304 | pool->nready = 0; |
256 | npending = 0; |
305 | pool->npending = 0; |
|
|
306 | pool->wanted = 4; |
257 | |
307 | |
|
|
308 | pool->max_idle = 4; /* maximum number of threads that can pool->idle indefinitely */ |
|
|
309 | pool->idle_timeout = 10; /* number of seconds after which an pool->idle threads exit */ |
|
|
310 | |
|
|
311 | pool->userdata = userdata; |
258 | want_poll_cb = want_poll; |
312 | pool->want_poll_cb = want_poll; |
259 | done_poll_cb = done_poll; |
313 | pool->done_poll_cb = done_poll; |
260 | |
314 | |
261 | return 0; |
315 | return 0; |
262 | } |
316 | } |
263 | |
317 | |
264 | /* not yet in etp.c */ |
318 | static void ecb_noinline ecb_cold |
|
|
319 | etp_proc_init (void) |
|
|
320 | { |
|
|
321 | #if HAVE_PRCTL_SET_NAME |
|
|
322 | /* provide a more sensible "thread name" */ |
|
|
323 | char name[16 + 1]; |
|
|
324 | const int namelen = sizeof (name) - 1; |
|
|
325 | int len; |
|
|
326 | |
|
|
327 | prctl (PR_GET_NAME, (unsigned long)name, 0, 0, 0); |
|
|
328 | name [namelen] = 0; |
|
|
329 | len = strlen (name); |
|
|
330 | strcpy (name + (len <= namelen - 4 ? len : namelen - 4), "/eio"); |
|
|
331 | prctl (PR_SET_NAME, (unsigned long)name, 0, 0, 0); |
|
|
332 | #endif |
|
|
333 | } |
|
|
334 | |
265 | X_THREAD_PROC (etp_proc); |
335 | X_THREAD_PROC (etp_proc) |
|
|
336 | { |
|
|
337 | ETP_REQ *req; |
|
|
338 | struct timespec ts; |
|
|
339 | etp_worker *self = (etp_worker *)thr_arg; |
|
|
340 | etp_pool pool = self->pool; |
|
|
341 | |
|
|
342 | etp_proc_init (); |
|
|
343 | |
|
|
344 | /* try to distribute timeouts somewhat evenly */ |
|
|
345 | ts.tv_nsec = ((unsigned long)self & 1023UL) * (1000000000UL / 1024UL); |
|
|
346 | |
|
|
347 | for (;;) |
|
|
348 | { |
|
|
349 | ts.tv_sec = 0; |
|
|
350 | |
|
|
351 | X_LOCK (pool->reqlock); |
|
|
352 | |
|
|
353 | for (;;) |
|
|
354 | { |
|
|
355 | req = reqq_shift (&pool->req_queue); |
|
|
356 | |
|
|
357 | if (ecb_expect_true (req)) |
|
|
358 | break; |
|
|
359 | |
|
|
360 | if (ts.tv_sec == 1) /* no request, but timeout detected, let's quit */ |
|
|
361 | { |
|
|
362 | X_UNLOCK (pool->reqlock); |
|
|
363 | X_LOCK (pool->wrklock); |
|
|
364 | --pool->started; |
|
|
365 | X_UNLOCK (pool->wrklock); |
|
|
366 | goto quit; |
|
|
367 | } |
|
|
368 | |
|
|
369 | ++pool->idle; |
|
|
370 | |
|
|
371 | if (pool->idle <= pool->max_idle) |
|
|
372 | /* we are allowed to pool->idle, so do so without any timeout */ |
|
|
373 | X_COND_WAIT (pool->reqwait, pool->reqlock); |
|
|
374 | else |
|
|
375 | { |
|
|
376 | /* initialise timeout once */ |
|
|
377 | if (!ts.tv_sec) |
|
|
378 | ts.tv_sec = time (0) + pool->idle_timeout; |
|
|
379 | |
|
|
380 | if (X_COND_TIMEDWAIT (pool->reqwait, pool->reqlock, ts) == ETIMEDOUT) |
|
|
381 | ts.tv_sec = 1; /* assuming this is not a value computed above.,.. */ |
|
|
382 | } |
|
|
383 | |
|
|
384 | --pool->idle; |
|
|
385 | } |
|
|
386 | |
|
|
387 | --pool->nready; |
|
|
388 | |
|
|
389 | X_UNLOCK (pool->reqlock); |
|
|
390 | |
|
|
391 | if (ecb_expect_false (req->type == ETP_TYPE_QUIT)) |
|
|
392 | goto quit; |
|
|
393 | |
|
|
394 | ETP_EXECUTE (self, req); |
|
|
395 | |
|
|
396 | X_LOCK (pool->reslock); |
|
|
397 | |
|
|
398 | ++pool->npending; |
|
|
399 | |
|
|
400 | if (!reqq_push (&pool->res_queue, req)) |
|
|
401 | ETP_WANT_POLL (pool); |
|
|
402 | |
|
|
403 | etp_worker_clear (self); |
|
|
404 | |
|
|
405 | X_UNLOCK (pool->reslock); |
|
|
406 | } |
|
|
407 | |
|
|
408 | quit: |
|
|
409 | free (req); |
|
|
410 | |
|
|
411 | X_LOCK (pool->wrklock); |
|
|
412 | etp_worker_free (self); |
|
|
413 | X_UNLOCK (pool->wrklock); |
|
|
414 | |
|
|
415 | return 0; |
|
|
416 | } |
266 | |
417 | |
267 | static void ecb_cold |
418 | static void ecb_cold |
268 | etp_start_thread (void) |
419 | etp_start_thread (etp_pool pool) |
269 | { |
420 | { |
270 | etp_worker *wrk = calloc (1, sizeof (etp_worker)); |
421 | etp_worker *wrk = calloc (1, sizeof (etp_worker)); |
271 | |
422 | |
272 | /*TODO*/ |
423 | /*TODO*/ |
273 | assert (("unable to allocate worker thread data", wrk)); |
424 | assert (("unable to allocate worker thread data", wrk)); |
274 | |
425 | |
|
|
426 | wrk->pool = pool; |
|
|
427 | |
275 | X_LOCK (wrklock); |
428 | X_LOCK (pool->wrklock); |
276 | |
429 | |
277 | if (xthread_create (&wrk->tid, etp_proc, (void *)wrk)) |
430 | if (xthread_create (&wrk->tid, etp_proc, (void *)wrk)) |
278 | { |
431 | { |
279 | wrk->prev = &wrk_first; |
432 | wrk->prev = &pool->wrk_first; |
280 | wrk->next = wrk_first.next; |
433 | wrk->next = pool->wrk_first.next; |
281 | wrk_first.next->prev = wrk; |
434 | pool->wrk_first.next->prev = wrk; |
282 | wrk_first.next = wrk; |
435 | pool->wrk_first.next = wrk; |
283 | ++started; |
436 | ++pool->started; |
284 | } |
437 | } |
285 | else |
438 | else |
286 | free (wrk); |
439 | free (wrk); |
287 | |
440 | |
288 | X_UNLOCK (wrklock); |
441 | X_UNLOCK (pool->wrklock); |
289 | } |
442 | } |
290 | |
443 | |
291 | static void |
444 | static void |
292 | etp_maybe_start_thread (void) |
445 | etp_maybe_start_thread (etp_pool pool) |
293 | { |
446 | { |
294 | if (ecb_expect_true (etp_nthreads () >= wanted)) |
447 | if (ecb_expect_true (etp_nthreads (pool) >= pool->wanted)) |
295 | return; |
448 | return; |
296 | |
449 | |
297 | /* todo: maybe use idle here, but might be less exact */ |
450 | /* todo: maybe use pool->idle here, but might be less exact */ |
298 | if (ecb_expect_true (0 <= (int)etp_nthreads () + (int)etp_npending () - (int)etp_nreqs ())) |
451 | if (ecb_expect_true (0 <= (int)etp_nthreads (pool) + (int)etp_npending (pool) - (int)etp_nreqs (pool))) |
299 | return; |
452 | return; |
300 | |
453 | |
301 | etp_start_thread (); |
454 | etp_start_thread (pool); |
302 | } |
455 | } |
303 | |
456 | |
304 | static void ecb_cold |
457 | static void ecb_cold |
305 | etp_end_thread (void) |
458 | etp_end_thread (etp_pool pool) |
306 | { |
459 | { |
307 | ETP_REQ *req = calloc (1, sizeof (ETP_REQ)); /* will be freed by worker */ |
460 | ETP_REQ *req = calloc (1, sizeof (ETP_REQ)); /* will be freed by worker */ |
308 | |
461 | |
309 | req->type = ETP_TYPE_QUIT; |
462 | req->type = ETP_TYPE_QUIT; |
310 | req->pri = ETP_PRI_MAX - ETP_PRI_MIN; |
463 | req->pri = ETP_PRI_MAX - ETP_PRI_MIN; |
311 | |
464 | |
312 | X_LOCK (reqlock); |
465 | X_LOCK (pool->reqlock); |
313 | reqq_push (&req_queue, req); |
466 | reqq_push (&pool->req_queue, req); |
314 | X_COND_SIGNAL (reqwait); |
467 | X_COND_SIGNAL (pool->reqwait); |
315 | X_UNLOCK (reqlock); |
468 | X_UNLOCK (pool->reqlock); |
316 | |
469 | |
317 | X_LOCK (wrklock); |
470 | X_LOCK (pool->wrklock); |
318 | --started; |
471 | --pool->started; |
319 | X_UNLOCK (wrklock); |
472 | X_UNLOCK (pool->wrklock); |
320 | } |
473 | } |
321 | |
474 | |
322 | ETP_API_DECL int |
475 | ETP_API_DECL int |
323 | etp_poll (void) |
476 | etp_poll (etp_pool pool) |
324 | { |
477 | { |
325 | unsigned int maxreqs; |
478 | unsigned int maxreqs; |
326 | unsigned int maxtime; |
479 | unsigned int maxtime; |
327 | struct timeval tv_start, tv_now; |
480 | struct timeval tv_start, tv_now; |
328 | |
481 | |
329 | X_LOCK (reslock); |
482 | X_LOCK (pool->reslock); |
330 | maxreqs = max_poll_reqs; |
483 | maxreqs = pool->max_poll_reqs; |
331 | maxtime = max_poll_time; |
484 | maxtime = pool->max_poll_time; |
332 | X_UNLOCK (reslock); |
485 | X_UNLOCK (pool->reslock); |
333 | |
486 | |
334 | if (maxtime) |
487 | if (maxtime) |
335 | gettimeofday (&tv_start, 0); |
488 | gettimeofday (&tv_start, 0); |
336 | |
489 | |
337 | for (;;) |
490 | for (;;) |
338 | { |
491 | { |
339 | ETP_REQ *req; |
492 | ETP_REQ *req; |
340 | |
493 | |
341 | etp_maybe_start_thread (); |
494 | etp_maybe_start_thread (pool); |
342 | |
495 | |
343 | X_LOCK (reslock); |
496 | X_LOCK (pool->reslock); |
344 | req = reqq_shift (&res_queue); |
497 | req = reqq_shift (&pool->res_queue); |
345 | |
498 | |
346 | if (req) |
499 | if (ecb_expect_true (req)) |
347 | { |
500 | { |
348 | --npending; |
501 | --pool->npending; |
349 | |
502 | |
350 | if (!res_queue.size && done_poll_cb) |
503 | if (!pool->res_queue.size) |
351 | done_poll_cb (); |
504 | ETP_DONE_POLL (pool); |
352 | } |
505 | } |
353 | |
506 | |
354 | X_UNLOCK (reslock); |
507 | X_UNLOCK (pool->reslock); |
355 | |
508 | |
356 | if (!req) |
509 | if (ecb_expect_false (!req)) |
357 | return 0; |
510 | return 0; |
358 | |
511 | |
359 | X_LOCK (reqlock); |
512 | X_LOCK (pool->reqlock); |
360 | --nreqs; |
513 | --pool->nreqs; |
361 | X_UNLOCK (reqlock); |
514 | X_UNLOCK (pool->reqlock); |
362 | |
515 | |
363 | if (ecb_expect_false (req->type == ETP_TYPE_GROUP && req->size)) |
516 | if (ecb_expect_false (req->type == ETP_TYPE_GROUP && req->size)) |
364 | { |
517 | { |
365 | req->int1 = 1; /* mark request as delayed */ |
518 | req->flags |= ETP_FLAG_DELAYED; /* mark request as delayed */ |
366 | continue; |
519 | continue; |
367 | } |
520 | } |
368 | else |
521 | else |
369 | { |
522 | { |
370 | int res = ETP_FINISH (req); |
523 | int res = ETP_FINISH (req); |
… | |
… | |
387 | errno = EAGAIN; |
540 | errno = EAGAIN; |
388 | return -1; |
541 | return -1; |
389 | } |
542 | } |
390 | |
543 | |
391 | ETP_API_DECL void |
544 | ETP_API_DECL void |
392 | etp_grp_cancel (ETP_REQ *grp); |
545 | etp_grp_cancel (etp_pool pool, ETP_REQ *grp); |
393 | |
546 | |
394 | ETP_API_DECL void |
547 | ETP_API_DECL void |
395 | etp_cancel (ETP_REQ *req) |
548 | etp_cancel (etp_pool pool, ETP_REQ *req) |
396 | { |
549 | { |
397 | req->cancelled = 1; |
550 | req->cancelled = 1; |
398 | |
551 | |
399 | etp_grp_cancel (req); |
552 | etp_grp_cancel (pool, req); |
400 | } |
553 | } |
401 | |
554 | |
402 | ETP_API_DECL void |
555 | ETP_API_DECL void |
403 | etp_grp_cancel (ETP_REQ *grp) |
556 | etp_grp_cancel (etp_pool pool, ETP_REQ *grp) |
404 | { |
557 | { |
405 | for (grp = grp->grp_first; grp; grp = grp->grp_next) |
558 | for (grp = grp->grp_first; grp; grp = grp->grp_next) |
406 | etp_cancel (grp); |
559 | etp_cancel (pool, grp); |
407 | } |
560 | } |
408 | |
561 | |
409 | ETP_API_DECL void |
562 | ETP_API_DECL void |
410 | etp_submit (ETP_REQ *req) |
563 | etp_submit (etp_pool pool, ETP_REQ *req) |
411 | { |
564 | { |
412 | req->pri -= ETP_PRI_MIN; |
565 | req->pri -= ETP_PRI_MIN; |
413 | |
566 | |
414 | if (ecb_expect_false (req->pri < ETP_PRI_MIN - ETP_PRI_MIN)) req->pri = ETP_PRI_MIN - ETP_PRI_MIN; |
567 | if (ecb_expect_false (req->pri < ETP_PRI_MIN - ETP_PRI_MIN)) req->pri = ETP_PRI_MIN - ETP_PRI_MIN; |
415 | if (ecb_expect_false (req->pri > ETP_PRI_MAX - ETP_PRI_MIN)) req->pri = ETP_PRI_MAX - ETP_PRI_MIN; |
568 | if (ecb_expect_false (req->pri > ETP_PRI_MAX - ETP_PRI_MIN)) req->pri = ETP_PRI_MAX - ETP_PRI_MIN; |
416 | |
569 | |
417 | if (ecb_expect_false (req->type == ETP_TYPE_GROUP)) |
570 | if (ecb_expect_false (req->type == ETP_TYPE_GROUP)) |
418 | { |
571 | { |
419 | /* I hope this is worth it :/ */ |
572 | /* I hope this is worth it :/ */ |
420 | X_LOCK (reqlock); |
573 | X_LOCK (pool->reqlock); |
421 | ++nreqs; |
574 | ++pool->nreqs; |
422 | X_UNLOCK (reqlock); |
575 | X_UNLOCK (pool->reqlock); |
423 | |
576 | |
424 | X_LOCK (reslock); |
577 | X_LOCK (pool->reslock); |
425 | |
578 | |
426 | ++npending; |
579 | ++pool->npending; |
427 | |
580 | |
428 | if (!reqq_push (&res_queue, req) && want_poll_cb) |
581 | if (!reqq_push (&pool->res_queue, req)) |
429 | want_poll_cb (); |
582 | ETP_WANT_POLL (pool); |
430 | |
583 | |
431 | X_UNLOCK (reslock); |
584 | X_UNLOCK (pool->reslock); |
432 | } |
585 | } |
433 | else |
586 | else |
434 | { |
587 | { |
435 | X_LOCK (reqlock); |
588 | X_LOCK (pool->reqlock); |
436 | ++nreqs; |
589 | ++pool->nreqs; |
437 | ++nready; |
590 | ++pool->nready; |
438 | reqq_push (&req_queue, req); |
591 | reqq_push (&pool->req_queue, req); |
439 | X_COND_SIGNAL (reqwait); |
592 | X_COND_SIGNAL (pool->reqwait); |
440 | X_UNLOCK (reqlock); |
593 | X_UNLOCK (pool->reqlock); |
441 | |
594 | |
442 | etp_maybe_start_thread (); |
595 | etp_maybe_start_thread (pool); |
443 | } |
596 | } |
444 | } |
597 | } |
445 | |
598 | |
446 | ETP_API_DECL void ecb_cold |
599 | ETP_API_DECL void ecb_cold |
447 | etp_set_max_poll_time (double nseconds) |
600 | etp_set_max_poll_time (etp_pool pool, double seconds) |
448 | { |
601 | { |
449 | if (WORDACCESS_UNSAFE) X_LOCK (reslock); |
602 | if (WORDACCESS_UNSAFE) X_LOCK (pool->reslock); |
450 | max_poll_time = nseconds * ETP_TICKS; |
603 | pool->max_poll_time = seconds * ETP_TICKS; |
451 | if (WORDACCESS_UNSAFE) X_UNLOCK (reslock); |
604 | if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reslock); |
452 | } |
605 | } |
453 | |
606 | |
454 | ETP_API_DECL void ecb_cold |
607 | ETP_API_DECL void ecb_cold |
455 | etp_set_max_poll_reqs (unsigned int maxreqs) |
608 | etp_set_max_poll_reqs (etp_pool pool, unsigned int maxreqs) |
456 | { |
609 | { |
457 | if (WORDACCESS_UNSAFE) X_LOCK (reslock); |
610 | if (WORDACCESS_UNSAFE) X_LOCK (pool->reslock); |
458 | max_poll_reqs = maxreqs; |
611 | pool->max_poll_reqs = maxreqs; |
459 | if (WORDACCESS_UNSAFE) X_UNLOCK (reslock); |
612 | if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reslock); |
460 | } |
613 | } |
461 | |
614 | |
462 | ETP_API_DECL void ecb_cold |
615 | ETP_API_DECL void ecb_cold |
463 | etp_set_max_idle (unsigned int nthreads) |
616 | etp_set_max_idle (etp_pool pool, unsigned int threads) |
464 | { |
617 | { |
465 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
618 | if (WORDACCESS_UNSAFE) X_LOCK (pool->reqlock); |
466 | max_idle = nthreads; |
619 | pool->max_idle = threads; |
467 | if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); |
620 | if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reqlock); |
468 | } |
621 | } |
469 | |
622 | |
470 | ETP_API_DECL void ecb_cold |
623 | ETP_API_DECL void ecb_cold |
471 | etp_set_idle_timeout (unsigned int seconds) |
624 | etp_set_idle_timeout (etp_pool pool, unsigned int seconds) |
472 | { |
625 | { |
473 | if (WORDACCESS_UNSAFE) X_LOCK (reqlock); |
626 | if (WORDACCESS_UNSAFE) X_LOCK (pool->reqlock); |
474 | idle_timeout = seconds; |
627 | pool->idle_timeout = seconds; |
475 | if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); |
628 | if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reqlock); |
476 | } |
629 | } |
477 | |
630 | |
478 | ETP_API_DECL void ecb_cold |
631 | ETP_API_DECL void ecb_cold |
479 | etp_set_min_parallel (unsigned int nthreads) |
632 | etp_set_min_parallel (etp_pool pool, unsigned int threads) |
480 | { |
633 | { |
481 | if (wanted < nthreads) |
634 | if (pool->wanted < threads) |
482 | wanted = nthreads; |
635 | pool->wanted = threads; |
483 | } |
636 | } |
484 | |
637 | |
485 | ETP_API_DECL void ecb_cold |
638 | ETP_API_DECL void ecb_cold |
486 | etp_set_max_parallel (unsigned int nthreads) |
639 | etp_set_max_parallel (etp_pool pool, unsigned int threads) |
487 | { |
640 | { |
488 | if (wanted > nthreads) |
641 | if (pool->wanted > threads) |
489 | wanted = nthreads; |
642 | pool->wanted = threads; |
490 | |
643 | |
491 | while (started > wanted) |
644 | while (pool->started > pool->wanted) |
492 | etp_end_thread (); |
645 | etp_end_thread (pool); |
493 | } |
646 | } |
494 | |
647 | |