ViewVC Help
View File | Revision Log | Show Annotations | Download File
/cvs/cvsroot/libcoro/coro.c
(Generate patch)

Comparing cvsroot/libcoro/coro.c (file contents):
Revision 1.28 by root, Sun Jan 20 17:30:24 2008 UTC vs.
Revision 1.38 by root, Fri Nov 7 20:12:26 2008 UTC

9 * 9 *
10 * 2. Redistributions in binary form must reproduce the above copyright 10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the 11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution. 12 * documentation and/or other materials provided with the distribution.
13 * 13 *
14 * 3. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
18 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 15 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
19 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 16 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
20 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 17 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
21 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 18 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 20 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 21 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
25 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 22 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
26 * OF THE POSSIBILITY OF SUCH DAMAGE. 23 * OF THE POSSIBILITY OF SUCH DAMAGE.
27 * 24 *
25 * Alternatively, the contents of this file may be used under the terms of
26 * the GNU General Public License ("GPL") version 2 or any later version,
27 * in which case the provisions of the GPL are applicable instead of
28 * the above. If you wish to allow the use of your version of this file
29 * only under the terms of the GPL and not to allow others to use your
30 * version of this file under the BSD license, indicate your decision
31 * by deleting the provisions above and replace them with the notice
32 * and other provisions required by the GPL. If you do not delete the
33 * provisions above, a recipient may use your version of this file under
34 * either the BSD or the GPL.
35 *
28 * This library is modelled strictly after Ralf S. Engelschalls article at 36 * This library is modelled strictly after Ralf S. Engelschalls article at
29 * http://www.gnu.org/software/pth/rse-pmt.ps. So most of the credit must 37 * http://www.gnu.org/software/pth/rse-pmt.ps. So most of the credit must
30 * go to Ralf S. Engelschall <rse@engelschall.com>. 38 * go to Ralf S. Engelschall <rse@engelschall.com>.
31 */ 39 */
32 40
33#include "coro.h" 41#include "coro.h"
42
43#include <string.h>
34 44
35#if !defined(STACK_ADJUST_PTR) 45#if !defined(STACK_ADJUST_PTR)
36/* IRIX is decidedly NON-unix */ 46/* IRIX is decidedly NON-unix */
37# if __sgi 47# if __sgi
38# define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8) 48# define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
39# define STACK_ADJUST_SIZE(sp,ss) ((ss) - 8) 49# define STACK_ADJUST_SIZE(sp,ss) ((ss) - 8)
40# elif __i386__ && CORO_LINUX 50# elif (__i386__ && CORO_LINUX) || (_M_IX86 && CORO_LOSER)
41# define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss)) 51# define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss))
42# define STACK_ADJUST_SIZE(sp,ss) (ss) 52# define STACK_ADJUST_SIZE(sp,ss) (ss)
43# elif __amd64__ && CORO_LINUX 53# elif (__amd64__ && CORO_LINUX) || ((_M_AMD64 || _M_IA64) && CORO_LOSER)
44# define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8) 54# define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
45# define STACK_ADJUST_SIZE(sp,ss) (ss) 55# define STACK_ADJUST_SIZE(sp,ss) (ss)
46# else 56# else
47# define STACK_ADJUST_PTR(sp,ss) (sp) 57# define STACK_ADJUST_PTR(sp,ss) (sp)
48# define STACK_ADJUST_SIZE(sp,ss) (ss) 58# define STACK_ADJUST_SIZE(sp,ss) (ss)
53# include <stddef.h> 63# include <stddef.h>
54#endif 64#endif
55 65
56#if CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM 66#if CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM
57 67
58#include <stdlib.h> 68# include <stdlib.h>
59 69
60#if CORO_SJLJ 70# if CORO_SJLJ
61# include <stdio.h> 71# include <stdio.h>
62# include <signal.h> 72# include <signal.h>
63# include <unistd.h> 73# include <unistd.h>
64#endif 74# endif
65 75
66static volatile coro_func coro_init_func; 76static volatile coro_func coro_init_func;
67static volatile void *coro_init_arg; 77static volatile void *coro_init_arg;
68static volatile coro_context *new_coro, *create_coro; 78static volatile coro_context *new_coro, *create_coro;
69 79
70/* what we really want to detect here is wether we use a new-enough version of GAS */ 80/* what we really want to detect here is wether we use a new-enough version of GAS */
71/* instead, check for gcc 3, ELF and GNU/Linux and hope for the best */ 81/* instead, check for gcc 3, ELF and GNU/Linux and hope for the best */
72#if __GNUC__ >= 3 && __ELF__ && __linux__ 82# if __GNUC__ >= 3 && __ELF__ && __linux__
73# define HAVE_CFI 1 83# define HAVE_CFI 1
74#endif 84# endif
75 85
76static void 86static void
77coro_init (void) 87coro_init (void)
78{ 88{
79 volatile coro_func func = coro_init_func; 89 volatile coro_func func = coro_init_func;
95static void 105static void
96trampoline (int sig) 106trampoline (int sig)
97{ 107{
98 if (setjmp (((coro_context *)new_coro)->env)) 108 if (setjmp (((coro_context *)new_coro)->env))
99 { 109 {
100#if HAVE_CFI 110# if HAVE_CFI
101 asm (".cfi_startproc"); 111 asm (".cfi_startproc");
102#endif 112# endif
103 coro_init (); /* start it */ 113 coro_init (); /* start it */
104#if HAVE_CFI 114# if HAVE_CFI
105 asm (".cfi_endproc"); 115 asm (".cfi_endproc");
106#endif 116# endif
107 } 117 }
108 else 118 else
109 trampoline_count++; 119 trampoline_count++;
110} 120}
111 121
112# endif 122# endif
113 123
114#endif 124#endif
115 125
116#if CORO_ASM 126#if CORO_ASM
117void __attribute__((__noinline__, __fastcall__)) 127
118coro_transfer (struct coro_context *prev, struct coro_context *next) 128 asm (
119{ 129 ".text\n"
120 asm volatile ( 130 ".globl coro_transfer\n"
131 ".type coro_transfer, @function\n"
132 "coro_transfer:\n"
121#if __amd64 133# if __amd64
122# define NUM_CLOBBERED 5 134# define NUM_SAVED 6
135 "\tpush %rbp\n"
123 "push %%rbx\n\t" 136 "\tpush %rbx\n"
124 "push %%r12\n\t" 137 "\tpush %r12\n"
125 "push %%r13\n\t" 138 "\tpush %r13\n"
126 "push %%r14\n\t" 139 "\tpush %r14\n"
127 "push %%r15\n\t" 140 "\tpush %r15\n"
128 "mov %%rsp, %0\n\t" 141 "\tmov %rsp, (%rdi)\n"
129 "mov %1, %%rsp\n\t" 142 "\tmov (%rsi), %rsp\n"
130 "pop %%r15\n\t" 143 "\tpop %r15\n"
131 "pop %%r14\n\t" 144 "\tpop %r14\n"
132 "pop %%r13\n\t" 145 "\tpop %r13\n"
133 "pop %%r12\n\t" 146 "\tpop %r12\n"
134 "pop %%rbx\n\t" 147 "\tpop %rbx\n"
148 "\tpop %rbp\n"
135#elif __i386 149# elif __i386
136# define NUM_CLOBBERED 4 150# define NUM_SAVED 4
137 "push %%ebx\n\t"
138 "push %%esi\n\t"
139 "push %%edi\n\t"
140 "push %%ebp\n\t" 151 "\tpush %ebp\n"
152 "\tpush %ebx\n"
153 "\tpush %esi\n"
154 "\tpush %edi\n"
141 "mov %%esp, %0\n\t" 155 "\tmov %esp, (%eax)\n"
142 "mov %1, %%esp\n\t" 156 "\tmov (%edx), %esp\n"
143 "pop %%ebp\n\t"
144 "pop %%edi\n\t" 157 "\tpop %edi\n"
145 "pop %%esi\n\t" 158 "\tpop %esi\n"
146 "pop %%ebx\n\t" 159 "\tpop %ebx\n"
160 "\tpop %ebp\n"
147#else 161# else
148# error unsupported architecture 162# error unsupported architecture
149#endif 163# endif
150 : "=m" (prev->sp) 164 "\tret\n"
151 : "m" (next->sp)
152 ); 165 );
166
167#endif
168
169#if CORO_PTHREAD
170
171/* this mutex will be locked by the running coroutine */
172pthread_mutex_t coro_mutex = PTHREAD_MUTEX_INITIALIZER;
173
174struct coro_init_args
175{
176 coro_func func;
177 void *arg;
178 coro_context *self, *main;
179};
180
181static pthread_t null_tid;
182
183/* I'd so love to cast pthread_mutex_unlock to void (*)(void *)... */
184static void
185mutex_unlock_wrapper (void *arg)
186{
187 pthread_mutex_unlock ((pthread_mutex_t *)arg);
153} 188}
189
190static void *
191trampoline (void *args_)
192{
193 struct coro_init_args *args = (struct coro_init_args *)args_;
194 coro_func func = args->func;
195 void *arg = args->arg;
196
197 pthread_mutex_lock (&coro_mutex);
198
199 /* we try to be good citizens and use deferred cancellation and cleanup handlers */
200 pthread_cleanup_push (mutex_unlock_wrapper, &coro_mutex);
201 coro_transfer (args->self, args->main);
202 func (arg);
203 pthread_cleanup_pop (1);
204
205 return 0;
206}
207
208void
209coro_transfer (coro_context *prev, coro_context *next)
210{
211 pthread_cond_signal (&next->cv);
212 pthread_cond_wait (&prev->cv, &coro_mutex);
213}
214
215void
216coro_destroy (coro_context *ctx)
217{
218 if (!pthread_equal (ctx->id, null_tid))
219 {
220 pthread_cancel (ctx->id);
221 pthread_mutex_unlock (&coro_mutex);
222 pthread_join (ctx->id, 0);
223 pthread_mutex_lock (&coro_mutex);
224 }
225
226 pthread_cond_destroy (&ctx->cv);
227}
228
154#endif 229#endif
155 230
156/* initialize a machine state */ 231/* initialize a machine state */
157void coro_create (coro_context *ctx, 232void
158 coro_func coro, void *arg, 233coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, long ssize)
159 void *sptr, long ssize)
160{ 234{
161#if CORO_UCONTEXT 235#if CORO_UCONTEXT
236
237 if (!coro)
238 return;
162 239
163 getcontext (&(ctx->uc)); 240 getcontext (&(ctx->uc));
164 241
165 ctx->uc.uc_link = 0; 242 ctx->uc.uc_link = 0;
166 ctx->uc.uc_stack.ss_sp = STACK_ADJUST_PTR (sptr,ssize); 243 ctx->uc.uc_stack.ss_sp = STACK_ADJUST_PTR (sptr,ssize);
167 ctx->uc.uc_stack.ss_size = (size_t) STACK_ADJUST_SIZE (sptr,ssize); 244 ctx->uc.uc_stack.ss_size = (size_t)STACK_ADJUST_SIZE (sptr,ssize);
168 ctx->uc.uc_stack.ss_flags = 0; 245 ctx->uc.uc_stack.ss_flags = 0;
169 246
170 makecontext (&(ctx->uc), (void (*)()) coro, 1, arg); 247 makecontext (&(ctx->uc), (void (*)())coro, 1, arg);
171 248
172#elif CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM 249#elif CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM
173 250
174# if CORO_SJLJ 251# if CORO_SJLJ
175 stack_t ostk, nstk; 252 stack_t ostk, nstk;
176 struct sigaction osa, nsa; 253 struct sigaction osa, nsa;
177 sigset_t nsig, osig; 254 sigset_t nsig, osig;
178# endif 255# endif
179 coro_context nctx; 256 coro_context nctx;
180 257
258 if (!coro)
259 return;
260
181 coro_init_func = coro; 261 coro_init_func = coro;
182 coro_init_arg = arg; 262 coro_init_arg = arg;
183 263
184 new_coro = ctx; 264 new_coro = ctx;
185 create_coro = &nctx; 265 create_coro = &nctx;
237 317
238# elif CORO_LOSER 318# elif CORO_LOSER
239 319
240 setjmp (ctx->env); 320 setjmp (ctx->env);
241#if __CYGWIN__ 321#if __CYGWIN__
242 ctx->env[7] = (long)((char *)sptr + ssize); 322 ctx->env[7] = (long)((char *)sptr + ssize) - sizeof (long);
243 ctx->env[8] = (long)coro_init; 323 ctx->env[8] = (long)coro_init;
244#elif defined(_M_IX86) 324#elif defined(_M_IX86)
245 ((_JUMP_BUFFER *)&ctx->env)->Eip = (long)coro_init; 325 ((_JUMP_BUFFER *)&ctx->env)->Eip = (long)coro_init;
246 ((_JUMP_BUFFER *)&ctx->env)->Esp = (long)STACK_ADJUST_PTR (sptr,ssize); 326 ((_JUMP_BUFFER *)&ctx->env)->Esp = (long)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
247#elif defined(_M_AMD64) 327#elif defined(_M_AMD64)
248 ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64)coro_init; 328 ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64)coro_init;
249 ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64)STACK_ADJUST_PTR (sptr,ssize); 329 ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
250#elif defined(_M_IA64) 330#elif defined(_M_IA64)
251 ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64)coro_init; 331 ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64)coro_init;
252 ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64)STACK_ADJUST_PTR (sptr,ssize); 332 ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
253#else 333#else
254#error "microsoft libc or architecture not supported" 334# error "microsoft libc or architecture not supported"
255#endif 335#endif
256 336
257# elif CORO_LINUX 337# elif CORO_LINUX
258 338
259 _setjmp (ctx->env); 339 _setjmp (ctx->env);
260#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (JB_PC) && defined (JB_SP) 340#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (JB_PC) && defined (JB_SP)
261 ctx->env[0].__jmpbuf[JB_PC] = (long)coro_init; 341 ctx->env[0].__jmpbuf[JB_PC] = (long)coro_init;
262 ctx->env[0].__jmpbuf[JB_SP] = (long)STACK_ADJUST_PTR (sptr, ssize); 342 ctx->env[0].__jmpbuf[JB_SP] = (long)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
263#elif __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (__mc68000__) 343#elif __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (__mc68000__)
264 ctx->env[0].__jmpbuf[0].__aregs[0] = (long int)coro_init; 344 ctx->env[0].__jmpbuf[0].__aregs[0] = (long int)coro_init;
265 ctx->env[0].__jmpbuf[0].__sp = (int *)((char *)sptr + ssize); 345 ctx->env[0].__jmpbuf[0].__sp = (int *)((char *)sptr + ssize) - sizeof (long);
266#elif defined (__GNU_LIBRARY__) && defined (__i386__) 346#elif defined (__GNU_LIBRARY__) && defined (__i386__)
267 ctx->env[0].__jmpbuf[0].__pc = (char *)coro_init; 347 ctx->env[0].__jmpbuf[0].__pc = (char *)coro_init;
268 ctx->env[0].__jmpbuf[0].__sp = (void *)((char *)sptr + ssize); 348 ctx->env[0].__jmpbuf[0].__sp = (void *)((char *)sptr + ssize) - sizeof (long);
269#elif defined (__GNU_LIBRARY__) && defined (__amd64__) 349#elif defined (__GNU_LIBRARY__) && defined (__amd64__)
270 ctx->env[0].__jmpbuf[JB_PC] = (long)coro_init; 350 ctx->env[0].__jmpbuf[JB_PC] = (long)coro_init;
271 ctx->env[0].__jmpbuf[JB_RSP] = (long)STACK_ADJUST_PTR (sptr, ssize); 351 ctx->env[0].__jmpbuf[0].__sp = (void *)((char *)sptr + ssize) - sizeof (long);
272#else 352#else
273# error "linux libc or architecture not supported" 353# error "linux libc or architecture not supported"
274#endif 354#endif
275 355
276# elif CORO_IRIX 356# elif CORO_IRIX
277 357
278 setjmp (ctx->env); 358 setjmp (ctx->env);
279 ctx->env[JB_PC] = (__uint64_t)coro_init; 359 ctx->env[JB_PC] = (__uint64_t)coro_init;
280 ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize); 360 ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
281 361
282# elif CORO_ASM 362# elif CORO_ASM
283 363
284 ctx->sp = (volatile void **)(ssize + (char *)sptr); 364 ctx->sp = (volatile void **)(ssize + (char *)sptr);
365 *--ctx->sp = (void *)abort; /* needed for alignment only */
285 *--ctx->sp = (void *)coro_init; 366 *--ctx->sp = (void *)coro_init;
286 *--ctx->sp = (void *)coro_init; // this is needed when the prologue saves ebp
287 ctx->sp -= NUM_CLOBBERED; 367 ctx->sp -= NUM_SAVED;
288 368
289# endif 369# endif
290 370
291 coro_transfer ((coro_context *)create_coro, (coro_context *)new_coro); 371 coro_transfer ((coro_context *)create_coro, (coro_context *)new_coro);
292 372
373# elif CORO_PTHREAD
374
375 static coro_context nctx;
376 static int once;
377
378 if (!once)
379 {
380 once = 1;
381
382 pthread_mutex_lock (&coro_mutex);
383 pthread_cond_init (&nctx.cv, 0);
384 null_tid = pthread_self ();
385 }
386
387 pthread_cond_init (&ctx->cv, 0);
388
389 if (coro)
390 {
391 pthread_attr_t attr;
392 struct coro_init_args args;
393
394 args.func = coro;
395 args.arg = arg;
396 args.self = ctx;
397 args.main = &nctx;
398
399 pthread_attr_init (&attr);
400 pthread_attr_setstack (&attr, sptr, (size_t)ssize);
401 pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
402 pthread_create (&ctx->id, &attr, trampoline, &args);
403
404 coro_transfer (args.main, args.self);
405 }
406 else
407 ctx->id = null_tid;
408
293#else 409#else
294# error unsupported architecture 410# error unsupported backend
295#endif 411#endif
296} 412}
297 413

Diff Legend

- Removed lines
+ Added lines
< Changed lines
> Changed lines