/cvs/libcoro/coro.c

Comparing libcoro/coro.c (file contents):
Revision 1.29 by root, Sun Mar 2 16:10:22 2008 UTC vs.
Revision 1.38 by root, Fri Nov 7 20:12:26 2008 UTC

@@ -38,19 +38,21 @@
  * go to Ralf S. Engelschall <rse@engelschall.com>.
  */
 
 #include "coro.h"
 
+#include <string.h>
+
 #if !defined(STACK_ADJUST_PTR)
 /* IRIX is decidedly NON-unix */
 # if __sgi
 # define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
 # define STACK_ADJUST_SIZE(sp,ss) ((ss) - 8)
-# elif __i386__ && CORO_LINUX
+# elif (__i386__ && CORO_LINUX) || (_M_IX86 && CORO_LOSER)
 # define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss))
 # define STACK_ADJUST_SIZE(sp,ss) (ss)
-# elif __amd64__ && CORO_LINUX
+# elif (__amd64__ && CORO_LINUX) || ((_M_AMD64 || _M_IA64) && CORO_LOSER)
 # define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
 # define STACK_ADJUST_SIZE(sp,ss) (ss)
 # else
 # define STACK_ADJUST_PTR(sp,ss) (sp)
 # define STACK_ADJUST_SIZE(sp,ss) (ss)
@@ -61,27 +63,27 @@
 # include <stddef.h>
 #endif
 
 #if CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM
 
-#include <stdlib.h>
+# include <stdlib.h>
 
-#if CORO_SJLJ
+# if CORO_SJLJ
 # include <stdio.h>
 # include <signal.h>
 # include <unistd.h>
-#endif
+# endif
 
 static volatile coro_func coro_init_func;
 static volatile void *coro_init_arg;
 static volatile coro_context *new_coro, *create_coro;
 
 /* what we really want to detect here is wether we use a new-enough version of GAS */
 /* instead, check for gcc 3, ELF and GNU/Linux and hope for the best */
-#if __GNUC__ >= 3 && __ELF__ && __linux__
+# if __GNUC__ >= 3 && __ELF__ && __linux__
 # define HAVE_CFI 1
-#endif
+# endif
 
 static void
 coro_init (void)
 {
   volatile coro_func func = coro_init_func;
@@ -103,91 +105,161 @@
 static void
 trampoline (int sig)
 {
   if (setjmp (((coro_context *)new_coro)->env))
     {
-#if HAVE_CFI
+# if HAVE_CFI
       asm (".cfi_startproc");
-#endif
+# endif
      coro_init (); /* start it */
-#if HAVE_CFI
+# if HAVE_CFI
       asm (".cfi_endproc");
-#endif
+# endif
     }
   else
     trampoline_count++;
 }
 
 # endif
 
 #endif
 
 #if CORO_ASM
-void __attribute__((__noinline__, __fastcall__))
-coro_transfer (struct coro_context *prev, struct coro_context *next)
-{
-  asm volatile (
-#if __amd64
-# define NUM_CLOBBERED 5
-    "push %%rbx\n\t"
-    "push %%r12\n\t"
-    "push %%r13\n\t"
-    "push %%r14\n\t"
-    "push %%r15\n\t"
-    "mov %%rsp, %0\n\t"
-    "mov %1, %%rsp\n\t"
-    "pop %%r15\n\t"
-    "pop %%r14\n\t"
-    "pop %%r13\n\t"
-    "pop %%r12\n\t"
-    "pop %%rbx\n\t"
-#elif __i386
-# define NUM_CLOBBERED 4
-    "push %%ebx\n\t"
-    "push %%esi\n\t"
-    "push %%edi\n\t"
-    "push %%ebp\n\t"
-    "mov %%esp, %0\n\t"
-    "mov %1, %%esp\n\t"
-    "pop %%ebp\n\t"
-    "pop %%edi\n\t"
-    "pop %%esi\n\t"
-    "pop %%ebx\n\t"
-#else
+
+  asm (
+    ".text\n"
+    ".globl coro_transfer\n"
+    ".type coro_transfer, @function\n"
+    "coro_transfer:\n"
+# if __amd64
+# define NUM_SAVED 6
+    "\tpush %rbp\n"
+    "\tpush %rbx\n"
+    "\tpush %r12\n"
+    "\tpush %r13\n"
+    "\tpush %r14\n"
+    "\tpush %r15\n"
+    "\tmov %rsp, (%rdi)\n"
+    "\tmov (%rsi), %rsp\n"
+    "\tpop %r15\n"
+    "\tpop %r14\n"
+    "\tpop %r13\n"
+    "\tpop %r12\n"
+    "\tpop %rbx\n"
+    "\tpop %rbp\n"
+# elif __i386
+# define NUM_SAVED 4
+    "\tpush %ebp\n"
+    "\tpush %ebx\n"
+    "\tpush %esi\n"
+    "\tpush %edi\n"
+    "\tmov %esp, (%eax)\n"
+    "\tmov (%edx), %esp\n"
+    "\tpop %edi\n"
+    "\tpop %esi\n"
+    "\tpop %ebx\n"
+    "\tpop %ebp\n"
+# else
 # error unsupported architecture
-#endif
-    : "=m" (prev->sp)
-    : "m" (next->sp)
+# endif
+    "\tret\n"
   );
+
+#endif
+
+#if CORO_PTHREAD
+
+/* this mutex will be locked by the running coroutine */
+pthread_mutex_t coro_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+struct coro_init_args
+{
+  coro_func func;
+  void *arg;
+  coro_context *self, *main;
+};
+
+static pthread_t null_tid;
+
+/* I'd so love to cast pthread_mutex_unlock to void (*)(void *)... */
+static void
+mutex_unlock_wrapper (void *arg)
+{
+  pthread_mutex_unlock ((pthread_mutex_t *)arg);
 }
+
+static void *
+trampoline (void *args_)
+{
+  struct coro_init_args *args = (struct coro_init_args *)args_;
+  coro_func func = args->func;
+  void *arg = args->arg;
+
+  pthread_mutex_lock (&coro_mutex);
+
+  /* we try to be good citizens and use deferred cancellation and cleanup handlers */
+  pthread_cleanup_push (mutex_unlock_wrapper, &coro_mutex);
+  coro_transfer (args->self, args->main);
+  func (arg);
+  pthread_cleanup_pop (1);
+
+  return 0;
+}
+
+void
+coro_transfer (coro_context *prev, coro_context *next)
+{
+  pthread_cond_signal (&next->cv);
+  pthread_cond_wait (&prev->cv, &coro_mutex);
+}
+
+void
+coro_destroy (coro_context *ctx)
+{
+  if (!pthread_equal (ctx->id, null_tid))
+    {
+      pthread_cancel (ctx->id);
+      pthread_mutex_unlock (&coro_mutex);
+      pthread_join (ctx->id, 0);
+      pthread_mutex_lock (&coro_mutex);
+    }
+
+  pthread_cond_destroy (&ctx->cv);
+}
+
 #endif
 
 /* initialize a machine state */
-void coro_create (coro_context *ctx,
-                  coro_func coro, void *arg,
-                  void *sptr, long ssize)
+void
+coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, long ssize)
 {
 #if CORO_UCONTEXT
+
+  if (!coro)
+    return;
 
   getcontext (&(ctx->uc));
 
   ctx->uc.uc_link = 0;
   ctx->uc.uc_stack.ss_sp = STACK_ADJUST_PTR (sptr,ssize);
-  ctx->uc.uc_stack.ss_size = (size_t) STACK_ADJUST_SIZE (sptr,ssize);
+  ctx->uc.uc_stack.ss_size = (size_t)STACK_ADJUST_SIZE (sptr,ssize);
   ctx->uc.uc_stack.ss_flags = 0;
 
-  makecontext (&(ctx->uc), (void (*)()) coro, 1, arg);
+  makecontext (&(ctx->uc), (void (*)())coro, 1, arg);
 
 #elif CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM
 
 # if CORO_SJLJ
   stack_t ostk, nstk;
   struct sigaction osa, nsa;
   sigset_t nsig, osig;
 # endif
   coro_context nctx;
 
+  if (!coro)
+    return;
+
   coro_init_func = coro;
   coro_init_arg = arg;
 
   new_coro = ctx;
   create_coro = &nctx;
@@ -245,61 +317,97 @@
 
 # elif CORO_LOSER
 
   setjmp (ctx->env);
 #if __CYGWIN__
-  ctx->env[7] = (long)((char *)sptr + ssize);
+  ctx->env[7] = (long)((char *)sptr + ssize) - sizeof (long);
   ctx->env[8] = (long)coro_init;
 #elif defined(_M_IX86)
   ((_JUMP_BUFFER *)&ctx->env)->Eip = (long)coro_init;
-  ((_JUMP_BUFFER *)&ctx->env)->Esp = (long)STACK_ADJUST_PTR (sptr,ssize);
+  ((_JUMP_BUFFER *)&ctx->env)->Esp = (long)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
 #elif defined(_M_AMD64)
   ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64)coro_init;
-  ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64)STACK_ADJUST_PTR (sptr,ssize);
+  ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
 #elif defined(_M_IA64)
   ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64)coro_init;
-  ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64)STACK_ADJUST_PTR (sptr,ssize);
+  ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
 #else
-#error "microsoft libc or architecture not supported"
+# error "microsoft libc or architecture not supported"
 #endif
 
 # elif CORO_LINUX
 
   _setjmp (ctx->env);
 #if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (JB_PC) && defined (JB_SP)
   ctx->env[0].__jmpbuf[JB_PC] = (long)coro_init;
-  ctx->env[0].__jmpbuf[JB_SP] = (long)STACK_ADJUST_PTR (sptr, ssize);
+  ctx->env[0].__jmpbuf[JB_SP] = (long)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
 #elif __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (__mc68000__)
   ctx->env[0].__jmpbuf[0].__aregs[0] = (long int)coro_init;
-  ctx->env[0].__jmpbuf[0].__sp = (int *)((char *)sptr + ssize);
+  ctx->env[0].__jmpbuf[0].__sp = (int *)((char *)sptr + ssize) - sizeof (long);
 #elif defined (__GNU_LIBRARY__) && defined (__i386__)
   ctx->env[0].__jmpbuf[0].__pc = (char *)coro_init;
-  ctx->env[0].__jmpbuf[0].__sp = (void *)((char *)sptr + ssize);
+  ctx->env[0].__jmpbuf[0].__sp = (void *)((char *)sptr + ssize) - sizeof (long);
 #elif defined (__GNU_LIBRARY__) && defined (__amd64__)
   ctx->env[0].__jmpbuf[JB_PC] = (long)coro_init;
-  ctx->env[0].__jmpbuf[JB_RSP] = (long)STACK_ADJUST_PTR (sptr, ssize);
+  ctx->env[0].__jmpbuf[0].__sp = (void *)((char *)sptr + ssize) - sizeof (long);
 #else
 # error "linux libc or architecture not supported"
 #endif
 
 # elif CORO_IRIX
 
   setjmp (ctx->env);
   ctx->env[JB_PC] = (__uint64_t)coro_init;
-  ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize);
+  ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
 
 # elif CORO_ASM
 
   ctx->sp = (volatile void **)(ssize + (char *)sptr);
+  *--ctx->sp = (void *)abort; /* needed for alignment only */
   *--ctx->sp = (void *)coro_init;
-  *--ctx->sp = (void *)coro_init; // this is needed when the prologue saves ebp
-  ctx->sp -= NUM_CLOBBERED;
+  ctx->sp -= NUM_SAVED;
 
 # endif
 
   coro_transfer ((coro_context *)create_coro, (coro_context *)new_coro);
 
+# elif CORO_PTHREAD
+
+  static coro_context nctx;
+  static int once;
+
+  if (!once)
+    {
+      once = 1;
+
+      pthread_mutex_lock (&coro_mutex);
+      pthread_cond_init (&nctx.cv, 0);
+      null_tid = pthread_self ();
+    }
+
+  pthread_cond_init (&ctx->cv, 0);
+
+  if (coro)
+    {
+      pthread_attr_t attr;
+      struct coro_init_args args;
+
+      args.func = coro;
+      args.arg = arg;
+      args.self = ctx;
+      args.main = &nctx;
+
+      pthread_attr_init (&attr);
+      pthread_attr_setstack (&attr, sptr, (size_t)ssize);
+      pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
+      pthread_create (&ctx->id, &attr, trampoline, &args);
+
+      coro_transfer (args.main, args.self);
+    }
+  else
+    ctx->id = null_tid;
+
 #else
-# error unsupported architecture
+# error unsupported backend
 #endif
 }
 

Diff Legend

- Removed lines
+ Added lines
  Unchanged (context) lines
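
For orientation, here is a minimal usage sketch of the API this diff touches (coro_create and coro_transfer, as declared in coro.h). The context names, the payload function and the 64 KiB stack size are illustrative assumptions and not part of either revision; treat it as a sketch of the calling pattern, not as code from the repository.

#include <stdio.h>
#include <stdlib.h>

#include "coro.h"

static coro_context ctx_main; /* context of the creating thread (assumed name) */
static coro_context ctx_coro; /* context of the new coroutine (assumed name)   */

/* a coro_func receives the arg passed to coro_create and must not return;
   it keeps handing control back with coro_transfer instead */
static void
my_coro (void *arg)
{
  printf ("in coroutine: %s\n", (const char *)arg);

  for (;;)
    coro_transfer (&ctx_coro, &ctx_main);
}

int
main (void)
{
  long ssize = 65536;          /* assumed stack size */
  void *sptr = malloc (ssize); /* stack for the new coroutine */

  /* as of revision 1.38, passing a null function only initialises the
     context of the calling thread where the backend needs it */
  coro_create (&ctx_main, 0, 0, 0, 0);

  /* set up the coroutine; it does not run until transferred to */
  coro_create (&ctx_coro, my_coro, "hello", sptr, ssize);

  /* switch into the coroutine; it prints and transfers back here */
  coro_transfer (&ctx_main, &ctx_coro);

  /* cleanup of the coroutine and its stack is omitted for brevity */
  return 0;
}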