--- libcoro/coro.c	2008/03/02 16:10:22	1.29
+++ libcoro/coro.c	2008/10/30 09:44:31	1.33
@@ -45,10 +45,10 @@
 # if __sgi
 #  define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
 #  define STACK_ADJUST_SIZE(sp,ss) ((ss) - 8)
-# elif __i386__ && CORO_LINUX
+# elif (__i386__ && CORO_LINUX) || (_M_IX86 && CORO_LOSER)
 #  define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss))
 #  define STACK_ADJUST_SIZE(sp,ss) (ss)
-# elif __amd64__ && CORO_LINUX
+# elif (__amd64__ && CORO_LINUX) || ((_M_AMD64 || _M_IA64) && CORO_LOSER)
 #  define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
 #  define STACK_ADJUST_SIZE(sp,ss) (ss)
 # else
@@ -122,12 +122,12 @@
 #endif
 
 #if CORO_ASM
-void __attribute__((__noinline__, __fastcall__))
+void __attribute__((__noinline__, __regparm__(2)))
 coro_transfer (struct coro_context *prev, struct coro_context *next)
 {
   asm volatile (
 #if __amd64
-# define NUM_CLOBBERED 5
+# define NUM_SAVED 5
     "push %%rbx\n\t"
     "push %%r12\n\t"
     "push %%r13\n\t"
@@ -141,7 +141,7 @@
     "pop %%r12\n\t"
     "pop %%rbx\n\t"
 #elif __i386
-# define NUM_CLOBBERED 4
+# define NUM_SAVED 4
     "push %%ebx\n\t"
     "push %%esi\n\t"
     "push %%edi\n\t"
@@ -161,6 +161,42 @@
 }
 #endif
 
+#if CORO_PTHREAD
+
+struct coro_init_args {
+  coro_func func;
+  void *arg;
+  coro_context *self, *main;
+};
+
+pthread_mutex_t coro_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static void *
+trampoline (void *args_)
+{
+  struct coro_init_args *args = (struct coro_init_args *)args_;
+  coro_func func = args->func;
+  void *arg = args->arg;
+
+  pthread_mutex_lock (&coro_mutex);
+  pthread_cond_destroy (&args->self->c);
+  coro_transfer (args->self, args->main);
+  func (arg);
+  pthread_mutex_unlock (&coro_mutex);
+
+  return 0;
+}
+
+void coro_transfer (coro_context *prev, coro_context *next)
+{
+  pthread_cond_init (&prev->c, 0);
+  pthread_cond_signal (&next->c);
+  pthread_cond_wait (&prev->c, &coro_mutex);
+  pthread_cond_destroy (&prev->c);
+}
+
+#endif
+
 /* initialize a machine state */
 void coro_create (coro_context *ctx,
                   coro_func coro, void *arg,
@@ -251,15 +287,15 @@
   ctx->env[8] = (long)coro_init;
 #elif defined(_M_IX86)
   ((_JUMP_BUFFER *)&ctx->env)->Eip = (long)coro_init;
-  ((_JUMP_BUFFER *)&ctx->env)->Esp = (long)STACK_ADJUST_PTR (sptr,ssize);
+  ((_JUMP_BUFFER *)&ctx->env)->Esp = (long)STACK_ADJUST_PTR (sptr, ssize);
 #elif defined(_M_AMD64)
   ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64)coro_init;
-  ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64)STACK_ADJUST_PTR (sptr,ssize);
+  ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64)STACK_ADJUST_PTR (sptr, ssize);
 #elif defined(_M_IA64)
   ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64)coro_init;
-  ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64)STACK_ADJUST_PTR (sptr,ssize);
+  ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64)STACK_ADJUST_PTR (sptr, ssize);
 #else
-#error "microsoft libc or architecture not supported"
+# error "microsoft libc or architecture not supported"
 #endif
 
 # elif CORO_LINUX
@@ -290,16 +326,48 @@
 # elif CORO_ASM
 
   ctx->sp = (volatile void **)(ssize + (char *)sptr);
 
+  /* we try to allow for both functions with and without frame pointers */
   *--ctx->sp = (void *)coro_init;
-  *--ctx->sp = (void *)coro_init; // this is needed when the prologue saves ebp
-  ctx->sp -= NUM_CLOBBERED;
+  {
+    void **frame = ctx->sp - 1;
+    int i;
+    for (i = NUM_SAVED; i--; )
+      *--ctx->sp = frame;
+  }
 
 # endif
 
   coro_transfer ((coro_context *)create_coro, (coro_context *)new_coro);
 
+# elif CORO_PTHREAD
+
+  pthread_t id;
+  pthread_attr_t attr;
+  coro_context nctx;
+  struct coro_init_args args;
+  static int once;
+
+  if (!once)
+    {
+      pthread_mutex_lock (&coro_mutex);
+      once = 1;
+    }
+
+  args.func = coro;
+  args.arg  = arg;
+  args.self = ctx;
+  args.main = &nctx;
+
+  pthread_attr_init (&attr);
+  pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
+  pthread_attr_setstack (&attr, sptr, (size_t)ssize);
+  pthread_create (&id, &attr, trampoline, &args);
+
+  pthread_cond_init (&args.self->c, 0);
+  coro_transfer (args.main, args.self);
+
 #else
-# error unsupported architecture
+# error unsupported backend
 #endif
 }
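
For context, here is a minimal sketch of how the coro_create/coro_transfer API
modified above is typically driven. It is not part of the patch: my_coro, the
"hello" argument, the 256 KiB stack size and the coro.h header name are
illustrative assumptions. Note that under the new CORO_PTHREAD backend the
stack is handed to pthread_attr_setstack, which imposes its own minimum-size
and alignment requirements on sptr/ssize.

#include <stdio.h>
#include <stdlib.h>

#include "coro.h" /* assumed header providing coro_context, coro_create, coro_transfer */

static coro_context main_ctx; /* saved into when we first transfer out of it */
static coro_context coro_ctx;

static void
my_coro (void *arg)
{
  printf ("in coroutine: %s\n", (char *)arg);
  coro_transfer (&coro_ctx, &main_ctx); /* yield back to the creator */
}

int
main (void)
{
  long ssize = 256 * 1024;     /* illustrative stack size */
  void *sptr = malloc (ssize); /* CORO_PTHREAD may require stricter alignment */

  coro_create (&coro_ctx, my_coro, "hello", sptr, ssize);
  coro_transfer (&main_ctx, &coro_ctx); /* run the coroutine until it yields */
  printf ("back in main\n");

  return 0;
}

The same driver is meant to behave identically on every backend: with
CORO_PTHREAD each coroutine is a detached thread serialized by coro_mutex, so
although real threads are used, only one context runs at any time, matching
the semantics of the setjmp/ucontext/asm variants.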