--- libcoro/coro.c	2008/01/20 17:30:24	1.28
+++ libcoro/coro.c	2008/10/30 09:44:31	1.33
@@ -11,9 +11,6 @@
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
  * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
@@ -25,8 +22,19 @@
  * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
  * OF THE POSSIBILITY OF SUCH DAMAGE.
  *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License ("GPL") version 2 or any later version,
+ * in which case the provisions of the GPL are applicable instead of
+ * the above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the BSD license, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file under
+ * either the BSD or the GPL.
+ *
  * This library is modelled strictly after Ralf S. Engelschalls article at
- * http://www.gnu.org/software/pth/rse-pmt.ps. So most of the credit must
+ * http://www.gnu.org/software/pth/rse-pmt.ps. So most of the credit must
  * go to Ralf S. Engelschall <rse@engelschall.com>.
  */
@@ -37,10 +45,10 @@
 # if __sgi
 #  define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
 #  define STACK_ADJUST_SIZE(sp,ss) ((ss) - 8)
-# elif __i386__ && CORO_LINUX
+# elif (__i386__ && CORO_LINUX) || (_M_IX86 && CORO_LOSER)
 #  define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss))
 #  define STACK_ADJUST_SIZE(sp,ss) (ss)
-# elif __amd64__ && CORO_LINUX
+# elif (__amd64__ && CORO_LINUX) || ((_M_AMD64 || _M_IA64) && CORO_LOSER)
 #  define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
 #  define STACK_ADJUST_SIZE(sp,ss) (ss)
 # else
@@ -114,12 +122,12 @@
 #endif
 
 #if CORO_ASM
-void __attribute__((__noinline__, __fastcall__))
+void __attribute__((__noinline__, __regparm__(2)))
 coro_transfer (struct coro_context *prev, struct coro_context *next)
 {
   asm volatile (
 #if __amd64
-# define NUM_CLOBBERED 5
+# define NUM_SAVED 5
     "push %%rbx\n\t"
     "push %%r12\n\t"
     "push %%r13\n\t"
@@ -133,7 +141,7 @@
     "pop %%r12\n\t"
     "pop %%rbx\n\t"
 #elif __i386
-# define NUM_CLOBBERED 4
+# define NUM_SAVED 4
     "push %%ebx\n\t"
     "push %%esi\n\t"
     "push %%edi\n\t"
@@ -153,6 +161,42 @@
 }
 #endif
 
+#if CORO_PTHREAD
+
+struct coro_init_args {
+  coro_func func;
+  void *arg;
+  coro_context *self, *main;
+};
+
+pthread_mutex_t coro_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static void *
+trampoline (void *args_)
+{
+  struct coro_init_args *args = (struct coro_init_args *)args_;
+  coro_func func = args->func;
+  void *arg = args->arg;
+
+  pthread_mutex_lock (&coro_mutex);
+  pthread_cond_destroy (&args->self->c);
+  coro_transfer (args->self, args->main);
+  func (arg);
+  pthread_mutex_unlock (&coro_mutex);
+
+  return 0;
+}
+
+void coro_transfer(coro_context *prev, coro_context *next)
+{
+  pthread_cond_init (&prev->c, 0);
+  pthread_cond_signal (&next->c);
+  pthread_cond_wait (&prev->c, &coro_mutex);
+  pthread_cond_destroy (&prev->c);
+}
+
+#endif
+
 /* initialize a machine state */
 void coro_create (coro_context *ctx, coro_func coro, void *arg,
@@ -243,15 +287,15 @@
   ctx->env[8] = (long)coro_init;
 #elif defined(_M_IX86)
   ((_JUMP_BUFFER *)&ctx->env)->Eip = (long)coro_init;
-  ((_JUMP_BUFFER *)&ctx->env)->Esp = (long)STACK_ADJUST_PTR (sptr,ssize);
+  ((_JUMP_BUFFER *)&ctx->env)->Esp = (long)STACK_ADJUST_PTR (sptr, ssize);
 #elif defined(_M_AMD64)
   ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64)coro_init;
-  ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64)STACK_ADJUST_PTR (sptr,ssize);
+  ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64)STACK_ADJUST_PTR (sptr, ssize);
 #elif defined(_M_IA64)
   ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64)coro_init;
-  ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64)STACK_ADJUST_PTR (sptr,ssize);
+  ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64)STACK_ADJUST_PTR (sptr, ssize);
 #else
-#error "microsoft libc or architecture not supported"
+# error "microsoft libc or architecture not supported"
 #endif
 
 # elif CORO_LINUX
@@ -282,16 +326,48 @@
 
 # elif CORO_ASM
 
   ctx->sp = (volatile void **)(ssize + (char *)sptr);
+  /* we try to allow for both functions with and without frame pointers */
   *--ctx->sp = (void *)coro_init;
-  *--ctx->sp = (void *)coro_init; // this is needed when the prologue saves ebp
-  ctx->sp -= NUM_CLOBBERED;
+  {
+    void **frame = ctx->sp - 1;
+    int i;
+    for (i = NUM_SAVED; i--; )
+      *--ctx->sp = frame;
+  }
 
 # endif
 
   coro_transfer ((coro_context *)create_coro, (coro_context *)new_coro);
 
+# elif CORO_PTHREAD
+
+  pthread_t id;
+  pthread_attr_t attr;
+  coro_context nctx;
+  struct coro_init_args args;
+  static int once;
+
+  if (!once)
+    {
+      pthread_mutex_lock (&coro_mutex);
+      once = 1;
+    }
+
+  args.func = coro;
+  args.arg  = arg;
+  args.self = ctx;
+  args.main = &nctx;
+
+  pthread_attr_init (&attr);
+  pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
+  pthread_attr_setstack (&attr, sptr, (size_t)ssize);
+  pthread_create (&id, &attr, trampoline, &args);
+
+  pthread_cond_init (&args.self->c, 0);
+  coro_transfer (args.main, args.self);
+
 #else
-# error unsupported architecture
+# error unsupported backend
 #endif
 }
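
For reference, a minimal sketch of how the interface this patch touches is used: coro_create () prepares a coroutine on a caller-supplied stack, and coro_transfer () switches contexts, saving the current state into its first argument. Everything below (the ping function, the global contexts, the argument string, the stack size) is an illustrative invention, not part of the patch; note that in this version of the library the main context is simply an uninitialised coro_context that the first transfer away from it fills in.

#include <stdio.h>
#include <stdlib.h>

#include "coro.h"

static coro_context main_ctx, coro_ctx; /* illustrative globals */

/* example coroutine body, matching the coro_func signature */
static void
ping (void *arg)
{
  int i;

  for (i = 0; i < 3; ++i)
    {
      printf ("ping %d (%s)\n", i, (char *)arg);
      coro_transfer (&coro_ctx, &main_ctx); /* yield back to main */
    }

  /* a coroutine must never return; transfer away for good instead */
  coro_transfer (&coro_ctx, &main_ctx);
}

int
main (void)
{
  long ssize = 256 * 1024;     /* illustrative stack size */
  void *sptr = malloc (ssize); /* doubles as the thread stack under
                                  CORO_PTHREAD (pthread_attr_setstack) */
  int i;

  coro_create (&coro_ctx, ping, "hello", sptr, ssize);

  for (i = 0; i < 3; ++i)
    coro_transfer (&main_ctx, &coro_ctx); /* resume the coroutine */

  /* the coroutine is left suspended here - its stack must remain
     valid for as long as it could still be resumed */
  return 0;
}

The CORO_PTHREAD backend added above makes the same calls work on top of real threads: coro_mutex acts as a baton held by whichever context is running, and coro_transfer () passes it on by signalling the target's condition variable and blocking on its own, so only one underlying thread makes progress at any time.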