…
 #if !defined(STACK_ADJUST_PTR)
 /* IRIX is decidedly NON-unix */
 # if __sgi
 #  define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
 #  define STACK_ADJUST_SIZE(sp,ss) ((ss) - 8)
-# elif __i386__ && CORO_LINUX
+# elif (__i386__ && CORO_LINUX) || (_M_IX86 && CORO_LOSER)
 #  define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss))
 #  define STACK_ADJUST_SIZE(sp,ss) (ss)
-# elif __amd64__ && CORO_LINUX
+# elif (__amd64__ && CORO_LINUX) || ((_M_AMD64 || _M_IA64) && CORO_LOSER)
 #  define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
 #  define STACK_ADJUST_SIZE(sp,ss) (ss)
 # else
 #  define STACK_ADJUST_PTR(sp,ss) (sp)
 #  define STACK_ADJUST_SIZE(sp,ss) (ss)
…
 # endif

 #endif

 #if CORO_ASM
-void __attribute__((__noinline__, __fastcall__))
+void __attribute__((__noinline__, __regparm__(2)))
 coro_transfer (struct coro_context *prev, struct coro_context *next)
 {
   asm volatile (
 #if __amd64
-# define NUM_CLOBBERED 5
+# define NUM_SAVED 5
     "push %%rbx\n\t"
     "push %%r12\n\t"
     "push %%r13\n\t"
     "push %%r14\n\t"
     "push %%r15\n\t"
…
     "pop %%r14\n\t"
     "pop %%r13\n\t"
     "pop %%r12\n\t"
     "pop %%rbx\n\t"
 #elif __i386
-# define NUM_CLOBBERED 4
+# define NUM_SAVED 4
     "push %%ebx\n\t"
     "push %%esi\n\t"
     "push %%edi\n\t"
     "push %%ebp\n\t"
     "mov %%esp, %0\n\t"
…
     : "m" (next->sp)
   );
 }
 #endif

+#if CORO_PTHREAD
+
+struct coro_init_args {
+  coro_func func;
+  void *arg;
+  coro_context *self, *main;
+};
+
+pthread_mutex_t coro_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static void *
+trampoline (void *args_)
+{
+  struct coro_init_args *args = (struct coro_init_args *)args_;
+  coro_func func = args->func;
+  void *arg = args->arg;
+
+  pthread_mutex_lock (&coro_mutex);
+  pthread_cond_destroy (&args->self->c);
+  coro_transfer (args->self, args->main);
+  func (arg);
+  pthread_mutex_unlock (&coro_mutex);
+
+  return 0;
+}
+
+void coro_transfer (coro_context *prev, coro_context *next)
+{
+  pthread_cond_init (&prev->c, 0);
+  pthread_cond_signal (&next->c);
+  pthread_cond_wait (&prev->c, &coro_mutex);
+  pthread_cond_destroy (&prev->c);
+}
+
+#endif
+
 /* initialize a machine state */
 void coro_create (coro_context *ctx,
                   coro_func coro, void *arg,
                   void *sptr, long ssize)
 {
…
 #if __CYGWIN__
   ctx->env[7] = (long)((char *)sptr + ssize);
   ctx->env[8] = (long)coro_init;
 #elif defined(_M_IX86)
   ((_JUMP_BUFFER *)&ctx->env)->Eip = (long)coro_init;
-  ((_JUMP_BUFFER *)&ctx->env)->Esp = (long)STACK_ADJUST_PTR (sptr,ssize);
+  ((_JUMP_BUFFER *)&ctx->env)->Esp = (long)STACK_ADJUST_PTR (sptr, ssize);
 #elif defined(_M_AMD64)
   ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64)coro_init;
-  ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64)STACK_ADJUST_PTR (sptr,ssize);
+  ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64)STACK_ADJUST_PTR (sptr, ssize);
 #elif defined(_M_IA64)
   ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64)coro_init;
-  ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64)STACK_ADJUST_PTR (sptr,ssize);
+  ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64)STACK_ADJUST_PTR (sptr, ssize);
 #else
-#error "microsoft libc or architecture not supported"
+# error "microsoft libc or architecture not supported"
 #endif

 # elif CORO_LINUX

   _setjmp (ctx->env);
…
   ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize);

 # elif CORO_ASM

   ctx->sp = (volatile void **)(ssize + (char *)sptr);
+  /* we try to allow for both functions with and without frame pointers */
   *--ctx->sp = (void *)coro_init;
-  *--ctx->sp = (void *)coro_init; // this is needed when the prologue saves ebp
-  ctx->sp -= NUM_CLOBBERED;
+  {
+    void **frame = ctx->sp - 1;
+    int i;
+    for (i = NUM_SAVED; i--; )
+      *--ctx->sp = frame;
+  }

 # endif

   coro_transfer ((coro_context *)create_coro, (coro_context *)new_coro);

+# elif CORO_PTHREAD
+
+  pthread_t id;
+  pthread_attr_t attr;
+  coro_context nctx;
+  struct coro_init_args args;
+  static int once;
+
+  if (!once)
+    {
+      pthread_mutex_lock (&coro_mutex);
+      once = 1;
+    }
+
+  args.func = coro;
+  args.arg = arg;
+  args.self = ctx;
+  args.main = &nctx;
+
+  pthread_attr_init (&attr);
+  pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
+  pthread_attr_setstack (&attr, sptr, (size_t)ssize);
+  pthread_create (&id, &attr, trampoline, &args);
+
+  pthread_cond_init (&args.self->c, 0);
+  coro_transfer (args.main, args.self);
+
 #else
-# error unsupported architecture
+# error unsupported backend
 #endif
 }

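For reference, a minimal usage sketch of the API this patch touches. It is not part of the patch; only coro_create, coro_transfer, coro_context and coro_func come from the code above, while the stack size, identifiers and the ping-pong flow are illustrative assumptions.

/* Minimal usage sketch (illustrative, not part of the patch): run one
   coroutine on a malloc'd stack and transfer control back and forth.
   The 64k stack size and the names coro_body/coctx are assumptions. */
#include <stdio.h>
#include <stdlib.h>
#include "coro.h"

static coro_context mainctx, coctx;   /* mainctx is filled in by the first transfer out of it */

static void
coro_body (void *arg)
{
  printf ("in coroutine: %s\n", (char *)arg);
  coro_transfer (&coctx, &mainctx);   /* yield back to the creator */
}

int
main (void)
{
  long ssize = 65536;
  void *sptr = malloc (ssize);

  /* coro_create saves the creator's state and transfers back immediately,
     so coctx is ready to run but coro_body has not started yet */
  coro_create (&coctx, coro_body, "hello", sptr, ssize);
  coro_transfer (&mainctx, &coctx);   /* run coro_body until it yields */

  return 0;
}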