…
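/* Per-platform fixups for the stack block passed to coro_create:
   STACK_ADJUST_PTR yields the initial stack pointer and STACK_ADJUST_SIZE
   the usable size, since some ABIs want the pointer biased by a few bytes. */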
#if !defined(STACK_ADJUST_PTR)
/* IRIX is decidedly NON-unix */
# if __sgi
# define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
# define STACK_ADJUST_SIZE(sp,ss) ((ss) - 8)
# elif (__i386__ && CORO_LINUX) || (_M_IX86 && CORO_LOSER)
# define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss))
# define STACK_ADJUST_SIZE(sp,ss) (ss)
# elif (__amd64__ && CORO_LINUX) || ((_M_AMD64 || _M_IA64) && CORO_LOSER)
# define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
# define STACK_ADJUST_SIZE(sp,ss) (ss)
# else
# define STACK_ADJUST_PTR(sp,ss) (sp)
# define STACK_ADJUST_SIZE(sp,ss) (ss)
…
# endif

#endif

#if CORO_ASM
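/* Handwritten coro_transfer: a cooperative switch only has to exchange the
   callee-saved registers and the stack pointer, so the whole routine is a
   short assembly stub. */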
asm (
".text\n"
".globl coro_transfer\n"
".type coro_transfer, @function\n"
"coro_transfer:\n"
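/* Save the callee-saved registers, store the old stack pointer through the
   first argument (prev), load the new stack pointer from the second (next),
   then restore the registers and return on the new stack. */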
#if __amd64
# define NUM_SAVED 6
"\tpush %rbp\n"
"\tpush %rbx\n"
"\tpush %r12\n"
"\tpush %r13\n"
"\tpush %r14\n"
"\tpush %r15\n"
"\tmov %rsp, (%rdi)\n"
"\tmov (%rsi), %rsp\n"
"\tpop %r15\n"
"\tpop %r14\n"
"\tpop %r13\n"
"\tpop %r12\n"
"\tpop %rbx\n"
"\tpop %rbp\n"
#elif __i386
# define NUM_SAVED 4
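/* The i386 variant expects prev in %eax and next in %edx; this presumes a
   regparm(2)-style declaration of coro_transfer in the header, which is not
   shown here. */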
"\tpush %ebp\n"
"\tpush %ebx\n"
"\tpush %esi\n"
"\tpush %edi\n"
"\tmov %esp, (%eax)\n"
"\tmov (%edx), %esp\n"
"\tpop %edi\n"
"\tpop %esi\n"
"\tpop %ebx\n"
"\tpop %ebp\n"
#else
# error unsupported architecture
#endif
"\tret\n"
);

#endif

#if CORO_PTHREAD
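/* Emulation via pthreads: each context has its own condition variable and
   all transfers are serialised by the global coro_mutex. */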
struct coro_init_args {
…
  pthread_mutex_unlock (&coro_mutex);

  return 0;
}

asm("");

void coro_transfer(coro_context *prev, coro_context *next)
{
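  /* wake the target context, then block on our own condition variable;
     coro_mutex guards the hand-over */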
  pthread_cond_init (&prev->c, 0);
  pthread_cond_signal (&next->c);
  pthread_cond_wait (&prev->c, &coro_mutex);
…
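  /* ucontext-based setup: point the new context at the adjusted stack block
     and let makecontext arrange for coro (arg) to be called on it */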
  getcontext (&(ctx->uc));

  ctx->uc.uc_link = 0;
  ctx->uc.uc_stack.ss_sp = STACK_ADJUST_PTR (sptr, ssize);
  ctx->uc.uc_stack.ss_size = (size_t)STACK_ADJUST_SIZE (sptr, ssize);
  ctx->uc.uc_stack.ss_flags = 0;

  makecontext (&(ctx->uc), (void (*)()) coro, 1, arg);

#elif CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM
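  /* The remaining variants initialise the context by hand: capture a jmp_buf
     (or craft a raw stack for CORO_ASM) and patch the saved program counter
     to coro_init and the saved stack pointer to just below the top of the
     new stack. */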
…

# elif CORO_LOSER

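  /* Capture a jmp_buf and patch the saved instruction and stack pointers by
     hand: raw slot indices on Cygwin, the _JUMP_BUFFER layout with the
     Microsoft CRT. */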
  setjmp (ctx->env);
#if __CYGWIN__
  ctx->env[7] = (long)((char *)sptr + ssize) - sizeof (long);
  ctx->env[8] = (long)coro_init;
#elif defined(_M_IX86)
  ((_JUMP_BUFFER *)&ctx->env)->Eip = (long)coro_init;
  ((_JUMP_BUFFER *)&ctx->env)->Esp = (long)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
#elif defined(_M_AMD64)
  ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64)coro_init;
  ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
#elif defined(_M_IA64)
  ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64)coro_init;
  ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
#else
# error "microsoft libc or architecture not supported"
#endif

# elif CORO_LINUX

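  /* glibc keeps the saved registers in env[0].__jmpbuf; which slot holds the
     program counter and stack pointer differs between libc flavours, hence
     the cascade of cases below. */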
  _setjmp (ctx->env);
#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (JB_PC) && defined (JB_SP)
  ctx->env[0].__jmpbuf[JB_PC] = (long)coro_init;
  ctx->env[0].__jmpbuf[JB_SP] = (long)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
#elif __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (__mc68000__)
  ctx->env[0].__jmpbuf[0].__aregs[0] = (long int)coro_init;
  ctx->env[0].__jmpbuf[0].__sp = (int *)((char *)sptr + ssize) - sizeof (long);
#elif defined (__GNU_LIBRARY__) && defined (__i386__)
  ctx->env[0].__jmpbuf[0].__pc = (char *)coro_init;
  ctx->env[0].__jmpbuf[0].__sp = (void *)((char *)sptr + ssize) - sizeof (long);
#elif defined (__GNU_LIBRARY__) && defined (__amd64__)
  ctx->env[0].__jmpbuf[JB_PC] = (long)coro_init;
  ctx->env[0].__jmpbuf[JB_RSP] = (long)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
#else
# error "linux libc or architecture not supported"
#endif

# elif CORO_IRIX

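  /* same idea on IRIX, using the JB_PC/JB_SP slots of its jmp_buf */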
  setjmp (ctx->env);
  ctx->env[JB_PC] = (__uint64_t)coro_init;
  ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);

# elif CORO_ASM

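  /* Build the initial stack by hand: a padding word (the address of abort,
     used for alignment only), coro_init as the return address that the final
     "ret" in coro_transfer will pop, and NUM_SAVED dummy slots for the
     registers it pops before that. */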
  ctx->sp = (volatile void **)(ssize + (char *)sptr);
  *--ctx->sp = (void *)abort; /* needed for alignment only */
  *--ctx->sp = (void *)coro_init;
  ctx->sp -= NUM_SAVED;

# endif

  coro_transfer ((coro_context *)create_coro, (coro_context *)new_coro);