/*
 * Copyright (c) 2001-2006 Marc Alexander Lehmann <schmorp@schmorp.de>
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
… | |
… | |
38 | # define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8) |
38 | # define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8) |
39 | # define STACK_ADJUST_SIZE(sp,ss) ((ss) - 8) |
39 | # define STACK_ADJUST_SIZE(sp,ss) ((ss) - 8) |
40 | # elif __i386__ && CORO_LINUX |
40 | # elif __i386__ && CORO_LINUX |
41 | # define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss)) |
41 | # define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss)) |
42 | # define STACK_ADJUST_SIZE(sp,ss) (ss) |
42 | # define STACK_ADJUST_SIZE(sp,ss) (ss) |
|
|
43 | # elif __amd64__ && CORO_LINUX |
|
|
44 | # define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8) |
|
|
45 | # define STACK_ADJUST_SIZE(sp,ss) (ss) |
43 | # else |
46 | # else |
44 | # define STACK_ADJUST_PTR(sp,ss) (sp) |
47 | # define STACK_ADJUST_PTR(sp,ss) (sp) |
45 | # define STACK_ADJUST_SIZE(sp,ss) (ss) |
48 | # define STACK_ADJUST_SIZE(sp,ss) (ss) |
46 | # endif |
49 | # endif |
47 | #endif |
50 | #endif |
48 | |
51 | |
|
|
/* the ucontext backend needs size_t for the stack-size field */
#if CORO_UCONTEXT
# include <stddef.h>
#endif

49 | #if CORO_SJLJ || CORO_LOOSE || CORO_LINUX || CORO_IRIX |
56 | #if CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM |
50 | |
57 | |
|
|
58 | #include <stdlib.h> |
|
|
59 | |
|
|
60 | #if CORO_SJLJ |
|
|
61 | # include <stdio.h> |
51 | #include <signal.h> |
62 | # include <signal.h> |
|
|
63 | # include <unistd.h> |
|
|
64 | #endif |
52 | |
65 | |
53 | static volatile coro_func coro_init_func; |
66 | static volatile coro_func coro_init_func; |
54 | static volatile void *coro_init_arg; |
67 | static volatile void *coro_init_arg; |
55 | static volatile coro_context *new_coro, *create_coro; |
68 | static volatile coro_context *new_coro, *create_coro; |
56 | |
69 | |
|
|
70 | /* what we really want to detect here is wether we use a new-enough version of GAS */ |
|
|
71 | /* instead, check for gcc 3 and ELF and hope for the best */ |
|
|
72 | #if __GNUC__ >= 3 && __ELF__ |
|
|
73 | # define HAVE_CFI 1 |
|
|
74 | #endif |
|
|
75 | |
57 | static void |
76 | static void |
58 | coro_init (void) |
77 | coro_init (void) |
59 | { |
78 | { |
60 | volatile coro_func func = coro_init_func; |
79 | volatile coro_func func = coro_init_func; |
61 | volatile void *arg = coro_init_arg; |
80 | volatile void *arg = coro_init_arg; |
… | |
… | |
72 | |
91 | |
73 | static volatile int trampoline_count; |
92 | static volatile int trampoline_count; |
74 | |
93 | |
75 | /* trampoline signal handler */ |
94 | /* trampoline signal handler */ |
76 | static void |
95 | static void |
77 | trampoline(int sig) |
96 | trampoline (int sig) |
78 | { |
97 | { |
79 | if (setjmp (((coro_context *)new_coro)->env)) |
98 | if (setjmp (((coro_context *)new_coro)->env)) |
|
|
99 | { |
|
|
100 | #if HAVE_CFI |
|
|
101 | asm (".cfi_startproc"); |
|
|
102 | #endif |
80 | coro_init (); /* start it */ |
103 | coro_init (); /* start it */ |
|
|
104 | #if HAVE_CFI |
|
|
105 | asm (".cfi_endproc"); |
|
|
106 | #endif |
|
|
107 | } |
81 | else |
108 | else |
82 | trampoline_count++; |
109 | trampoline_count++; |
83 | } |
110 | } |
84 | |
111 | |
85 | # endif |
112 | # endif |
86 | |
113 | |
87 | #endif |
114 | #endif |
88 | |
115 | |
|
|
#if CORO_ASM
/* hand-rolled context switch: push the callee-saved registers onto the
 * current stack, swap the stack pointers through the context structs,
 * then pop the destination context's callee-saved registers.
 * NUM_CLOBBERED is the number of registers pushed here; coro_create
 * must reserve the same number of slots when priming a fresh stack.
 * NOTE(review): __fastcall__ keeps the two arguments in registers on
 * i386; on amd64 the attribute is effectively a no-op — confirm the
 * intended calling convention on non-x86 ports. */
void __attribute__ ((__noinline__, __fastcall__))
coro_transfer (struct coro_context *prev, struct coro_context *next)
{
  asm volatile (
#if __amd64
# define NUM_CLOBBERED 5
     "push %%rbx\n\t"
     "push %%r12\n\t"
     "push %%r13\n\t"
     "push %%r14\n\t"
     "push %%r15\n\t"
     "mov %%rsp, %0\n\t" /* prev->sp = %rsp */
     "mov %1, %%rsp\n\t" /* %rsp = next->sp */
     "pop %%r15\n\t"
     "pop %%r14\n\t"
     "pop %%r13\n\t"
     "pop %%r12\n\t"
     "pop %%rbx\n\t"
#elif __i386
# define NUM_CLOBBERED 4
     "push %%ebx\n\t"
     "push %%esi\n\t"
     "push %%edi\n\t"
     "push %%ebp\n\t"
     "mov %%esp, %0\n\t" /* prev->sp = %esp */
     "mov %1, %%esp\n\t" /* %esp = next->sp */
     "pop %%ebp\n\t"
     "pop %%edi\n\t"
     "pop %%esi\n\t"
     "pop %%ebx\n\t"
#else
# error unsupported architecture
#endif
     : "=m" (prev->sp)
     : "m" (next->sp)
  );
}
#endif
|
|
155 | |
89 | /* initialize a machine state */ |
156 | /* initialize a machine state */ |
90 | void coro_create(coro_context *ctx, |
157 | void coro_create (coro_context *ctx, |
91 | coro_func coro, void *arg, |
158 | coro_func coro, void *arg, |
92 | void *sptr, long ssize) |
159 | void *sptr, long ssize) |
93 | { |
160 | { |
94 | #if CORO_UCONTEXT |
161 | #if CORO_UCONTEXT |
95 | |
162 | |
96 | getcontext (&(ctx->uc)); |
163 | getcontext (&(ctx->uc)); |
97 | |
164 | |
… | |
… | |
100 | ctx->uc.uc_stack.ss_size = (size_t) STACK_ADJUST_SIZE (sptr,ssize); |
167 | ctx->uc.uc_stack.ss_size = (size_t) STACK_ADJUST_SIZE (sptr,ssize); |
101 | ctx->uc.uc_stack.ss_flags = 0; |
168 | ctx->uc.uc_stack.ss_flags = 0; |
102 | |
169 | |
103 | makecontext (&(ctx->uc), (void (*)()) coro, 1, arg); |
170 | makecontext (&(ctx->uc), (void (*)()) coro, 1, arg); |
104 | |
171 | |
105 | #elif CORO_SJLJ || CORO_LOOSE || CORO_LINUX || CORO_IRIX |
172 | #elif CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM |
106 | |
173 | |
107 | # if CORO_SJLJ |
174 | # if CORO_SJLJ |
108 | stack_t ostk, nstk; |
175 | stack_t ostk, nstk; |
109 | struct sigaction osa, nsa; |
176 | struct sigaction osa, nsa; |
110 | sigset_t nsig, osig; |
177 | sigset_t nsig, osig; |
… | |
… | |
127 | nsa.sa_handler = trampoline; |
194 | nsa.sa_handler = trampoline; |
128 | sigemptyset (&nsa.sa_mask); |
195 | sigemptyset (&nsa.sa_mask); |
129 | nsa.sa_flags = SA_ONSTACK; |
196 | nsa.sa_flags = SA_ONSTACK; |
130 | |
197 | |
131 | if (sigaction (SIGUSR2, &nsa, &osa)) |
198 | if (sigaction (SIGUSR2, &nsa, &osa)) |
|
|
199 | { |
132 | perror ("sigaction"); |
200 | perror ("sigaction"); |
|
|
201 | abort (); |
|
|
202 | } |
133 | |
203 | |
134 | /* set the new stack */ |
204 | /* set the new stack */ |
135 | nstk.ss_sp = STACK_ADJUST_PTR (sptr,ssize); /* yes, some platforms (IRIX) get this wrong. */ |
205 | nstk.ss_sp = STACK_ADJUST_PTR (sptr,ssize); /* yes, some platforms (IRIX) get this wrong. */ |
136 | nstk.ss_size = STACK_ADJUST_SIZE (sptr,ssize); |
206 | nstk.ss_size = STACK_ADJUST_SIZE (sptr,ssize); |
137 | nstk.ss_flags = 0; |
207 | nstk.ss_flags = 0; |
138 | |
208 | |
139 | if (sigaltstack (&nstk, &ostk) < 0) |
209 | if (sigaltstack (&nstk, &ostk) < 0) |
|
|
210 | { |
140 | perror ("sigaltstack"); |
211 | perror ("sigaltstack"); |
|
|
212 | abort (); |
|
|
213 | } |
141 | |
214 | |
142 | trampoline_count = 0; |
215 | trampoline_count = 0; |
143 | kill (getpid (), SIGUSR2); |
216 | kill (getpid (), SIGUSR2); |
144 | sigfillset (&nsig); sigdelset (&nsig, SIGUSR2); |
217 | sigfillset (&nsig); sigdelset (&nsig, SIGUSR2); |
145 | |
218 | |
… | |
… | |
156 | abort (); |
229 | abort (); |
157 | |
230 | |
158 | if (~ostk.ss_flags & SS_DISABLE) |
231 | if (~ostk.ss_flags & SS_DISABLE) |
159 | sigaltstack (&ostk, 0); |
232 | sigaltstack (&ostk, 0); |
160 | |
233 | |
161 | sigaction (SIGUSR1, &osa, 0); |
234 | sigaction (SIGUSR2, &osa, 0); |
162 | |
235 | |
163 | sigprocmask (SIG_SETMASK, &osig, 0); |
236 | sigprocmask (SIG_SETMASK, &osig, 0); |
164 | |
237 | |
165 | # elif CORO_LOOSE |
238 | # elif CORO_LOSER |
166 | |
239 | |
167 | setjmp (ctx->env); |
240 | setjmp (ctx->env); |
|
|
241 | #if __CYGWIN__ |
168 | ctx->env[7] = (long)((char *)sptr + ssize); |
242 | ctx->env[7] = (long)((char *)sptr + ssize); |
169 | ctx->env[8] = (long)coro_init; |
243 | ctx->env[8] = (long)coro_init; |
|
|
244 | #elif defined(_M_IX86) |
|
|
245 | ((_JUMP_BUFFER *)&ctx->env)->Eip = (long)coro_init; |
|
|
246 | ((_JUMP_BUFFER *)&ctx->env)->Esp = (long)STACK_ADJUST_PTR (sptr,ssize); |
|
|
247 | #elif defined(_M_AMD64) |
|
|
248 | ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64)coro_init; |
|
|
249 | ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64)STACK_ADJUST_PTR (sptr,ssize); |
|
|
250 | #elif defined(_M_IA64) |
|
|
251 | ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64)coro_init; |
|
|
252 | ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64)STACK_ADJUST_PTR (sptr,ssize); |
|
|
253 | #else |
|
|
254 | #error "microsoft libc or architecture not supported" |
|
|
255 | #endif |
170 | |
256 | |
171 | # elif CORO_LINUX |
257 | # elif CORO_LINUX |
172 | |
258 | |
173 | setjmp (ctx->env); |
259 | _setjmp (ctx->env); |
174 | #if defined(__GLIBC__) && defined(__GLIBC_MINOR__) \ |
|
|
175 | && __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined(JB_PC) && defined(JB_SP) |
260 | #if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (JB_PC) && defined (JB_SP) |
176 | ctx->env[0].__jmpbuf[JB_PC] = (long)coro_init; |
261 | ctx->env[0].__jmpbuf[JB_PC] = (long)coro_init; |
177 | ctx->env[0].__jmpbuf[JB_SP] = (long)STACK_ADJUST_PTR (sptr,ssize); |
262 | ctx->env[0].__jmpbuf[JB_SP] = (long)STACK_ADJUST_PTR (sptr, ssize); |
178 | #elif defined(__GLIBC__) && defined(__GLIBC_MINOR__) \ |
|
|
179 | && __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined(__mc68000__) |
263 | #elif __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (__mc68000__) |
180 | ctx->env[0].__jmpbuf[0].__aregs[0] = (long int)coro_init; |
264 | ctx->env[0].__jmpbuf[0].__aregs[0] = (long int)coro_init; |
181 | ctx->env[0].__jmpbuf[0].__sp = (int *)((char *)sptr + ssize); |
265 | ctx->env[0].__jmpbuf[0].__sp = (int *)((char *)sptr + ssize); |
182 | #elif defined(__GNU_LIBRARY__) && defined(__i386__) |
266 | #elif defined (__GNU_LIBRARY__) && defined (__i386__) |
183 | ctx->env[0].__jmpbuf[0].__pc = (char *)coro_init; |
267 | ctx->env[0].__jmpbuf[0].__pc = (char *)coro_init; |
184 | ctx->env[0].__jmpbuf[0].__sp = (void *)((char *)sptr + ssize); |
268 | ctx->env[0].__jmpbuf[0].__sp = (void *)((char *)sptr + ssize); |
185 | #elif defined(__GNU_LIBRARY__) && defined(__amd64__) |
269 | #elif defined (__GNU_LIBRARY__) && defined (__amd64__) |
186 | ctx->env[0].__jmpbuf[JB_PC] = (long)coro_init; |
270 | ctx->env[0].__jmpbuf[JB_PC] = (long)coro_init; |
187 | ctx->env[0].__jmpbuf[JB_RSP] = (long)((char *)sptr + ssize); |
271 | ctx->env[0].__jmpbuf[JB_RSP] = (long)STACK_ADJUST_PTR (sptr, ssize); |
188 | #else |
272 | #else |
189 | #error "linux libc or architecture not supported" |
273 | # error "linux libc or architecture not supported" |
190 | #endif |
274 | #endif |
191 | |
275 | |
192 | # elif CORO_IRIX |
276 | # elif CORO_IRIX |
193 | |
277 | |
194 | setjmp (ctx->env); |
278 | setjmp (ctx->env); |
195 | ctx->env[JB_PC] = (__uint64_t)coro_init; |
279 | ctx->env[JB_PC] = (__uint64_t)coro_init; |
196 | ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr,ssize); |
280 | ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize); |
|
|
281 | |
|
|
282 | # elif CORO_ASM |
|
|
283 | |
|
|
284 | ctx->sp = (volatile void **)(ssize + (char *)sptr); |
|
|
285 | *--ctx->sp = (void *)coro_init; |
|
|
286 | *--ctx->sp = (void *)coro_init; // this is needed when the prologue saves ebp |
|
|
287 | ctx->sp -= NUM_CLOBBERED; |
197 | |
288 | |
198 | # endif |
289 | # endif |
199 | |
290 | |
200 | coro_transfer ((coro_context *)create_coro, (coro_context *)new_coro); |
291 | coro_transfer ((coro_context *)create_coro, (coro_context *)new_coro); |
201 | |
292 | |
202 | #else |
293 | #else |
203 | error unsupported architecture |
294 | # error unsupported architecture |
204 | #endif |
295 | #endif |
205 | } |
296 | } |
206 | |
297 | |