/cvs/libcoro/coro.c
Revision: 1.38
Committed: Fri Nov 7 20:12:26 2008 UTC by root
Content type: text/plain
Branch: MAIN
Changes since 1.37: +125 -83 lines
Log Message:
*** empty log message ***

File Contents

/*
 * Copyright (c) 2001-2008 Marc Alexander Lehmann <schmorp@schmorp.de>
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 *
 * This library is modelled strictly after Ralf S. Engelschall's article at
 * http://www.gnu.org/software/pth/rse-pmt.ps. So most of the credit must
 * go to Ralf S. Engelschall <rse@engelschall.com>.
 */

#include "coro.h"

#include <string.h>

#if !defined(STACK_ADJUST_PTR)
/* IRIX is decidedly NON-unix */
# if __sgi
#  define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
#  define STACK_ADJUST_SIZE(sp,ss) ((ss) - 8)
# elif (__i386__ && CORO_LINUX) || (_M_IX86 && CORO_LOSER)
#  define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss))
#  define STACK_ADJUST_SIZE(sp,ss) (ss)
# elif (__amd64__ && CORO_LINUX) || ((_M_AMD64 || _M_IA64) && CORO_LOSER)
#  define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
#  define STACK_ADJUST_SIZE(sp,ss) (ss)
# else
#  define STACK_ADJUST_PTR(sp,ss) (sp)
#  define STACK_ADJUST_SIZE(sp,ss) (ss)
# endif
#endif
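
/*
 * The caller of coro_create passes the base address and size of the stack
 * region.  STACK_ADJUST_PTR/STACK_ADJUST_SIZE convert that pair into whatever
 * the backend selected below expects: the plain base address for
 * sigaltstack/makecontext-style interfaces, or the (downward-growing) top of
 * the region, less a little platform-specific slack, where a raw stack
 * pointer is patched into a saved context.
 */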

#if CORO_UCONTEXT
# include <stddef.h>
#endif

#if CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM

# include <stdlib.h>

# if CORO_SJLJ
#  include <stdio.h>
#  include <signal.h>
#  include <unistd.h>
# endif

static volatile coro_func coro_init_func;
static volatile void *coro_init_arg;
static volatile coro_context *new_coro, *create_coro;

/* what we really want to detect here is whether we use a new-enough version of GAS */
/* instead, check for gcc 3, ELF and GNU/Linux and hope for the best */
# if __GNUC__ >= 3 && __ELF__ && __linux__
#  define HAVE_CFI 1
# endif

static void
coro_init (void)
{
  volatile coro_func func = coro_init_func;
  volatile void *arg = coro_init_arg;

  coro_transfer ((coro_context *)new_coro, (coro_context *)create_coro);

  func ((void *)arg);

  /* the new coro returned. bad. just abort() for now */
  abort ();
}
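
/*
 * Start-up handshake shared by the backends in this block: coro_create
 * stashes the function and argument in the globals above, arranges for the
 * new context to start in coro_init, and transfers into it exactly once.
 * coro_init copies the globals into locals that live on the new stack, then
 * immediately transfers back to the creator; only on the next transfer into
 * the coroutine does func (arg) actually start running.
 */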

# if CORO_SJLJ

static volatile int trampoline_count;

/* trampoline signal handler */
static void
trampoline (int sig)
{
  if (setjmp (((coro_context *)new_coro)->env))
    {
#  if HAVE_CFI
      asm (".cfi_startproc");
#  endif
      coro_init (); /* start it */
#  if HAVE_CFI
      asm (".cfi_endproc");
#  endif
    }
  else
    trampoline_count++;
}
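
/*
 * This is Ralf Engelschall's sigaltstack trick: the handler runs on the
 * alternate signal stack, which coro_create has pointed at the coroutine's
 * stack, so the setjmp above captures a context that lives on that stack.
 * The handler then returns normally; the saved context is only re-entered
 * later, when coro_transfer (for this backend presumably a setjmp/longjmp
 * pair defined in coro.h) jumps back into it and execution falls into
 * coro_init.
 */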

# endif

#endif

#if CORO_ASM

asm (
  ".text\n"
  ".globl coro_transfer\n"
  ".type coro_transfer, @function\n"
  "coro_transfer:\n"
# if __amd64
#  define NUM_SAVED 6
  "\tpush %rbp\n"
  "\tpush %rbx\n"
  "\tpush %r12\n"
  "\tpush %r13\n"
  "\tpush %r14\n"
  "\tpush %r15\n"
  "\tmov %rsp, (%rdi)\n"
  "\tmov (%rsi), %rsp\n"
  "\tpop %r15\n"
  "\tpop %r14\n"
  "\tpop %r13\n"
  "\tpop %r12\n"
  "\tpop %rbx\n"
  "\tpop %rbp\n"
# elif __i386
#  define NUM_SAVED 4
  "\tpush %ebp\n"
  "\tpush %ebx\n"
  "\tpush %esi\n"
  "\tpush %edi\n"
  "\tmov %esp, (%eax)\n"
  "\tmov (%edx), %esp\n"
  "\tpop %edi\n"
  "\tpop %esi\n"
  "\tpop %ebx\n"
  "\tpop %ebp\n"
# else
#  error unsupported architecture
# endif
  "\tret\n"
);
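
/*
 * coro_transfer (prev, next) in assembly: the first two argument registers
 * hold the two context pointers (%rdi/%rsi in the SysV amd64 ABI; on i386
 * the use of %eax/%edx implies a register-argument calling convention such
 * as regparm, presumably arranged by the prototype in coro.h).  All
 * callee-saved registers are pushed, the current stack pointer is stored
 * into prev's sp slot and the new one loaded from next's, the other
 * coroutine's registers are popped, and "ret" resumes at whatever return
 * address that stack holds - coro_init on the very first transfer into a
 * freshly created coroutine.
 */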

#endif

#if CORO_PTHREAD

/* this mutex will be locked by the running coroutine */
pthread_mutex_t coro_mutex = PTHREAD_MUTEX_INITIALIZER;

struct coro_init_args
{
  coro_func func;
  void *arg;
  coro_context *self, *main;
};

static pthread_t null_tid;

/* I'd so love to cast pthread_mutex_unlock to void (*)(void *)... */
static void
mutex_unlock_wrapper (void *arg)
{
  pthread_mutex_unlock ((pthread_mutex_t *)arg);
}

static void *
trampoline (void *args_)
{
  struct coro_init_args *args = (struct coro_init_args *)args_;
  coro_func func = args->func;
  void *arg = args->arg;

  pthread_mutex_lock (&coro_mutex);

  /* we try to be good citizens and use deferred cancellation and cleanup handlers */
  pthread_cleanup_push (mutex_unlock_wrapper, &coro_mutex);
  coro_transfer (args->self, args->main);
  func (arg);
  pthread_cleanup_pop (1);

  return 0;
}

void
coro_transfer (coro_context *prev, coro_context *next)
{
  pthread_cond_signal (&next->cv);
  pthread_cond_wait (&prev->cv, &coro_mutex);
}
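
/*
 * With the pthread backend every coroutine is a thread, but only one of them
 * runs at a time: the running coroutine holds coro_mutex.  A transfer signals
 * the target's condition variable and then waits on its own, which atomically
 * releases coro_mutex for the target to re-acquire inside its own wait.
 */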

void
coro_destroy (coro_context *ctx)
{
  if (!pthread_equal (ctx->id, null_tid))
    {
      pthread_cancel (ctx->id);
      pthread_mutex_unlock (&coro_mutex);
      pthread_join (ctx->id, 0);
      pthread_mutex_lock (&coro_mutex);
    }

  pthread_cond_destroy (&ctx->cv);
}

#endif

/* initialize a machine state */
void
coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, long ssize)
{
#if CORO_UCONTEXT

  if (!coro)
    return;

  getcontext (&(ctx->uc));

  ctx->uc.uc_link           = 0;
  ctx->uc.uc_stack.ss_sp    = STACK_ADJUST_PTR (sptr,ssize);
  ctx->uc.uc_stack.ss_size  = (size_t)STACK_ADJUST_SIZE (sptr,ssize);
  ctx->uc.uc_stack.ss_flags = 0;

  makecontext (&(ctx->uc), (void (*)())coro, 1, arg);
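
  /*
   * With uc_link set to 0 there is nothing to return to, so the coroutine
   * function must never return.  Note that POSIX only specifies int
   * arguments for makecontext; passing the void * arg this way is relied
   * upon to work on the platforms this backend is used on.
   */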

#elif CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM

# if CORO_SJLJ
  stack_t ostk, nstk;
  struct sigaction osa, nsa;
  sigset_t nsig, osig;
# endif
  coro_context nctx;

  if (!coro)
    return;

  coro_init_func = coro;
  coro_init_arg  = arg;

  new_coro    = ctx;
  create_coro = &nctx;

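  /*
   * Each branch below uses a different trick to point a fresh context at
   * coro_init on the caller-supplied stack: a signal delivered on an
   * alternate stack (SJLJ), rewriting the saved PC/SP slots of a jmp_buf
   * (LOSER/LINUX/IRIX), or hand-crafting the stack frame that the assembly
   * coro_transfer expects (ASM).
   */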
# if CORO_SJLJ
  /* we use SIGUSR2. first block it, then fiddle with it. */

  sigemptyset (&nsig);
  sigaddset (&nsig, SIGUSR2);
  sigprocmask (SIG_BLOCK, &nsig, &osig);

  nsa.sa_handler = trampoline;
  sigemptyset (&nsa.sa_mask);
  nsa.sa_flags = SA_ONSTACK;

  if (sigaction (SIGUSR2, &nsa, &osa))
    {
      perror ("sigaction");
      abort ();
    }

  /* set the new stack */
  nstk.ss_sp    = STACK_ADJUST_PTR (sptr,ssize); /* yes, some platforms (IRIX) get this wrong. */
  nstk.ss_size  = STACK_ADJUST_SIZE (sptr,ssize);
  nstk.ss_flags = 0;

  if (sigaltstack (&nstk, &ostk) < 0)
    {
      perror ("sigaltstack");
      abort ();
    }

  trampoline_count = 0;
  kill (getpid (), SIGUSR2);
  sigfillset (&nsig); sigdelset (&nsig, SIGUSR2);

  while (!trampoline_count)
    sigsuspend (&nsig);

  sigaltstack (0, &nstk);
  nstk.ss_flags = SS_DISABLE;
  if (sigaltstack (&nstk, 0) < 0)
    perror ("sigaltstack");

  sigaltstack (0, &nstk);
  if (~nstk.ss_flags & SS_DISABLE)
    abort ();

  if (~ostk.ss_flags & SS_DISABLE)
    sigaltstack (&ostk, 0);

  sigaction (SIGUSR2, &osa, 0);

  sigprocmask (SIG_SETMASK, &osig, 0);
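
  /*
   * At this point SIGUSR2 has been delivered once on the alternate stack,
   * the trampoline has setjmp'd a context there, and everything touched
   * above (alternate stack, handler, signal mask) has been restored.  The
   * new context only needs to be entered via the coro_transfer further down.
   */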

# elif CORO_LOSER

  setjmp (ctx->env);
#if __CYGWIN__
  ctx->env[7] = (long)((char *)sptr + ssize) - sizeof (long);
  ctx->env[8] = (long)coro_init;
#elif defined(_M_IX86)
  ((_JUMP_BUFFER *)&ctx->env)->Eip = (long)coro_init;
  ((_JUMP_BUFFER *)&ctx->env)->Esp = (long)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
#elif defined(_M_AMD64)
  ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64)coro_init;
  ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
#elif defined(_M_IA64)
  ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64)coro_init;
  ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
#else
# error "microsoft libc or architecture not supported"
#endif

# elif CORO_LINUX

  _setjmp (ctx->env);
#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (JB_PC) && defined (JB_SP)
  ctx->env[0].__jmpbuf[JB_PC] = (long)coro_init;
  ctx->env[0].__jmpbuf[JB_SP] = (long)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
#elif __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (__mc68000__)
  ctx->env[0].__jmpbuf[0].__aregs[0] = (long int)coro_init;
  ctx->env[0].__jmpbuf[0].__sp = (int *)((char *)sptr + ssize) - sizeof (long);
#elif defined (__GNU_LIBRARY__) && defined (__i386__)
  ctx->env[0].__jmpbuf[0].__pc = (char *)coro_init;
  ctx->env[0].__jmpbuf[0].__sp = (void *)((char *)sptr + ssize) - sizeof (long);
#elif defined (__GNU_LIBRARY__) && defined (__amd64__)
  ctx->env[0].__jmpbuf[JB_PC] = (long)coro_init;
  ctx->env[0].__jmpbuf[0].__sp = (void *)((char *)sptr + ssize) - sizeof (long);
#else
# error "linux libc or architecture not supported"
#endif

# elif CORO_IRIX

  setjmp (ctx->env);
  ctx->env[JB_PC] = (__uint64_t)coro_init;
  ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);

# elif CORO_ASM

  ctx->sp = (volatile void **)(ssize + (char *)sptr);
  *--ctx->sp = (void *)abort; /* needed for alignment only */
  *--ctx->sp = (void *)coro_init;
  ctx->sp -= NUM_SAVED;
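
  /*
   * Hand-crafted initial stack for the assembly coro_transfer: from the top
   * of the region downwards there is a dummy "return address" (abort, kept
   * for alignment only), then coro_init as the address the final "ret" will
   * jump to, then NUM_SAVED empty slots for the register pops to consume on
   * the first switch into this context.
   */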

# endif

  coro_transfer ((coro_context *)create_coro, (coro_context *)new_coro);

#elif CORO_PTHREAD

  static coro_context nctx;
  static int once;

  if (!once)
    {
      once = 1;

      pthread_mutex_lock (&coro_mutex);
      pthread_cond_init (&nctx.cv, 0);
      null_tid = pthread_self ();
    }
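
  /*
   * One-time setup: the creating thread adopts the role of the currently
   * running coroutine by taking coro_mutex, nctx.cv becomes the slot it
   * waits on while freshly created threads start up, and its thread id is
   * remembered as null_tid so coro_destroy can recognise contexts that were
   * never given a thread of their own.
   */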

  pthread_cond_init (&ctx->cv, 0);

  if (coro)
    {
      pthread_attr_t attr;
      struct coro_init_args args;

      args.func = coro;
      args.arg  = arg;
      args.self = ctx;
      args.main = &nctx;

      pthread_attr_init (&attr);
      pthread_attr_setstack (&attr, sptr, (size_t)ssize);
      pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
      pthread_create (&ctx->id, &attr, trampoline, &args);

      coro_transfer (args.main, args.self);
    }
  else
    ctx->id = null_tid;

#else
# error unsupported backend
#endif
}
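
/*
 * Illustrative usage sketch (not part of the library; the names my_coro and
 * the 64k stack below are arbitrary).  A context for the creating thread is
 * set up by passing a null coroutine function, after which control
 * ping-pongs between the two contexts via coro_transfer:
 *
 *   static coro_context mainctx, coroctx;
 *
 *   static void
 *   my_coro (void *arg)
 *   {
 *     for (;;)
 *       coro_transfer (&coroctx, &mainctx);  // yield back to the creator
 *   }
 *
 *   int
 *   main (void)
 *   {
 *     static char stack[65536];
 *
 *     coro_create (&mainctx, 0, 0, 0, 0);  // context for the current thread
 *     coro_create (&coroctx, my_coro, 0, stack, sizeof (stack));
 *
 *     coro_transfer (&mainctx, &coroctx);  // run my_coro up to its first yield
 *     coro_transfer (&mainctx, &coroctx);  // ...and once more
 *
 *     return 0;
 *   }
 */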