/cvs/libcoro/coro.c

Comparing libcoro/coro.c (file contents):
Revision 1.27 by root, Wed May 2 05:53:26 2007 UTC vs.
Revision 1.62 by root, Mon Aug 8 22:00:18 2011 UTC

/*
 * Copyright (c) 2001-2011 Marc Alexander Lehmann <schmorp@schmorp.de>
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 *
 * This library is modelled strictly after Ralf S. Engelschall's article at
 * http://www.gnu.org/software/pth/rse-pmt.ps. So most of the credit must
 * go to Ralf S. Engelschall <rse@engelschall.com>.
 */

#include "coro.h"

#include <string.h>

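/* Illustrative usage sketch (editorial addition, not part of coro.c; guarded
   out of the build with #if 0).  The public API implemented in this file
   boils down to coro_create () and coro_transfer (): a context created with
   a null function pointer stands for the caller itself, and every transfer
   names both the context being left and the one being entered.  The names
   mainctx/coctx/cobody, the stack size and the printf output are arbitrary
   example choices. */
#if 0
#include "coro.h"
#include <stdio.h>
#include <stdlib.h>

static coro_context mainctx, coctx;

static void
cobody (void *arg)
{
  for (;;)
    {
      printf ("hello from the coroutine (%s)\n", (char *)arg);
      coro_transfer (&coctx, &mainctx); /* yield back to the creator */
    }
}

int
main (void)
{
  long ssize = 256 * 1024;
  void *sptr = malloc (ssize); /* error checking omitted */

  coro_create (&mainctx, 0, 0, 0, 0);               /* "empty" context for main () */
  coro_create (&coctx, cobody, "demo", sptr, ssize);

  coro_transfer (&mainctx, &coctx);                 /* run the coroutine once */
  coro_transfer (&mainctx, &coctx);                 /* ...and once more */

  /* tear-down is backend-specific; the pthread backend provides coro_destroy () */
  return 0;
}
#endif
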
/*****************************************************************************/
/* ucontext/setjmp/asm backends */
/*****************************************************************************/
#if CORO_UCONTEXT || CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM

# if CORO_UCONTEXT
#  include <stddef.h>
# endif

# if !defined(STACK_ADJUST_PTR)
#  if __sgi
/* IRIX is decidedly NON-unix */
#   define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
#   define STACK_ADJUST_SIZE(sp,ss) ((ss) - 8)
#  elif (__i386__ && CORO_LINUX) || (_M_IX86 && CORO_LOSER)
#   define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss))
#   define STACK_ADJUST_SIZE(sp,ss) (ss)
#  elif (__amd64__ && CORO_LINUX) || ((_M_AMD64 || _M_IA64) && CORO_LOSER)
#   define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
#   define STACK_ADJUST_SIZE(sp,ss) (ss)
#  else
#   define STACK_ADJUST_PTR(sp,ss) (sp)
#   define STACK_ADJUST_SIZE(sp,ss) (ss)
#  endif
# endif

# include <stdlib.h>

# if CORO_SJLJ
#  include <stdio.h>
#  include <signal.h>
#  include <unistd.h>
# endif

static coro_func coro_init_func;
static void *coro_init_arg;
static coro_context *new_coro, *create_coro;

static void
coro_init (void)
{
  volatile coro_func func = coro_init_func;
  volatile void *arg = coro_init_arg;

  coro_transfer (new_coro, create_coro);

#if __GCC_HAVE_DWARF2_CFI_ASM && __amd64
  asm (".cfi_undefined rip");
#endif

  func ((void *)arg);

  /* the new coro returned. bad. just abort() for now */
  abort ();
}

# if CORO_SJLJ

static volatile int trampoline_done;

/* trampoline signal handler */
static void
trampoline (int sig)
{
  if (coro_setjmp (new_coro->env))
    coro_init (); /* start it */
  else
    trampoline_done = 1;
}

# endif

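/* Editorial note (added commentary, not authoritative): the CORO_SJLJ
   bootstrap in coro_create () below works by installing trampoline () as the
   SIGUSR2 handler with SA_ONSTACK, pointing the signal stack at the new
   coroutine's stack via sigaltstack (), and raising SIGUSR2.  The handler
   therefore runs on the new stack, where coro_setjmp () captures a jump
   buffer before returning; the handshake that first enters that jump buffer
   is described just before the final coro_transfer () in coro_create (). */
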
# if CORO_ASM

  #if _WIN32
    #define CORO_WIN_TIB 1
  #endif

  asm (
       "\t.text\n"
       "\t.globl coro_transfer\n"
       "coro_transfer:\n"
       /* windows, of course, gives a shit on the amd64 ABI and uses different registers */
       /* http://blogs.msdn.com/freik/archive/2005/03/17/398200.aspx */
       #if __amd64
         #ifdef WIN32
           /* TODO: xmm6..15 also would need to be saved. sigh. */
           #define NUM_SAVED 8
           "\tpushq %rsi\n"
           "\tpushq %rdi\n"
           "\tpushq %rbp\n"
           "\tpushq %rbx\n"
           "\tpushq %r12\n"
           "\tpushq %r13\n"
           "\tpushq %r14\n"
           "\tpushq %r15\n"
           #if CORO_WIN_TIB
             "\tpushq %fs:0x0\n"
             "\tpushq %fs:0x8\n"
             "\tpushq %fs:0xc\n"
           #endif
           "\tmovq %rsp, (%rcx)\n"
           "\tmovq (%rdx), %rsp\n"
           #if CORO_WIN_TIB
             "\tpopq %fs:0xc\n"
             "\tpopq %fs:0x8\n"
             "\tpopq %fs:0x0\n"
           #endif
           "\tpopq %r15\n"
           "\tpopq %r14\n"
           "\tpopq %r13\n"
           "\tpopq %r12\n"
           "\tpopq %rbx\n"
           "\tpopq %rbp\n"
           "\tpopq %rdi\n"
           "\tpopq %rsi\n"
         #else
           #define NUM_SAVED 6
           "\tpushq %rbp\n"
           "\tpushq %rbx\n"
           "\tpushq %r12\n"
           "\tpushq %r13\n"
           "\tpushq %r14\n"
           "\tpushq %r15\n"
           "\tmovq %rsp, (%rdi)\n"
           "\tmovq (%rsi), %rsp\n"
           "\tpopq %r15\n"
           "\tpopq %r14\n"
           "\tpopq %r13\n"
           "\tpopq %r12\n"
           "\tpopq %rbx\n"
           "\tpopq %rbp\n"
         #endif
       #elif __i386
         #define NUM_SAVED 4
         "\tpushl %ebp\n"
         "\tpushl %ebx\n"
         "\tpushl %esi\n"
         "\tpushl %edi\n"
         #if CORO_WIN_TIB
           "\tpushl %fs:0\n"
           "\tpushl %fs:4\n"
           "\tpushl %fs:8\n"
         #endif
         "\tmovl %esp, (%eax)\n"
         "\tmovl (%edx), %esp\n"
         #if CORO_WIN_TIB
           "\tpopl %fs:8\n"
           "\tpopl %fs:4\n"
           "\tpopl %fs:0\n"
         #endif
         "\tpopl %edi\n"
         "\tpopl %esi\n"
         "\tpopl %ebx\n"
         "\tpopl %ebp\n"
       #else
         #error unsupported architecture
       #endif
       "\tret\n"
  );

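/* Editorial note (added commentary, not authoritative): the asm above defines
   coro_transfer as an ordinary global function.  It pushes the callee-saved
   registers of whichever calling convention is in effect (SysV vs. Win64,
   hence the two register sets), stores the resulting stack pointer into the
   first context argument, loads the stack pointer of the second, pops the
   same registers from that stack and returns - the final "ret" consumes
   whatever return address the target stack holds.  For a freshly created
   context that address is coro_init, planted by the CORO_ASM branch of
   coro_create () further down; for a suspended context it is simply the
   point where that coroutine last called coro_transfer. */
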
# endif

void
coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, long ssize)
{
  coro_context nctx;
# if CORO_SJLJ
  stack_t ostk, nstk;
  struct sigaction osa, nsa;
  sigset_t nsig, osig;
# endif

  if (!coro)
    return;

  coro_init_func = coro;
  coro_init_arg  = arg;

  new_coro    = ctx;
  create_coro = &nctx;

# if CORO_SJLJ

  /* block SIGUSR2 and install trampoline () as its handler; SA_ONSTACK
     makes the handler run on the alternate stack installed below */
  sigemptyset (&nsig);
  sigaddset (&nsig, SIGUSR2);
  sigprocmask (SIG_BLOCK, &nsig, &osig);

  nsa.sa_handler = trampoline;
  sigemptyset (&nsa.sa_mask);
  nsa.sa_flags = SA_ONSTACK;

  if (sigaction (SIGUSR2, &nsa, &osa))
    {
      perror ("sigaction");
      abort ();
    }

  /* set the new stack */
  nstk.ss_sp = STACK_ADJUST_PTR (sptr, ssize); /* yes, some platforms (IRIX) get this wrong. */
  nstk.ss_size = STACK_ADJUST_SIZE (sptr, ssize);
  nstk.ss_flags = 0;

  if (sigaltstack (&nstk, &ostk) < 0)
    {
      perror ("sigaltstack");
      abort ();
    }

  trampoline_done = 0;
  kill (getpid (), SIGUSR2);
  sigfillset (&nsig); sigdelset (&nsig, SIGUSR2);

  while (!trampoline_done)
    sigsuspend (&nsig);

  sigaltstack (0, &nstk);
  nstk.ss_flags = SS_DISABLE;
  if (sigaltstack (&nstk, 0) < 0)
    {
      perror ("sigaltstack");
      abort ();
    }

  if (~ostk.ss_flags & SS_DISABLE)
    sigaltstack (&ostk, 0);

  sigaction (SIGUSR2, &osa, 0);
  sigprocmask (SIG_SETMASK, &osig, 0);

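  /* Editorial note (added commentary): the CORO_LOSER, CORO_LINUX and
     CORO_IRIX branches below all follow the same pattern: capture a jump
     buffer with coro_setjmp (), then overwrite its saved program-counter
     slot with coro_init and its saved stack-pointer slot with the top of
     the new stack (stacks grow downwards, hence sptr + ssize, less one
     word of slack).  The slot names and indices are libc- and
     architecture-specific, which is why each environment needs its own
     case. */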
# elif CORO_LOSER

  coro_setjmp (ctx->env);
  #if __CYGWIN__ && __i386
    ctx->env[8] = (long) coro_init;
    ctx->env[7] = (long) ((char *)sptr + ssize) - sizeof (long);
  #elif __CYGWIN__ && __x86_64
    ctx->env[7] = (long) coro_init;
    ctx->env[6] = (long) ((char *)sptr + ssize) - sizeof (long);
  #elif defined(__MINGW32__)
    ctx->env[5] = (long) coro_init;
    ctx->env[4] = (long) ((char *)sptr + ssize) - sizeof (long);
  #elif defined(_M_IX86)
    ((_JUMP_BUFFER *)&ctx->env)->Eip = (long) coro_init;
    ((_JUMP_BUFFER *)&ctx->env)->Esp = (long) STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
  #elif defined(_M_AMD64)
    ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64) coro_init;
    ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64);
  #elif defined(_M_IA64)
    ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64) coro_init;
    ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64);
  #else
    #error "microsoft libc or architecture not supported"
  #endif

# elif CORO_LINUX

  coro_setjmp (ctx->env);
  #if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (JB_PC) && defined (JB_SP)
    ctx->env[0].__jmpbuf[JB_PC] = (long) coro_init;
    ctx->env[0].__jmpbuf[JB_SP] = (long) STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
  #elif __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (__mc68000__)
    ctx->env[0].__jmpbuf[0].__aregs[0] = (long int)coro_init;
    ctx->env[0].__jmpbuf[0].__sp = (int *) ((char *)sptr + ssize) - sizeof (long);
  #elif defined (__GNU_LIBRARY__) && defined (__i386__)
    ctx->env[0].__jmpbuf[0].__pc = (char *) coro_init;
    ctx->env[0].__jmpbuf[0].__sp = (void *) ((char *)sptr + ssize) - sizeof (long);
  #elif defined (__GNU_LIBRARY__) && defined (__amd64__)
    ctx->env[0].__jmpbuf[JB_PC] = (long) coro_init;
    ctx->env[0].__jmpbuf[0].__sp = (void *) ((char *)sptr + ssize) - sizeof (long);
  #else
    #error "linux libc or architecture not supported"
  #endif

# elif CORO_IRIX

  coro_setjmp (ctx->env, 0);
  ctx->env[JB_PC] = (__uint64_t)coro_init;
  ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);

# elif CORO_ASM

  ctx->sp = (void **)(ssize + (char *)sptr);
  *--ctx->sp = (void *)abort; /* needed for alignment only */
  *--ctx->sp = (void *)coro_init;

  #if CORO_WIN_TIB
    *--ctx->sp = 0;                    /* ExceptionList */
    *--ctx->sp = (char *)sptr + ssize; /* StackBase */
    *--ctx->sp = sptr;                 /* StackLimit */
  #endif

  ctx->sp -= NUM_SAVED;
  memset (ctx->sp, 0, sizeof (*ctx->sp) * NUM_SAVED);

# elif CORO_UCONTEXT

  getcontext (&(ctx->uc));

  ctx->uc.uc_link = 0;
  ctx->uc.uc_stack.ss_sp = sptr;
  ctx->uc.uc_stack.ss_size = (size_t)ssize;
  ctx->uc.uc_stack.ss_flags = 0;

  makecontext (&(ctx->uc), (void (*)())coro_init, 0);

# endif

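  /* Editorial note (added commentary): every branch above seeds the new
     context so that the first switch into it lands in coro_init ().  The
     transfer below performs that switch right away: coro_init () starts on
     the new stack, immediately transfers back here, and only when the caller
     later transfers into the context again does it invoke the user-supplied
     function. */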
  coro_transfer (create_coro, new_coro);
}

/*****************************************************************************/
/* pthread backend */
/*****************************************************************************/
#elif CORO_PTHREAD

/* this mutex will be locked by the running coroutine */
pthread_mutex_t coro_mutex = PTHREAD_MUTEX_INITIALIZER;

struct coro_init_args
{
  coro_func func;
  void *arg;
  coro_context *self, *main;
};

static pthread_t null_tid;

/* I'd so love to cast pthread_mutex_unlock to void (*)(void *)... */
static void
mutex_unlock_wrapper (void *arg)
{
  pthread_mutex_unlock ((pthread_mutex_t *)arg);
}

static void *
coro_init (void *args_)
{
  struct coro_init_args *args = (struct coro_init_args *)args_;
  coro_func func = args->func;
  void *arg = args->arg;

  pthread_mutex_lock (&coro_mutex);

  /* we try to be good citizens and use deferred cancellation and cleanup handlers */
  pthread_cleanup_push (mutex_unlock_wrapper, &coro_mutex);
  coro_transfer (args->self, args->main);
  func (arg);
  pthread_cleanup_pop (1);

  return 0;
}

void
coro_transfer (coro_context *prev, coro_context *next)
{
  pthread_cond_signal (&next->cv);
  pthread_cond_wait (&prev->cv, &coro_mutex);
#if __FreeBSD__ /* freebsd is of course broken and needs manual testcancel calls... yay... */
  pthread_testcancel ();
#endif
}
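
/* Editorial note (added commentary, not authoritative): in this backend every
   coroutine is backed by a real thread, serialized by coro_mutex and the
   per-context condition variables so that at most one of them runs at a time.
   coro_transfer () wakes the target thread through next->cv and then blocks
   on prev->cv, releasing coro_mutex while it waits.  A coroutine that is no
   longer needed should be reaped with coro_destroy () below, which cancels
   and joins its thread and destroys its condition variable. */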

void
coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, long ssize)
{
  static coro_context nctx;
  static int once;

  if (!once)
    {
      once = 1;

      pthread_mutex_lock (&coro_mutex);
      pthread_cond_init (&nctx.cv, 0);
      null_tid = pthread_self ();
    }

  pthread_cond_init (&ctx->cv, 0);

  if (coro)
    {
      pthread_attr_t attr;
      struct coro_init_args args;

      args.func = coro;
      args.arg = arg;
      args.self = ctx;
      args.main = &nctx;

      pthread_attr_init (&attr);
#if __UCLIBC__
      /* exists, but is borked */
      /*pthread_attr_setstacksize (&attr, (size_t)ssize);*/
#elif __CYGWIN__
      /* POSIX, not here */
      pthread_attr_setstacksize (&attr, (size_t)ssize);
#else
      pthread_attr_setstack (&attr, sptr, (size_t)ssize);
#endif
      pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
      pthread_create (&ctx->id, &attr, coro_init, &args);

      coro_transfer (args.main, args.self);
    }
  else
    ctx->id = null_tid;
}

void
coro_destroy (coro_context *ctx)
{
  if (!pthread_equal (ctx->id, null_tid))
    {
      pthread_cancel (ctx->id);
      pthread_mutex_unlock (&coro_mutex);
      pthread_join (ctx->id, 0);
      pthread_mutex_lock (&coro_mutex);
    }

  pthread_cond_destroy (&ctx->cv);
}

#else
# error unsupported backend
#endif
