/cvs/libcoro/coro.c

Comparing libcoro/coro.c (file contents):
Revision 1.38 by root, Fri Nov 7 20:12:26 2008 UTC vs.
Revision 1.66 by root, Fri Dec 7 14:21:09 2012 UTC
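
coro.c implements the coroutine context type and the two primitives declared in coro.h: coro_create, which sets up a context on a caller-supplied stack, and coro_transfer, which switches between contexts. It does so on top of one of several selectable backends (ucontext, setjmp/longjmp, handwritten assembly, pthreads, Windows fibers). As a reading aid, a minimal sketch of how that API is normally driven follows; the worker function, the static 64 KiB stack and the two transfers are invented for the example, and real code would usually obtain its stack from coro_stack_alloc (added at the end of this revision) or another suitably aligned allocation.

#include "coro.h"

static coro_context mainctx, workctx;
static char stack[65536]; /* illustration only, see coro_stack_alloc below */

static void
worker (void *arg)
{
  /* a coroutine must transfer away instead of returning: with the
     setjmp/asm/ucontext backends in this file, returning reaches the
     abort () in coro_init () */
  for (;;)
    coro_transfer (&workctx, &mainctx);
}

int
main (void)
{
  coro_create (&mainctx, 0, 0, 0, 0);   /* empty context for the current flow */
  coro_create (&workctx, worker, 0, stack, sizeof (stack));

  coro_transfer (&mainctx, &workctx);   /* run worker until it transfers back */
  coro_transfer (&mainctx, &workctx);

  return 0;
}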

 /*
- * Copyright (c) 2001-2008 Marc Alexander Lehmann <schmorp@schmorp.de>
+ * Copyright (c) 2001-2011 Marc Alexander Lehmann <schmorp@schmorp.de>
  *
  * Redistribution and use in source and binary forms, with or without modifica-
  * tion, are permitted provided that the following conditions are met:
  *
  * 1. Redistributions of source code must retain the above copyright notice,
  *    this list of conditions and the following disclaimer.
  *
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
  * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
  * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
  * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
@@ -38 +38 @@
  * go to Ralf S. Engelschall <rse@engelschall.com>.
  */

 #include "coro.h"

+#include <stddef.h>
 #include <string.h>

+/*****************************************************************************/
+/* ucontext/setjmp/asm backends */
+/*****************************************************************************/
+#if CORO_UCONTEXT || CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM
+
+# if CORO_UCONTEXT
+#  include <stddef.h>
+# endif
+
-#if !defined(STACK_ADJUST_PTR)
+# if !defined(STACK_ADJUST_PTR)
+#  if __sgi
 /* IRIX is decidedly NON-unix */
-# if __sgi
 #  define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
 #  define STACK_ADJUST_SIZE(sp,ss) ((ss) - 8)
 #  elif (__i386__ && CORO_LINUX) || (_M_IX86 && CORO_LOSER)
 #  define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss))
 #  define STACK_ADJUST_SIZE(sp,ss) (ss)
 #  elif (__amd64__ && CORO_LINUX) || ((_M_AMD64 || _M_IA64) && CORO_LOSER)
 #  define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
 #  define STACK_ADJUST_SIZE(sp,ss) (ss)
 #  else
 #  define STACK_ADJUST_PTR(sp,ss) (sp)
 #  define STACK_ADJUST_SIZE(sp,ss) (ss)
 #  endif
-#endif
+# endif
-
-#if CORO_UCONTEXT
-# include <stddef.h>
-#endif
-
-#if CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM

 # include <stdlib.h>

 # if CORO_SJLJ
 #  include <stdio.h>
 #  include <signal.h>
 #  include <unistd.h>
 # endif

-static volatile coro_func coro_init_func;
+static coro_func coro_init_func;
-static volatile void *coro_init_arg;
+static void *coro_init_arg;
-static volatile coro_context *new_coro, *create_coro;
+static coro_context *new_coro, *create_coro;
-
-/* what we really want to detect here is wether we use a new-enough version of GAS */
-/* instead, check for gcc 3, ELF and GNU/Linux and hope for the best */
-# if __GNUC__ >= 3 && __ELF__ && __linux__
-#  define HAVE_CFI 1
-# endif

 static void
 coro_init (void)
 {
   volatile coro_func func = coro_init_func;
   volatile void *arg = coro_init_arg;

-  coro_transfer ((coro_context *)new_coro, (coro_context *)create_coro);
+  coro_transfer (new_coro, create_coro);
+
+#if __GCC_HAVE_DWARF2_CFI_ASM && __amd64
+  asm (".cfi_undefined rip");
+#endif

   func ((void *)arg);

   /* the new coro returned. bad. just abort() for now */
   abort ();
 }

 # if CORO_SJLJ

-static volatile int trampoline_count;
+static volatile int trampoline_done;

 /* trampoline signal handler */
 static void
 trampoline (int sig)
 {
-  if (setjmp (((coro_context *)new_coro)->env))
+  if (coro_setjmp (new_coro->env))
+    coro_init (); /* start it */
+  else
+    trampoline_done = 1;
+}
+
+# endif
+
+# if CORO_ASM
+
+  #if _WIN32 || __CYGWIN__
+    #define CORO_WIN_TIB 1
+  #endif
+
+  asm (
+     "\t.text\n"
+     #if _WIN32 || __CYGWIN__
+     "\t.globl _coro_transfer\n"
+     "_coro_transfer:\n"
+     #else
+     "\t.globl coro_transfer\n"
+     "coro_transfer:\n"
+     #endif
+     /* windows, of course, gives a shit on the amd64 ABI and uses different registers */
+     /* http://blogs.msdn.com/freik/archive/2005/03/17/398200.aspx */
+     #if __amd64
+
+     #if _WIN32 || __CYGWIN__
+     #define NUM_SAVED 29
+     "\tsubq $168, %rsp\t" /* one dummy qword to improve alignment */
+     "\tmovaps %xmm6, (%rsp)\n"
+     "\tmovaps %xmm7, 16(%rsp)\n"
+     "\tmovaps %xmm8, 32(%rsp)\n"
+     "\tmovaps %xmm9, 48(%rsp)\n"
+     "\tmovaps %xmm10, 64(%rsp)\n"
+     "\tmovaps %xmm11, 80(%rsp)\n"
+     "\tmovaps %xmm12, 96(%rsp)\n"
+     "\tmovaps %xmm13, 112(%rsp)\n"
+     "\tmovaps %xmm14, 128(%rsp)\n"
+     "\tmovaps %xmm15, 144(%rsp)\n"
+     "\tpushq %rsi\n"
+     "\tpushq %rdi\n"
+     "\tpushq %rbp\n"
+     "\tpushq %rbx\n"
+     "\tpushq %r12\n"
+     "\tpushq %r13\n"
+     "\tpushq %r14\n"
+     "\tpushq %r15\n"
+     #if CORO_WIN_TIB
+     "\tpushq %fs:0x0\n"
+     "\tpushq %fs:0x8\n"
+     "\tpushq %fs:0xc\n"
+     #endif
+     "\tmovq %rsp, (%rcx)\n"
+     "\tmovq (%rdx), %rsp\n"
+     #if CORO_WIN_TIB
+     "\tpopq %fs:0xc\n"
+     "\tpopq %fs:0x8\n"
+     "\tpopq %fs:0x0\n"
+     #endif
+     "\tpopq %r15\n"
+     "\tpopq %r14\n"
+     "\tpopq %r13\n"
+     "\tpopq %r12\n"
+     "\tpopq %rbx\n"
+     "\tpopq %rbp\n"
+     "\tpopq %rdi\n"
+     "\tpopq %rsi\n"
+     "\tmovaps (%rsp), %xmm6\n"
+     "\tmovaps 16(%rsp), %xmm7\n"
+     "\tmovaps 32(%rsp), %xmm8\n"
+     "\tmovaps 48(%rsp), %xmm9\n"
+     "\tmovaps 64(%rsp), %xmm10\n"
+     "\tmovaps 80(%rsp), %xmm11\n"
+     "\tmovaps 96(%rsp), %xmm12\n"
+     "\tmovaps 112(%rsp), %xmm13\n"
+     "\tmovaps 128(%rsp), %xmm14\n"
+     "\tmovaps 144(%rsp), %xmm15\n"
+     "\taddq $168, %rsp\n"
+     #else
+     #define NUM_SAVED 6
+     "\tpushq %rbp\n"
+     "\tpushq %rbx\n"
+     "\tpushq %r12\n"
+     "\tpushq %r13\n"
+     "\tpushq %r14\n"
+     "\tpushq %r15\n"
+     "\tmovq %rsp, (%rdi)\n"
+     "\tmovq (%rsi), %rsp\n"
+     "\tpopq %r15\n"
+     "\tpopq %r14\n"
+     "\tpopq %r13\n"
+     "\tpopq %r12\n"
+     "\tpopq %rbx\n"
+     "\tpopq %rbp\n"
+     #endif
+     "\tpopq %rcx\n"
+     "\tjmpq *%rcx\n"
+
+     #elif __i386
+
+     #define NUM_SAVED 4
+     "\tpushl %ebp\n"
+     "\tpushl %ebx\n"
+     "\tpushl %esi\n"
+     "\tpushl %edi\n"
+     #if CORO_WIN_TIB
+     #undef NUM_SAVED
+     #define NUM_SAVED 7
+     "\tpushl %fs:0\n"
+     "\tpushl %fs:4\n"
+     "\tpushl %fs:8\n"
+     #endif
+     "\tmovl %esp, (%eax)\n"
+     "\tmovl (%edx), %esp\n"
+     #if CORO_WIN_TIB
+     "\tpopl %fs:8\n"
+     "\tpopl %fs:4\n"
+     "\tpopl %fs:0\n"
+     #endif
+     "\tpopl %edi\n"
+     "\tpopl %esi\n"
+     "\tpopl %ebx\n"
+     "\tpopl %ebp\n"
+     "\tpopl %ecx\n"
+     "\tjmpl *%ecx\n"
+
+     #else
+     #error unsupported architecture
+     #endif
+  );
+
+# endif
+
+void
+coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize)
+{
+  coro_context nctx;
+# if CORO_SJLJ
+  stack_t ostk, nstk;
+  struct sigaction osa, nsa;
+  sigset_t nsig, osig;
+# endif
+
+  if (!coro)
+    return;
+
+  coro_init_func = coro;
+  coro_init_arg = arg;
+
+  new_coro = ctx;
+  create_coro = &nctx;
+
+# if CORO_SJLJ
+  /* we use SIGUSR2. first block it, then fiddle with it. */
+
+  sigemptyset (&nsig);
+  sigaddset (&nsig, SIGUSR2);
+  sigprocmask (SIG_BLOCK, &nsig, &osig);
+
+  nsa.sa_handler = trampoline;
+  sigemptyset (&nsa.sa_mask);
+  nsa.sa_flags = SA_ONSTACK;
+
+  if (sigaction (SIGUSR2, &nsa, &osa))
     {
-# if HAVE_CFI
+      perror ("sigaction");
-      asm (".cfi_startproc");
+      abort ();
-# endif
-      coro_init (); /* start it */
-# if HAVE_CFI
-      asm (".cfi_endproc");
-# endif
     }
+
+  /* set the new stack */
+  nstk.ss_sp = STACK_ADJUST_PTR (sptr, ssize); /* yes, some platforms (IRIX) get this wrong. */
+  nstk.ss_size = STACK_ADJUST_SIZE (sptr, ssize);
+  nstk.ss_flags = 0;
+
+  if (sigaltstack (&nstk, &ostk) < 0)
+    {
+      perror ("sigaltstack");
+      abort ();
+    }
+
+  trampoline_done = 0;
+  kill (getpid (), SIGUSR2);
+  sigfillset (&nsig); sigdelset (&nsig, SIGUSR2);
+
+  while (!trampoline_done)
+    sigsuspend (&nsig);
+
+  sigaltstack (0, &nstk);
+  nstk.ss_flags = SS_DISABLE;
+  if (sigaltstack (&nstk, 0) < 0)
+    perror ("sigaltstack");
+
+  sigaltstack (0, &nstk);
+  if (~nstk.ss_flags & SS_DISABLE)
+    abort ();
+
+  if (~ostk.ss_flags & SS_DISABLE)
+    sigaltstack (&ostk, 0);
+
+  sigaction (SIGUSR2, &osa, 0);
+  sigprocmask (SIG_SETMASK, &osig, 0);
+
+# elif CORO_LOSER
+
+  coro_setjmp (ctx->env);
+  #if __CYGWIN__ && __i386
+    ctx->env[8] = (long) coro_init;
+    ctx->env[7] = (long) ((char *)sptr + ssize) - sizeof (long);
+  #elif __CYGWIN__ && __x86_64
+    ctx->env[7] = (long) coro_init;
+    ctx->env[6] = (long) ((char *)sptr + ssize) - sizeof (long);
+  #elif defined __MINGW32__
+    ctx->env[5] = (long) coro_init;
+    ctx->env[4] = (long) ((char *)sptr + ssize) - sizeof (long);
+  #elif defined _M_IX86
+    ((_JUMP_BUFFER *)&ctx->env)->Eip = (long) coro_init;
+    ((_JUMP_BUFFER *)&ctx->env)->Esp = (long) STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
+  #elif defined _M_AMD64
+    ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64) coro_init;
+    ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64);
+  #elif defined _M_IA64
+    ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64) coro_init;
+    ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64);
-  else
+  #else
-    trampoline_count++;
+    #error "microsoft libc or architecture not supported"
-}
-
-# endif
+  #endif

+# elif CORO_LINUX
+
+  coro_setjmp (ctx->env);
+  #if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (JB_PC) && defined (JB_SP)
+    ctx->env[0].__jmpbuf[JB_PC] = (long) coro_init;
+    ctx->env[0].__jmpbuf[JB_SP] = (long) STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
+  #elif __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (__mc68000__)
+    ctx->env[0].__jmpbuf[0].__aregs[0] = (long int)coro_init;
+    ctx->env[0].__jmpbuf[0].__sp = (int *) ((char *)sptr + ssize) - sizeof (long);
+  #elif defined (__GNU_LIBRARY__) && defined (__i386__)
+    ctx->env[0].__jmpbuf[0].__pc = (char *) coro_init;
+    ctx->env[0].__jmpbuf[0].__sp = (void *) ((char *)sptr + ssize) - sizeof (long);
+  #elif defined (__GNU_LIBRARY__) && defined (__amd64__)
+    ctx->env[0].__jmpbuf[JB_PC] = (long) coro_init;
+    ctx->env[0].__jmpbuf[0].__sp = (void *) ((char *)sptr + ssize) - sizeof (long);
+  #else
+    #error "linux libc or architecture not supported"
+  #endif
+
+# elif CORO_IRIX
+
+  coro_setjmp (ctx->env, 0);
+  ctx->env[JB_PC] = (__uint64_t)coro_init;
+  ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
+
+# elif CORO_ASM
+
+  ctx->sp = (void **)(ssize + (char *)sptr);
+  *--ctx->sp = (void *)abort; /* needed for alignment only */
+  *--ctx->sp = (void *)coro_init;
+
+  #if CORO_WIN_TIB
+  *--ctx->sp = 0; /* ExceptionList */
+  *--ctx->sp = (char *)sptr + ssize; /* StackBase */
+  *--ctx->sp = sptr; /* StackLimit */
+  #endif
+
+  ctx->sp -= NUM_SAVED;
+  memset (ctx->sp, 0, sizeof (*ctx->sp) * NUM_SAVED);
+
+# elif CORO_UCONTEXT
+
+  getcontext (&(ctx->uc));
+
+  ctx->uc.uc_link = 0;
+  ctx->uc.uc_stack.ss_sp = sptr;
+  ctx->uc.uc_stack.ss_size = (size_t)ssize;
+  ctx->uc.uc_stack.ss_flags = 0;
+
+  makecontext (&(ctx->uc), (void (*)())coro_init, 0);
+
-#endif
+# endif

-#if CORO_ASM
+  coro_transfer (create_coro, new_coro);
+}

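The CORO_SJLJ path above leans on a classic trick: have the kernel run a signal handler on the coroutine's stack (sigaltstack plus SA_ONSTACK), capture a jump buffer there, and later jump into it. The following self-contained sketch shows only that mechanism with one captured context; the names (handler, captured, stk) are invented for the illustration, error checking is omitted, and, like the code above, it relies on the saved context staying valid after the handler returns because nothing else ever runs on that stack.

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static sigjmp_buf new_env, main_env;
static volatile sig_atomic_t captured;
static char stk[65536];                     /* the "coroutine" stack */

static void
handler (int sig)
{
  if (sigsetjmp (new_env, 0))
    {
      /* reached only via siglongjmp below, i.e. while on the private stack */
      puts ("running on the private stack");
      siglongjmp (main_env, 1);
    }

  captured = 1;                             /* context saved; just return */
}

int
main (void)
{
  stack_t nstk;
  struct sigaction nsa;
  sigset_t nsig, wait_mask;

  sigemptyset (&nsig);                      /* block SIGUSR2 while setting up */
  sigaddset (&nsig, SIGUSR2);
  sigprocmask (SIG_BLOCK, &nsig, 0);

  nsa.sa_handler = handler;                 /* handler must run on the alt stack */
  sigemptyset (&nsa.sa_mask);
  nsa.sa_flags = SA_ONSTACK;
  sigaction (SIGUSR2, &nsa, 0);

  nstk.ss_sp = stk;                         /* point the alternate stack at stk */
  nstk.ss_size = sizeof (stk);
  nstk.ss_flags = 0;
  sigaltstack (&nstk, 0);

  kill (getpid (), SIGUSR2);                /* queue the (still blocked) signal */
  sigfillset (&wait_mask);
  sigdelset (&wait_mask, SIGUSR2);

  while (!captured)
    sigsuspend (&wait_mask);                /* deliver it, running handler on stk */

  /* the real coro_create additionally restores the previous handler,
     signal mask and alternate stack at this point */

  if (!sigsetjmp (main_env, 0))
    siglongjmp (new_env, 1);                /* "transfer" into the captured context */

  puts ("back on the main stack");
  return 0;
}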
-  asm (
+/*****************************************************************************/
-     ".text\n"
+/* pthread backend */
-     ".globl coro_transfer\n"
+/*****************************************************************************/
-     ".type coro_transfer, @function\n"
-     "coro_transfer:\n"
-# if __amd64
-# define NUM_SAVED 6
-     "\tpush %rbp\n"
-     "\tpush %rbx\n"
-     "\tpush %r12\n"
-     "\tpush %r13\n"
-     "\tpush %r14\n"
-     "\tpush %r15\n"
-     "\tmov %rsp, (%rdi)\n"
-     "\tmov (%rsi), %rsp\n"
-     "\tpop %r15\n"
-     "\tpop %r14\n"
-     "\tpop %r13\n"
-     "\tpop %r12\n"
-     "\tpop %rbx\n"
-     "\tpop %rbp\n"
-# elif __i386
-# define NUM_SAVED 4
-     "\tpush %ebp\n"
-     "\tpush %ebx\n"
-     "\tpush %esi\n"
-     "\tpush %edi\n"
-     "\tmov %esp, (%eax)\n"
-     "\tmov (%edx), %esp\n"
-     "\tpop %edi\n"
-     "\tpop %esi\n"
-     "\tpop %ebx\n"
-     "\tpop %ebp\n"
-# else
-# error unsupported architecture
-# endif
-     "\tret\n"
-  );
-
-#endif
-
-#if CORO_PTHREAD
+#elif CORO_PTHREAD

 /* this mutex will be locked by the running coroutine */
 pthread_mutex_t coro_mutex = PTHREAD_MUTEX_INITIALIZER;

 struct coro_init_args
@@ -186 +414 @@
 {
   pthread_mutex_unlock ((pthread_mutex_t *)arg);
 }

 static void *
-trampoline (void *args_)
+coro_init (void *args_)
 {
   struct coro_init_args *args = (struct coro_init_args *)args_;
   coro_func func = args->func;
   void *arg = args->arg;

@@ -208 +436 @@
 void
 coro_transfer (coro_context *prev, coro_context *next)
 {
   pthread_cond_signal (&next->cv);
   pthread_cond_wait (&prev->cv, &coro_mutex);
+#if __FreeBSD__ /* freebsd is of course broken and needs manual testcancel calls... yay... */
+  pthread_testcancel ();
+#endif
+}
+
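The pthread backend keeps exactly one "coroutine" runnable at a time: whoever is running holds coro_mutex, and a transfer is signal-the-target, then wait on your own condition variable. The struct coro_init_args / coro_init plumbing that starts a new thread under this discipline sits in the collapsed lines above; the stand-alone sketch below reproduces only the handoff idea with one worker thread. All names (run_mutex, uctx, transfer, worker) are invented for the example, and like the backend itself it relies on the strict handoff rather than a predicate loop around pthread_cond_wait.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t run_mutex = PTHREAD_MUTEX_INITIALIZER;

typedef struct
{
  pthread_cond_t cv;
} uctx;

static uctx main_ctx = { PTHREAD_COND_INITIALIZER };
static uctx work_ctx = { PTHREAD_COND_INITIALIZER };

static void
transfer (uctx *prev, uctx *next)
{
  /* caller holds run_mutex: wake the target, then sleep until someone
     transfers back, releasing run_mutex while waiting */
  pthread_cond_signal (&next->cv);
  pthread_cond_wait (&prev->cv, &run_mutex);
}

static void *
worker (void *arg)
{
  pthread_mutex_lock (&run_mutex);   /* acquired once main blocks in transfer */
  transfer (&work_ctx, &main_ctx);   /* hand control back, wait to be resumed */

  puts ("worker resumed");
  transfer (&work_ctx, &main_ctx);   /* yield again; never resumed in this sketch */

  return 0;
}

int
main (void)
{
  pthread_t tid;

  pthread_mutex_lock (&run_mutex);   /* main is the running "coroutine" */
  pthread_create (&tid, 0, worker, 0);

  transfer (&main_ctx, &work_ctx);   /* let the worker reach its first wait */
  puts ("back in main");
  transfer (&main_ctx, &work_ctx);   /* worker prints and yields once more */

  pthread_mutex_unlock (&run_mutex);
  return 0;
}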
+void
+coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize)
+{
+  static coro_context nctx;
+  static int once;
+
+  if (!once)
+    {
+      once = 1;
+
+      pthread_mutex_lock (&coro_mutex);
+      pthread_cond_init (&nctx.cv, 0);
+      null_tid = pthread_self ();
+    }
+
+  pthread_cond_init (&ctx->cv, 0);
+
+  if (coro)
+    {
+      pthread_attr_t attr;
+      struct coro_init_args args;
+
+      args.func = coro;
+      args.arg = arg;
+      args.self = ctx;
+      args.main = &nctx;
+
+      pthread_attr_init (&attr);
+#if __UCLIBC__
+      /* exists, but is borked */
+      /*pthread_attr_setstacksize (&attr, (size_t)ssize);*/
+#elif __CYGWIN__
+      /* POSIX, not here */
+      pthread_attr_setstacksize (&attr, (size_t)ssize);
+#else
+      pthread_attr_setstack (&attr, sptr, (size_t)ssize);
+#endif
+      pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
+      pthread_create (&ctx->id, &attr, coro_init, &args);
+
+      coro_transfer (args.main, args.self);
+    }
+  else
+    ctx->id = null_tid;
 }

 void
 coro_destroy (coro_context *ctx)
 {
@@ -224 +501 @@
     }

   pthread_cond_destroy (&ctx->cv);
 }

-#endif
+/*****************************************************************************/
+/* fiber backend */
+/*****************************************************************************/
+#elif CORO_FIBER

-/* initialize a machine state */
+#define WIN32_LEAN_AND_MEAN
+#if _WIN32_WINNT < 0x0400
+  #undef _WIN32_WINNT
+  #define _WIN32_WINNT 0x0400
+#endif
+#include <windows.h>
+
+VOID CALLBACK
+coro_init (PVOID arg)
+{
+  coro_context *ctx = (coro_context *)arg;
+
+  ctx->coro (ctx->arg);
+}
+
 void
+coro_transfer (coro_context *prev, coro_context *next)
+{
+  if (!prev->fiber)
+    {
+      prev->fiber = GetCurrentFiber ();
+
+      if (prev->fiber == 0 || prev->fiber == (void *)0x1e00)
+        prev->fiber = ConvertThreadToFiber (0);
+    }
+
+  SwitchToFiber (next->fiber);
+}
+
+void
-coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, long ssize)
+coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize)
 {
-#if CORO_UCONTEXT
+  ctx->fiber = 0;
+  ctx->coro = coro;
+  ctx->arg = arg;

   if (!coro)
     return;

-  getcontext (&(ctx->uc));
+  ctx->fiber = CreateFiber (ssize, coro_init, ctx);
+}

-  ctx->uc.uc_link = 0;
+void
-  ctx->uc.uc_stack.ss_sp = STACK_ADJUST_PTR (sptr,ssize);
+coro_destroy (coro_context *ctx)
-  ctx->uc.uc_stack.ss_size = (size_t)STACK_ADJUST_SIZE (sptr,ssize);
+{
-  ctx->uc.uc_stack.ss_flags = 0;
+  DeleteFiber (ctx->fiber);
+}

-  makecontext (&(ctx->uc), (void (*)())coro, 1, arg);
+#else
+  #error unsupported backend
+#endif

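The CORO_FIBER backend above wraps the Win32 fiber API directly. For reference, a stand-alone sketch of those calls outside libcoro (Windows only; the 64 KiB stack size, the worker name and the single switch are illustrative, error checks omitted). The same rule applies as in coro_transfer above: the calling thread has to be converted to a fiber once before it may switch, and a fiber start routine must switch away rather than return, since returning ends the thread.

#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <stdio.h>

static LPVOID main_fiber, work_fiber;

static VOID CALLBACK
worker (PVOID arg)
{
  puts ("hello from the worker fiber");
  SwitchToFiber (main_fiber);               /* never return from a fiber */
}

int
main (void)
{
  main_fiber = ConvertThreadToFiber (0);    /* current thread becomes a fiber */
  work_fiber = CreateFiber (65536, worker, 0);

  SwitchToFiber (work_fiber);               /* run worker until it switches back */

  DeleteFiber (work_fiber);
  return 0;
}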
-#elif CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM
+/*****************************************************************************/
+/* stack management */
+/*****************************************************************************/
+#if CORO_STACKALLOC

-# if CORO_SJLJ
+#include <stdlib.h>
-  stack_t ostk, nstk;
+
-  struct sigaction osa, nsa;
+#ifndef _WIN32
-  sigset_t nsig, osig;
+# include <unistd.h>
+#endif
+
+#if CORO_USE_VALGRIND
+# include <valgrind/valgrind.h>
+#endif
+
+#if _POSIX_MAPPED_FILES
+# include <sys/mman.h>
+# define CORO_MMAP 1
+# ifndef MAP_ANONYMOUS
+#  ifdef MAP_ANON
+#   define MAP_ANONYMOUS MAP_ANON
+#  else
+#   undef CORO_MMAP
 # endif
-  coro_context nctx;
+# endif
+# include <limits.h>
+#else
+# undef CORO_MMAP
+#endif

-  if (!coro)
+#if _POSIX_MEMORY_PROTECTION
-    return;
+# ifndef CORO_GUARDPAGES
+#  define CORO_GUARDPAGES 4
+# endif
+#else
+# undef CORO_GUARDPAGES
+#endif

-  coro_init_func = coro;
+#if !CORO_MMAP
-  coro_init_arg = arg;
+# undef CORO_GUARDPAGES
+#endif

-  new_coro = ctx;
+#if !__i386 && !__x86_64 && !__powerpc && !__m68k && !__alpha && !__mips && !__sparc64
-  create_coro = &nctx;
+# undef CORO_GUARDPAGES
+#endif

-# if CORO_SJLJ
+#ifndef CORO_GUARDPAGES
-  /* we use SIGUSR2. first block it, then fiddle with it. */
+# define CORO_GUARDPAGES 0
+#endif

-  sigemptyset (&nsig);
+#if !PAGESIZE
-  sigaddset (&nsig, SIGUSR2);
+  #if !CORO_MMAP
-  sigprocmask (SIG_BLOCK, &nsig, &osig);
+    #define PAGESIZE 4096
-
+  #else
-  nsa.sa_handler = trampoline;
+    static size_t
-  sigemptyset (&nsa.sa_mask);
+    coro_pagesize (void)
-  nsa.sa_flags = SA_ONSTACK;
-
-  if (sigaction (SIGUSR2, &nsa, &osa))
     {
-      perror ("sigaction");
+      static size_t pagesize;
-      abort ();
+
+      if (!pagesize)
+        pagesize = sysconf (_SC_PAGESIZE);
+
+      return pagesize;
     }

-  /* set the new stack */
+    #define PAGESIZE coro_pagesize ()
-  nstk.ss_sp = STACK_ADJUST_PTR (sptr,ssize); /* yes, some platforms (IRIX) get this wrong. */
+  #endif
-  nstk.ss_size = STACK_ADJUST_SIZE (sptr,ssize);
+#endif
-  nstk.ss_flags = 0;

-  if (sigaltstack (&nstk, &ostk) < 0)
+int
-    {
+coro_stack_alloc (struct coro_stack *stack, unsigned int size)
-      perror ("sigaltstack");
+{
-      abort ();
+  if (!size)
-    }
+    size = 256 * 1024;

-  trampoline_count = 0;
+  stack->sptr = 0;
-  kill (getpid (), SIGUSR2);
+  stack->ssze = ((size_t)size * sizeof (void *) + PAGESIZE - 1) / PAGESIZE * PAGESIZE;
-  sigfillset (&nsig); sigdelset (&nsig, SIGUSR2);

-  while (!trampoline_count)
+#if CORO_FIBER
-    sigsuspend (&nsig);

-  sigaltstack (0, &nstk);
+  stack->sptr = (void *)stack;
-  nstk.ss_flags = SS_DISABLE;
+  return 1;
-  if (sigaltstack (&nstk, 0) < 0)
-    perror ("sigaltstack");

-
-  if (~nstk.ss_flags & SS_DISABLE)
-    abort ();
-
-  if (~ostk.ss_flags & SS_DISABLE)
-    sigaltstack (&ostk, 0);
-
-  sigaction (SIGUSR2, &osa, 0);
-
-  sigprocmask (SIG_SETMASK, &osig, 0);
-
-# elif CORO_LOSER
-
-  setjmp (ctx->env);
-#if __CYGWIN__
-  ctx->env[7] = (long)((char *)sptr + ssize) - sizeof (long);
-  ctx->env[8] = (long)coro_init;
-#elif defined(_M_IX86)
-  ((_JUMP_BUFFER *)&ctx->env)->Eip = (long)coro_init;
-  ((_JUMP_BUFFER *)&ctx->env)->Esp = (long)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
-#elif defined(_M_AMD64)
-  ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64)coro_init;
-  ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
-#elif defined(_M_IA64)
-  ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64)coro_init;
-  ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
 #else
-# error "microsoft libc or architecture not supported"
+
+  size_t ssze = stack->ssze + CORO_GUARDPAGES * PAGESIZE;
+  void *base;
+
+  #if CORO_MMAP
+  /* mmap supposedly does allocate-on-write for us */
+  base = mmap (0, ssze, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+  if (base == (void *)-1)
+    {
+      /* some systems don't let us have executable heap */
+      /* we assume they won't need executable stack in that case */
+      base = mmap (0, ssze, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+      if (base == (void *)-1)
+        return 0;
+    }
+
+  #if CORO_GUARDPAGES
+  mprotect (base, CORO_GUARDPAGES * PAGESIZE, PROT_NONE);
+  #endif
+
+  base = (void*)((char *)base + CORO_GUARDPAGES * PAGESIZE);
+  #else
+  base = malloc (ssze);
+  if (!base)
+    return 0;
-#endif
+  #endif

-# elif CORO_LINUX
+  #if CORO_USE_VALGRIND
+  stack->valgrind_id = VALGRIND_STACK_REGISTER ((char *)base, (char *)ssze - CORO_GUARDPAGES * PAGESIZE);
+  #endif

-  _setjmp (ctx->env);
+  stack->sptr = base;
-#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (JB_PC) && defined (JB_SP)
+  return 1;
-  ctx->env[0].__jmpbuf[JB_PC] = (long)coro_init;
+
-  ctx->env[0].__jmpbuf[JB_SP] = (long)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
+#endif
-#elif __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (__mc68000__)
+}
-  ctx->env[0].__jmpbuf[0].__aregs[0] = (long int)coro_init;
+
-  ctx->env[0].__jmpbuf[0].__sp = (int *)((char *)sptr + ssize) - sizeof (long);
+void
-#elif defined (__GNU_LIBRARY__) && defined (__i386__)
+coro_stack_free (struct coro_stack *stack)
-  ctx->env[0].__jmpbuf[0].__pc = (char *)coro_init;
+{
-  ctx->env[0].__jmpbuf[0].__sp = (void *)((char *)sptr + ssize) - sizeof (long);
+#if CORO_FIBER
-#elif defined (__GNU_LIBRARY__) && defined (__amd64__)
+  /* nop */
-  ctx->env[0].__jmpbuf[JB_PC] = (long)coro_init;
-  ctx->env[0].__jmpbuf[0].__sp = (void *)((char *)sptr + ssize) - sizeof (long);
 #else
-# error "linux libc or architecture not supported"
+  #if CORO_USE_VALGRIND
+  VALGRIND_STACK_DEREGISTER (stack->valgrind_id);
-#endif
+  #endif

-# elif CORO_IRIX
+  #if CORO_MMAP
-
+  if (stack->sptr)
-  setjmp (ctx->env);
+    munmap ((void*)((char *)stack->sptr - CORO_GUARDPAGES * PAGESIZE),
-  ctx->env[JB_PC] = (__uint64_t)coro_init;
+            stack->ssze + CORO_GUARDPAGES * PAGESIZE);
-  ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
-
-# elif CORO_ASM
-
-  ctx->sp = (volatile void **)(ssize + (char *)sptr);
-  *--ctx->sp = (void *)abort; /* needed for alignment only */
-  *--ctx->sp = (void *)coro_init;
-  ctx->sp -= NUM_SAVED;
-
-# endif
-
-  coro_transfer ((coro_context *)create_coro, (coro_context *)new_coro);
-
-# elif CORO_PTHREAD
-
-  static coro_context nctx;
-  static int once;
-
-  if (!once)
-    {
-      once = 1;
-
-      pthread_mutex_lock (&coro_mutex);
-      pthread_cond_init (&nctx.cv, 0);
-      null_tid = pthread_self ();
-    }
-
-  pthread_cond_init (&ctx->cv, 0);
-
-  if (coro)
-    {
-      pthread_attr_t attr;
-      struct coro_init_args args;
-
-      args.func = coro;
-      args.arg = arg;
-      args.self = ctx;
-      args.main = &nctx;
-
-      pthread_attr_init (&attr);
-      pthread_attr_setstack (&attr, sptr, (size_t)ssize);
-      pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
-      pthread_create (&ctx->id, &attr, trampoline, &args);
-
-      coro_transfer (args.main, args.self);
-    }
-  else
+  #else
-    ctx->id = null_tid;
+  free (stack->sptr);
-
-#else
-# error unsupported backend
-#endif
+  #endif
+#endif
 }

+#endif
+
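The CORO_STACKALLOC block above adds a small stack allocator on top of the backends: coro_stack_alloc takes a size in pointer-sized words (0 selects the 256*1024-word default seen above), rounds the byte size up to whole pages, and, where mmap and mprotect are available, places CORO_GUARDPAGES of inaccessible memory below the stack; coro_stack_free releases it again. A hedged sketch of how it combines with coro_create follows; the worker function and the chosen size are illustrative, and it assumes a build with CORO_STACKALLOC and a backend that actually uses the supplied stack.

#include "coro.h"

static coro_context mainctx, workctx;
static struct coro_stack stack;

static void
worker (void *arg)
{
  for (;;)
    coro_transfer (&workctx, &mainctx);
}

int
main (void)
{
  /* 16384 words is 128 KiB on a 64-bit target, rounded up to whole pages */
  if (!coro_stack_alloc (&stack, 16384))
    return 1;

  coro_create (&mainctx, 0, 0, 0, 0);
  coro_create (&workctx, worker, 0, stack.sptr, stack.ssze);

  coro_transfer (&mainctx, &workctx);

  coro_stack_free (&stack);
  return 0;
}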

Diff Legend

- lines present only in revision 1.38 (removed)
+ lines present only in revision 1.66 (added)
  unprefixed lines are common to both revisions
@@ -N +M @@ marks a run of unchanged lines omitted from the comparison, resuming at line N of 1.38 and line M of 1.66