/*
 * Copyright (c) 2001-2011 Marc Alexander Lehmann <schmorp@schmorp.de>
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 *
 * This library is modelled strictly after Ralf S. Engelschall's article at
 * http://www.gnu.org/software/pth/rse-pmt.ps. So most of the credit must
 * go to Ralf S. Engelschall <rse@engelschall.com>.
 */

#include "coro.h"

#include <stddef.h>
#include <string.h>

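/*
 * Illustrative usage sketch (not part of the library; the authoritative
 * prototypes and the coro_context/coro_stack definitions live in coro.h,
 * and coro_stack_alloc/coro_stack_free assume CORO_STACKALLOC):
 *
 *   static coro_context main_ctx, co_ctx;
 *   static struct coro_stack stack;
 *
 *   static void
 *   entry (void *arg)
 *   {
 *     for (;;)
 *       coro_transfer (&co_ctx, &main_ctx); // yield back to the creator
 *   }
 *
 *   coro_create (&main_ctx, 0, 0, 0, 0);    // empty context for the caller itself
 *   coro_stack_alloc (&stack, 0);           // 0 selects the default stack size
 *   coro_create (&co_ctx, entry, 0, stack.sptr, stack.ssze);
 *   coro_transfer (&main_ctx, &co_ctx);     // run the coroutine until it yields
 */
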
/*****************************************************************************/
/* ucontext/setjmp/asm backends                                              */
/*****************************************************************************/
#if CORO_UCONTEXT || CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM

# if CORO_UCONTEXT
#  include <stddef.h>
# endif

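/*
 * The STACK_ADJUST_* macros below translate the (base, size) pair passed to
 * coro_create into what the selected backend expects for its signal or
 * context stack: most targets use the base pointer as-is, while IRIX and
 * the setjmp-based x86/amd64 paths want the top of the stack, possibly
 * biased by a few bytes.
 */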
# if !defined(STACK_ADJUST_PTR)
#  if __sgi
/* IRIX is decidedly NON-unix */
#   define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
#   define STACK_ADJUST_SIZE(sp,ss) ((ss) - 8)
#  elif (__i386__ && CORO_LINUX) || (_M_IX86 && CORO_LOSER)
#   define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss))
#   define STACK_ADJUST_SIZE(sp,ss) (ss)
#  elif (__amd64__ && CORO_LINUX) || ((_M_AMD64 || _M_IA64) && CORO_LOSER)
#   define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
#   define STACK_ADJUST_SIZE(sp,ss) (ss)
#  else
#   define STACK_ADJUST_PTR(sp,ss) (sp)
#   define STACK_ADJUST_SIZE(sp,ss) (ss)
#  endif
# endif

# include <stdlib.h>

# if CORO_SJLJ
#  include <stdio.h>
#  include <signal.h>
#  include <unistd.h>
# endif

static coro_func coro_init_func;
static void *coro_init_arg;
static coro_context *new_coro, *create_coro;

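/*
 * These statics carry the entry function, its argument and the two contexts
 * involved from coro_create into coro_init, which starts on the new stack
 * and cannot be passed ordinary arguments.  Coroutine creation (not
 * switching) therefore goes through global state and is not reentrant.
 */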
static void
coro_init (void)
{
  volatile coro_func func = coro_init_func;
  volatile void *arg = coro_init_arg;

  coro_transfer (new_coro, create_coro);

#if __GCC_HAVE_DWARF2_CFI_ASM && __amd64
  asm (".cfi_undefined rip");
#endif

  func ((void *)arg);

  /* the new coro returned. bad. just abort() for now */
  abort ();
}
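/*
 * coro_init is the first code to run on the new stack: it copies the
 * creation parameters into locals, transfers straight back to the creator,
 * and only calls func (arg) once the coroutine is transferred to for real.
 * The .cfi_undefined directive (where available) tells unwinders not to
 * walk past the coroutine entry point.
 */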

# if CORO_SJLJ

static volatile int trampoline_done;

/* trampoline signal handler */
static void
trampoline (int sig)
{
  if (coro_setjmp (new_coro->env))
    coro_init (); /* start it */
  else
    trampoline_done = 1;
}
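/*
 * The trampoline runs as a SIGUSR2 handler on the alternate signal stack
 * that coro_create points at the coroutine's stack.  On the first (real)
 * delivery it saves its position with coro_setjmp and returns; the saved
 * context is entered later via coro_longjmp, at which point coro_init
 * takes over on that stack.
 */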

# endif

# if CORO_ASM

  #if __arm__ && \
      (defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
       || defined __ARM_ARCH_7R__ || defined __ARM_ARCH_7M__ \
       || __ARM_ARCH == 7)
    #define CORO_ARM 1
  #endif

  #if _WIN32 || __CYGWIN__
    #define CORO_WIN_TIB 1
  #endif

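  /*
   * Hand-rolled coro_transfer: save all callee-saved registers on the
   * current stack, store the stack pointer into *prev, load the stack
   * pointer from *next, then restore the registers found there and return
   * on the new stack.  NUM_SAVED is the number of pointer-sized slots this
   * uses; coro_create relies on it below when laying out a fresh stack.
   * With CORO_WIN_TIB the TIB fields describing the current stack and
   * exception chain are swapped as well, since Windows consults them
   * during exception handling and stack growth.
   */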
  asm (
       "\t.text\n"
  #if _WIN32 || __CYGWIN__
       "\t.globl _coro_transfer\n"
       "_coro_transfer:\n"
  #else
       "\t.globl coro_transfer\n"
       "coro_transfer:\n"
  #endif
       /* windows, of course, gives a shit on the amd64 ABI and uses different registers */
       /* http://blogs.msdn.com/freik/archive/2005/03/17/398200.aspx */
  #if __amd64

    #if _WIN32 || __CYGWIN__
      #define NUM_SAVED 29
      "\tsubq $168, %rsp\n" /* one dummy qword to improve alignment */
      "\tmovaps %xmm6, (%rsp)\n"
      "\tmovaps %xmm7, 16(%rsp)\n"
      "\tmovaps %xmm8, 32(%rsp)\n"
      "\tmovaps %xmm9, 48(%rsp)\n"
      "\tmovaps %xmm10, 64(%rsp)\n"
      "\tmovaps %xmm11, 80(%rsp)\n"
      "\tmovaps %xmm12, 96(%rsp)\n"
      "\tmovaps %xmm13, 112(%rsp)\n"
      "\tmovaps %xmm14, 128(%rsp)\n"
      "\tmovaps %xmm15, 144(%rsp)\n"
      "\tpushq %rsi\n"
      "\tpushq %rdi\n"
      "\tpushq %rbp\n"
      "\tpushq %rbx\n"
      "\tpushq %r12\n"
      "\tpushq %r13\n"
      "\tpushq %r14\n"
      "\tpushq %r15\n"
    #if CORO_WIN_TIB
      "\tpushq %fs:0x0\n"
      "\tpushq %fs:0x8\n"
      "\tpushq %fs:0xc\n"
    #endif
      "\tmovq %rsp, (%rcx)\n"
      "\tmovq (%rdx), %rsp\n"
    #if CORO_WIN_TIB
      "\tpopq %fs:0xc\n"
      "\tpopq %fs:0x8\n"
      "\tpopq %fs:0x0\n"
    #endif
      "\tpopq %r15\n"
      "\tpopq %r14\n"
      "\tpopq %r13\n"
      "\tpopq %r12\n"
      "\tpopq %rbx\n"
      "\tpopq %rbp\n"
      "\tpopq %rdi\n"
      "\tpopq %rsi\n"
      "\tmovaps (%rsp), %xmm6\n"
      "\tmovaps 16(%rsp), %xmm7\n"
      "\tmovaps 32(%rsp), %xmm8\n"
      "\tmovaps 48(%rsp), %xmm9\n"
      "\tmovaps 64(%rsp), %xmm10\n"
      "\tmovaps 80(%rsp), %xmm11\n"
      "\tmovaps 96(%rsp), %xmm12\n"
      "\tmovaps 112(%rsp), %xmm13\n"
      "\tmovaps 128(%rsp), %xmm14\n"
      "\tmovaps 144(%rsp), %xmm15\n"
      "\taddq $168, %rsp\n"
    #else
      #define NUM_SAVED 6
      "\tpushq %rbp\n"
      "\tpushq %rbx\n"
      "\tpushq %r12\n"
      "\tpushq %r13\n"
      "\tpushq %r14\n"
      "\tpushq %r15\n"
      "\tmovq %rsp, (%rdi)\n"
      "\tmovq (%rsi), %rsp\n"
      "\tpopq %r15\n"
      "\tpopq %r14\n"
      "\tpopq %r13\n"
      "\tpopq %r12\n"
      "\tpopq %rbx\n"
      "\tpopq %rbp\n"
    #endif
      "\tpopq %rcx\n"
      "\tjmpq *%rcx\n"

  #elif __i386__

    #define NUM_SAVED 4
      "\tpushl %ebp\n"
      "\tpushl %ebx\n"
      "\tpushl %esi\n"
      "\tpushl %edi\n"
    #if CORO_WIN_TIB
      #undef NUM_SAVED
      #define NUM_SAVED 7
      "\tpushl %fs:0\n"
      "\tpushl %fs:4\n"
      "\tpushl %fs:8\n"
    #endif
      "\tmovl %esp, (%eax)\n"
      "\tmovl (%edx), %esp\n"
    #if CORO_WIN_TIB
      "\tpopl %fs:8\n"
      "\tpopl %fs:4\n"
      "\tpopl %fs:0\n"
    #endif
      "\tpopl %edi\n"
      "\tpopl %esi\n"
      "\tpopl %ebx\n"
      "\tpopl %ebp\n"
      "\tpopl %ecx\n"
      "\tjmpl *%ecx\n"

  #elif CORO_ARM /* untested, what about thumb, neon, iwmmxt? */

    #if __ARM_PCS_VFP
      "\tvpush {d8-d15}\n"
      #define NUM_SAVED (9 + 8 * 2)
    #else
      #define NUM_SAVED 9
    #endif
      "\tpush {r4-r11,lr}\n"
      "\tstr sp, [r0]\n"
      "\tldr sp, [r1]\n"
      "\tpop {r4-r11,lr}\n"
    #if __ARM_PCS_VFP
      "\tvpop {d8-d15}\n"
    #endif
      "\tmov r15, lr\n"

  #elif __mips__ && 0 /* untested, 32 bit only */

    #define NUM_SAVED (12 + 8 * 2)
    /* TODO: n64/o64, lw=>ld */

      "\t.set nomips16\n"
      "\t.frame $sp,112,$31\n"
    #if __mips_soft_float
      "\taddiu $sp,$sp,-44\n"
    #else
      "\taddiu $sp,$sp,-112\n"
      "\ts.d $f30,88($sp)\n"
      "\ts.d $f28,80($sp)\n"
      "\ts.d $f26,72($sp)\n"
      "\ts.d $f24,64($sp)\n"
      "\ts.d $f22,56($sp)\n"
      "\ts.d $f20,48($sp)\n"
    #endif
      "\tsw $28,40($sp)\n"
      "\tsw $31,36($sp)\n"
      "\tsw $fp,32($sp)\n"
      "\tsw $23,28($sp)\n"
      "\tsw $22,24($sp)\n"
      "\tsw $21,20($sp)\n"
      "\tsw $20,16($sp)\n"
      "\tsw $19,12($sp)\n"
      "\tsw $18,8($sp)\n"
      "\tsw $17,4($sp)\n"
      "\tsw $16,0($sp)\n"
      "\tsw $sp,0($4)\n"
      "\tlw $sp,0($5)\n"
    #if !__mips_soft_float
      "\tl.d $f30,88($sp)\n"
      "\tl.d $f28,80($sp)\n"
      "\tl.d $f26,72($sp)\n"
      "\tl.d $f24,64($sp)\n"
      "\tl.d $f22,56($sp)\n"
      "\tl.d $f20,48($sp)\n"
    #endif
      "\tlw $28,40($sp)\n"
      "\tlw $31,36($sp)\n"
      "\tlw $fp,32($sp)\n"
      "\tlw $23,28($sp)\n"
      "\tlw $22,24($sp)\n"
      "\tlw $21,20($sp)\n"
      "\tlw $20,16($sp)\n"
      "\tlw $19,12($sp)\n"
      "\tlw $18,8($sp)\n"
      "\tlw $17,4($sp)\n"
      "\tlw $16,0($sp)\n"
      "\tj $31\n"
    #if __mips_soft_float
      "\taddiu $sp,$sp,44\n"
    #else
      "\taddiu $sp,$sp,112\n"
    #endif

  #else
    #error unsupported architecture
  #endif
  );

# endif

void
coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize)
{
  coro_context nctx;
# if CORO_SJLJ
  stack_t ostk, nstk;
  struct sigaction osa, nsa;
  sigset_t nsig, osig;
# endif

  if (!coro)
    return;

  coro_init_func = coro;
  coro_init_arg  = arg;

  new_coro    = ctx;
  create_coro = &nctx;

# if CORO_SJLJ
  /* we use SIGUSR2. first block it, then fiddle with it. */

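  /*
   * Bootstrap outline: with SIGUSR2 blocked, install the trampoline with
   * SA_ONSTACK, point the alternate signal stack at the coroutine's stack,
   * raise SIGUSR2 so the trampoline captures a jump buffer there, then
   * carefully restore the previous signal stack, handler and mask.  This is
   * the technique from the Engelschall paper cited above.
   */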
  sigemptyset (&nsig);
  sigaddset (&nsig, SIGUSR2);
  sigprocmask (SIG_BLOCK, &nsig, &osig);

  nsa.sa_handler = trampoline;
  sigemptyset (&nsa.sa_mask);
  nsa.sa_flags = SA_ONSTACK;

  if (sigaction (SIGUSR2, &nsa, &osa))
    {
      perror ("sigaction");
      abort ();
    }

  /* set the new stack */
  nstk.ss_sp    = STACK_ADJUST_PTR (sptr, ssize); /* yes, some platforms (IRIX) get this wrong. */
  nstk.ss_size  = STACK_ADJUST_SIZE (sptr, ssize);
  nstk.ss_flags = 0;

  if (sigaltstack (&nstk, &ostk) < 0)
    {
      perror ("sigaltstack");
      abort ();
    }

  trampoline_done = 0;
  kill (getpid (), SIGUSR2);
  sigfillset (&nsig); sigdelset (&nsig, SIGUSR2);

  while (!trampoline_done)
    sigsuspend (&nsig);

  sigaltstack (0, &nstk);
  nstk.ss_flags = SS_DISABLE;
  if (sigaltstack (&nstk, 0) < 0)
    perror ("sigaltstack");

  sigaltstack (0, &nstk);
  if (~nstk.ss_flags & SS_DISABLE)
    abort ();

  if (~ostk.ss_flags & SS_DISABLE)
    sigaltstack (&ostk, 0);

  sigaction (SIGUSR2, &osa, 0);
  sigprocmask (SIG_SETMASK, &osig, 0);

# elif CORO_LOSER

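  /*
   * The setjmp-based backends (this one and CORO_LINUX below) take a real
   * jump buffer with coro_setjmp and then patch its saved program counter
   * and stack pointer so that the first transfer longjmps into coro_init on
   * the new stack.  The member names and offsets are specific to each C
   * library, hence the per-libc cases.
   */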
  coro_setjmp (ctx->env);
  #if __CYGWIN__ && __i386__
    ctx->env[8] = (long) coro_init;
    ctx->env[7] = (long) ((char *)sptr + ssize) - sizeof (long);
  #elif __CYGWIN__ && __x86_64__
    ctx->env[7] = (long) coro_init;
    ctx->env[6] = (long) ((char *)sptr + ssize) - sizeof (long);
  #elif defined __MINGW32__
    ctx->env[5] = (long) coro_init;
    ctx->env[4] = (long) ((char *)sptr + ssize) - sizeof (long);
  #elif defined _M_IX86
    ((_JUMP_BUFFER *)&ctx->env)->Eip = (long) coro_init;
    ((_JUMP_BUFFER *)&ctx->env)->Esp = (long) STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
  #elif defined _M_AMD64
    ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64) coro_init;
    ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64);
  #elif defined _M_IA64
    ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64) coro_init;
    ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64);
  #else
    #error "microsoft libc or architecture not supported"
  #endif

# elif CORO_LINUX

  coro_setjmp (ctx->env);
  #if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (JB_PC) && defined (JB_SP)
    ctx->env[0].__jmpbuf[JB_PC] = (long) coro_init;
    ctx->env[0].__jmpbuf[JB_SP] = (long) STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
  #elif __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (__mc68000__)
    ctx->env[0].__jmpbuf[0].__aregs[0] = (long int)coro_init;
    ctx->env[0].__jmpbuf[0].__sp = (int *) ((char *)sptr + ssize) - sizeof (long);
  #elif defined (__GNU_LIBRARY__) && defined (__i386__)
    ctx->env[0].__jmpbuf[0].__pc = (char *) coro_init;
    ctx->env[0].__jmpbuf[0].__sp = (void *) ((char *)sptr + ssize) - sizeof (long);
  #elif defined (__GNU_LIBRARY__) && defined (__x86_64__)
    ctx->env[0].__jmpbuf[JB_PC] = (long) coro_init;
    ctx->env[0].__jmpbuf[0].__sp = (void *) ((char *)sptr + ssize) - sizeof (long);
  #else
    #error "linux libc or architecture not supported"
  #endif

# elif CORO_IRIX

  coro_setjmp (ctx->env, 0);
  ctx->env[JB_PC] = (__uint64_t)coro_init;
  ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);

# elif CORO_ASM

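  /*
   * For the assembler backend the initial stack is laid out by hand: on x86
   * a fake return address pointing at coro_init (plus one extra word for
   * alignment), optionally the three CORO_WIN_TIB words, and NUM_SAVED
   * zeroed slots for the registers the asm coro_transfer above will pop.
   * On ARM nothing is pushed; the entry point and arguments travel in the
   * restored r4, r5 and lr registers instead.
   */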
  #if __i386__ || __x86_64__
    ctx->sp = (void **)(ssize + (char *)sptr);
    *--ctx->sp = (void *)abort; /* needed for alignment only */
    *--ctx->sp = (void *)coro_init;
    #if CORO_WIN_TIB
      *--ctx->sp = 0;                    /* ExceptionList */
      *--ctx->sp = (char *)sptr + ssize; /* StackBase */
      *--ctx->sp = sptr;                 /* StackLimit */
    #endif
  #elif CORO_ARM
    /* return address stored in lr register, don't push anything */
  #else
    #error unsupported architecture
  #endif

  ctx->sp -= NUM_SAVED;
  memset (ctx->sp, 0, sizeof (*ctx->sp) * NUM_SAVED);

  #if __i386__ || __x86_64__
    /* done already */
  #elif CORO_ARM
    ctx->sp[0] = coro; /* r4 */
    ctx->sp[1] = arg;  /* r5 */
    ctx->sp[8] = (char *)coro_init; /* lr */
  #else
    #error unsupported architecture
  #endif

# elif CORO_UCONTEXT

  getcontext (&(ctx->uc));

  ctx->uc.uc_link           = 0;
  ctx->uc.uc_stack.ss_sp    = sptr;
  ctx->uc.uc_stack.ss_size  = (size_t)ssize;
  ctx->uc.uc_stack.ss_flags = 0;

  makecontext (&(ctx->uc), (void (*)())coro_init, 0);

# endif

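  /* switch to the new coroutine once so coro_init can copy func/arg out of
     the globals and immediately transfer back; when the caller later
     transfers to ctx for real, coro_init resumes and invokes the function */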
  coro_transfer (create_coro, new_coro);
}

/*****************************************************************************/
/* pthread backend                                                           */
/*****************************************************************************/
#elif CORO_PTHREAD

/* this mutex will be locked by the running coroutine */
pthread_mutex_t coro_mutex = PTHREAD_MUTEX_INITIALIZER;

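/*
 * In this backend every coroutine is a real thread, but only one runs at a
 * time: the running coroutine holds coro_mutex, and each context owns a
 * condition variable.  coro_transfer signals the target's cv and then waits
 * on the caller's own cv, releasing the mutex while blocked, so the lock
 * (and the CPU) is handed from one thread to the other.
 */
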
struct coro_init_args
{
  coro_func func;
  void *arg;
  coro_context *self, *main;
};

static pthread_t null_tid;

/* I'd so love to cast pthread_mutex_unlock to void (*)(void *)... */
static void
mutex_unlock_wrapper (void *arg)
{
  pthread_mutex_unlock ((pthread_mutex_t *)arg);
}

static void *
coro_init (void *args_)
{
  struct coro_init_args *args = (struct coro_init_args *)args_;
  coro_func func = args->func;
  void *arg = args->arg;

  pthread_mutex_lock (&coro_mutex);

  /* we try to be good citizens and use deferred cancellation and cleanup handlers */
  pthread_cleanup_push (mutex_unlock_wrapper, &coro_mutex);
  coro_transfer (args->self, args->main);
  func (arg);
  pthread_cleanup_pop (1);

  return 0;
}

void
coro_transfer (coro_context *prev, coro_context *next)
{
  pthread_cond_signal (&next->cv);
  pthread_cond_wait (&prev->cv, &coro_mutex);
#if __FreeBSD__ /* freebsd is of course broken and needs manual testcancel calls... yay... */
  pthread_testcancel ();
#endif
}

void
coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize)
{
  static coro_context nctx;
  static int once;

  if (!once)
    {
      once = 1;

      pthread_mutex_lock (&coro_mutex);
      pthread_cond_init (&nctx.cv, 0);
      null_tid = pthread_self ();
    }

  pthread_cond_init (&ctx->cv, 0);

  if (coro)
    {
      pthread_attr_t attr;
      struct coro_init_args args;

      args.func = coro;
      args.arg  = arg;
      args.self = ctx;
      args.main = &nctx;

      pthread_attr_init (&attr);
#if __UCLIBC__
      /* exists, but is borked */
      /*pthread_attr_setstacksize (&attr, (size_t)ssize);*/
#elif __CYGWIN__
      /* POSIX, not here */
      pthread_attr_setstacksize (&attr, (size_t)ssize);
#else
      pthread_attr_setstack (&attr, sptr, (size_t)ssize);
#endif
      pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
      pthread_create (&ctx->id, &attr, coro_init, &args);

      coro_transfer (args.main, args.self);
    }
  else
    ctx->id = null_tid;
}

void
coro_destroy (coro_context *ctx)
{
  if (!pthread_equal (ctx->id, null_tid))
    {
      pthread_cancel (ctx->id);
      pthread_mutex_unlock (&coro_mutex);
      pthread_join (ctx->id, 0);
      pthread_mutex_lock (&coro_mutex);
    }

  pthread_cond_destroy (&ctx->cv);
}

/*****************************************************************************/
/* fiber backend                                                             */
/*****************************************************************************/
#elif CORO_FIBER

#define WIN32_LEAN_AND_MEAN
#if _WIN32_WINNT < 0x0400
  #undef _WIN32_WINNT
  #define _WIN32_WINNT 0x0400
#endif
#include <windows.h>

VOID CALLBACK
coro_init (PVOID arg)
{
  coro_context *ctx = (coro_context *)arg;

  ctx->coro (ctx->arg);
}

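/*
 * coro_transfer lazily turns the calling thread into a fiber the first time
 * it is used as the source of a switch.  The comparison against the magic
 * value 0x1e00 is a workaround: on some Windows versions GetCurrentFiber ()
 * reportedly returns that marker instead of NULL when the thread has not
 * been converted yet.
 */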
void
coro_transfer (coro_context *prev, coro_context *next)
{
  if (!prev->fiber)
    {
      prev->fiber = GetCurrentFiber ();

      if (prev->fiber == 0 || prev->fiber == (void *)0x1e00)
        prev->fiber = ConvertThreadToFiber (0);
    }

  SwitchToFiber (next->fiber);
}

void
coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize)
{
  ctx->fiber = 0;
  ctx->coro  = coro;
  ctx->arg   = arg;

  if (!coro)
    return;

  ctx->fiber = CreateFiber (ssize, coro_init, ctx);
}

void
coro_destroy (coro_context *ctx)
{
  DeleteFiber (ctx->fiber);
}

#else
#error unsupported backend
#endif

/*****************************************************************************/
/* stack management                                                          */
/*****************************************************************************/
#if CORO_STACKALLOC

#include <stdlib.h>

#ifndef _WIN32
# include <unistd.h>
#endif

#if CORO_USE_VALGRIND
# include <valgrind/valgrind.h>
#endif

#if _POSIX_MAPPED_FILES
# include <sys/mman.h>
# define CORO_MMAP 1
# ifndef MAP_ANONYMOUS
#  ifdef MAP_ANON
#   define MAP_ANONYMOUS MAP_ANON
#  else
#   undef CORO_MMAP
#  endif
# endif
# include <limits.h>
#else
# undef CORO_MMAP
#endif

#if _POSIX_MEMORY_PROTECTION
# ifndef CORO_GUARDPAGES
#  define CORO_GUARDPAGES 4
# endif
#else
# undef CORO_GUARDPAGES
#endif

#if !CORO_MMAP
# undef CORO_GUARDPAGES
#endif

#if !__i386__ && !__x86_64__ && !__powerpc__ && !__arm__ && !__aarch64__ && !__m68k__ && !__alpha__ && !__mips__ && !__sparc64__
# undef CORO_GUARDPAGES
#endif

#ifndef CORO_GUARDPAGES
# define CORO_GUARDPAGES 0
#endif

#if !PAGESIZE
  #if !CORO_MMAP
    #define PAGESIZE 4096
  #else
    static size_t
    coro_pagesize (void)
    {
      static size_t pagesize;

      if (!pagesize)
        pagesize = sysconf (_SC_PAGESIZE);

      return pagesize;
    }

    #define PAGESIZE coro_pagesize ()
  #endif
#endif

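/*
 * coro_stack_alloc takes the requested size in pointer-sized units (0 means
 * a default of 256k units) and rounds the byte size up to whole pages.
 * With mmap available the stack is mapped anonymously, first with PROT_EXEC
 * and, if that is refused, without it, and CORO_GUARDPAGES inaccessible
 * pages are placed below the usable area to catch overflows; otherwise it
 * falls back to plain malloc.  The fiber backend does not allocate anything
 * here, since CreateFiber manages its own stack.
 */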
int
coro_stack_alloc (struct coro_stack *stack, unsigned int size)
{
  if (!size)
    size = 256 * 1024;

  stack->sptr = 0;
  stack->ssze = ((size_t)size * sizeof (void *) + PAGESIZE - 1) / PAGESIZE * PAGESIZE;

#if CORO_FIBER

  stack->sptr = (void *)stack;
  return 1;

#else

  size_t ssze = stack->ssze + CORO_GUARDPAGES * PAGESIZE;
  void *base;

  #if CORO_MMAP
    /* mmap supposedly does allocate-on-write for us */
    base = mmap (0, ssze, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (base == (void *)-1)
      {
        /* some systems don't let us have executable heap */
        /* we assume they won't need executable stack in that case */
        base = mmap (0, ssze, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (base == (void *)-1)
          return 0;
      }

    #if CORO_GUARDPAGES
      mprotect (base, CORO_GUARDPAGES * PAGESIZE, PROT_NONE);
    #endif

    base = (void *)((char *)base + CORO_GUARDPAGES * PAGESIZE);
  #else
    base = malloc (ssze);
    if (!base)
      return 0;
  #endif

  #if CORO_USE_VALGRIND
    stack->valgrind_id = VALGRIND_STACK_REGISTER ((char *)base, ((char *)base) + ssze - CORO_GUARDPAGES * PAGESIZE);
  #endif

  stack->sptr = base;
  return 1;

#endif
}

void
coro_stack_free (struct coro_stack *stack)
{
#if CORO_FIBER
  /* nop */
#else
  #if CORO_USE_VALGRIND
    VALGRIND_STACK_DEREGISTER (stack->valgrind_id);
  #endif

  #if CORO_MMAP
    if (stack->sptr)
      munmap ((void *)((char *)stack->sptr - CORO_GUARDPAGES * PAGESIZE),
              stack->ssze + CORO_GUARDPAGES * PAGESIZE);
  #else
    free (stack->sptr);
  #endif
#endif
}

#endif