1 |
root |
1.1 |
/* |
2 |
root |
1.56 |
* Copyright (c) 2001-2011 Marc Alexander Lehmann <schmorp@schmorp.de> |
3 |
root |
1.63 |
* |
4 |
root |
1.1 |
* Redistribution and use in source and binary forms, with or without modifica- |
5 |
|
|
* tion, are permitted provided that the following conditions are met: |
6 |
root |
1.63 |
* |
7 |
root |
1.1 |
* 1. Redistributions of source code must retain the above copyright notice, |
8 |
|
|
* this list of conditions and the following disclaimer. |
9 |
root |
1.63 |
* |
10 |
root |
1.1 |
* 2. Redistributions in binary form must reproduce the above copyright |
11 |
|
|
* notice, this list of conditions and the following disclaimer in the |
12 |
|
|
* documentation and/or other materials provided with the distribution. |
13 |
root |
1.63 |
* |
14 |
root |
1.1 |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED |
15 |
|
|
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- |
16 |
|
|
* CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO |
17 |
|
|
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- |
18 |
|
|
* CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 |
|
|
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; |
20 |
|
|
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, |
21 |
|
|
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- |
22 |
|
|
* ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED |
23 |
|
|
* OF THE POSSIBILITY OF SUCH DAMAGE. |
24 |
|
|
* |
25 |
root |
1.29 |
* Alternatively, the contents of this file may be used under the terms of |
26 |
|
|
* the GNU General Public License ("GPL") version 2 or any later version, |
27 |
|
|
* in which case the provisions of the GPL are applicable instead of |
28 |
|
|
* the above. If you wish to allow the use of your version of this file |
29 |
|
|
* only under the terms of the GPL and not to allow others to use your |
30 |
|
|
* version of this file under the BSD license, indicate your decision |
31 |
|
|
* by deleting the provisions above and replace them with the notice |
32 |
|
|
* and other provisions required by the GPL. If you do not delete the |
33 |
|
|
* provisions above, a recipient may use your version of this file under |
34 |
|
|
* either the BSD or the GPL. |
35 |
|
|
* |
36 |
root |
1.1 |
* This library is modelled strictly after Ralf S. Engelschalls article at |
37 |
root |
1.29 |
* http://www.gnu.org/software/pth/rse-pmt.ps. So most of the credit must |
38 |
root |
1.1 |
* go to Ralf S. Engelschall <rse@engelschall.com>. |
39 |
|
|
*/ |
40 |
|
|
|
41 |
|
|
#include "coro.h" |
42 |
|
|
|
43 |
root |
1.66 |
#include <stddef.h> |
44 |
root |
1.38 |
#include <string.h> |
45 |
|
|
|
46 |
root |
1.39 |
/*****************************************************************************/ |
47 |
|
|
/* ucontext/setjmp/asm backends */ |
48 |
|
|
/*****************************************************************************/ |
/* this section implements the ucontext/setjmp/asm family of backends */
#if CORO_UCONTEXT || CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM

# if CORO_UCONTEXT
#  include <stddef.h>
# endif

/* STACK_ADJUST_PTR/STACK_ADJUST_SIZE map a (base pointer, size) stack
 * area to whatever the platform's sigaltstack/jmpbuf actually expects:
 * some want the top of the stack, some the bottom, some reserve a few
 * bytes at the top.  can be pre-defined by the user to override. */
# if !defined(STACK_ADJUST_PTR)
#  if __sgi
/* IRIX is decidedly NON-unix */
#   define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
#   define STACK_ADJUST_SIZE(sp,ss) ((ss) - 8)
#  elif (__i386__ && CORO_LINUX) || (_M_IX86 && CORO_LOSER)
/* stacks grow down: these platforms want a pointer to the top */
#   define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss))
#   define STACK_ADJUST_SIZE(sp,ss) (ss)
#  elif (__amd64__ && CORO_LINUX) || ((_M_AMD64 || _M_IA64) && CORO_LOSER)
/* top of stack, minus 8 bytes for alignment/return-address slack */
#   define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
#   define STACK_ADJUST_SIZE(sp,ss) (ss)
#  else
/* default: pass the area through unmodified */
#   define STACK_ADJUST_PTR(sp,ss) (sp)
#   define STACK_ADJUST_SIZE(sp,ss) (ss)
#  endif
# endif

# include <stdlib.h>

# if CORO_SJLJ
#  include <stdio.h>
#  include <signal.h>
#  include <unistd.h>
# endif
79 |
root |
1.1 |
|
/* arguments for the coroutine currently being created; file-scope because
 * the initial jump into coro_init cannot carry parameters.  only valid
 * for the duration of a single coro_create call. */
static coro_func coro_init_func;
static void *coro_init_arg;
static coro_context *new_coro, *create_coro;

/*
 * First function to run on a new coroutine's stack: copy the start
 * function and argument into locals, transfer back to the creator once
 * (so coro_create can return), and on the next transfer in, run the
 * coroutine body.  Never returns - a coroutine function returning is
 * an error, hence the abort below.
 */
static void
coro_init (void)
{
  /* volatile: these must survive the coro_transfer below, which may
   * clobber register-held locals on some backends */
  volatile coro_func func = coro_init_func;
  volatile void *arg = coro_init_arg;

  /* hand control back to coro_create; execution resumes here on the
   * first real transfer into the new coroutine */
  coro_transfer (new_coro, create_coro);

#if __GCC_HAVE_DWARF2_CFI_ASM && __amd64
  /*asm (".cfi_startproc");*/
  /*asm (".cfi_undefined rip");*/
#endif

  func ((void *)arg);

#if __GCC_HAVE_DWARF2_CFI_ASM && __amd64
  /*asm (".cfi_endproc");*/
#endif

  /* the new coro returned. bad. just abort() for now */
  abort ();
}
106 |
|
|
|
# if CORO_SJLJ

/* set to 1 by the trampoline once the new context has been captured */
static volatile int trampoline_done;

/* trampoline signal handler: coro_create arranges for this to run on a
 * sigaltstack placed over the new coroutine's stack.  The first pass
 * (setjmp returns 0) merely records the context and reports completion;
 * the later longjmp into env restarts here and enters coro_init. */
static void
trampoline (int sig)
{
  if (coro_setjmp (new_coro->env))
    coro_init (); /* start it */
  else
    trampoline_done = 1;
}

# endif
122 |
|
|
|
# if CORO_ASM

/* the hand-written assembly below only supports ARMv7 among the arm variants */
#if __arm__ && \
     (defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
      || defined __ARM_ARCH_7R__ || defined __ARM_ARCH_7M__ \
      || __ARM_ARCH == 7)
#define CORO_ARM 1
#endif

/* on windows/cygwin, the TIB exception list and stack bounds are
 * per-coroutine state and must be saved/restored as well */
#if _WIN32 || __CYGWIN__
#define CORO_WIN_TIB 1
#endif

/*
 * coro_transfer (prev, next): save all callee-saved registers on the
 * current stack, store the stack pointer into *prev, load the stack
 * pointer from *next, restore the callee-saved registers and jump to
 * the return address found on the new stack.  NUM_SAVED is the number
 * of pointer-sized slots pushed; coro_create relies on it.
 */
asm (
   "\t.text\n"
#if _WIN32 || __CYGWIN__
   "\t.globl _coro_transfer\n"
   "_coro_transfer:\n"
#else
   "\t.globl coro_transfer\n"
   "coro_transfer:\n"
#endif
   /* windows does not follow the SysV amd64 ABI and uses different registers */
   /* http://blogs.msdn.com/freik/archive/2005/03/17/398200.aspx */
#if __amd64

  #if _WIN32 || __CYGWIN__
    #define NUM_SAVED 29
   /* BUGFIX: this string previously ended in "\t" instead of "\n", which
    * concatenated the subq with the following movaps into one invalid
    * assembler statement - gas separates statements with newlines. */
   "\tsubq $168, %rsp\n" /* one dummy qword to improve alignment */
   "\tmovaps %xmm6, (%rsp)\n"
   "\tmovaps %xmm7, 16(%rsp)\n"
   "\tmovaps %xmm8, 32(%rsp)\n"
   "\tmovaps %xmm9, 48(%rsp)\n"
   "\tmovaps %xmm10, 64(%rsp)\n"
   "\tmovaps %xmm11, 80(%rsp)\n"
   "\tmovaps %xmm12, 96(%rsp)\n"
   "\tmovaps %xmm13, 112(%rsp)\n"
   "\tmovaps %xmm14, 128(%rsp)\n"
   "\tmovaps %xmm15, 144(%rsp)\n"
   "\tpushq %rsi\n"
   "\tpushq %rdi\n"
   "\tpushq %rbp\n"
   "\tpushq %rbx\n"
   "\tpushq %r12\n"
   "\tpushq %r13\n"
   "\tpushq %r14\n"
   "\tpushq %r15\n"
  #if CORO_WIN_TIB
   "\tpushq %fs:0x0\n"
   "\tpushq %fs:0x8\n"
   "\tpushq %fs:0xc\n"
  #endif
   "\tmovq %rsp, (%rcx)\n"
   "\tmovq (%rdx), %rsp\n"
  #if CORO_WIN_TIB
   "\tpopq %fs:0xc\n"
   "\tpopq %fs:0x8\n"
   "\tpopq %fs:0x0\n"
  #endif
   "\tpopq %r15\n"
   "\tpopq %r14\n"
   "\tpopq %r13\n"
   "\tpopq %r12\n"
   "\tpopq %rbx\n"
   "\tpopq %rbp\n"
   "\tpopq %rdi\n"
   "\tpopq %rsi\n"
   "\tmovaps (%rsp), %xmm6\n"
   "\tmovaps 16(%rsp), %xmm7\n"
   "\tmovaps 32(%rsp), %xmm8\n"
   "\tmovaps 48(%rsp), %xmm9\n"
   "\tmovaps 64(%rsp), %xmm10\n"
   "\tmovaps 80(%rsp), %xmm11\n"
   "\tmovaps 96(%rsp), %xmm12\n"
   "\tmovaps 112(%rsp), %xmm13\n"
   "\tmovaps 128(%rsp), %xmm14\n"
   "\tmovaps 144(%rsp), %xmm15\n"
   "\taddq $168, %rsp\n"
  #else
    #define NUM_SAVED 6
   "\tpushq %rbp\n"
   "\tpushq %rbx\n"
   "\tpushq %r12\n"
   "\tpushq %r13\n"
   "\tpushq %r14\n"
   "\tpushq %r15\n"
   "\tmovq %rsp, (%rdi)\n"
   "\tmovq (%rsi), %rsp\n"
   "\tpopq %r15\n"
   "\tpopq %r14\n"
   "\tpopq %r13\n"
   "\tpopq %r12\n"
   "\tpopq %rbx\n"
   "\tpopq %rbp\n"
  #endif
   /* pop the return address left on the new stack and jump to it */
   "\tpopq %rcx\n"
   "\tjmpq *%rcx\n"

#elif __i386__

  #define NUM_SAVED 4
   "\tpushl %ebp\n"
   "\tpushl %ebx\n"
   "\tpushl %esi\n"
   "\tpushl %edi\n"
  #if CORO_WIN_TIB
    #undef NUM_SAVED
    #define NUM_SAVED 7
   "\tpushl %fs:0\n"
   "\tpushl %fs:4\n"
   "\tpushl %fs:8\n"
  #endif
   /* NOTE(review): arguments appear to arrive in %eax/%edx, i.e. the
    * prototype must be declared regparm(2) - confirm against coro.h */
   "\tmovl %esp, (%eax)\n"
   "\tmovl (%edx), %esp\n"
  #if CORO_WIN_TIB
   "\tpopl %fs:8\n"
   "\tpopl %fs:4\n"
   "\tpopl %fs:0\n"
  #endif
   "\tpopl %edi\n"
   "\tpopl %esi\n"
   "\tpopl %ebx\n"
   "\tpopl %ebp\n"
   "\tpopl %ecx\n"
   "\tjmpl *%ecx\n"

#elif CORO_ARM /* untested, what about thumb, neon, iwmmxt? */

  #if __ARM_PCS_VFP
   "\tvpush {d8-d15}\n"
    #define NUM_SAVED (9 + 8 * 2)
  #else
    #define NUM_SAVED 9
  #endif
   "\tpush {r4-r11,lr}\n"
   "\tstr sp, [r0]\n"
   "\tldr sp, [r1]\n"
   "\tpop {r4-r11,lr}\n"
  #if __ARM_PCS_VFP
   "\tvpop {d8-d15}\n"
  #endif
   "\tmov r15, lr\n"

#elif __mips__ && 0 /* untested, 32 bit only */

  #define NUM_SAVED (12 + 8 * 2)
   /* TODO: n64/o64, lw=>ld */

   "\t.set nomips16\n"
   "\t.frame $sp,112,$31\n"
  #if __mips_soft_float
   "\taddiu $sp,$sp,-44\n"
  #else
   "\taddiu $sp,$sp,-112\n"
   "\ts.d $f30,88($sp)\n"
   "\ts.d $f28,80($sp)\n"
   "\ts.d $f26,72($sp)\n"
   "\ts.d $f24,64($sp)\n"
   "\ts.d $f22,56($sp)\n"
   "\ts.d $f20,48($sp)\n"
  #endif
   "\tsw $28,40($sp)\n"
   "\tsw $31,36($sp)\n"
   "\tsw $fp,32($sp)\n"
   "\tsw $23,28($sp)\n"
   "\tsw $22,24($sp)\n"
   "\tsw $21,20($sp)\n"
   "\tsw $20,16($sp)\n"
   "\tsw $19,12($sp)\n"
   "\tsw $18,8($sp)\n"
   "\tsw $17,4($sp)\n"
   "\tsw $16,0($sp)\n"
   "\tsw $sp,0($4)\n"
   "\tlw $sp,0($5)\n"
  #if !__mips_soft_float
   "\tl.d $f30,88($sp)\n"
   "\tl.d $f28,80($sp)\n"
   "\tl.d $f26,72($sp)\n"
   "\tl.d $f24,64($sp)\n"
   "\tl.d $f22,56($sp)\n"
   "\tl.d $f20,48($sp)\n"
  #endif
   "\tlw $28,40($sp)\n"
   "\tlw $31,36($sp)\n"
   "\tlw $fp,32($sp)\n"
   "\tlw $23,28($sp)\n"
   "\tlw $22,24($sp)\n"
   "\tlw $21,20($sp)\n"
   "\tlw $20,16($sp)\n"
   "\tlw $19,12($sp)\n"
   "\tlw $18,8($sp)\n"
   "\tlw $17,4($sp)\n"
   "\tlw $16,0($sp)\n"
   "\tj $31\n"
   /* the addiu below executes in the branch delay slot of the j above */
  #if __mips_soft_float
   "\taddiu $sp,$sp,44\n"
  #else
   "\taddiu $sp,$sp,112\n"
  #endif

#else
  #error unsupported architecture
#endif
   );

# endif
329 |
root |
1.38 |
|
/*
 * Create a new coroutine running coro(arg) on the stack [sptr, sptr+ssize).
 * With coro == 0 this is a no-op for these backends (used elsewhere to set
 * up an "empty" context).  The new coroutine is bounced into once via the
 * trampoline machinery so it captures its context, then control returns
 * here before coro_create returns.  Not reentrant: uses file-scope
 * coro_init_func/coro_init_arg/new_coro/create_coro for the handoff.
 */
void
coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize)
{
  coro_context nctx;
# if CORO_SJLJ
  stack_t ostk, nstk;
  struct sigaction osa, nsa;
  sigset_t nsig, osig;
# endif

  if (!coro)
    return;

  /* publish the start function/argument for coro_init to pick up */
  coro_init_func = coro;
  coro_init_arg  = arg;

  new_coro    = ctx;
  create_coro = &nctx;

# if CORO_SJLJ
  /* we use SIGUSR2. first block it, then fiddle with it. */

  sigemptyset (&nsig);
  sigaddset (&nsig, SIGUSR2);
  sigprocmask (SIG_BLOCK, &nsig, &osig);

  /* deliver the handler on the alternate stack, i.e. the new coro's stack */
  nsa.sa_handler = trampoline;
  sigemptyset (&nsa.sa_mask);
  nsa.sa_flags = SA_ONSTACK;

  if (sigaction (SIGUSR2, &nsa, &osa))
    {
      perror ("sigaction");
      abort ();
    }

  /* set the new stack */
  nstk.ss_sp = STACK_ADJUST_PTR (sptr, ssize); /* yes, some platforms (IRIX) get this wrong. */
  nstk.ss_size = STACK_ADJUST_SIZE (sptr, ssize);
  nstk.ss_flags = 0;

  if (sigaltstack (&nstk, &ostk) < 0)
    {
      perror ("sigaltstack");
      abort ();
    }

  /* raise SIGUSR2 and wait (with only SIGUSR2 unblocked) until the
   * trampoline has run on the new stack and captured its context */
  trampoline_done = 0;
  kill (getpid (), SIGUSR2);
  sigfillset (&nsig); sigdelset (&nsig, SIGUSR2);

  while (!trampoline_done)
    sigsuspend (&nsig);

  /* tear the alternate stack down again and verify it is disabled */
  sigaltstack (0, &nstk);
  nstk.ss_flags = SS_DISABLE;
  if (sigaltstack (&nstk, 0) < 0)
    perror ("sigaltstack");

  sigaltstack (0, &nstk);
  if (~nstk.ss_flags & SS_DISABLE)
    abort ();

  /* restore the caller's alternate stack, handler and signal mask */
  if (~ostk.ss_flags & SS_DISABLE)
    sigaltstack (&ostk, 0);

  sigaction (SIGUSR2, &osa, 0);
  sigprocmask (SIG_SETMASK, &osig, 0);

# elif CORO_LOSER

  /* capture a context, then patch the saved PC/SP inside the jmp_buf so
   * the longjmp lands in coro_init on the new stack.  The magic indices
   * depend on the libc's jmp_buf layout. */
  coro_setjmp (ctx->env);
  #if __CYGWIN__ && __i386__
    ctx->env[8] = (long) coro_init;
    ctx->env[7] = (long) ((char *)sptr + ssize) - sizeof (long);
  #elif __CYGWIN__ && __x86_64__
    ctx->env[7] = (long) coro_init;
    ctx->env[6] = (long) ((char *)sptr + ssize) - sizeof (long);
  #elif defined __MINGW32__
    ctx->env[5] = (long) coro_init;
    ctx->env[4] = (long) ((char *)sptr + ssize) - sizeof (long);
  #elif defined _M_IX86
    ((_JUMP_BUFFER *)&ctx->env)->Eip = (long) coro_init;
    ((_JUMP_BUFFER *)&ctx->env)->Esp = (long) STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
  #elif defined _M_AMD64
    ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64) coro_init;
    ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64);
  #elif defined _M_IA64
    ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64) coro_init;
    ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64);
  #else
    #error "microsoft libc or architecture not supported"
  #endif

# elif CORO_LINUX

  /* same trick as CORO_LOSER, but for the various glibc jmp_buf layouts */
  coro_setjmp (ctx->env);
  #if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (JB_PC) && defined (JB_SP)
    ctx->env[0].__jmpbuf[JB_PC] = (long) coro_init;
    ctx->env[0].__jmpbuf[JB_SP] = (long) STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
  #elif __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (__mc68000__)
    ctx->env[0].__jmpbuf[0].__aregs[0] = (long int)coro_init;
    ctx->env[0].__jmpbuf[0].__sp = (int *) ((char *)sptr + ssize) - sizeof (long);
  #elif defined (__GNU_LIBRARY__) && defined (__i386__)
    ctx->env[0].__jmpbuf[0].__pc = (char *) coro_init;
    ctx->env[0].__jmpbuf[0].__sp = (void *) ((char *)sptr + ssize) - sizeof (long);
  #elif defined (__GNU_LIBRARY__) && defined (__x86_64__)
    ctx->env[0].__jmpbuf[JB_PC] = (long) coro_init;
    ctx->env[0].__jmpbuf[0].__sp = (void *) ((char *)sptr + ssize) - sizeof (long);
  #else
    #error "linux libc or architecture not supported"
  #endif

# elif CORO_IRIX

  coro_setjmp (ctx->env, 0);
  ctx->env[JB_PC] = (__uint64_t)coro_init;
  ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);

# elif CORO_ASM

  /* build an initial stack frame that the asm coro_transfer will "return"
   * from: a fake return address (coro_init), optional TIB words, and
   * NUM_SAVED zeroed register-save slots */
  #if __i386__ || __x86_64__
    ctx->sp = (void **)(ssize + (char *)sptr);
    *--ctx->sp = (void *)abort; /* needed for alignment only */
    *--ctx->sp = (void *)coro_init;
    #if CORO_WIN_TIB
      *--ctx->sp = 0;                    /* ExceptionList */
      *--ctx->sp = (char *)sptr + ssize; /* StackBase */
      *--ctx->sp = sptr;                 /* StackLimit */
    #endif
  #elif CORO_ARM
    /* return address stored in lr register, don't push anything */
  #else
    #error unsupported architecture
  #endif

  ctx->sp -= NUM_SAVED;
  memset (ctx->sp, 0, sizeof (*ctx->sp) * NUM_SAVED);

  #if __i386__ || __x86_64__
    /* done already */
  #elif CORO_ARM
    ctx->sp[0] = coro;              /* r4 */
    ctx->sp[1] = arg;               /* r5 */
    ctx->sp[8] = (char *)coro_init; /* lr */
  #else
    #error unsupported architecture
  #endif

# elif CORO_UCONTEXT

  getcontext (&(ctx->uc));

  ctx->uc.uc_link           = 0;
  ctx->uc.uc_stack.ss_sp    = sptr;
  ctx->uc.uc_stack.ss_size  = (size_t)ssize;
  ctx->uc.uc_stack.ss_flags = 0;

  makecontext (&(ctx->uc), (void (*)())coro_init, 0);

# endif

  /* bounce into the new coroutine once so coro_init can copy its
   * arguments; it transfers straight back to us (create_coro) */
  coro_transfer (create_coro, new_coro);
}
494 |
|
|
|
495 |
|
|
/*****************************************************************************/ |
496 |
|
|
/* pthread backend */ |
497 |
|
|
/*****************************************************************************/ |
498 |
root |
1.40 |
#elif CORO_PTHREAD |
499 |
root |
1.1 |
|
/* this mutex will be locked by the running coroutine */
pthread_mutex_t coro_mutex = PTHREAD_MUTEX_INITIALIZER;

/* arguments handed from coro_create to the newly spawned coroutine
 * thread; lives on the creator's stack, so the new thread must copy
 * what it needs before the creator proceeds */
struct coro_init_args
{
  coro_func func;
  void *arg;
  coro_context *self, *main;
};

/* thread id used to mark contexts that have no thread of their own
 * (set to the creating thread's id on first use) */
static pthread_t null_tid;
511 |
|
|
|
/* I'd so love to cast pthread_mutex_unlock to void (*)(void *)... */
/* adapter with the signature pthread_cleanup_push expects: unlocks the
 * mutex passed as arg */
static void
mutex_unlock_wrapper (void *arg)
{
  pthread_mutex_unlock ((pthread_mutex_t *)arg);
}
518 |
|
|
|
/*
 * Thread start routine for a new coroutine: acquire the coroutine mutex,
 * transfer back to the creating context once (so coro_create can return),
 * then run the user function.  The cleanup handler releases the mutex if
 * the thread is cancelled inside func (see coro_destroy).
 */
static void *
coro_init (void *args_)
{
  struct coro_init_args *args = (struct coro_init_args *)args_;
  coro_func func = args->func; /* copy out: args lives on the creator's stack */
  void *arg = args->arg;

  pthread_mutex_lock (&coro_mutex);

  /* we try to be good citizens and use deferred cancellation and cleanup handlers */
  pthread_cleanup_push (mutex_unlock_wrapper, &coro_mutex);
    coro_transfer (args->self, args->main);
    func (arg);
  pthread_cleanup_pop (1);

  return 0;
}
536 |
|
|
|
/*
 * Switch from coroutine *prev to *next: wake the target's thread and
 * block on our own condition variable.  coro_mutex is held by whichever
 * coroutine is currently running and is released atomically by the wait.
 */
void
coro_transfer (coro_context *prev, coro_context *next)
{
  pthread_cond_signal (&next->cv);
  pthread_cond_wait (&prev->cv, &coro_mutex);
#if __FreeBSD__ /* freebsd is of course broken and needs manual testcancel calls... yay... */
  pthread_testcancel ();
#endif
}
546 |
|
|
|
/*
 * Create a coroutine backed by a real pthread running coro(arg) on the
 * given stack.  With coro == 0, only initialises ctx as an "empty"
 * context (no thread of its own, marked with null_tid).
 *
 * NOTE(review): the `once` flag below is not itself synchronized -
 * presumably coro_create is only ever called from one thread at a time;
 * confirm against the library's documented usage.
 */
void
coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize)
{
  static coro_context nctx; /* context representing the creating thread */
  static int once;

  if (!once)
    {
      once = 1;

      /* first call: the creator becomes the running coroutine and
       * therefore takes the mutex */
      pthread_mutex_lock (&coro_mutex);
      pthread_cond_init (&nctx.cv, 0);
      null_tid = pthread_self ();
    }

  pthread_cond_init (&ctx->cv, 0);

  if (coro)
    {
      pthread_attr_t attr;
      struct coro_init_args args;

      args.func = coro;
      args.arg  = arg;
      args.self = ctx;
      args.main = &nctx;

      pthread_attr_init (&attr);
#if __UCLIBC__
      /* exists, but is borked */
      /*pthread_attr_setstacksize (&attr, (size_t)ssize);*/
#elif __CYGWIN__
      /* POSIX, not here */
      pthread_attr_setstacksize (&attr, (size_t)ssize);
#else
      pthread_attr_setstack (&attr, sptr, (size_t)ssize);
#endif
      pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
      pthread_create (&ctx->id, &attr, coro_init, &args);

      /* handshake: wait until the new thread has copied args and is
       * parked in its first coro_transfer */
      coro_transfer (args.main, args.self);
    }
  else
    ctx->id = null_tid;
}
592 |
|
|
|
/*
 * Destroy a coroutine context: cancel and join its thread (unless it is
 * an "empty" context marked with null_tid) and dispose of its condition
 * variable.  The mutex must be dropped across the join so the cancelled
 * coroutine can run its cleanup handler (which unlocks the mutex).
 */
void
coro_destroy (coro_context *ctx)
{
  if (!pthread_equal (ctx->id, null_tid))
    {
      pthread_cancel (ctx->id);
      pthread_mutex_unlock (&coro_mutex); /* let the other coro run */
      pthread_join (ctx->id, 0);
      pthread_mutex_lock (&coro_mutex);
    }

  pthread_cond_destroy (&ctx->cv);
}
606 |
root |
1.30 |
|
607 |
root |
1.63 |
/*****************************************************************************/ |
608 |
|
|
/* fiber backend */ |
609 |
|
|
/*****************************************************************************/ |
610 |
|
|
#elif CORO_FIBER |
611 |
|
|
|
612 |
|
|
#define WIN32_LEAN_AND_MEAN |
613 |
root |
1.66 |
#if _WIN32_WINNT < 0x0400 |
614 |
|
|
#undef _WIN32_WINNT |
615 |
|
|
#define _WIN32_WINNT 0x0400 |
616 |
|
|
#endif |
617 |
root |
1.63 |
#include <windows.h> |
618 |
|
|
|
/* fiber entry point (passed to CreateFiber in coro_create): unpack the
 * context and run the user coroutine function */
VOID CALLBACK
coro_init (PVOID arg)
{
  coro_context *ctx = (coro_context *)arg;

  ctx->coro (ctx->arg);
}
626 |
|
|
|
/*
 * Switch from *prev to *next using the win32 fiber API.  On the first
 * transfer out of a context whose fiber handle is still unset, the
 * current thread is lazily converted into a fiber so it can be switched
 * back to later.
 */
void
coro_transfer (coro_context *prev, coro_context *next)
{
  if (!prev->fiber)
    {
      prev->fiber = GetCurrentFiber ();

      /* NOTE(review): 0x1e00 appears to be the magic value some windows
       * versions return from GetCurrentFiber when the thread is not yet
       * a fiber - confirm against the platform documentation */
      if (prev->fiber == 0 || prev->fiber == (void *)0x1e00)
        prev->fiber = ConvertThreadToFiber (0);
    }

  SwitchToFiber (next->fiber);
}
640 |
|
|
|
/*
 * Create a coroutine backed by a win32 fiber running coro(arg).  With
 * coro == 0, only initialises ctx as an "empty" context whose fiber
 * handle is filled in lazily by coro_transfer.  sptr is unused here;
 * ssize is passed to CreateFiber as the stack (commit) size.
 */
void
coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize)
{
  ctx->fiber = 0;
  ctx->coro = coro;
  ctx->arg = arg;

  if (!coro)
    return;

  ctx->fiber = CreateFiber (ssize, coro_init, ctx);
}
653 |
|
|
|
654 |
|
|
void |
655 |
|
|
coro_destroy (coro_context *ctx) |
656 |
|
|
{ |
657 |
|
|
DeleteFiber (ctx->fiber); |
658 |
|
|
} |
659 |
|
|
|
660 |
root |
1.40 |
#else |
661 |
root |
1.66 |
#error unsupported backend |
662 |
|
|
#endif |
663 |
|
|
|
664 |
|
|
/*****************************************************************************/ |
665 |
|
|
/* stack management */ |
666 |
|
|
/*****************************************************************************/ |
667 |
|
|
#if CORO_STACKALLOC |
668 |
|
|
|
669 |
|
|
#include <stdlib.h> |
670 |
|
|
|
671 |
|
|
#ifndef _WIN32 |
672 |
|
|
# include <unistd.h> |
673 |
|
|
#endif |
674 |
|
|
|
675 |
|
|
#if CORO_USE_VALGRIND |
676 |
|
|
# include <valgrind/valgrind.h> |
677 |
|
|
#endif |
678 |
|
|
|
/* decide whether mmap-based stack allocation is available */
#if _POSIX_MAPPED_FILES
# include <sys/mman.h>
# define CORO_MMAP 1
# ifndef MAP_ANONYMOUS
#  ifdef MAP_ANON
#   define MAP_ANONYMOUS MAP_ANON
#  else
#   undef CORO_MMAP
#  endif
# endif
# include <limits.h>
#else
# undef CORO_MMAP
#endif

/* guard pages require mprotect; default to 4 pages below the stack */
#if _POSIX_MEMORY_PROTECTION
# ifndef CORO_GUARDPAGES
#  define CORO_GUARDPAGES 4
# endif
#else
# undef CORO_GUARDPAGES
#endif

/* guard pages only make sense with mmap-backed stacks */
#if !CORO_MMAP
# undef CORO_GUARDPAGES
#endif

/* only enable guard pages on architectures known to have stacks that
 * grow downwards */
#if !__i386__ && !__x86_64__ && !__powerpc__ && !__arm__ && !__aarch64__ && !__m68k__ && !__alpha__ && !__mips__ && !__sparc64__
# undef CORO_GUARDPAGES
#endif

#ifndef CORO_GUARDPAGES
# define CORO_GUARDPAGES 0
#endif
713 |
|
|
|
714 |
|
|
#if !PAGESIZE |
715 |
|
|
#if !CORO_MMAP |
716 |
|
|
#define PAGESIZE 4096 |
717 |
|
|
#else |
/* Query the VM page size from the OS once and cache it for all
 * subsequent calls. */
static size_t
coro_pagesize (void)
{
  static size_t cached;

  if (cached)
    return cached;

  cached = sysconf (_SC_PAGESIZE);
  return cached;
}
728 |
|
|
|
729 |
|
|
#define PAGESIZE coro_pagesize () |
730 |
|
|
#endif |
731 |
|
|
#endif |
732 |
|
|
|
/*
 * Allocate a coroutine stack of `size` pointer-sized words (0 selects a
 * default of 256k words), rounded up to whole pages, and store base and
 * rounded byte size in *stack.  Returns 1 on success, 0 on allocation
 * failure.  With the fiber backend no memory is allocated (the fibers
 * manage their own stacks); a non-null marker pointer is stored instead.
 */
int
coro_stack_alloc (struct coro_stack *stack, unsigned int size)
{
  if (!size)
    size = 256 * 1024;

  stack->sptr = 0;
  /* round the byte size up to a multiple of the page size */
  stack->ssze = ((size_t)size * sizeof (void *) + PAGESIZE - 1) / PAGESIZE * PAGESIZE;

#if CORO_FIBER

  /* dummy non-null pointer; CreateFiber allocates the real stack */
  stack->sptr = (void *)stack;
  return 1;

#else

  size_t ssze = stack->ssze + CORO_GUARDPAGES * PAGESIZE;
  void *base;

  #if CORO_MMAP
    /* mmap supposedly does allocate-on-write for us */
    base = mmap (0, ssze, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (base == (void *)-1)
      {
        /* some systems don't let us have executable heap */
        /* we assume they won't need executable stack in that case */
        base = mmap (0, ssze, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (base == (void *)-1)
          return 0;
      }

    #if CORO_GUARDPAGES
      /* make the pages below the stack inaccessible to catch overflows */
      mprotect (base, CORO_GUARDPAGES * PAGESIZE, PROT_NONE);
    #endif

    /* the usable stack starts above the guard pages */
    base = (void *)((char *)base + CORO_GUARDPAGES * PAGESIZE);
  #else
    base = malloc (ssze);
    if (!base)
      return 0;
  #endif

  #if CORO_USE_VALGRIND
    stack->valgrind_id = VALGRIND_STACK_REGISTER ((char *)base, ((char *)base) + ssze - CORO_GUARDPAGES * PAGESIZE);
  #endif

  stack->sptr = base;
  return 1;

#endif
}
786 |
|
|
|
/*
 * Release a stack obtained from coro_stack_alloc.  Safe to call with
 * stack->sptr == 0 (failed/never allocated).  Must mirror the allocation
 * path: munmap includes the guard pages below the returned base.
 */
void
coro_stack_free (struct coro_stack *stack)
{
#if CORO_FIBER
  /* nop */
#else
  #if CORO_USE_VALGRIND
    VALGRIND_STACK_DEREGISTER (stack->valgrind_id);
  #endif

  #if CORO_MMAP
    if (stack->sptr)
      munmap ((void *)((char *)stack->sptr - CORO_GUARDPAGES * PAGESIZE),
              stack->ssze + CORO_GUARDPAGES * PAGESIZE);
  #else
    free (stack->sptr); /* free(0) is a no-op */
  #endif
#endif
}
806 |
|
|
|
807 |
root |
1.1 |
#endif |
808 |
|
|
|