/cvs/libcoro/coro.c
Revision 1.72 (branch MAIN), committed Tue Aug 14 14:25:10 2018 UTC by root.

/*
 * Copyright (c) 2001-2011 Marc Alexander Lehmann <schmorp@schmorp.de>
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 *
 * This library is modelled strictly after Ralf S. Engelschall's article at
 * http://www.gnu.org/software/pth/rse-pmt.ps. So most of the credit must
 * go to Ralf S. Engelschall <rse@engelschall.com>.
 */

#include "coro.h"

#include <stddef.h>
#include <string.h>

/*****************************************************************************/
/* ucontext/setjmp/asm backends */
/*****************************************************************************/
#if CORO_UCONTEXT || CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM

# if CORO_UCONTEXT
# include <stddef.h>
# endif

# if !defined(STACK_ADJUST_PTR)
# if __sgi
/* IRIX is decidedly NON-unix */
# define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
# define STACK_ADJUST_SIZE(sp,ss) ((ss) - 8)
# elif (__i386__ && CORO_LINUX) || (_M_IX86 && CORO_LOSER)
# define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss))
# define STACK_ADJUST_SIZE(sp,ss) (ss)
# elif (__amd64__ && CORO_LINUX) || ((_M_AMD64 || _M_IA64) && CORO_LOSER)
# define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
# define STACK_ADJUST_SIZE(sp,ss) (ss)
# else
# define STACK_ADJUST_PTR(sp,ss) (sp)
# define STACK_ADJUST_SIZE(sp,ss) (ss)
# endif
# endif
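/* The STACK_ADJUST_* macros above paper over platform differences in where
 * the initial stack pointer of a new context has to point: depending on the
 * platform/backend combination it is either the base of the stack region or
 * its (possibly slightly offset) top. */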

# include <stdlib.h>

# if CORO_SJLJ
# include <stdio.h>
# include <signal.h>
# include <unistd.h>
# endif

static coro_func coro_init_func;
static void *coro_init_arg;
static coro_context *new_coro, *create_coro;

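/*
 * Common startup shim for the backends in this section: coro_create stores
 * the user function and argument in the statics above, arranges for the new
 * context to begin execution in coro_init, and transfers into it once.
 * coro_init copies the statics into locals on the new stack and immediately
 * transfers back, so the user function only starts running the first time
 * the caller transfers into the new coroutine for real.
 */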
static void
coro_init (void)
{
  volatile coro_func func = coro_init_func;
  volatile void *arg = coro_init_arg;

  coro_transfer (new_coro, create_coro);

#if __GCC_HAVE_DWARF2_CFI_ASM && __amd64
  /*asm (".cfi_startproc");*/
  /*asm (".cfi_undefined rip");*/
#endif

  func ((void *)arg);

#if __GCC_HAVE_DWARF2_CFI_ASM && __amd64
  /*asm (".cfi_endproc");*/
#endif

  /* the new coro returned. bad. just abort() for now */
  abort ();
}

# if CORO_SJLJ

static volatile int trampoline_done;

/* trampoline signal handler */
static void
trampoline (int sig)
{
  if (coro_setjmp (new_coro->env))
    coro_init (); /* start it */
  else
    trampoline_done = 1;
}

# endif

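/*
 * The CORO_SJLJ backend builds its initial context without any assembly by
 * abusing sigaltstack: coro_create installs the trampoline above as the
 * SIGUSR2 handler with SA_ONSTACK, points the alternate signal stack at the
 * new coroutine's stack and raises SIGUSR2.  The handler therefore runs on
 * that stack, coro_setjmp captures a jump buffer there, and the first
 * transfer into the context longjmps into it, which starts coro_init on the
 * new stack.
 */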
# if CORO_ASM

#if __arm__ && \
    (defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
     || defined __ARM_ARCH_7R__ || defined __ARM_ARCH_7M__ \
     || __ARM_ARCH == 7)
#define CORO_ARM 1
#endif

#if _WIN32 || __CYGWIN__
#define CORO_WIN_TIB 1
#endif

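/*
 * The asm backend implements coro_transfer directly: it pushes the
 * callee-saved registers onto the current stack, stores the resulting stack
 * pointer into *prev, loads the stack pointer from *next, pops the
 * callee-saved registers saved there, and finally jumps to the return
 * address recovered from the new stack.  With CORO_WIN_TIB the TIB fields
 * (ExceptionList, StackBase, StackLimit) are saved and restored as well, and
 * on win64 also xmm6-xmm15, which that ABI treats as callee-saved.
 */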
asm (
  "\t.text\n"
#if _WIN32 || __CYGWIN__
  "\t.globl _coro_transfer\n"
  "_coro_transfer:\n"
#else
  "\t.globl coro_transfer\n"
  "coro_transfer:\n"
#endif
  /* windows, of course, couldn't care less about the standard amd64 ABI and uses different registers */
  /* http://blogs.msdn.com/freik/archive/2005/03/17/398200.aspx */
#if __amd64

#if _WIN32 || __CYGWIN__
#define NUM_SAVED 29
151 "\tsubq $168, %rsp\t" /* one dummy qword to improve alignment */
152 "\tmovaps %xmm6, (%rsp)\n"
153 "\tmovaps %xmm7, 16(%rsp)\n"
154 "\tmovaps %xmm8, 32(%rsp)\n"
155 "\tmovaps %xmm9, 48(%rsp)\n"
156 "\tmovaps %xmm10, 64(%rsp)\n"
157 "\tmovaps %xmm11, 80(%rsp)\n"
158 "\tmovaps %xmm12, 96(%rsp)\n"
159 "\tmovaps %xmm13, 112(%rsp)\n"
160 "\tmovaps %xmm14, 128(%rsp)\n"
161 "\tmovaps %xmm15, 144(%rsp)\n"
162 "\tpushq %rsi\n"
163 "\tpushq %rdi\n"
164 "\tpushq %rbp\n"
165 "\tpushq %rbx\n"
166 "\tpushq %r12\n"
167 "\tpushq %r13\n"
168 "\tpushq %r14\n"
169 "\tpushq %r15\n"
170 #if CORO_WIN_TIB
171 "\tpushq %fs:0x0\n"
172 "\tpushq %fs:0x8\n"
173 "\tpushq %fs:0xc\n"
174 #endif
175 "\tmovq %rsp, (%rcx)\n"
176 "\tmovq (%rdx), %rsp\n"
177 #if CORO_WIN_TIB
178 "\tpopq %fs:0xc\n"
179 "\tpopq %fs:0x8\n"
180 "\tpopq %fs:0x0\n"
181 #endif
182 "\tpopq %r15\n"
183 "\tpopq %r14\n"
184 "\tpopq %r13\n"
185 "\tpopq %r12\n"
186 "\tpopq %rbx\n"
187 "\tpopq %rbp\n"
188 "\tpopq %rdi\n"
189 "\tpopq %rsi\n"
190 "\tmovaps (%rsp), %xmm6\n"
191 "\tmovaps 16(%rsp), %xmm7\n"
192 "\tmovaps 32(%rsp), %xmm8\n"
193 "\tmovaps 48(%rsp), %xmm9\n"
194 "\tmovaps 64(%rsp), %xmm10\n"
195 "\tmovaps 80(%rsp), %xmm11\n"
196 "\tmovaps 96(%rsp), %xmm12\n"
197 "\tmovaps 112(%rsp), %xmm13\n"
198 "\tmovaps 128(%rsp), %xmm14\n"
199 "\tmovaps 144(%rsp), %xmm15\n"
200 "\taddq $168, %rsp\n"
201 #else
202 #define NUM_SAVED 6
203 "\tpushq %rbp\n"
204 "\tpushq %rbx\n"
205 "\tpushq %r12\n"
206 "\tpushq %r13\n"
207 "\tpushq %r14\n"
208 "\tpushq %r15\n"
209 "\tmovq %rsp, (%rdi)\n"
210 "\tmovq (%rsi), %rsp\n"
211 "\tpopq %r15\n"
212 "\tpopq %r14\n"
213 "\tpopq %r13\n"
214 "\tpopq %r12\n"
215 "\tpopq %rbx\n"
216 "\tpopq %rbp\n"
217 #endif
218 "\tpopq %rcx\n"
219 "\tjmpq *%rcx\n"
220
221 #elif __i386__
222
223 #define NUM_SAVED 4
224 "\tpushl %ebp\n"
225 "\tpushl %ebx\n"
226 "\tpushl %esi\n"
227 "\tpushl %edi\n"
228 #if CORO_WIN_TIB
229 #undef NUM_SAVED
230 #define NUM_SAVED 7
231 "\tpushl %fs:0\n"
232 "\tpushl %fs:4\n"
233 "\tpushl %fs:8\n"
234 #endif
235 "\tmovl %esp, (%eax)\n"
236 "\tmovl (%edx), %esp\n"
237 #if CORO_WIN_TIB
238 "\tpopl %fs:8\n"
239 "\tpopl %fs:4\n"
240 "\tpopl %fs:0\n"
241 #endif
242 "\tpopl %edi\n"
243 "\tpopl %esi\n"
244 "\tpopl %ebx\n"
245 "\tpopl %ebp\n"
246 "\tpopl %ecx\n"
247 "\tjmpl *%ecx\n"
248
249 #elif CORO_ARM /* untested, what about thumb, neon, iwmmxt? */
250
251 #if __ARM_PCS_VFP
252 "\tvpush {d8-d15}\n"
253 #define NUM_SAVED (9 + 8 * 2)
254 #else
255 #define NUM_SAVED 9
256 #endif
257 "\tpush {r4-r11,lr}\n"
258 "\tstr sp, [r0]\n"
259 "\tldr sp, [r1]\n"
260 "\tpop {r4-r11,lr}\n"
261 #if __ARM_PCS_VFP
262 "\tvpop {d8-d15}\n"
263 #endif
264 "\tmov r15, lr\n"
265
266 #elif __mips__ && 0 /* untested, 32 bit only */
267
268 #define NUM_SAVED (12 + 8 * 2)
269 /* TODO: n64/o64, lw=>ld */
270
271 "\t.set nomips16\n"
272 "\t.frame $sp,112,$31\n"
273 #if __mips_soft_float
274 "\taddiu $sp,$sp,-44\n"
275 #else
276 "\taddiu $sp,$sp,-112\n"
277 "\ts.d $f30,88($sp)\n"
278 "\ts.d $f28,80($sp)\n"
279 "\ts.d $f26,72($sp)\n"
280 "\ts.d $f24,64($sp)\n"
281 "\ts.d $f22,56($sp)\n"
282 "\ts.d $f20,48($sp)\n"
283 #endif
284 "\tsw $28,40($sp)\n"
285 "\tsw $31,36($sp)\n"
286 "\tsw $fp,32($sp)\n"
287 "\tsw $23,28($sp)\n"
288 "\tsw $22,24($sp)\n"
289 "\tsw $21,20($sp)\n"
290 "\tsw $20,16($sp)\n"
291 "\tsw $19,12($sp)\n"
292 "\tsw $18,8($sp)\n"
293 "\tsw $17,4($sp)\n"
294 "\tsw $16,0($sp)\n"
295 "\tsw $sp,0($4)\n"
296 "\tlw $sp,0($5)\n"
297 #if !__mips_soft_float
298 "\tl.d $f30,88($sp)\n"
299 "\tl.d $f28,80($sp)\n"
300 "\tl.d $f26,72($sp)\n"
301 "\tl.d $f24,64($sp)\n"
302 "\tl.d $f22,56($sp)\n"
303 "\tl.d $f20,48($sp)\n"
304 #endif
305 "\tlw $28,40($sp)\n"
306 "\tlw $31,36($sp)\n"
307 "\tlw $fp,32($sp)\n"
308 "\tlw $23,28($sp)\n"
309 "\tlw $22,24($sp)\n"
310 "\tlw $21,20($sp)\n"
311 "\tlw $20,16($sp)\n"
312 "\tlw $19,12($sp)\n"
313 "\tlw $18,8($sp)\n"
314 "\tlw $17,4($sp)\n"
315 "\tlw $16,0($sp)\n"
316 "\tj $31\n"
317 #if __mips_soft_float
318 "\taddiu $sp,$sp,44\n"
319 #else
320 "\taddiu $sp,$sp,112\n"
321 #endif
322
323 #else
324 #error unsupported architecture
325 #endif
326 );
327
328 # endif
329
330 void
331 coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize)
332 {
333 coro_context nctx;
334 # if CORO_SJLJ
335 stack_t ostk, nstk;
336 struct sigaction osa, nsa;
337 sigset_t nsig, osig;
338 # endif
339
340 if (!coro)
341 return;
342
343 coro_init_func = coro;
344 coro_init_arg = arg;
345
346 new_coro = ctx;
347 create_coro = &nctx;
348
349 # if CORO_SJLJ
350 /* we use SIGUSR2. first block it, then fiddle with it. */
351
352 sigemptyset (&nsig);
353 sigaddset (&nsig, SIGUSR2);
354 sigprocmask (SIG_BLOCK, &nsig, &osig);
355
356 nsa.sa_handler = trampoline;
357 sigemptyset (&nsa.sa_mask);
358 nsa.sa_flags = SA_ONSTACK;
359
360 if (sigaction (SIGUSR2, &nsa, &osa))
361 {
362 perror ("sigaction");
363 abort ();
364 }
365
366 /* set the new stack */
367 nstk.ss_sp = STACK_ADJUST_PTR (sptr, ssize); /* yes, some platforms (IRIX) get this wrong. */
368 nstk.ss_size = STACK_ADJUST_SIZE (sptr, ssize);
369 nstk.ss_flags = 0;
370
371 if (sigaltstack (&nstk, &ostk) < 0)
372 {
373 perror ("sigaltstack");
374 abort ();
375 }
376
377 trampoline_done = 0;
378 kill (getpid (), SIGUSR2);
379 sigfillset (&nsig); sigdelset (&nsig, SIGUSR2);
380
381 while (!trampoline_done)
382 sigsuspend (&nsig);
383
384 sigaltstack (0, &nstk);
385 nstk.ss_flags = SS_DISABLE;
386 if (sigaltstack (&nstk, 0) < 0)
387 perror ("sigaltstack");
388
389 sigaltstack (0, &nstk);
390 if (~nstk.ss_flags & SS_DISABLE)
391 abort ();
392
393 if (~ostk.ss_flags & SS_DISABLE)
394 sigaltstack (&ostk, 0);
395
396 sigaction (SIGUSR2, &osa, 0);
397 sigprocmask (SIG_SETMASK, &osig, 0);
398
399 # elif CORO_LOSER
400
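  /*
   * The CORO_LOSER, CORO_LINUX and CORO_IRIX variants all work the same way:
   * take a setjmp/_setjmp snapshot of the current context, then patch the
   * saved program counter and stack pointer inside the jmp_buf so that the
   * first longjmp into it enters coro_init on the new stack.  The jmp_buf
   * layout is libc-specific, hence the per-libc #if cascades below.
   */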
  coro_setjmp (ctx->env);
#if __CYGWIN__ && __i386__
  ctx->env[8] = (long) coro_init;
  ctx->env[7] = (long) ((char *)sptr + ssize) - sizeof (long);
#elif __CYGWIN__ && __x86_64__
  ctx->env[7] = (long) coro_init;
  ctx->env[6] = (long) ((char *)sptr + ssize) - sizeof (long);
#elif defined __MINGW32__
  ctx->env[5] = (long) coro_init;
  ctx->env[4] = (long) ((char *)sptr + ssize) - sizeof (long);
#elif defined _M_IX86
  ((_JUMP_BUFFER *)&ctx->env)->Eip = (long) coro_init;
  ((_JUMP_BUFFER *)&ctx->env)->Esp = (long) STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
#elif defined _M_AMD64
  ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64) coro_init;
  ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64);
#elif defined _M_IA64
  ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64) coro_init;
  ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64);
#else
#error "microsoft libc or architecture not supported"
#endif

# elif CORO_LINUX

  coro_setjmp (ctx->env);
#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (JB_PC) && defined (JB_SP)
  ctx->env[0].__jmpbuf[JB_PC] = (long) coro_init;
  ctx->env[0].__jmpbuf[JB_SP] = (long) STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
#elif __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (__mc68000__)
  ctx->env[0].__jmpbuf[0].__aregs[0] = (long int)coro_init;
  ctx->env[0].__jmpbuf[0].__sp = (int *) ((char *)sptr + ssize) - sizeof (long);
#elif defined (__GNU_LIBRARY__) && defined (__i386__)
  ctx->env[0].__jmpbuf[0].__pc = (char *) coro_init;
  ctx->env[0].__jmpbuf[0].__sp = (void *) ((char *)sptr + ssize) - sizeof (long);
#elif defined (__GNU_LIBRARY__) && defined (__x86_64__)
  ctx->env[0].__jmpbuf[JB_PC] = (long) coro_init;
  ctx->env[0].__jmpbuf[0].__sp = (void *) ((char *)sptr + ssize) - sizeof (long);
#else
#error "linux libc or architecture not supported"
#endif

# elif CORO_IRIX

  coro_setjmp (ctx->env, 0);
  ctx->env[JB_PC] = (__uint64_t)coro_init;
  ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);

# elif CORO_ASM

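  /*
   * For the asm backend the new stack is seeded by hand: the address of
   * coro_init is placed where coro_transfer expects a return address, and
   * NUM_SAVED zeroed slots stand in for the callee-saved registers that
   * coro_transfer will pop, so the first transfer into the context pops
   * harmless zeros and then "returns" straight into coro_init.
   */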
#if __i386__ || __x86_64__
  ctx->sp = (void **)(ssize + (char *)sptr);
  *--ctx->sp = (void *)abort; /* needed for alignment only */
  *--ctx->sp = (void *)coro_init;
#if CORO_WIN_TIB
  *--ctx->sp = 0; /* ExceptionList */
  *--ctx->sp = (char *)sptr + ssize; /* StackBase */
  *--ctx->sp = sptr; /* StackLimit */
#endif
#elif CORO_ARM
  /* return address stored in lr register, don't push anything */
#else
#error unsupported architecture
#endif

  ctx->sp -= NUM_SAVED;
  memset (ctx->sp, 0, sizeof (*ctx->sp) * NUM_SAVED);

#if __i386__ || __x86_64__
  /* done already */
#elif CORO_ARM
  ctx->sp[0] = coro; /* r4 */
  ctx->sp[1] = arg; /* r5 */
  ctx->sp[8] = (char *)coro_init; /* lr */
#else
#error unsupported architecture
#endif

# elif CORO_UCONTEXT

  getcontext (&(ctx->uc));

  ctx->uc.uc_link = 0;
  ctx->uc.uc_stack.ss_sp = sptr;
  ctx->uc.uc_stack.ss_size = (size_t)ssize;
  ctx->uc.uc_stack.ss_flags = 0;

  makecontext (&(ctx->uc), (void (*)())coro_init, 0);

# endif

  coro_transfer (create_coro, new_coro);
}

/*****************************************************************************/
/* pthread backend */
/*****************************************************************************/
#elif CORO_PTHREAD

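/*
 * The pthread backend emulates coroutines with real threads: every coroutine
 * is a thread, and a single global mutex plus one condition variable per
 * context ensure that exactly one of them runs at a time.  coro_transfer
 * wakes the target's condition variable and then blocks on its own, so
 * control (and the mutex) passes from thread to thread in strict hand-over
 * fashion.
 */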
/* this mutex will be locked by the running coroutine */
pthread_mutex_t coro_mutex = PTHREAD_MUTEX_INITIALIZER;

struct coro_init_args
{
  coro_func func;
  void *arg;
  coro_context *self, *main;
};

static pthread_t null_tid;

/* I'd so love to cast pthread_mutex_unlock to void (*)(void *)... */
static void
mutex_unlock_wrapper (void *arg)
{
  pthread_mutex_unlock ((pthread_mutex_t *)arg);
}

static void *
coro_init (void *args_)
{
  struct coro_init_args *args = (struct coro_init_args *)args_;
  coro_func func = args->func;
  void *arg = args->arg;

  pthread_mutex_lock (&coro_mutex);

  /* we try to be good citizens and use deferred cancellation and cleanup handlers */
  pthread_cleanup_push (mutex_unlock_wrapper, &coro_mutex);
  coro_transfer (args->self, args->main);
  func (arg);
  pthread_cleanup_pop (1);

  return 0;
}

void
coro_transfer (coro_context *prev, coro_context *next)
{
  pthread_cond_signal (&next->cv);
  pthread_cond_wait (&prev->cv, &coro_mutex);
#if __FreeBSD__ /* freebsd is of course broken and needs manual testcancel calls... yay... */
  pthread_testcancel ();
#endif
}

void
coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize)
{
  static coro_context nctx;
  static int once;

  if (!once)
    {
      once = 1;

      pthread_mutex_lock (&coro_mutex);
      pthread_cond_init (&nctx.cv, 0);
      null_tid = pthread_self ();
    }

  pthread_cond_init (&ctx->cv, 0);

  if (coro)
    {
      pthread_attr_t attr;
      struct coro_init_args args;

      args.func = coro;
      args.arg = arg;
      args.self = ctx;
      args.main = &nctx;

      pthread_attr_init (&attr);
#if __UCLIBC__
      /* exists, but is borked */
      /*pthread_attr_setstacksize (&attr, (size_t)ssize);*/
#elif __CYGWIN__
      /* POSIX, not here */
      pthread_attr_setstacksize (&attr, (size_t)ssize);
#else
      pthread_attr_setstack (&attr, sptr, (size_t)ssize);
#endif
      pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
      pthread_create (&ctx->id, &attr, coro_init, &args);

      coro_transfer (args.main, args.self);
    }
  else
    ctx->id = null_tid;
}

void
coro_destroy (coro_context *ctx)
{
  if (!pthread_equal (ctx->id, null_tid))
    {
      pthread_cancel (ctx->id);
      pthread_mutex_unlock (&coro_mutex); /* let the other coro run */
      pthread_join (ctx->id, 0);
      pthread_mutex_lock (&coro_mutex);
    }

  pthread_cond_destroy (&ctx->cv);
}

/*****************************************************************************/
/* fiber backend */
/*****************************************************************************/
#elif CORO_FIBER

#define WIN32_LEAN_AND_MEAN
#if _WIN32_WINNT < 0x0400
#undef _WIN32_WINNT
#define _WIN32_WINNT 0x0400
#endif
#include <windows.h>

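/*
 * The fiber backend maps each coro_context onto a native win32 fiber.
 * coro_transfer lazily converts the calling thread into a fiber the first
 * time it is used as a source context (GetCurrentFiber reportedly returns 0
 * or the magic value 0x1e00 on a thread that has not been converted yet),
 * after which switching is a plain SwitchToFiber call.
 */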
VOID CALLBACK
coro_init (PVOID arg)
{
  coro_context *ctx = (coro_context *)arg;

  ctx->coro (ctx->arg);
}

void
coro_transfer (coro_context *prev, coro_context *next)
{
  if (!prev->fiber)
    {
      prev->fiber = GetCurrentFiber ();

      if (prev->fiber == 0 || prev->fiber == (void *)0x1e00)
        prev->fiber = ConvertThreadToFiber (0);
    }

  SwitchToFiber (next->fiber);
}

void
coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize)
{
  ctx->fiber = 0;
  ctx->coro = coro;
  ctx->arg = arg;

  if (!coro)
    return;

  ctx->fiber = CreateFiber (ssize, coro_init, ctx);
}

void
coro_destroy (coro_context *ctx)
{
  DeleteFiber (ctx->fiber);
}

#else
#error unsupported backend
#endif

/*****************************************************************************/
/* stack management */
/*****************************************************************************/
#if CORO_STACKALLOC

#include <stdlib.h>

#ifndef _WIN32
# include <unistd.h>
#endif

#if CORO_USE_VALGRIND
# include <valgrind/valgrind.h>
#endif

#if _POSIX_MAPPED_FILES
# include <sys/mman.h>
# define CORO_MMAP 1
# ifndef MAP_ANONYMOUS
# ifdef MAP_ANON
# define MAP_ANONYMOUS MAP_ANON
# else
# undef CORO_MMAP
# endif
# endif
# include <limits.h>
#else
# undef CORO_MMAP
#endif

#if _POSIX_MEMORY_PROTECTION
# ifndef CORO_GUARDPAGES
# define CORO_GUARDPAGES 4
# endif
#else
# undef CORO_GUARDPAGES
#endif

#if !CORO_MMAP
# undef CORO_GUARDPAGES
#endif

#if !__i386__ && !__x86_64__ && !__powerpc__ && !__arm__ && !__aarch64__ && !__m68k__ && !__alpha__ && !__mips__ && !__sparc64__
# undef CORO_GUARDPAGES
#endif

#ifndef CORO_GUARDPAGES
# define CORO_GUARDPAGES 0
#endif

#if !PAGESIZE
#if !CORO_MMAP
#define PAGESIZE 4096
#else
static size_t
coro_pagesize (void)
{
  static size_t pagesize;

  if (!pagesize)
    pagesize = sysconf (_SC_PAGESIZE);

  return pagesize;
}

#define PAGESIZE coro_pagesize ()
#endif
#endif

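/*
 * coro_stack_alloc rounds the requested size (given in pointer-sized words,
 * 0 selecting the default of 256 * 1024 words) up to whole pages.  Where
 * mmap is available, the stack is mapped so that CORO_GUARDPAGES
 * inaccessible pages can be placed at the low end of the mapping, turning an
 * overflow of a downward-growing stack into a fault instead of silent
 * corruption; otherwise it falls back to plain malloc.  The fiber backend
 * does not need the memory at all (CreateFiber allocates its own stack), so
 * only a dummy non-NULL pointer is recorded.
 */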
int
coro_stack_alloc (struct coro_stack *stack, unsigned int size)
{
  if (!size)
    size = 256 * 1024;

  stack->sptr = 0;
  stack->ssze = ((size_t)size * sizeof (void *) + PAGESIZE - 1) / PAGESIZE * PAGESIZE;

#if CORO_FIBER

  stack->sptr = (void *)stack;
  return 1;

#else

  size_t ssze = stack->ssze + CORO_GUARDPAGES * PAGESIZE;
  void *base;

#if CORO_MMAP
  /* mmap supposedly does allocate-on-write for us */
  base = mmap (0, ssze, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  if (base == (void *)-1)
    {
      /* some systems don't let us have executable heap */
      /* we assume they won't need executable stack in that case */
      base = mmap (0, ssze, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

      if (base == (void *)-1)
        return 0;
    }

#if CORO_GUARDPAGES
  mprotect (base, CORO_GUARDPAGES * PAGESIZE, PROT_NONE);
#endif

  base = (void*)((char *)base + CORO_GUARDPAGES * PAGESIZE);
#else
  base = malloc (ssze);
  if (!base)
    return 0;
#endif

#if CORO_USE_VALGRIND
  stack->valgrind_id = VALGRIND_STACK_REGISTER ((char *)base, ((char *)base) + ssze - CORO_GUARDPAGES * PAGESIZE);
#endif

  stack->sptr = base;
  return 1;

#endif
}

void
coro_stack_free (struct coro_stack *stack)
{
#if CORO_FIBER
  /* nop */
#else
#if CORO_USE_VALGRIND
  VALGRIND_STACK_DEREGISTER (stack->valgrind_id);
#endif

#if CORO_MMAP
  if (stack->sptr)
    munmap ((void*)((char *)stack->sptr - CORO_GUARDPAGES * PAGESIZE),
            stack->ssze + CORO_GUARDPAGES * PAGESIZE);
#else
  free (stack->sptr);
#endif
#endif
}

#endif

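For orientation, here is a minimal usage sketch of the API implemented above. It is not part of coro.c: it assumes the declarations from coro.h, a build with CORO_STACKALLOC enabled so that coro_stack_alloc/coro_stack_free exist, and it deliberately never returns from the coroutine function, since several backends abort in that case. The names mainctx, coctx and worker are illustrative only.

#include <stdio.h>
#include "coro.h"

static coro_context mainctx, coctx;
static struct coro_stack stack;

static void
worker (void *arg)
{
  printf ("hello from %s\n", (const char *)arg);
  coro_transfer (&coctx, &mainctx); /* yield back to the creator */
  printf ("worker resumed\n");
  coro_transfer (&coctx, &mainctx); /* park forever - never return from a coroutine */
}

int
main (void)
{
  coro_create (&mainctx, 0, 0, 0, 0);  /* empty context describing the caller */
  coro_stack_alloc (&stack, 0);        /* 0 = default stack size */
  coro_create (&coctx, worker, (void *)"coro", stack.sptr, stack.ssze);

  coro_transfer (&mainctx, &coctx);    /* runs worker until its first yield */
  coro_transfer (&mainctx, &coctx);    /* resumes it once more */

  coro_destroy (&coctx);
  coro_stack_free (&stack);
  return 0;
}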