/cvs/libcoro/coro.c
Revision: 1.66
Committed: Fri Dec 7 14:21:09 2012 UTC (11 years, 5 months ago) by root
Content type: text/plain
Branch: MAIN
CVS Tags: rel-6_23
Changes since 1.65: +154 -5 lines
Log Message:
libecoro, erhm, stack management

File Contents

/*
 * Copyright (c) 2001-2011 Marc Alexander Lehmann <schmorp@schmorp.de>
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 *
 * This library is modelled strictly after Ralf S. Engelschall's article at
 * http://www.gnu.org/software/pth/rse-pmt.ps. So most of the credit must
 * go to Ralf S. Engelschall <rse@engelschall.com>.
 */

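/*
 * A minimal usage sketch (illustrative only; the coroutine, context and
 * stack names below are placeholders, and it assumes the optional
 * CORO_STACKALLOC stack management at the end of this file is compiled in):
 *
 *   static coro_context mainctx, coctx;
 *   static struct coro_stack stack;
 *
 *   static void
 *   my_coro (void *arg)
 *   {
 *     // ... do some work, then switch back to the creator ...
 *     coro_transfer (&coctx, &mainctx);
 *   }
 *
 *   int
 *   main (void)
 *   {
 *     coro_create (&mainctx, 0, 0, 0, 0);   // empty context for the main program
 *     coro_stack_alloc (&stack, 0);         // 0 selects the default stack size
 *     coro_create (&coctx, my_coro, 0, stack.sptr, stack.ssze);
 *     coro_transfer (&mainctx, &coctx);     // run my_coro until it transfers back
 *     coro_destroy (&coctx);
 *     coro_stack_free (&stack);
 *     return 0;
 *   }
 */
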
#include "coro.h"

#include <stddef.h>
#include <string.h>

/*****************************************************************************/
/* ucontext/setjmp/asm backends */
/*****************************************************************************/
#if CORO_UCONTEXT || CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM

# if CORO_UCONTEXT
#  include <stddef.h>
# endif

# if !defined(STACK_ADJUST_PTR)
#  if __sgi
/* IRIX is decidedly NON-unix */
#   define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
#   define STACK_ADJUST_SIZE(sp,ss) ((ss) - 8)
#  elif (__i386__ && CORO_LINUX) || (_M_IX86 && CORO_LOSER)
#   define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss))
#   define STACK_ADJUST_SIZE(sp,ss) (ss)
#  elif (__amd64__ && CORO_LINUX) || ((_M_AMD64 || _M_IA64) && CORO_LOSER)
#   define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
#   define STACK_ADJUST_SIZE(sp,ss) (ss)
#  else
#   define STACK_ADJUST_PTR(sp,ss) (sp)
#   define STACK_ADJUST_SIZE(sp,ss) (ss)
#  endif
# endif

# include <stdlib.h>

# if CORO_SJLJ
#  include <stdio.h>
#  include <signal.h>
#  include <unistd.h>
# endif

static coro_func coro_init_func;
static void *coro_init_arg;
static coro_context *new_coro, *create_coro;

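/*
 * coro_init is the first function that runs on the new stack for the
 * ucontext/setjmp/asm family of backends. It copies func/arg out of the
 * file-scope variables above into volatile locals (the globals are reused
 * by the next coro_create) and transfers straight back to the creating
 * context so coro_create can return; the coroutine function itself only
 * runs once somebody transfers into the new context. If it ever returns,
 * there is nothing sensible to return to, hence the abort.
 */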
static void
coro_init (void)
{
  volatile coro_func func = coro_init_func;
  volatile void *arg = coro_init_arg;

  coro_transfer (new_coro, create_coro);

#if __GCC_HAVE_DWARF2_CFI_ASM && __amd64
  asm (".cfi_undefined rip");
#endif

  func ((void *)arg);

  /* the new coro returned. bad. just abort() for now */
  abort ();
}

# if CORO_SJLJ

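/*
 * The setjmp/longjmp backend bootstraps the new stack with the sigaltstack
 * trick from the rse-pmt paper referenced above: coro_create installs the
 * handler below with SA_ONSTACK, points the alternate signal stack at the
 * coroutine's stack and raises SIGUSR2. The handler captures its context,
 * which already lives on the new stack, with coro_setjmp and returns; that
 * saved context is entered later via coro_transfer, outside of signal
 * context, which is when coro_init actually runs.
 */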
static volatile int trampoline_done;

/* trampoline signal handler */
static void
trampoline (int sig)
{
  if (coro_setjmp (new_coro->env))
    coro_init (); /* start it */
  else
    trampoline_done = 1;
}

# endif

# if CORO_ASM

#if _WIN32 || __CYGWIN__
#define CORO_WIN_TIB 1
#endif

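/*
 * Hand-rolled coro_transfer for the assembler backend: save all
 * callee-saved registers (and, on windows, the relevant TIB fields) on
 * the current stack, store the stack pointer into *prev, load the stack
 * pointer from *next, restore the registers and return through an
 * indirect jump to the saved return address.
 */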
asm (
  "\t.text\n"
#if _WIN32 || __CYGWIN__
  "\t.globl _coro_transfer\n"
  "_coro_transfer:\n"
#else
  "\t.globl coro_transfer\n"
  "coro_transfer:\n"
#endif
  /* windows, of course, couldn't care less about the amd64 ABI and uses different registers */
  /* http://blogs.msdn.com/freik/archive/2005/03/17/398200.aspx */
#if __amd64

#if _WIN32 || __CYGWIN__
#define NUM_SAVED 29
139 "\tsubq $168, %rsp\t" /* one dummy qword to improve alignment */
140 "\tmovaps %xmm6, (%rsp)\n"
141 "\tmovaps %xmm7, 16(%rsp)\n"
142 "\tmovaps %xmm8, 32(%rsp)\n"
143 "\tmovaps %xmm9, 48(%rsp)\n"
144 "\tmovaps %xmm10, 64(%rsp)\n"
145 "\tmovaps %xmm11, 80(%rsp)\n"
146 "\tmovaps %xmm12, 96(%rsp)\n"
147 "\tmovaps %xmm13, 112(%rsp)\n"
148 "\tmovaps %xmm14, 128(%rsp)\n"
149 "\tmovaps %xmm15, 144(%rsp)\n"
150 "\tpushq %rsi\n"
151 "\tpushq %rdi\n"
152 "\tpushq %rbp\n"
153 "\tpushq %rbx\n"
154 "\tpushq %r12\n"
155 "\tpushq %r13\n"
156 "\tpushq %r14\n"
157 "\tpushq %r15\n"
158 #if CORO_WIN_TIB
159 "\tpushq %fs:0x0\n"
160 "\tpushq %fs:0x8\n"
161 "\tpushq %fs:0xc\n"
162 #endif
163 "\tmovq %rsp, (%rcx)\n"
164 "\tmovq (%rdx), %rsp\n"
165 #if CORO_WIN_TIB
166 "\tpopq %fs:0xc\n"
167 "\tpopq %fs:0x8\n"
168 "\tpopq %fs:0x0\n"
169 #endif
170 "\tpopq %r15\n"
171 "\tpopq %r14\n"
172 "\tpopq %r13\n"
173 "\tpopq %r12\n"
174 "\tpopq %rbx\n"
175 "\tpopq %rbp\n"
176 "\tpopq %rdi\n"
177 "\tpopq %rsi\n"
178 "\tmovaps (%rsp), %xmm6\n"
179 "\tmovaps 16(%rsp), %xmm7\n"
180 "\tmovaps 32(%rsp), %xmm8\n"
181 "\tmovaps 48(%rsp), %xmm9\n"
182 "\tmovaps 64(%rsp), %xmm10\n"
183 "\tmovaps 80(%rsp), %xmm11\n"
184 "\tmovaps 96(%rsp), %xmm12\n"
185 "\tmovaps 112(%rsp), %xmm13\n"
186 "\tmovaps 128(%rsp), %xmm14\n"
187 "\tmovaps 144(%rsp), %xmm15\n"
188 "\taddq $168, %rsp\n"
189 #else
190 #define NUM_SAVED 6
191 "\tpushq %rbp\n"
192 "\tpushq %rbx\n"
193 "\tpushq %r12\n"
194 "\tpushq %r13\n"
195 "\tpushq %r14\n"
196 "\tpushq %r15\n"
197 "\tmovq %rsp, (%rdi)\n"
198 "\tmovq (%rsi), %rsp\n"
199 "\tpopq %r15\n"
200 "\tpopq %r14\n"
201 "\tpopq %r13\n"
202 "\tpopq %r12\n"
203 "\tpopq %rbx\n"
204 "\tpopq %rbp\n"
205 #endif
206 "\tpopq %rcx\n"
207 "\tjmpq *%rcx\n"
208
209 #elif __i386
210
211 #define NUM_SAVED 4
212 "\tpushl %ebp\n"
213 "\tpushl %ebx\n"
214 "\tpushl %esi\n"
215 "\tpushl %edi\n"
216 #if CORO_WIN_TIB
217 #undef NUM_SAVED
218 #define NUM_SAVED 7
219 "\tpushl %fs:0\n"
220 "\tpushl %fs:4\n"
221 "\tpushl %fs:8\n"
222 #endif
223 "\tmovl %esp, (%eax)\n"
224 "\tmovl (%edx), %esp\n"
225 #if CORO_WIN_TIB
226 "\tpopl %fs:8\n"
227 "\tpopl %fs:4\n"
228 "\tpopl %fs:0\n"
229 #endif
230 "\tpopl %edi\n"
231 "\tpopl %esi\n"
232 "\tpopl %ebx\n"
233 "\tpopl %ebp\n"
234 "\tpopl %ecx\n"
235 "\tjmpl *%ecx\n"
236
237 #else
238 #error unsupported architecture
239 #endif
240 );
241
242 # endif
243
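/*
 * coro_create for these backends passes func/arg (and the two contexts
 * involved) to coro_init through the file-scope variables above and then
 * performs a single bounce into the new context so it can set itself up;
 * as a consequence, coro_create itself is neither reentrant nor
 * thread-safe.
 */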
void
coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize)
{
  coro_context nctx;
# if CORO_SJLJ
  stack_t ostk, nstk;
  struct sigaction osa, nsa;
  sigset_t nsig, osig;
# endif

  if (!coro)
    return;

  coro_init_func = coro;
  coro_init_arg = arg;

  new_coro = ctx;
  create_coro = &nctx;

# if CORO_SJLJ
  /* we use SIGUSR2. first block it, then fiddle with it. */

  sigemptyset (&nsig);
  sigaddset (&nsig, SIGUSR2);
  sigprocmask (SIG_BLOCK, &nsig, &osig);

  nsa.sa_handler = trampoline;
  sigemptyset (&nsa.sa_mask);
  nsa.sa_flags = SA_ONSTACK;

  if (sigaction (SIGUSR2, &nsa, &osa))
    {
      perror ("sigaction");
      abort ();
    }

  /* set the new stack */
  nstk.ss_sp = STACK_ADJUST_PTR (sptr, ssize); /* yes, some platforms (IRIX) get this wrong. */
  nstk.ss_size = STACK_ADJUST_SIZE (sptr, ssize);
  nstk.ss_flags = 0;

  if (sigaltstack (&nstk, &ostk) < 0)
    {
      perror ("sigaltstack");
      abort ();
    }

  trampoline_done = 0;
  kill (getpid (), SIGUSR2);
  sigfillset (&nsig); sigdelset (&nsig, SIGUSR2);

  while (!trampoline_done)
    sigsuspend (&nsig);

  sigaltstack (0, &nstk);
  nstk.ss_flags = SS_DISABLE;
  if (sigaltstack (&nstk, 0) < 0)
    perror ("sigaltstack");

  sigaltstack (0, &nstk);
  if (~nstk.ss_flags & SS_DISABLE)
    abort ();

  if (~ostk.ss_flags & SS_DISABLE)
    sigaltstack (&ostk, 0);

  sigaction (SIGUSR2, &osa, 0);
  sigprocmask (SIG_SETMASK, &osig, 0);

# elif CORO_LOSER

  coro_setjmp (ctx->env);
#if __CYGWIN__ && __i386
  ctx->env[8] = (long) coro_init;
  ctx->env[7] = (long) ((char *)sptr + ssize) - sizeof (long);
#elif __CYGWIN__ && __x86_64
  ctx->env[7] = (long) coro_init;
  ctx->env[6] = (long) ((char *)sptr + ssize) - sizeof (long);
#elif defined __MINGW32__
  ctx->env[5] = (long) coro_init;
  ctx->env[4] = (long) ((char *)sptr + ssize) - sizeof (long);
#elif defined _M_IX86
  ((_JUMP_BUFFER *)&ctx->env)->Eip = (long) coro_init;
  ((_JUMP_BUFFER *)&ctx->env)->Esp = (long) STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
#elif defined _M_AMD64
  ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64) coro_init;
  ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64);
#elif defined _M_IA64
  ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64) coro_init;
  ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64);
#else
#error "microsoft libc or architecture not supported"
#endif

# elif CORO_LINUX

  coro_setjmp (ctx->env);
#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (JB_PC) && defined (JB_SP)
  ctx->env[0].__jmpbuf[JB_PC] = (long) coro_init;
  ctx->env[0].__jmpbuf[JB_SP] = (long) STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
#elif __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (__mc68000__)
  ctx->env[0].__jmpbuf[0].__aregs[0] = (long int)coro_init;
  ctx->env[0].__jmpbuf[0].__sp = (int *) ((char *)sptr + ssize) - sizeof (long);
#elif defined (__GNU_LIBRARY__) && defined (__i386__)
  ctx->env[0].__jmpbuf[0].__pc = (char *) coro_init;
  ctx->env[0].__jmpbuf[0].__sp = (void *) ((char *)sptr + ssize) - sizeof (long);
#elif defined (__GNU_LIBRARY__) && defined (__amd64__)
  ctx->env[0].__jmpbuf[JB_PC] = (long) coro_init;
  ctx->env[0].__jmpbuf[0].__sp = (void *) ((char *)sptr + ssize) - sizeof (long);
#else
#error "linux libc or architecture not supported"
#endif

# elif CORO_IRIX

  coro_setjmp (ctx->env, 0);
  ctx->env[JB_PC] = (__uint64_t)coro_init;
  ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);

# elif CORO_ASM

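  /*
   * Build the initial frame by hand so that the first coro_transfer into
   * this context "returns" into coro_init: a dummy return address (abort,
   * needed for alignment only), the coro_init entry point, optionally the
   * windows TIB fields, and NUM_SAVED zeroed slots for the registers that
   * coro_transfer restores.
   */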
  ctx->sp = (void **)(ssize + (char *)sptr);
  *--ctx->sp = (void *)abort; /* needed for alignment only */
  *--ctx->sp = (void *)coro_init;

#if CORO_WIN_TIB
  *--ctx->sp = 0; /* ExceptionList */
  *--ctx->sp = (char *)sptr + ssize; /* StackBase */
  *--ctx->sp = sptr; /* StackLimit */
#endif

  ctx->sp -= NUM_SAVED;
  memset (ctx->sp, 0, sizeof (*ctx->sp) * NUM_SAVED);

# elif CORO_UCONTEXT

  getcontext (&(ctx->uc));

  ctx->uc.uc_link = 0;
  ctx->uc.uc_stack.ss_sp = sptr;
  ctx->uc.uc_stack.ss_size = (size_t)ssize;
  ctx->uc.uc_stack.ss_flags = 0;

  makecontext (&(ctx->uc), (void (*)())coro_init, 0);

# endif

  coro_transfer (create_coro, new_coro);
}

/*****************************************************************************/
/* pthread backend */
/*****************************************************************************/
#elif CORO_PTHREAD

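/*
 * The pthread backend emulates coroutines with real threads: every
 * coroutine is a thread, but the global mutex below ensures that only one
 * of them runs at any one time. coro_transfer wakes the target thread via
 * its condition variable and then blocks on its own, releasing the mutex
 * while it waits.
 */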
/* this mutex will be locked by the running coroutine */
pthread_mutex_t coro_mutex = PTHREAD_MUTEX_INITIALIZER;

struct coro_init_args
{
  coro_func func;
  void *arg;
  coro_context *self, *main;
};

static pthread_t null_tid;

/* I'd so love to cast pthread_mutex_unlock to void (*)(void *)... */
static void
mutex_unlock_wrapper (void *arg)
{
  pthread_mutex_unlock ((pthread_mutex_t *)arg);
}

static void *
coro_init (void *args_)
{
  struct coro_init_args *args = (struct coro_init_args *)args_;
  coro_func func = args->func;
  void *arg = args->arg;

  pthread_mutex_lock (&coro_mutex);

  /* we try to be good citizens and use deferred cancellation and cleanup handlers */
  pthread_cleanup_push (mutex_unlock_wrapper, &coro_mutex);
  coro_transfer (args->self, args->main);
  func (arg);
  pthread_cleanup_pop (1);

  return 0;
}

void
coro_transfer (coro_context *prev, coro_context *next)
{
  pthread_cond_signal (&next->cv);
  pthread_cond_wait (&prev->cv, &coro_mutex);
#if __FreeBSD__ /* freebsd is of course broken and needs manual testcancel calls... yay... */
  pthread_testcancel ();
#endif
}

void
coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize)
{
  static coro_context nctx;
  static int once;

  if (!once)
    {
      once = 1;

      pthread_mutex_lock (&coro_mutex);
      pthread_cond_init (&nctx.cv, 0);
      null_tid = pthread_self ();
    }

  pthread_cond_init (&ctx->cv, 0);

  if (coro)
    {
      pthread_attr_t attr;
      struct coro_init_args args;

      args.func = coro;
      args.arg = arg;
      args.self = ctx;
      args.main = &nctx;

      pthread_attr_init (&attr);
#if __UCLIBC__
      /* exists, but is borked */
      /*pthread_attr_setstacksize (&attr, (size_t)ssize);*/
#elif __CYGWIN__
      /* pthread_attr_setstack is POSIX, but not available here, so only set the size */
      pthread_attr_setstacksize (&attr, (size_t)ssize);
#else
      pthread_attr_setstack (&attr, sptr, (size_t)ssize);
#endif
      pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
      pthread_create (&ctx->id, &attr, coro_init, &args);

      coro_transfer (args.main, args.self);
    }
  else
    ctx->id = null_tid;
}

void
coro_destroy (coro_context *ctx)
{
  if (!pthread_equal (ctx->id, null_tid))
    {
      pthread_cancel (ctx->id);
      pthread_mutex_unlock (&coro_mutex);
      pthread_join (ctx->id, 0);
      pthread_mutex_lock (&coro_mutex);
    }

  pthread_cond_destroy (&ctx->cv);
}

/*****************************************************************************/
/* fiber backend */
/*****************************************************************************/
#elif CORO_FIBER

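/*
 * The fiber backend maps coroutines directly onto win32 fibers. The
 * calling thread is converted into a fiber lazily, on its first
 * coro_transfer (GetCurrentFiber returning 0 or the magic value 0x1e00
 * is taken to mean the thread is not a fiber yet).
 */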
#define WIN32_LEAN_AND_MEAN
#if _WIN32_WINNT < 0x0400
#undef _WIN32_WINNT
#define _WIN32_WINNT 0x0400
#endif
#include <windows.h>

VOID CALLBACK
coro_init (PVOID arg)
{
  coro_context *ctx = (coro_context *)arg;

  ctx->coro (ctx->arg);
}

void
coro_transfer (coro_context *prev, coro_context *next)
{
  if (!prev->fiber)
    {
      prev->fiber = GetCurrentFiber ();

      if (prev->fiber == 0 || prev->fiber == (void *)0x1e00)
        prev->fiber = ConvertThreadToFiber (0);
    }

  SwitchToFiber (next->fiber);
}

void
coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize)
{
  ctx->fiber = 0;
  ctx->coro = coro;
  ctx->arg = arg;

  if (!coro)
    return;

  ctx->fiber = CreateFiber (ssize, coro_init, ctx);
}

void
coro_destroy (coro_context *ctx)
{
  DeleteFiber (ctx->fiber);
}

#else
#error unsupported backend
#endif

/*****************************************************************************/
/* stack management */
/*****************************************************************************/
#if CORO_STACKALLOC

#include <stdlib.h>

#ifndef _WIN32
# include <unistd.h>
#endif

#if CORO_USE_VALGRIND
# include <valgrind/valgrind.h>
#endif

#if _POSIX_MAPPED_FILES
# include <sys/mman.h>
# define CORO_MMAP 1
# ifndef MAP_ANONYMOUS
#  ifdef MAP_ANON
#   define MAP_ANONYMOUS MAP_ANON
#  else
#   undef CORO_MMAP
#  endif
# endif
# include <limits.h>
#else
# undef CORO_MMAP
#endif

#if _POSIX_MEMORY_PROTECTION
# ifndef CORO_GUARDPAGES
#  define CORO_GUARDPAGES 4
# endif
#else
# undef CORO_GUARDPAGES
#endif

#if !CORO_MMAP
# undef CORO_GUARDPAGES
#endif

#if !__i386 && !__x86_64 && !__powerpc && !__m68k && !__alpha && !__mips && !__sparc64
# undef CORO_GUARDPAGES
#endif

#ifndef CORO_GUARDPAGES
# define CORO_GUARDPAGES 0
#endif

#if !PAGESIZE
#if !CORO_MMAP
#define PAGESIZE 4096
#else
static size_t
coro_pagesize (void)
{
  static size_t pagesize;

  if (!pagesize)
    pagesize = sysconf (_SC_PAGESIZE);

  return pagesize;
}

#define PAGESIZE coro_pagesize ()
#endif
#endif

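/*
 * Allocate a stack suitable for coro_create. The size argument is in
 * units of pointers (0 selects a default of 256k pointers) and is rounded
 * up to whole pages. With mmap available the stack is mapped anonymously
 * and, where configured, protected by CORO_GUARDPAGES guard pages below
 * it; otherwise it falls back to malloc. The fiber backend lets windows
 * allocate the stack itself and only records a dummy pointer. Returns 1
 * on success, 0 on failure.
 */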
int
coro_stack_alloc (struct coro_stack *stack, unsigned int size)
{
  if (!size)
    size = 256 * 1024;

  stack->sptr = 0;
  stack->ssze = ((size_t)size * sizeof (void *) + PAGESIZE - 1) / PAGESIZE * PAGESIZE;

#if CORO_FIBER

  stack->sptr = (void *)stack;
  return 1;

#else

  size_t ssze = stack->ssze + CORO_GUARDPAGES * PAGESIZE;
  void *base;

#if CORO_MMAP
  /* mmap supposedly does allocate-on-write for us */
  base = mmap (0, ssze, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  if (base == (void *)-1)
    {
      /* some systems don't let us have executable heap */
      /* we assume they won't need executable stack in that case */
      base = mmap (0, ssze, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

      if (base == (void *)-1)
        return 0;
    }

#if CORO_GUARDPAGES
  mprotect (base, CORO_GUARDPAGES * PAGESIZE, PROT_NONE);
#endif

  base = (void*)((char *)base + CORO_GUARDPAGES * PAGESIZE);
#else
  base = malloc (ssze);
  if (!base)
    return 0;
#endif

#if CORO_USE_VALGRIND
  stack->valgrind_id = VALGRIND_STACK_REGISTER ((char *)base, (char *)base + ssze - CORO_GUARDPAGES * PAGESIZE);
#endif

  stack->sptr = base;
  return 1;

#endif
}

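/*
 * Release a stack obtained from coro_stack_alloc. For mmap'ed stacks this
 * undoes the guard page offset before unmapping the whole area, including
 * the guard pages.
 */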
void
coro_stack_free (struct coro_stack *stack)
{
#if CORO_FIBER
  /* nop */
#else
#if CORO_USE_VALGRIND
  VALGRIND_STACK_DEREGISTER (stack->valgrind_id);
#endif

#if CORO_MMAP
  if (stack->sptr)
    munmap ((void*)((char *)stack->sptr - CORO_GUARDPAGES * PAGESIZE),
            stack->ssze + CORO_GUARDPAGES * PAGESIZE);
#else
  free (stack->sptr);
#endif
#endif
}

#endif