ViewVC Help
View File | Revision Log | Show Annotations | Download File
/cvs/libcoro/coro.c
(Generate patch)

Comparing libcoro/coro.c (file contents):
Revision 1.62 by root, Mon Aug 8 22:00:18 2011 UTC vs.
Revision 1.66 by root, Fri Dec 7 14:21:09 2012 UTC

1/* 1/*
2 * Copyright (c) 2001-2011 Marc Alexander Lehmann <schmorp@schmorp.de> 2 * Copyright (c) 2001-2011 Marc Alexander Lehmann <schmorp@schmorp.de>
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without modifica- 4 * Redistribution and use in source and binary forms, with or without modifica-
5 * tion, are permitted provided that the following conditions are met: 5 * tion, are permitted provided that the following conditions are met:
6 * 6 *
7 * 1. Redistributions of source code must retain the above copyright notice, 7 * 1. Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer. 8 * this list of conditions and the following disclaimer.
9 * 9 *
10 * 2. Redistributions in binary form must reproduce the above copyright 10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the 11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution. 12 * documentation and/or other materials provided with the distribution.
13 * 13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
15 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 15 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
16 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 16 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
17 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 17 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
18 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 18 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
38 * go to Ralf S. Engelschall <rse@engelschall.com>. 38 * go to Ralf S. Engelschall <rse@engelschall.com>.
39 */ 39 */
40 40
41#include "coro.h" 41#include "coro.h"
42 42
43#include <stddef.h>
43#include <string.h> 44#include <string.h>
44 45
45/*****************************************************************************/ 46/*****************************************************************************/
46/* ucontext/setjmp/asm backends */ 47/* ucontext/setjmp/asm backends */
47/*****************************************************************************/ 48/*****************************************************************************/
114 115
115# endif 116# endif
116 117
117# if CORO_ASM 118# if CORO_ASM
118 119
119 #if _WIN32 120 #if _WIN32 || __CYGWIN__
120 #define CORO_WIN_TIB 1 121 #define CORO_WIN_TIB 1
121 #endif 122 #endif
122 123
123 asm ( 124 asm (
124 "\t.text\n" 125 "\t.text\n"
126 #if _WIN32 || __CYGWIN__
127 "\t.globl _coro_transfer\n"
128 "_coro_transfer:\n"
129 #else
125 "\t.globl coro_transfer\n" 130 "\t.globl coro_transfer\n"
126 "coro_transfer:\n" 131 "coro_transfer:\n"
132 #endif
127 /* windows, of course, gives a shit on the amd64 ABI and uses different registers */ 133 /* windows, of course, gives a shit on the amd64 ABI and uses different registers */
128 /* http://blogs.msdn.com/freik/archive/2005/03/17/398200.aspx */ 134 /* http://blogs.msdn.com/freik/archive/2005/03/17/398200.aspx */
129 #if __amd64 135 #if __amd64
130 #ifdef WIN32 136
131 /* TODO: xmm6..15 also would need to be saved. sigh. */ 137 #if _WIN32 || __CYGWIN__
132 #define NUM_SAVED 8 138 #define NUM_SAVED 29
139 "\tsubq $168, %rsp\t" /* one dummy qword to improve alignment */
140 "\tmovaps %xmm6, (%rsp)\n"
141 "\tmovaps %xmm7, 16(%rsp)\n"
142 "\tmovaps %xmm8, 32(%rsp)\n"
143 "\tmovaps %xmm9, 48(%rsp)\n"
144 "\tmovaps %xmm10, 64(%rsp)\n"
145 "\tmovaps %xmm11, 80(%rsp)\n"
146 "\tmovaps %xmm12, 96(%rsp)\n"
147 "\tmovaps %xmm13, 112(%rsp)\n"
148 "\tmovaps %xmm14, 128(%rsp)\n"
149 "\tmovaps %xmm15, 144(%rsp)\n"
133 "\tpushq %rsi\n" 150 "\tpushq %rsi\n"
134 "\tpushq %rdi\n" 151 "\tpushq %rdi\n"
135 "\tpushq %rbp\n" 152 "\tpushq %rbp\n"
136 "\tpushq %rbx\n" 153 "\tpushq %rbx\n"
137 "\tpushq %r12\n" 154 "\tpushq %r12\n"
156 "\tpopq %r12\n" 173 "\tpopq %r12\n"
157 "\tpopq %rbx\n" 174 "\tpopq %rbx\n"
158 "\tpopq %rbp\n" 175 "\tpopq %rbp\n"
159 "\tpopq %rdi\n" 176 "\tpopq %rdi\n"
160 "\tpopq %rsi\n" 177 "\tpopq %rsi\n"
178 "\tmovaps (%rsp), %xmm6\n"
179 "\tmovaps 16(%rsp), %xmm7\n"
180 "\tmovaps 32(%rsp), %xmm8\n"
181 "\tmovaps 48(%rsp), %xmm9\n"
182 "\tmovaps 64(%rsp), %xmm10\n"
183 "\tmovaps 80(%rsp), %xmm11\n"
184 "\tmovaps 96(%rsp), %xmm12\n"
185 "\tmovaps 112(%rsp), %xmm13\n"
186 "\tmovaps 128(%rsp), %xmm14\n"
187 "\tmovaps 144(%rsp), %xmm15\n"
188 "\taddq $168, %rsp\n"
161 #else 189 #else
162 #define NUM_SAVED 6 190 #define NUM_SAVED 6
163 "\tpushq %rbp\n" 191 "\tpushq %rbp\n"
164 "\tpushq %rbx\n" 192 "\tpushq %rbx\n"
165 "\tpushq %r12\n" 193 "\tpushq %r12\n"
173 "\tpopq %r13\n" 201 "\tpopq %r13\n"
174 "\tpopq %r12\n" 202 "\tpopq %r12\n"
175 "\tpopq %rbx\n" 203 "\tpopq %rbx\n"
176 "\tpopq %rbp\n" 204 "\tpopq %rbp\n"
177 #endif 205 #endif
206 "\tpopq %rcx\n"
207 "\tjmpq *%rcx\n"
208
178 #elif __i386 209 #elif __i386
210
179 #define NUM_SAVED 4 211 #define NUM_SAVED 4
180 "\tpushl %ebp\n" 212 "\tpushl %ebp\n"
181 "\tpushl %ebx\n" 213 "\tpushl %ebx\n"
182 "\tpushl %esi\n" 214 "\tpushl %esi\n"
183 "\tpushl %edi\n" 215 "\tpushl %edi\n"
184 #if CORO_WIN_TIB 216 #if CORO_WIN_TIB
217 #undef NUM_SAVED
218 #define NUM_SAVED 7
185 "\tpushl %fs:0\n" 219 "\tpushl %fs:0\n"
186 "\tpushl %fs:4\n" 220 "\tpushl %fs:4\n"
187 "\tpushl %fs:8\n" 221 "\tpushl %fs:8\n"
188 #endif 222 #endif
189 "\tmovl %esp, (%eax)\n" 223 "\tmovl %esp, (%eax)\n"
195 #endif 229 #endif
196 "\tpopl %edi\n" 230 "\tpopl %edi\n"
197 "\tpopl %esi\n" 231 "\tpopl %esi\n"
198 "\tpopl %ebx\n" 232 "\tpopl %ebx\n"
199 "\tpopl %ebp\n" 233 "\tpopl %ebp\n"
234 "\tpopl %ecx\n"
235 "\tjmpl *%ecx\n"
236
200 #else 237 #else
201 #error unsupported architecture 238 #error unsupported architecture
202 #endif 239 #endif
203 "\tret\n"
204 ); 240 );
205 241
206# endif 242# endif
207 243
208void 244void
209coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, long ssize) 245coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize)
210{ 246{
211 coro_context nctx; 247 coro_context nctx;
212# if CORO_SJLJ 248# if CORO_SJLJ
213 stack_t ostk, nstk; 249 stack_t ostk, nstk;
214 struct sigaction osa, nsa; 250 struct sigaction osa, nsa;
281 ctx->env[8] = (long) coro_init; 317 ctx->env[8] = (long) coro_init;
282 ctx->env[7] = (long) ((char *)sptr + ssize) - sizeof (long); 318 ctx->env[7] = (long) ((char *)sptr + ssize) - sizeof (long);
283 #elif __CYGWIN__ && __x86_64 319 #elif __CYGWIN__ && __x86_64
284 ctx->env[7] = (long) coro_init; 320 ctx->env[7] = (long) coro_init;
285 ctx->env[6] = (long) ((char *)sptr + ssize) - sizeof (long); 321 ctx->env[6] = (long) ((char *)sptr + ssize) - sizeof (long);
286 #elif defined(__MINGW32__) 322 #elif defined __MINGW32__
287 ctx->env[5] = (long) coro_init; 323 ctx->env[5] = (long) coro_init;
288 ctx->env[4] = (long) ((char *)sptr + ssize) - sizeof (long); 324 ctx->env[4] = (long) ((char *)sptr + ssize) - sizeof (long);
289 #elif defined(_M_IX86) 325 #elif defined _M_IX86
290 ((_JUMP_BUFFER *)&ctx->env)->Eip = (long) coro_init; 326 ((_JUMP_BUFFER *)&ctx->env)->Eip = (long) coro_init;
291 ((_JUMP_BUFFER *)&ctx->env)->Esp = (long) STACK_ADJUST_PTR (sptr, ssize) - sizeof (long); 327 ((_JUMP_BUFFER *)&ctx->env)->Esp = (long) STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
292 #elif defined(_M_AMD64) 328 #elif defined _M_AMD64
293 ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64) coro_init; 329 ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64) coro_init;
294 ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64); 330 ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64);
295 #elif defined(_M_IA64) 331 #elif defined _M_IA64
296 ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64) coro_init; 332 ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64) coro_init;
297 ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64); 333 ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64);
298 #else 334 #else
299 #error "microsoft libc or architecture not supported" 335 #error "microsoft libc or architecture not supported"
300 #endif 336 #endif
406 pthread_testcancel (); 442 pthread_testcancel ();
407#endif 443#endif
408} 444}
409 445
410void 446void
411coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, long ssize) 447coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize)
412{ 448{
413 static coro_context nctx; 449 static coro_context nctx;
414 static int once; 450 static int once;
415 451
416 if (!once) 452 if (!once)
465 } 501 }
466 502
467 pthread_cond_destroy (&ctx->cv); 503 pthread_cond_destroy (&ctx->cv);
468} 504}
469 505
506/*****************************************************************************/
507/* fiber backend */
508/*****************************************************************************/
509#elif CORO_FIBER
510
511#define WIN32_LEAN_AND_MEAN
512#if _WIN32_WINNT < 0x0400
513 #undef _WIN32_WINNT
514 #define _WIN32_WINNT 0x0400
515#endif
516#include <windows.h>
517
518VOID CALLBACK
519coro_init (PVOID arg)
520{
521 coro_context *ctx = (coro_context *)arg;
522
523 ctx->coro (ctx->arg);
524}
525
526void
527coro_transfer (coro_context *prev, coro_context *next)
528{
529 if (!prev->fiber)
530 {
531 prev->fiber = GetCurrentFiber ();
532
533 if (prev->fiber == 0 || prev->fiber == (void *)0x1e00)
534 prev->fiber = ConvertThreadToFiber (0);
535 }
536
537 SwitchToFiber (next->fiber);
538}
539
540void
541coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize)
542{
543 ctx->fiber = 0;
544 ctx->coro = coro;
545 ctx->arg = arg;
546
547 if (!coro)
548 return;
549
550 ctx->fiber = CreateFiber (ssize, coro_init, ctx);
551}
552
553void
554coro_destroy (coro_context *ctx)
555{
556 DeleteFiber (ctx->fiber);
557}
558
470#else 559#else
471# error unsupported backend 560 #error unsupported backend
561#endif
562
563/*****************************************************************************/
564/* stack management */
565/*****************************************************************************/
566#if CORO_STACKALLOC
567
568#include <stdlib.h>
569
570#ifndef _WIN32
571# include <unistd.h>
572#endif
573
574#if CORO_USE_VALGRIND
575# include <valgrind/valgrind.h>
576#endif
577
578#if _POSIX_MAPPED_FILES
579# include <sys/mman.h>
580# define CORO_MMAP 1
581# ifndef MAP_ANONYMOUS
582# ifdef MAP_ANON
583# define MAP_ANONYMOUS MAP_ANON
584# else
585# undef CORO_MMAP
586# endif
472#endif 587# endif
588# include <limits.h>
589#else
590# undef CORO_MMAP
591#endif
473 592
593#if _POSIX_MEMORY_PROTECTION
594# ifndef CORO_GUARDPAGES
595# define CORO_GUARDPAGES 4
596# endif
597#else
598# undef CORO_GUARDPAGES
599#endif
600
601#if !CORO_MMAP
602# undef CORO_GUARDPAGES
603#endif
604
605#if !__i386 && !__x86_64 && !__powerpc && !__m68k && !__alpha && !__mips && !__sparc64
606# undef CORO_GUARDPAGES
607#endif
608
609#ifndef CORO_GUARDPAGES
610# define CORO_GUARDPAGES 0
611#endif
612
613#if !PAGESIZE
614 #if !CORO_MMAP
615 #define PAGESIZE 4096
616 #else
617 static size_t
618 coro_pagesize (void)
619 {
620 static size_t pagesize;
621
622 if (!pagesize)
623 pagesize = sysconf (_SC_PAGESIZE);
624
625 return pagesize;
626 }
627
628 #define PAGESIZE coro_pagesize ()
629 #endif
630#endif
631
632int
633coro_stack_alloc (struct coro_stack *stack, unsigned int size)
634{
635 if (!size)
636 size = 256 * 1024;
637
638 stack->sptr = 0;
639 stack->ssze = ((size_t)size * sizeof (void *) + PAGESIZE - 1) / PAGESIZE * PAGESIZE;
640
641#if CORO_FIBER
642
643 stack->sptr = (void *)stack;
644 return 1;
645
646#else
647
648 size_t ssze = stack->ssze + CORO_GUARDPAGES * PAGESIZE;
649 void *base;
650
651 #if CORO_MMAP
652 /* mmap supposedly does allocate-on-write for us */
653 base = mmap (0, ssze, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
654
655 if (base == (void *)-1)
656 {
657 /* some systems don't let us have executable heap */
658 /* we assume they won't need executable stack in that case */
659 base = mmap (0, ssze, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
660
661 if (base == (void *)-1)
662 return 0;
663 }
664
665 #if CORO_GUARDPAGES
666 mprotect (base, CORO_GUARDPAGES * PAGESIZE, PROT_NONE);
667 #endif
668
669 base = (void*)((char *)base + CORO_GUARDPAGES * PAGESIZE);
670 #else
671 base = malloc (ssze);
672 if (!base)
673 return 0;
674 #endif
675
676 #if CORO_USE_VALGRIND
677 stack->valgrind_id = VALGRIND_STACK_REGISTER ((char *)base, (char *)ssze - CORO_GUARDPAGES * PAGESIZE);
678 #endif
679
680 stack->sptr = base;
681 return 1;
682
683#endif
684}
685
686void
687coro_stack_free (struct coro_stack *stack)
688{
689#if CORO_FIBER
690 /* nop */
691#else
692 #if CORO_USE_VALGRIND
693 VALGRIND_STACK_DEREGISTER (stack->valgrind_id);
694 #endif
695
696 #if CORO_MMAP
697 if (stack->sptr)
698 munmap ((void*)((char *)stack->sptr - CORO_GUARDPAGES * PAGESIZE),
699 stack->ssze + CORO_GUARDPAGES * PAGESIZE);
700 #else
701 free (stack->sptr);
702 #endif
703#endif
704}
705
706#endif
707

Diff Legend

- Removed lines
+ Added lines
< Changed lines
> Changed lines