ViewVC Help
View File | Revision Log | Show Annotations | Download File
/cvs/libcoro/coro.c
(Generate patch)

Comparing libcoro/coro.c (file contents):
Revision 1.59 by root, Mon Jun 13 08:38:16 2011 UTC vs.
Revision 1.73 by root, Tue Aug 14 15:46:03 2018 UTC

1/* 1/*
2 * Copyright (c) 2001-2011 Marc Alexander Lehmann <schmorp@schmorp.de> 2 * Copyright (c) 2001-2011 Marc Alexander Lehmann <schmorp@schmorp.de>
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without modifica- 4 * Redistribution and use in source and binary forms, with or without modifica-
5 * tion, are permitted provided that the following conditions are met: 5 * tion, are permitted provided that the following conditions are met:
6 * 6 *
7 * 1. Redistributions of source code must retain the above copyright notice, 7 * 1. Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer. 8 * this list of conditions and the following disclaimer.
9 * 9 *
10 * 2. Redistributions in binary form must reproduce the above copyright 10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the 11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution. 12 * documentation and/or other materials provided with the distribution.
13 * 13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
15 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 15 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
16 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 16 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
17 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 17 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
18 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 18 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
38 * go to Ralf S. Engelschall <rse@engelschall.com>. 38 * go to Ralf S. Engelschall <rse@engelschall.com>.
39 */ 39 */
40 40
41#include "coro.h" 41#include "coro.h"
42 42
43#include <stddef.h>
43#include <string.h> 44#include <string.h>
44 45
45/*****************************************************************************/ 46/*****************************************************************************/
46/* ucontext/setjmp/asm backends */ 47/* ucontext/setjmp/asm backends */
47/*****************************************************************************/ 48/*****************************************************************************/
86 volatile coro_func func = coro_init_func; 87 volatile coro_func func = coro_init_func;
87 volatile void *arg = coro_init_arg; 88 volatile void *arg = coro_init_arg;
88 89
89 coro_transfer (new_coro, create_coro); 90 coro_transfer (new_coro, create_coro);
90 91
91#if __linux && __amd64 92#if __GCC_HAVE_DWARF2_CFI_ASM && __amd64
92 /* we blindly assume on any __linux with __amd64 we have a new enough gas with .cfi_undefined support */ 93 /*asm (".cfi_startproc");*/
93 asm (".cfi_undefined rip"); 94 /*asm (".cfi_undefined rip");*/
94#endif 95#endif
95 96
96 func ((void *)arg); 97 func ((void *)arg);
98
99#if __GCC_HAVE_DWARF2_CFI_ASM && __amd64
100 /*asm (".cfi_endproc");*/
101#endif
97 102
98 /* the new coro returned. bad. just abort() for now */ 103 /* the new coro returned. bad. just abort() for now */
99 abort (); 104 abort ();
100} 105}
101 106
115 120
116# endif 121# endif
117 122
118# if CORO_ASM 123# if CORO_ASM
119 124
125 #if __arm__ && \
126 (defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
127 || defined __ARM_ARCH_7R__ || defined __ARM_ARCH_7M__ \
128 || __ARM_ARCH == 7)
129 #define CORO_ARM 1
130 #endif
131
132 #if _WIN32 || __CYGWIN__
133 #define CORO_WIN_TIB 1
134 #endif
135
120 asm ( 136 asm (
121 ".text\n" 137 "\t.text\n"
138 #if _WIN32 || __CYGWIN__
139 "\t.globl _coro_transfer\n"
140 "_coro_transfer:\n"
141 #else
122 ".globl coro_transfer\n" 142 "\t.globl coro_transfer\n"
123 ".type coro_transfer, @function\n"
124 "coro_transfer:\n" 143 "coro_transfer:\n"
144 #endif
125 /* windows, of course, gives a shit on the amd64 ABI and uses different registers */ 145 /* windows, of course, gives a shit on the amd64 ABI and uses different registers */
126 /* http://blogs.msdn.com/freik/archive/2005/03/17/398200.aspx */ 146 /* http://blogs.msdn.com/freik/archive/2005/03/17/398200.aspx */
127 #if __amd64 147 #if __amd64
128 #ifdef _WIN32 148
129 /* TODO: xmm6..15 also would need to be saved. sigh. */ 149 #if _WIN32 || __CYGWIN__
130 #define NUM_SAVED 8 150 #define NUM_SAVED 29
131 #undef CORO_WIN_TIB 151 "\tsubq $168, %rsp\t" /* one dummy qword to improve alignment */
152 "\tmovaps %xmm6, (%rsp)\n"
153 "\tmovaps %xmm7, 16(%rsp)\n"
154 "\tmovaps %xmm8, 32(%rsp)\n"
155 "\tmovaps %xmm9, 48(%rsp)\n"
156 "\tmovaps %xmm10, 64(%rsp)\n"
157 "\tmovaps %xmm11, 80(%rsp)\n"
158 "\tmovaps %xmm12, 96(%rsp)\n"
159 "\tmovaps %xmm13, 112(%rsp)\n"
160 "\tmovaps %xmm14, 128(%rsp)\n"
161 "\tmovaps %xmm15, 144(%rsp)\n"
132 "\tpushq %rsi\n" 162 "\tpushq %rsi\n"
133 "\tpushq %rdi\n" 163 "\tpushq %rdi\n"
134 "\tpushq %rbp\n" 164 "\tpushq %rbp\n"
135 "\tpushq %rbx\n" 165 "\tpushq %rbx\n"
136 "\tpushq %r12\n" 166 "\tpushq %r12\n"
137 "\tpushq %r13\n" 167 "\tpushq %r13\n"
138 "\tpushq %r14\n" 168 "\tpushq %r14\n"
139 "\tpushq %r15\n" 169 "\tpushq %r15\n"
170 #if CORO_WIN_TIB
171 "\tpushq %fs:0x0\n"
172 "\tpushq %fs:0x8\n"
173 "\tpushq %fs:0xc\n"
174 #endif
140 "\tmovq %rsp, (%rcx)\n" 175 "\tmovq %rsp, (%rcx)\n"
141 "\tmovq (%rdx), %rsp\n" 176 "\tmovq (%rdx), %rsp\n"
177 #if CORO_WIN_TIB
178 "\tpopq %fs:0xc\n"
179 "\tpopq %fs:0x8\n"
180 "\tpopq %fs:0x0\n"
181 #endif
142 "\tpopq %r15\n" 182 "\tpopq %r15\n"
143 "\tpopq %r14\n" 183 "\tpopq %r14\n"
144 "\tpopq %r13\n" 184 "\tpopq %r13\n"
145 "\tpopq %r12\n" 185 "\tpopq %r12\n"
146 "\tpopq %rbx\n" 186 "\tpopq %rbx\n"
147 "\tpopq %rbp\n" 187 "\tpopq %rbp\n"
148 "\tpopq %rdi\n" 188 "\tpopq %rdi\n"
149 "\tpopq %rsi\n" 189 "\tpopq %rsi\n"
190 "\tmovaps (%rsp), %xmm6\n"
191 "\tmovaps 16(%rsp), %xmm7\n"
192 "\tmovaps 32(%rsp), %xmm8\n"
193 "\tmovaps 48(%rsp), %xmm9\n"
194 "\tmovaps 64(%rsp), %xmm10\n"
195 "\tmovaps 80(%rsp), %xmm11\n"
196 "\tmovaps 96(%rsp), %xmm12\n"
197 "\tmovaps 112(%rsp), %xmm13\n"
198 "\tmovaps 128(%rsp), %xmm14\n"
199 "\tmovaps 144(%rsp), %xmm15\n"
200 "\taddq $168, %rsp\n"
150 #else 201 #else
151 #define NUM_SAVED 6 202 #define NUM_SAVED 6
152 "\tpushq %rbp\n" 203 "\tpushq %rbp\n"
153 "\tpushq %rbx\n" 204 "\tpushq %rbx\n"
154 "\tpushq %r12\n" 205 "\tpushq %r12\n"
162 "\tpopq %r13\n" 213 "\tpopq %r13\n"
163 "\tpopq %r12\n" 214 "\tpopq %r12\n"
164 "\tpopq %rbx\n" 215 "\tpopq %rbx\n"
165 "\tpopq %rbp\n" 216 "\tpopq %rbp\n"
166 #endif 217 #endif
218 "\tpopq %rcx\n"
219 "\tjmpq *%rcx\n"
220
167 #elif __i386 221 #elif __i386__
222
168 #define NUM_SAVED 4 223 #define NUM_SAVED 4
169 "\tpushl %ebp\n" 224 "\tpushl %ebp\n"
170 "\tpushl %ebx\n" 225 "\tpushl %ebx\n"
171 "\tpushl %esi\n" 226 "\tpushl %esi\n"
172 "\tpushl %edi\n" 227 "\tpushl %edi\n"
173 #if CORO_WIN_TIB 228 #if CORO_WIN_TIB
229 #undef NUM_SAVED
230 #define NUM_SAVED 7
174 "\tpushl %fs:0\n" 231 "\tpushl %fs:0\n"
175 "\tpushl %fs:4\n" 232 "\tpushl %fs:4\n"
176 "\tpushl %fs:8\n" 233 "\tpushl %fs:8\n"
177 #endif 234 #endif
178 "\tmovl %esp, (%eax)\n" 235 "\tmovl %esp, (%eax)\n"
184 #endif 241 #endif
185 "\tpopl %edi\n" 242 "\tpopl %edi\n"
186 "\tpopl %esi\n" 243 "\tpopl %esi\n"
187 "\tpopl %ebx\n" 244 "\tpopl %ebx\n"
188 "\tpopl %ebp\n" 245 "\tpopl %ebp\n"
246 "\tpopl %ecx\n"
247 "\tjmpl *%ecx\n"
248
249 #elif CORO_ARM /* untested, what about thumb, neon, iwmmxt? */
250
251 #if __ARM_PCS_VFP
252 "\tvpush {d8-d15}\n"
253 #define NUM_SAVED (9 + 8 * 2)
254 #else
255 #define NUM_SAVED 9
256 #endif
257 "\tpush {r4-r11,lr}\n"
258 "\tstr sp, [r0]\n"
259 "\tldr sp, [r1]\n"
260 "\tpop {r4-r11,lr}\n"
261 #if __ARM_PCS_VFP
262 "\tvpop {d8-d15}\n"
263 #endif
264 "\tmov r15, lr\n"
265
266 #elif __mips__ && 0 /* untested, 32 bit only */
267
268 #define NUM_SAVED (12 + 8 * 2)
269 /* TODO: n64/o64, lw=>ld */
270
271 "\t.set nomips16\n"
272 "\t.frame $sp,112,$31\n"
273 #if __mips_soft_float
274 "\taddiu $sp,$sp,-44\n"
275 #else
276 "\taddiu $sp,$sp,-112\n"
277 "\ts.d $f30,88($sp)\n"
278 "\ts.d $f28,80($sp)\n"
279 "\ts.d $f26,72($sp)\n"
280 "\ts.d $f24,64($sp)\n"
281 "\ts.d $f22,56($sp)\n"
282 "\ts.d $f20,48($sp)\n"
283 #endif
284 "\tsw $28,40($sp)\n"
285 "\tsw $31,36($sp)\n"
286 "\tsw $fp,32($sp)\n"
287 "\tsw $23,28($sp)\n"
288 "\tsw $22,24($sp)\n"
289 "\tsw $21,20($sp)\n"
290 "\tsw $20,16($sp)\n"
291 "\tsw $19,12($sp)\n"
292 "\tsw $18,8($sp)\n"
293 "\tsw $17,4($sp)\n"
294 "\tsw $16,0($sp)\n"
295 "\tsw $sp,0($4)\n"
296 "\tlw $sp,0($5)\n"
297 #if !__mips_soft_float
298 "\tl.d $f30,88($sp)\n"
299 "\tl.d $f28,80($sp)\n"
300 "\tl.d $f26,72($sp)\n"
301 "\tl.d $f24,64($sp)\n"
302 "\tl.d $f22,56($sp)\n"
303 "\tl.d $f20,48($sp)\n"
304 #endif
305 "\tlw $28,40($sp)\n"
306 "\tlw $31,36($sp)\n"
307 "\tlw $fp,32($sp)\n"
308 "\tlw $23,28($sp)\n"
309 "\tlw $22,24($sp)\n"
310 "\tlw $21,20($sp)\n"
311 "\tlw $20,16($sp)\n"
312 "\tlw $19,12($sp)\n"
313 "\tlw $18,8($sp)\n"
314 "\tlw $17,4($sp)\n"
315 "\tlw $16,0($sp)\n"
316 "\tj $31\n"
317 #if __mips_soft_float
318 "\taddiu $sp,$sp,44\n"
319 #else
320 "\taddiu $sp,$sp,112\n"
321 #endif
322
189 #else 323 #else
190 #error unsupported architecture 324 #error unsupported architecture
191 #endif 325 #endif
192 "\tret\n"
193 ); 326 );
194 327
195# endif 328# endif
196 329
197void 330void
198coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, long ssize) 331coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize)
199{ 332{
200 coro_context nctx; 333 coro_context nctx;
201# if CORO_SJLJ 334# if CORO_SJLJ
202 stack_t ostk, nstk; 335 stack_t ostk, nstk;
203 struct sigaction osa, nsa; 336 struct sigaction osa, nsa;
264 sigprocmask (SIG_SETMASK, &osig, 0); 397 sigprocmask (SIG_SETMASK, &osig, 0);
265 398
266# elif CORO_LOSER 399# elif CORO_LOSER
267 400
268 coro_setjmp (ctx->env); 401 coro_setjmp (ctx->env);
269 #if __CYGWIN__ && __i386 402 #if __CYGWIN__ && __i386__
270 ctx->env[8] = (long) coro_init; 403 ctx->env[8] = (long) coro_init;
271 ctx->env[7] = (long) ((char *)sptr + ssize) - sizeof (long); 404 ctx->env[7] = (long) ((char *)sptr + ssize) - sizeof (long);
272 #elif __CYGWIN__ && __x86_64 405 #elif __CYGWIN__ && __x86_64__
273 ctx->env[7] = (long) coro_init; 406 ctx->env[7] = (long) coro_init;
274 ctx->env[6] = (long) ((char *)sptr + ssize) - sizeof (long); 407 ctx->env[6] = (long) ((char *)sptr + ssize) - sizeof (long);
275 #elif defined(__MINGW32__) 408 #elif defined __MINGW32__
276 ctx->env[5] = (long) coro_init; 409 ctx->env[5] = (long) coro_init;
277 ctx->env[4] = (long) ((char *)sptr + ssize) - sizeof (long); 410 ctx->env[4] = (long) ((char *)sptr + ssize) - sizeof (long);
278 #elif defined(_M_IX86) 411 #elif defined _M_IX86
279 ((_JUMP_BUFFER *)&ctx->env)->Eip = (long) coro_init; 412 ((_JUMP_BUFFER *)&ctx->env)->Eip = (long) coro_init;
280 ((_JUMP_BUFFER *)&ctx->env)->Esp = (long) STACK_ADJUST_PTR (sptr, ssize) - sizeof (long); 413 ((_JUMP_BUFFER *)&ctx->env)->Esp = (long) STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
281 #elif defined(_M_AMD64) 414 #elif defined _M_AMD64
282 ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64) coro_init; 415 ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64) coro_init;
283 ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64); 416 ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64);
284 #elif defined(_M_IA64) 417 #elif defined _M_IA64
285 ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64) coro_init; 418 ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64) coro_init;
286 ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64); 419 ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64);
287 #else 420 #else
288 #error "microsoft libc or architecture not supported" 421 #error "microsoft libc or architecture not supported"
289 #endif 422 #endif
298 ctx->env[0].__jmpbuf[0].__aregs[0] = (long int)coro_init; 431 ctx->env[0].__jmpbuf[0].__aregs[0] = (long int)coro_init;
299 ctx->env[0].__jmpbuf[0].__sp = (int *) ((char *)sptr + ssize) - sizeof (long); 432 ctx->env[0].__jmpbuf[0].__sp = (int *) ((char *)sptr + ssize) - sizeof (long);
300 #elif defined (__GNU_LIBRARY__) && defined (__i386__) 433 #elif defined (__GNU_LIBRARY__) && defined (__i386__)
301 ctx->env[0].__jmpbuf[0].__pc = (char *) coro_init; 434 ctx->env[0].__jmpbuf[0].__pc = (char *) coro_init;
302 ctx->env[0].__jmpbuf[0].__sp = (void *) ((char *)sptr + ssize) - sizeof (long); 435 ctx->env[0].__jmpbuf[0].__sp = (void *) ((char *)sptr + ssize) - sizeof (long);
303 #elif defined (__GNU_LIBRARY__) && defined (__amd64__) 436 #elif defined (__GNU_LIBRARY__) && defined (__x86_64__)
304 ctx->env[0].__jmpbuf[JB_PC] = (long) coro_init; 437 ctx->env[0].__jmpbuf[JB_PC] = (long) coro_init;
305 ctx->env[0].__jmpbuf[0].__sp = (void *) ((char *)sptr + ssize) - sizeof (long); 438 ctx->env[0].__jmpbuf[0].__sp = (void *) ((char *)sptr + ssize) - sizeof (long);
306 #else 439 #else
307 #error "linux libc or architecture not supported" 440 #error "linux libc or architecture not supported"
308 #endif 441 #endif
313 ctx->env[JB_PC] = (__uint64_t)coro_init; 446 ctx->env[JB_PC] = (__uint64_t)coro_init;
314 ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long); 447 ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
315 448
316# elif CORO_ASM 449# elif CORO_ASM
317 450
451 #if __i386__ || __x86_64__
318 ctx->sp = (void **)(ssize + (char *)sptr); 452 ctx->sp = (void **)(ssize + (char *)sptr);
319 *--ctx->sp = (void *)abort; /* needed for alignment only */ 453 *--ctx->sp = (void *)abort; /* needed for alignment only */
320 *--ctx->sp = (void *)coro_init; 454 *--ctx->sp = (void *)coro_init;
321
322 #if CORO_WIN_TIB 455 #if CORO_WIN_TIB
323 *--ctx->sp = 0; /* ExceptionList */ 456 *--ctx->sp = 0; /* ExceptionList */
324 *--ctx->sp = (char *)sptr + ssize; /* StackBase */ 457 *--ctx->sp = (char *)sptr + ssize; /* StackBase */
325 *--ctx->sp = sptr; /* StackLimit */ 458 *--ctx->sp = sptr; /* StackLimit */
459 #endif
460 #elif CORO_ARM
461 /* return address stored in lr register, don't push anything */
462 #else
463 #error unsupported architecture
326 #endif 464 #endif
327 465
328 ctx->sp -= NUM_SAVED; 466 ctx->sp -= NUM_SAVED;
329 memset (ctx->sp, 0, sizeof (*ctx->sp) * NUM_SAVED); 467 memset (ctx->sp, 0, sizeof (*ctx->sp) * NUM_SAVED);
468
469 #if __i386__ || __x86_64__
470 /* done already */
471 #elif CORO_ARM
472 ctx->sp[0] = coro; /* r4 */
473 ctx->sp[1] = arg; /* r5 */
474 ctx->sp[8] = (char *)coro_init; /* lr */
475 #else
476 #error unsupported architecture
477 #endif
330 478
331# elif CORO_UCONTEXT 479# elif CORO_UCONTEXT
332 480
333 getcontext (&(ctx->uc)); 481 getcontext (&(ctx->uc));
334 482
357 coro_func func; 505 coro_func func;
358 void *arg; 506 void *arg;
359 coro_context *self, *main; 507 coro_context *self, *main;
360}; 508};
361 509
362static pthread_t null_tid;
363
364/* I'd so love to cast pthread_mutex_unlock to void (*)(void *)... */
365static void
366mutex_unlock_wrapper (void *arg)
367{
368 pthread_mutex_unlock ((pthread_mutex_t *)arg);
369}
370
371static void * 510static void *
372coro_init (void *args_) 511coro_init (void *args_)
373{ 512{
374 struct coro_init_args *args = (struct coro_init_args *)args_; 513 struct coro_init_args *args = (struct coro_init_args *)args_;
375 coro_func func = args->func; 514 coro_func func = args->func;
376 void *arg = args->arg; 515 void *arg = args->arg;
377 516
378 pthread_mutex_lock (&coro_mutex);
379
380 /* we try to be good citizens and use deferred cancellation and cleanup handlers */
381 pthread_cleanup_push (mutex_unlock_wrapper, &coro_mutex);
382 coro_transfer (args->self, args->main); 517 coro_transfer (args->self, args->main);
383 func (arg); 518 func (arg);
384 pthread_cleanup_pop (1);
385 519
386 return 0; 520 return 0;
387} 521}
388 522
389void 523void
390coro_transfer (coro_context *prev, coro_context *next) 524coro_transfer (coro_context *prev, coro_context *next)
391{ 525{
526 pthread_mutex_lock (&coro_mutex);
527
528 next->flags = 1;
392 pthread_cond_signal (&next->cv); 529 pthread_cond_signal (&next->cv);
530
531 prev->flags = 0;
532
533 while (!prev->flags)
393 pthread_cond_wait (&prev->cv, &coro_mutex); 534 pthread_cond_wait (&prev->cv, &coro_mutex);
394#if __FreeBSD__ /* freebsd is of course broken and needs manual testcancel calls... yay... */ 535
395 pthread_testcancel (); 536 if (prev->flags == 2)
396#endif 537 {
538 pthread_mutex_unlock (&coro_mutex);
539 pthread_cond_destroy (&prev->cv);
540 pthread_detach (pthread_self ());
541 pthread_exit (0);
542 }
543
544 pthread_mutex_unlock (&coro_mutex);
397} 545}
398 546
399void 547void
400coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, long ssize) 548coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize)
401{ 549{
402 static coro_context nctx; 550 static coro_context nctx;
403 static int once; 551 static int once;
404 552
405 if (!once) 553 if (!once)
406 { 554 {
407 once = 1; 555 once = 1;
408 556
409 pthread_mutex_lock (&coro_mutex);
410 pthread_cond_init (&nctx.cv, 0); 557 pthread_cond_init (&nctx.cv, 0);
411 null_tid = pthread_self ();
412 } 558 }
413 559
414 pthread_cond_init (&ctx->cv, 0); 560 pthread_cond_init (&ctx->cv, 0);
415 561
416 if (coro) 562 if (coro)
417 { 563 {
418 pthread_attr_t attr; 564 pthread_attr_t attr;
419 struct coro_init_args args; 565 struct coro_init_args args;
566 pthread_t id;
420 567
421 args.func = coro; 568 args.func = coro;
422 args.arg = arg; 569 args.arg = arg;
423 args.self = ctx; 570 args.self = ctx;
424 args.main = &nctx; 571 args.main = &nctx;
425 572
426 pthread_attr_init (&attr); 573 pthread_attr_init (&attr);
427#if __UCLIBC__ 574#if __UCLIBC__
428 /* exists, but is borked */ 575 /* exists, but is borked */
429 /*pthread_attr_setstacksize (&attr, (size_t)ssize);*/ 576 /*pthread_attr_setstacksize (&attr, (size_t)ssize);*/
577#elif __CYGWIN__
578 /* POSIX, not here */
579 pthread_attr_setstacksize (&attr, (size_t)ssize);
430#else 580#else
431 pthread_attr_setstack (&attr, sptr, (size_t)ssize); 581 pthread_attr_setstack (&attr, sptr, (size_t)ssize);
432#endif 582#endif
433 pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS); 583 pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
434 pthread_create (&ctx->id, &attr, coro_init, &args); 584 pthread_create (&id, &attr, coro_init, &args);
435 585
436 coro_transfer (args.main, args.self); 586 coro_transfer (args.main, args.self);
437 } 587 }
438 else
439 ctx->id = null_tid;
440} 588}
441 589
442void 590void
443coro_destroy (coro_context *ctx) 591coro_destroy (coro_context *ctx)
444{ 592{
445 if (!pthread_equal (ctx->id, null_tid)) 593 pthread_mutex_lock (&coro_mutex);
594 ctx->flags = 2;
595 pthread_cond_signal (&ctx->cv);
596 pthread_mutex_unlock (&coro_mutex);
597}
598
599/*****************************************************************************/
600/* fiber backend */
601/*****************************************************************************/
602#elif CORO_FIBER
603
604#define WIN32_LEAN_AND_MEAN
605#if _WIN32_WINNT < 0x0400
606 #undef _WIN32_WINNT
607 #define _WIN32_WINNT 0x0400
608#endif
609#include <windows.h>
610
611VOID CALLBACK
612coro_init (PVOID arg)
613{
614 coro_context *ctx = (coro_context *)arg;
615
616 ctx->coro (ctx->arg);
617}
618
619void
620coro_transfer (coro_context *prev, coro_context *next)
621{
622 if (!prev->fiber)
446 { 623 {
447 pthread_cancel (ctx->id); 624 prev->fiber = GetCurrentFiber ();
448 pthread_mutex_unlock (&coro_mutex); 625
449 pthread_join (ctx->id, 0); 626 if (prev->fiber == 0 || prev->fiber == (void *)0x1e00)
450 pthread_mutex_lock (&coro_mutex); 627 prev->fiber = ConvertThreadToFiber (0);
451 } 628 }
452 629
453 pthread_cond_destroy (&ctx->cv); 630 SwitchToFiber (next->fiber);
631}
632
633void
634coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize)
635{
636 ctx->fiber = 0;
637 ctx->coro = coro;
638 ctx->arg = arg;
639
640 if (!coro)
641 return;
642
643 ctx->fiber = CreateFiber (ssize, coro_init, ctx);
644}
645
646void
647coro_destroy (coro_context *ctx)
648{
649 DeleteFiber (ctx->fiber);
454} 650}
455 651
456#else 652#else
457# error unsupported backend 653 #error unsupported backend
654#endif
655
656/*****************************************************************************/
657/* stack management */
658/*****************************************************************************/
659#if CORO_STACKALLOC
660
661#include <stdlib.h>
662
663#ifndef _WIN32
664# include <unistd.h>
665#endif
666
667#if CORO_USE_VALGRIND
668# include <valgrind/valgrind.h>
669#endif
670
671#if _POSIX_MAPPED_FILES
672# include <sys/mman.h>
673# define CORO_MMAP 1
674# ifndef MAP_ANONYMOUS
675# ifdef MAP_ANON
676# define MAP_ANONYMOUS MAP_ANON
677# else
678# undef CORO_MMAP
679# endif
458#endif 680# endif
681# include <limits.h>
682#else
683# undef CORO_MMAP
684#endif
459 685
686#if _POSIX_MEMORY_PROTECTION
687# ifndef CORO_GUARDPAGES
688# define CORO_GUARDPAGES 4
689# endif
690#else
691# undef CORO_GUARDPAGES
692#endif
693
694#if !CORO_MMAP
695# undef CORO_GUARDPAGES
696#endif
697
698#if !__i386__ && !__x86_64__ && !__powerpc__ && !__arm__ && !__aarch64__ && !__m68k__ && !__alpha__ && !__mips__ && !__sparc64__
699# undef CORO_GUARDPAGES
700#endif
701
702#ifndef CORO_GUARDPAGES
703# define CORO_GUARDPAGES 0
704#endif
705
706#if !PAGESIZE
707 #if !CORO_MMAP
708 #define PAGESIZE 4096
709 #else
710 static size_t
711 coro_pagesize (void)
712 {
713 static size_t pagesize;
714
715 if (!pagesize)
716 pagesize = sysconf (_SC_PAGESIZE);
717
718 return pagesize;
719 }
720
721 #define PAGESIZE coro_pagesize ()
722 #endif
723#endif
724
725int
726coro_stack_alloc (struct coro_stack *stack, unsigned int size)
727{
728 if (!size)
729 size = 256 * 1024;
730
731 stack->sptr = 0;
732 stack->ssze = ((size_t)size * sizeof (void *) + PAGESIZE - 1) / PAGESIZE * PAGESIZE;
733
734#if CORO_FIBER
735
736 stack->sptr = (void *)stack;
737 return 1;
738
739#else
740
741 size_t ssze = stack->ssze + CORO_GUARDPAGES * PAGESIZE;
742 void *base;
743
744 #if CORO_MMAP
745 /* mmap supposedly does allocate-on-write for us */
746 base = mmap (0, ssze, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
747
748 if (base == (void *)-1)
749 {
750 /* some systems don't let us have executable heap */
751 /* we assume they won't need executable stack in that case */
752 base = mmap (0, ssze, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
753
754 if (base == (void *)-1)
755 return 0;
756 }
757
758 #if CORO_GUARDPAGES
759 mprotect (base, CORO_GUARDPAGES * PAGESIZE, PROT_NONE);
760 #endif
761
762 base = (void*)((char *)base + CORO_GUARDPAGES * PAGESIZE);
763 #else
764 base = malloc (ssze);
765 if (!base)
766 return 0;
767 #endif
768
769 #if CORO_USE_VALGRIND
770 stack->valgrind_id = VALGRIND_STACK_REGISTER ((char *)base, ((char *)base) + ssze - CORO_GUARDPAGES * PAGESIZE);
771 #endif
772
773 stack->sptr = base;
774 return 1;
775
776#endif
777}
778
779void
780coro_stack_free (struct coro_stack *stack)
781{
782#if CORO_FIBER
783 /* nop */
784#else
785 #if CORO_USE_VALGRIND
786 VALGRIND_STACK_DEREGISTER (stack->valgrind_id);
787 #endif
788
789 #if CORO_MMAP
790 if (stack->sptr)
791 munmap ((void*)((char *)stack->sptr - CORO_GUARDPAGES * PAGESIZE),
792 stack->ssze + CORO_GUARDPAGES * PAGESIZE);
793 #else
794 free (stack->sptr);
795 #endif
796#endif
797}
798
799#endif
800

Diff Legend

- Removed lines
+ Added lines
< Changed lines
> Changed lines