ViewVC Help
View File | Revision Log | Show Annotations | Download File
/cvs/libcoro/coro.c
(Generate patch)

Comparing libcoro/coro.c (file contents):
Revision 1.31 by root, Thu Apr 24 12:40:38 2008 UTC vs.
Revision 1.39 by root, Sat Nov 8 04:31:28 2008 UTC

38 * go to Ralf S. Engelschall <rse@engelschall.com>. 38 * go to Ralf S. Engelschall <rse@engelschall.com>.
39 */ 39 */
40 40
41#include "coro.h" 41#include "coro.h"
42 42
43#include <string.h>
44
45/*****************************************************************************/
46/* ucontext/setjmp/asm backends */
47/*****************************************************************************/
48#if CORO_UCONTEXT || CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM
49
50# if CORO_UCONTEXT
51# include <stddef.h>
52# endif
53
43#if !defined(STACK_ADJUST_PTR) 54# if !defined(STACK_ADJUST_PTR)
55# if __sgi
44/* IRIX is decidedly NON-unix */ 56/* IRIX is decidedly NON-unix */
45# if __sgi
46# define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8) 57# define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
47# define STACK_ADJUST_SIZE(sp,ss) ((ss) - 8) 58# define STACK_ADJUST_SIZE(sp,ss) ((ss) - 8)
48# elif (__i386__ && CORO_LINUX) || (_M_IX86 && CORO_LOSER) 59# elif (__i386__ && CORO_LINUX) || (_M_IX86 && CORO_LOSER)
49# define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss)) 60# define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss))
50# define STACK_ADJUST_SIZE(sp,ss) (ss) 61# define STACK_ADJUST_SIZE(sp,ss) (ss)
51# elif (__amd64__ && CORO_LINUX) || ((_M_AMD64 || _M_IA64) && CORO_LOSER) 62# elif (__amd64__ && CORO_LINUX) || ((_M_AMD64 || _M_IA64) && CORO_LOSER)
52# define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8) 63# define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
53# define STACK_ADJUST_SIZE(sp,ss) (ss) 64# define STACK_ADJUST_SIZE(sp,ss) (ss)
54# else 65# else
55# define STACK_ADJUST_PTR(sp,ss) (sp) 66# define STACK_ADJUST_PTR(sp,ss) (sp)
56# define STACK_ADJUST_SIZE(sp,ss) (ss) 67# define STACK_ADJUST_SIZE(sp,ss) (ss)
57# endif 68# endif
58#endif 69# endif
59 70
60#if CORO_UCONTEXT
61# include <stddef.h>
62#endif
63
64#if CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM
65
66#include <stdlib.h> 71# include <stdlib.h>
67 72
68#if CORO_SJLJ 73# if CORO_SJLJ
69# include <stdio.h> 74# include <stdio.h>
70# include <signal.h> 75# include <signal.h>
71# include <unistd.h> 76# include <unistd.h>
72#endif 77# endif
73 78
74static volatile coro_func coro_init_func; 79static volatile coro_func coro_init_func;
75static volatile void *coro_init_arg; 80static volatile void *coro_init_arg;
76static volatile coro_context *new_coro, *create_coro; 81static volatile coro_context *new_coro, *create_coro;
77 82
78/* what we really want to detect here is wether we use a new-enough version of GAS */ 83/* what we really want to detect here is wether we use a new-enough version of GAS */
79/* instead, check for gcc 3, ELF and GNU/Linux and hope for the best */ 84/* with dwarf debug info. instead, check for gcc 3, ELF and GNU/Linux and hope for the best */
80#if __GNUC__ >= 3 && __ELF__ && __linux__ 85# if __GNUC__ >= 3 && __ELF__ && __linux__
81# define HAVE_CFI 1 86# define HAVE_CFI 1
82#endif 87# endif
83 88
84static void 89static void
85coro_init (void) 90coro_init (void)
86{ 91{
87 volatile coro_func func = coro_init_func; 92 volatile coro_func func = coro_init_func;
95 abort (); 100 abort ();
96} 101}
97 102
# if CORO_SJLJ

/* set once the trampoline handler has captured its context,
   so coro_create knows it may tear down the signal machinery */
static volatile int trampoline_done;

/* trampoline signal handler - runs on the coroutine's stack (SA_ONSTACK)
   and saves a jump context there; longjmp'ing back into it later starts
   the coroutine proper via coro_init */
static void
trampoline (int sig)
{
  if (
      /* prefer _setjmp where available: it does not save the signal mask,
         which would be wrong (we are inside a handler) and slow */
      #if _XOPEN_UNIX > 0
        _setjmp (((coro_context *)new_coro)->env)
      #else
        setjmp (((coro_context *)new_coro)->env)
      #endif
     )
    {
      #if HAVE_CFI
        asm (".cfi_startproc");
      #endif
      coro_init (); /* start it */
      #if HAVE_CFI
        asm (".cfi_endproc");
      #endif
    }
  else
    trampoline_done = 1; /* context captured - signal coro_create to continue */
}

# endif
131
# if CORO_ASM

  /* hand-written context switch: save the callee-saved registers on the
     current stack, swap stack pointers through the coro_context, restore,
     and return on the other stack.
     amd64 follows the SysV ABI (prev in %rdi, next in %rsi); the i386
     variant takes prev/next in %eax/%edx - NOTE(review): this presumably
     relies on a register-args attribute on the prototype in coro.h; verify. */
  asm (
       ".text\n"
       ".globl coro_transfer\n"
       ".type coro_transfer, @function\n"
       "coro_transfer:\n"
       #if __amd64
         #define NUM_SAVED 6
         "\tpush %rbp\n"
         "\tpush %rbx\n"
         "\tpush %r12\n"
         "\tpush %r13\n"
         "\tpush %r14\n"
         "\tpush %r15\n"
         "\tmov %rsp, (%rdi)\n"
         "\tmov (%rsi), %rsp\n"
         "\tpop %r15\n"
         "\tpop %r14\n"
         "\tpop %r13\n"
         "\tpop %r12\n"
         "\tpop %rbx\n"
         "\tpop %rbp\n"
       #elif __i386
         #define NUM_SAVED 4
         "\tpush %ebp\n"
         "\tpush %ebx\n"
         "\tpush %esi\n"
         "\tpush %edi\n"
         "\tmov %esp, (%eax)\n"
         "\tmov (%edx), %esp\n"
         "\tpop %edi\n"
         "\tpop %esi\n"
         "\tpop %ebx\n"
         "\tpop %ebp\n"
       #else
         #error unsupported architecture
       #endif
       "\tret\n"
  );

# endif
174
175void
176coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, long ssize)
177{
178 coro_context nctx;
179# if CORO_SJLJ
180 stack_t ostk, nstk;
181 struct sigaction osa, nsa;
182 sigset_t nsig, osig;
183# endif
184
185 if (!coro)
186 return;
187
188 coro_init_func = coro;
189 coro_init_arg = arg;
190
191 new_coro = ctx;
192 create_coro = &nctx;
193
194# if CORO_SJLJ
195 /* we use SIGUSR2. first block it, then fiddle with it. */
196
197 sigemptyset (&nsig);
198 sigaddset (&nsig, SIGUSR2);
199 sigprocmask (SIG_BLOCK, &nsig, &osig);
200
201 nsa.sa_handler = trampoline;
202 sigemptyset (&nsa.sa_mask);
203 nsa.sa_flags = SA_ONSTACK;
204
205 if (sigaction (SIGUSR2, &nsa, &osa))
107 { 206 {
108#if HAVE_CFI 207 perror ("sigaction");
109 asm (".cfi_startproc"); 208 abort ();
209 }
210
211 /* set the new stack */
212 nstk.ss_sp = STACK_ADJUST_PTR (sptr,ssize); /* yes, some platforms (IRIX) get this wrong. */
213 nstk.ss_size = STACK_ADJUST_SIZE (sptr,ssize);
214 nstk.ss_flags = 0;
215
216 if (sigaltstack (&nstk, &ostk) < 0)
217 {
218 perror ("sigaltstack");
219 abort ();
220 }
221
222 trampoline_done = 0;
223 kill (getpid (), SIGUSR2);
224 sigfillset (&nsig); sigdelset (&nsig, SIGUSR2);
225
226 while (!trampoline_done)
227 sigsuspend (&nsig);
228
229 sigaltstack (0, &nstk);
230 nstk.ss_flags = SS_DISABLE;
231 if (sigaltstack (&nstk, 0) < 0)
232 perror ("sigaltstack");
233
234 sigaltstack (0, &nstk);
235 if (~nstk.ss_flags & SS_DISABLE)
236 abort ();
237
238 if (~ostk.ss_flags & SS_DISABLE)
239 sigaltstack (&ostk, 0);
240
241 sigaction (SIGUSR2, &osa, 0);
242 sigprocmask (SIG_SETMASK, &osig, 0);
243
244# elif CORO_LOSER
245
246 setjmp (ctx->env);
247 #if __CYGWIN__
248 ctx->env[7] = (long)((char *)sptr + ssize) - sizeof (long);
249 ctx->env[8] = (long)coro_init;
250 #elif defined(_M_IX86)
251 ((_JUMP_BUFFER *)&ctx->env)->Eip = (long)coro_init;
252 ((_JUMP_BUFFER *)&ctx->env)->Esp = (long)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
253 #elif defined(_M_AMD64)
254 ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64)coro_init;
255 ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
256 #elif defined(_M_IA64)
257 ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64)coro_init;
258 ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
259 #else
260 #error "microsoft libc or architecture not supported"
261 #endif
262
263# elif CORO_LINUX
264
265 _setjmp (ctx->env);
266 #if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (JB_PC) && defined (JB_SP)
267 ctx->env[0].__jmpbuf[JB_PC] = (long)coro_init;
268 ctx->env[0].__jmpbuf[JB_SP] = (long)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
269 #elif __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (__mc68000__)
270 ctx->env[0].__jmpbuf[0].__aregs[0] = (long int)coro_init;
271 ctx->env[0].__jmpbuf[0].__sp = (int *)((char *)sptr + ssize) - sizeof (long);
272 #elif defined (__GNU_LIBRARY__) && defined (__i386__)
273 ctx->env[0].__jmpbuf[0].__pc = (char *)coro_init;
274 ctx->env[0].__jmpbuf[0].__sp = (void *)((char *)sptr + ssize) - sizeof (long);
275 #elif defined (__GNU_LIBRARY__) && defined (__amd64__)
276 ctx->env[0].__jmpbuf[JB_PC] = (long)coro_init;
277 ctx->env[0].__jmpbuf[0].__sp = (void *)((char *)sptr + ssize) - sizeof (long);
278 #else
279 #error "linux libc or architecture not supported"
280 #endif
281
282# elif CORO_IRIX
283
284 setjmp (ctx->env);
285 ctx->env[JB_PC] = (__uint64_t)coro_init;
286 ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
287
288# elif CORO_ASM
289
290 ctx->sp = (volatile void **)(ssize + (char *)sptr);
291 *--ctx->sp = (void *)abort; /* needed for alignment only */
292 *--ctx->sp = (void *)coro_init;
293 ctx->sp -= NUM_SAVED;
294
295# elif CORO_UCONTEXT
296
297 getcontext (&(ctx->uc));
298
299 ctx->uc.uc_link = 0;
300 ctx->uc.uc_stack.ss_sp = sptr;
301 ctx->uc.uc_stack.ss_size = (size_t)ssize;
302 ctx->uc.uc_stack.ss_flags = 0;
303
304 makecontext (&(ctx->uc), (void (*)())coro_init, 0);
305
306# endif
307
308 coro_transfer (create_coro, new_coro);
309}
310
110#endif 311#endif
111 coro_init (); /* start it */
112#if HAVE_CFI
113 asm (".cfi_endproc");
114#endif
115 }
116 else
117 trampoline_count++;
118}
119 312
120# endif 313/*****************************************************************************/
121 314/* pthread backend */
122#endif 315/*****************************************************************************/
123
124#if CORO_ASM
125void __attribute__((__noinline__, __fastcall__))
126coro_transfer (struct coro_context *prev, struct coro_context *next)
127{
128 asm volatile (
129#if __amd64
130# define NUM_CLOBBERED 5
131 "push %%rbx\n\t"
132 "push %%r12\n\t"
133 "push %%r13\n\t"
134 "push %%r14\n\t"
135 "push %%r15\n\t"
136 "mov %%rsp, %0\n\t"
137 "mov %1, %%rsp\n\t"
138 "pop %%r15\n\t"
139 "pop %%r14\n\t"
140 "pop %%r13\n\t"
141 "pop %%r12\n\t"
142 "pop %%rbx\n\t"
143#elif __i386
144# define NUM_CLOBBERED 4
145 "push %%ebx\n\t"
146 "push %%esi\n\t"
147 "push %%edi\n\t"
148 "push %%ebp\n\t"
149 "mov %%esp, %0\n\t"
150 "mov %1, %%esp\n\t"
151 "pop %%ebp\n\t"
152 "pop %%edi\n\t"
153 "pop %%esi\n\t"
154 "pop %%ebx\n\t"
155#else
156# error unsupported architecture
157#endif
158 : "=m" (prev->sp)
159 : "m" (next->sp)
160 );
161}
162#endif
163 316
164#if CORO_PTHREAD 317#if CORO_PTHREAD
165 318
319/* this mutex will be locked by the running coroutine */
320pthread_mutex_t coro_mutex = PTHREAD_MUTEX_INITIALIZER;
321
166struct coro_init_args { 322struct coro_init_args
323{
167 coro_func func; 324 coro_func func;
168 void *arg; 325 void *arg;
169 coro_context *self, *main; 326 coro_context *self, *main;
170}; 327};
171 328
/* thread id of the creating/main thread - used to mark contexts that
   have no thread of their own (see coro_create/coro_destroy) */
static pthread_t null_tid;

/* I'd so love to cast pthread_mutex_unlock to void (*)(void *)... */
/* ...but that is undefined behaviour, hence this cleanup-handler shim */
static void
mutex_unlock_wrapper (void *arg)
{
  pthread_mutex_unlock ((pthread_mutex_t *)arg);
}
173 337
174static void * 338static void *
175trampoline (void *args_) 339coro_init (void *args_)
176{ 340{
177 struct coro_init_args *args = (struct coro_init_args *)args_; 341 struct coro_init_args *args = (struct coro_init_args *)args_;
178 coro_func func = args->func; 342 coro_func func = args->func;
179 void *arg = args->arg; 343 void *arg = args->arg;
180 344
181 pthread_mutex_lock (&coro_mutex); 345 pthread_mutex_lock (&coro_mutex);
182 pthread_cond_destroy (&args->self->c); 346
347 /* we try to be good citizens and use deferred cancellation and cleanup handlers */
348 pthread_cleanup_push (mutex_unlock_wrapper, &coro_mutex);
183 coro_transfer (args->self, args->main); 349 coro_transfer (args->self, args->main);
184 func (arg); 350 func (arg);
185 pthread_mutex_unlock (&coro_mutex); 351 pthread_cleanup_pop (1);
186 352
187 return 0; 353 return 0;
188} 354}
189 355
356void
190void coro_transfer(coro_context *prev, coro_context *next) 357coro_transfer (coro_context *prev, coro_context *next)
191{ 358{
192 pthread_cond_init (&prev->c, 0);
193 pthread_cond_signal (&next->c); 359 pthread_cond_signal (&next->cv);
194 pthread_cond_wait (&prev->c, &coro_mutex); 360 pthread_cond_wait (&prev->cv, &coro_mutex);
195 pthread_cond_destroy (&prev->c);
196} 361}
197 362
198#endif 363void
199 364coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, long ssize)
200/* initialize a machine state */
201void coro_create (coro_context *ctx,
202 coro_func coro, void *arg,
203 void *sptr, long ssize)
204{ 365{
205#if CORO_UCONTEXT
206
207 getcontext (&(ctx->uc));
208
209 ctx->uc.uc_link = 0;
210 ctx->uc.uc_stack.ss_sp = STACK_ADJUST_PTR (sptr,ssize);
211 ctx->uc.uc_stack.ss_size = (size_t) STACK_ADJUST_SIZE (sptr,ssize);
212 ctx->uc.uc_stack.ss_flags = 0;
213
214 makecontext (&(ctx->uc), (void (*)()) coro, 1, arg);
215
216#elif CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM
217
218# if CORO_SJLJ
219 stack_t ostk, nstk;
220 struct sigaction osa, nsa;
221 sigset_t nsig, osig;
222# endif
223 coro_context nctx; 366 static coro_context nctx;
224
225 coro_init_func = coro;
226 coro_init_arg = arg;
227
228 new_coro = ctx;
229 create_coro = &nctx;
230
231# if CORO_SJLJ
232 /* we use SIGUSR2. first block it, then fiddle with it. */
233
234 sigemptyset (&nsig);
235 sigaddset (&nsig, SIGUSR2);
236 sigprocmask (SIG_BLOCK, &nsig, &osig);
237
238 nsa.sa_handler = trampoline;
239 sigemptyset (&nsa.sa_mask);
240 nsa.sa_flags = SA_ONSTACK;
241
242 if (sigaction (SIGUSR2, &nsa, &osa))
243 {
244 perror ("sigaction");
245 abort ();
246 }
247
248 /* set the new stack */
249 nstk.ss_sp = STACK_ADJUST_PTR (sptr,ssize); /* yes, some platforms (IRIX) get this wrong. */
250 nstk.ss_size = STACK_ADJUST_SIZE (sptr,ssize);
251 nstk.ss_flags = 0;
252
253 if (sigaltstack (&nstk, &ostk) < 0)
254 {
255 perror ("sigaltstack");
256 abort ();
257 }
258
259 trampoline_count = 0;
260 kill (getpid (), SIGUSR2);
261 sigfillset (&nsig); sigdelset (&nsig, SIGUSR2);
262
263 while (!trampoline_count)
264 sigsuspend (&nsig);
265
266 sigaltstack (0, &nstk);
267 nstk.ss_flags = SS_DISABLE;
268 if (sigaltstack (&nstk, 0) < 0)
269 perror ("sigaltstack");
270
271 sigaltstack (0, &nstk);
272 if (~nstk.ss_flags & SS_DISABLE)
273 abort ();
274
275 if (~ostk.ss_flags & SS_DISABLE)
276 sigaltstack (&ostk, 0);
277
278 sigaction (SIGUSR2, &osa, 0);
279
280 sigprocmask (SIG_SETMASK, &osig, 0);
281
282# elif CORO_LOSER
283
284 setjmp (ctx->env);
285#if __CYGWIN__
286 ctx->env[7] = (long)((char *)sptr + ssize);
287 ctx->env[8] = (long)coro_init;
288#elif defined(_M_IX86)
289 ((_JUMP_BUFFER *)&ctx->env)->Eip = (long)coro_init;
290 ((_JUMP_BUFFER *)&ctx->env)->Esp = (long)STACK_ADJUST_PTR (sptr, ssize);
291#elif defined(_M_AMD64)
292 ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64)coro_init;
293 ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64)STACK_ADJUST_PTR (sptr, ssize);
294#elif defined(_M_IA64)
295 ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64)coro_init;
296 ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64)STACK_ADJUST_PTR (sptr, ssize);
297#else
298# error "microsoft libc or architecture not supported"
299#endif
300
301# elif CORO_LINUX
302
303 _setjmp (ctx->env);
304#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (JB_PC) && defined (JB_SP)
305 ctx->env[0].__jmpbuf[JB_PC] = (long)coro_init;
306 ctx->env[0].__jmpbuf[JB_SP] = (long)STACK_ADJUST_PTR (sptr, ssize);
307#elif __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (__mc68000__)
308 ctx->env[0].__jmpbuf[0].__aregs[0] = (long int)coro_init;
309 ctx->env[0].__jmpbuf[0].__sp = (int *)((char *)sptr + ssize);
310#elif defined (__GNU_LIBRARY__) && defined (__i386__)
311 ctx->env[0].__jmpbuf[0].__pc = (char *)coro_init;
312 ctx->env[0].__jmpbuf[0].__sp = (void *)((char *)sptr + ssize);
313#elif defined (__GNU_LIBRARY__) && defined (__amd64__)
314 ctx->env[0].__jmpbuf[JB_PC] = (long)coro_init;
315 ctx->env[0].__jmpbuf[JB_RSP] = (long)STACK_ADJUST_PTR (sptr, ssize);
316#else
317# error "linux libc or architecture not supported"
318#endif
319
320# elif CORO_IRIX
321
322 setjmp (ctx->env);
323 ctx->env[JB_PC] = (__uint64_t)coro_init;
324 ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize);
325
326# elif CORO_ASM
327
328 ctx->sp = (volatile void **)(ssize + (char *)sptr);
329 *--ctx->sp = (void *)coro_init;
330 *--ctx->sp = (void *)coro_init; // this is needed when the prologue saves ebp
331 ctx->sp -= NUM_CLOBBERED;
332
333# endif
334
335 coro_transfer ((coro_context *)create_coro, (coro_context *)new_coro);
336
337# elif CORO_PTHREAD
338
339 pthread_t id;
340 pthread_attr_t attr;
341 coro_context nctx;
342 struct coro_init_args args;
343 static int once; 367 static int once;
344 368
345 if (!once) 369 if (!once)
346 { 370 {
371 once = 1;
372
347 pthread_mutex_lock (&coro_mutex); 373 pthread_mutex_lock (&coro_mutex);
348 once = 1; 374 pthread_cond_init (&nctx.cv, 0);
375 null_tid = pthread_self ();
376 }
377
378 pthread_cond_init (&ctx->cv, 0);
379
380 if (coro)
349 } 381 {
382 pthread_attr_t attr;
383 struct coro_init_args args;
350 384
351 args.func = coro; 385 args.func = coro;
352 args.arg = arg; 386 args.arg = arg;
353 args.self = ctx; 387 args.self = ctx;
354 args.main = &nctx; 388 args.main = &nctx;
355 389
356 pthread_attr_init (&attr); 390 pthread_attr_init (&attr);
357 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
358 pthread_attr_setstack (&attr, sptr, (size_t)ssize); 391 pthread_attr_setstack (&attr, sptr, (size_t)ssize);
392 pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
359 pthread_create (&id, &attr, trampoline, &args); 393 pthread_create (&ctx->id, &attr, coro_init, &args);
360 394
361 pthread_cond_init (&args.self->c, 0);
362 coro_transfer (args.main, args.self); 395 coro_transfer (args.main, args.self);
396 }
397 else
398 ctx->id = null_tid;
399}
363 400
364#else 401void
365# error unsupported backend 402coro_destroy (coro_context *ctx)
403{
404 if (!pthread_equal (ctx->id, null_tid))
405 {
406 pthread_cancel (ctx->id);
407 pthread_mutex_unlock (&coro_mutex);
408 pthread_join (ctx->id, 0);
409 pthread_mutex_lock (&coro_mutex);
410 }
411
412 pthread_cond_destroy (&ctx->cv);
413}
414
366#endif 415#endif
367}
368 416

Diff Legend

- Removed lines
+ Added lines
< Changed lines (old revision)
> Changed lines (new revision)