/cvs/libcoro/coro.c

Comparing libcoro/coro.c (file contents):
Revision 1.31 by root, Thu Apr 24 12:40:38 2008 UTC vs.
Revision 1.52 by root, Fri Apr 2 20:21:21 2010 UTC

--- libcoro/coro.c	(revision 1.31, Thu Apr 24 12:40:38 2008 UTC)
+++ libcoro/coro.c	(revision 1.52, Fri Apr 2 20:21:21 2010 UTC)
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2001-2008 Marc Alexander Lehmann <schmorp@schmorp.de>
+ * Copyright (c) 2001-2009 Marc Alexander Lehmann <schmorp@schmorp.de>
  *
  * Redistribution and use in source and binary forms, with or without modifica-
  * tion, are permitted provided that the following conditions are met:
  *
  * 1. Redistributions of source code must retain the above copyright notice,
@@ -38,191 +38,157 @@
  * go to Ralf S. Engelschall <rse@engelschall.com>.
  */
 
 #include "coro.h"
 
-#if !defined(STACK_ADJUST_PTR)
-/* IRIX is decidedly NON-unix */
-# if __sgi
-#  define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
-#  define STACK_ADJUST_SIZE(sp,ss) ((ss) - 8)
-# elif (__i386__ && CORO_LINUX) || (_M_IX86 && CORO_LOSER)
-#  define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss))
-#  define STACK_ADJUST_SIZE(sp,ss) (ss)
-# elif (__amd64__ && CORO_LINUX) || ((_M_AMD64 || _M_IA64) && CORO_LOSER)
-#  define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
-#  define STACK_ADJUST_SIZE(sp,ss) (ss)
-# else
-#  define STACK_ADJUST_PTR(sp,ss) (sp)
-#  define STACK_ADJUST_SIZE(sp,ss) (ss)
-# endif
-#endif
+#include <string.h>
+
+/*****************************************************************************/
+/* ucontext/setjmp/asm backends */
+/*****************************************************************************/
+#if CORO_UCONTEXT || CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM
+
+# if CORO_UCONTEXT
+#  include <stddef.h>
+# endif
+
+# if !defined(STACK_ADJUST_PTR)
+#  if __sgi
+/* IRIX is decidedly NON-unix */
+#   define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
+#   define STACK_ADJUST_SIZE(sp,ss) ((ss) - 8)
+#  elif (__i386__ && CORO_LINUX) || (_M_IX86 && CORO_LOSER)
+#   define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss))
+#   define STACK_ADJUST_SIZE(sp,ss) (ss)
+#  elif (__amd64__ && CORO_LINUX) || ((_M_AMD64 || _M_IA64) && CORO_LOSER)
+#   define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8)
+#   define STACK_ADJUST_SIZE(sp,ss) (ss)
+#  else
+#   define STACK_ADJUST_PTR(sp,ss) (sp)
+#   define STACK_ADJUST_SIZE(sp,ss) (ss)
+#  endif
+# endif
 
-#if CORO_UCONTEXT
-# include <stddef.h>
-#endif
-
-#if CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM
-
-#include <stdlib.h>
+# include <stdlib.h>
 
-#if CORO_SJLJ
-# include <stdio.h>
-# include <signal.h>
-# include <unistd.h>
-#endif
+# if CORO_SJLJ
+#  include <stdio.h>
+#  include <signal.h>
+#  include <unistd.h>
+# endif
 
-static volatile coro_func coro_init_func;
-static volatile void *coro_init_arg;
-static volatile coro_context *new_coro, *create_coro;
-
-/* what we really want to detect here is wether we use a new-enough version of GAS */
-/* instead, check for gcc 3, ELF and GNU/Linux and hope for the best */
-#if __GNUC__ >= 3 && __ELF__ && __linux__
-# define HAVE_CFI 1
-#endif
+static coro_func coro_init_func;
+static void *coro_init_arg;
+static coro_context *new_coro, *create_coro;
 
 static void
 coro_init (void)
 {
   volatile coro_func func = coro_init_func;
   volatile void *arg = coro_init_arg;
 
-  coro_transfer ((coro_context *)new_coro, (coro_context *)create_coro);
+  coro_transfer (new_coro, create_coro);
 
   func ((void *)arg);
 
   /* the new coro returned. bad. just abort() for now */
   abort ();
 }
 
 # if CORO_SJLJ
 
-static volatile int trampoline_count;
+static volatile int trampoline_done;
 
 /* trampoline signal handler */
 static void
 trampoline (int sig)
 {
-  if (setjmp (((coro_context *)new_coro)->env))
-    {
-#if HAVE_CFI
-      asm (".cfi_startproc");
-#endif
-      coro_init (); /* start it */
-#if HAVE_CFI
-      asm (".cfi_endproc");
-#endif
-    }
+  if (coro_setjmp (new_coro->env))
+    coro_init (); /* start it */
   else
-    trampoline_count++;
+    trampoline_done = 1;
 }
 
 # endif
-
-#endif
 
-#if CORO_ASM
-void __attribute__((__noinline__, __fastcall__))
-coro_transfer (struct coro_context *prev, struct coro_context *next)
-{
-  asm volatile (
-#if __amd64
-# define NUM_CLOBBERED 5
-    "push %%rbx\n\t"
-    "push %%r12\n\t"
-    "push %%r13\n\t"
-    "push %%r14\n\t"
-    "push %%r15\n\t"
-    "mov %%rsp, %0\n\t"
-    "mov %1, %%rsp\n\t"
-    "pop %%r15\n\t"
-    "pop %%r14\n\t"
-    "pop %%r13\n\t"
-    "pop %%r12\n\t"
-    "pop %%rbx\n\t"
-#elif __i386
-# define NUM_CLOBBERED 4
-    "push %%ebx\n\t"
-    "push %%esi\n\t"
-    "push %%edi\n\t"
-    "push %%ebp\n\t"
-    "mov %%esp, %0\n\t"
-    "mov %1, %%esp\n\t"
-    "pop %%ebp\n\t"
-    "pop %%edi\n\t"
-    "pop %%esi\n\t"
-    "pop %%ebx\n\t"
-#else
-# error unsupported architecture
-#endif
-    : "=m" (prev->sp)
-    : "m" (next->sp)
-  );
-}
-#endif
+# if CORO_ASM
+
+  asm (
+     ".text\n"
+     ".globl coro_transfer\n"
+     ".type coro_transfer, @function\n"
+     "coro_transfer:\n"
+     /* windows, of course, gives a shit on the amd64 ABI and uses different registers */
+     /* http://blogs.msdn.com/freik/archive/2005/03/17/398200.aspx */
+     #if __amd64
+     #define NUM_SAVED 6
+     "\tpush %rbp\n"
+     "\tpush %rbx\n"
+     "\tpush %r12\n"
+     "\tpush %r13\n"
+     "\tpush %r14\n"
+     "\tpush %r15\n"
+     #if CORO_WIN_TIB
+     "\tpush %gs:0x0\n"
+     "\tpush %gs:0x8\n"
+     "\tpush %gs:0xc\n"
+     #endif
+     "\tmov %rsp, (%rdi)\n"
+     "\tmov (%rsi), %rsp\n"
+     #if CORO_WIN_TIB
+     "\tpop %gs:0xc\n"
+     "\tpop %gs:0x8\n"
+     "\tpop %gs:0x0\n"
+     #endif
+     "\tpop %r15\n"
+     "\tpop %r14\n"
+     "\tpop %r13\n"
+     "\tpop %r12\n"
+     "\tpop %rbx\n"
+     "\tpop %rbp\n"
+     #elif __i386
+     #define NUM_SAVED 4
+     "\tpush %ebp\n"
+     "\tpush %ebx\n"
+     "\tpush %esi\n"
+     "\tpush %edi\n"
+     #if CORO_WIN_TIB
+     "\tpush %fs:0\n"
+     "\tpush %fs:4\n"
+     "\tpush %fs:8\n"
+     #endif
+     "\tmov %esp, (%eax)\n"
+     "\tmov (%edx), %esp\n"
+     #if CORO_WIN_TIB
+     "\tpop %fs:8\n"
+     "\tpop %fs:4\n"
+     "\tpop %fs:0\n"
+     #endif
+     "\tpop %edi\n"
+     "\tpop %esi\n"
+     "\tpop %ebx\n"
+     "\tpop %ebp\n"
+     #else
+     #error unsupported architecture
+     #endif
+     "\tret\n"
+  );
+
+# endif
 
-#if CORO_PTHREAD
-
-struct coro_init_args {
-  coro_func func;
-  void *arg;
-  coro_context *self, *main;
-};
-
-pthread_mutex_t coro_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-static void *
-trampoline (void *args_)
+void
+coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, long ssize)
 {
-  struct coro_init_args *args = (struct coro_init_args *)args_;
-  coro_func func = args->func;
-  void *arg = args->arg;
-
-  pthread_mutex_lock (&coro_mutex);
-  pthread_cond_destroy (&args->self->c);
-  coro_transfer (args->self, args->main);
-  func (arg);
-  pthread_mutex_unlock (&coro_mutex);
-
-  return 0;
-}
-
-void coro_transfer(coro_context *prev, coro_context *next)
-{
-  pthread_cond_init (&prev->c, 0);
-  pthread_cond_signal (&next->c);
-  pthread_cond_wait (&prev->c, &coro_mutex);
-  pthread_cond_destroy (&prev->c);
-}
-
-#endif
-
-/* initialize a machine state */
-void coro_create (coro_context *ctx,
-                  coro_func coro, void *arg,
-                  void *sptr, long ssize)
-{
-#if CORO_UCONTEXT
-
-  getcontext (&(ctx->uc));
-
-  ctx->uc.uc_link = 0;
-  ctx->uc.uc_stack.ss_sp = STACK_ADJUST_PTR (sptr,ssize);
-  ctx->uc.uc_stack.ss_size = (size_t) STACK_ADJUST_SIZE (sptr,ssize);
-  ctx->uc.uc_stack.ss_flags = 0;
-
-  makecontext (&(ctx->uc), (void (*)()) coro, 1, arg);
-
-#elif CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM
-
+  coro_context nctx;
 # if CORO_SJLJ
   stack_t ostk, nstk;
   struct sigaction osa, nsa;
   sigset_t nsig, osig;
 # endif
-  coro_context nctx;
 
+  if (!coro)
+    return;
+
   coro_init_func = coro;
   coro_init_arg  = arg;
 
   new_coro    = ctx;
@@ -254,15 +220,15 @@
     {
       perror ("sigaltstack");
      abort ();
     }
 
-  trampoline_count = 0;
+  trampoline_done = 0;
   kill (getpid (), SIGUSR2);
   sigfillset (&nsig); sigdelset (&nsig, SIGUSR2);
 
-  while (!trampoline_count)
+  while (!trampoline_done)
     sigsuspend (&nsig);
 
   sigaltstack (0, &nstk);
   nstk.ss_flags = SS_DISABLE;
   if (sigaltstack (&nstk, 0) < 0)
@@ -274,95 +240,195 @@
 
   if (~ostk.ss_flags & SS_DISABLE)
     sigaltstack (&ostk, 0);
 
   sigaction (SIGUSR2, &osa, 0);
-
   sigprocmask (SIG_SETMASK, &osig, 0);
 
 # elif CORO_LOSER
 
-  setjmp (ctx->env);
-#if __CYGWIN__
-  ctx->env[7] = (long)((char *)sptr + ssize);
-  ctx->env[8] = (long)coro_init;
-#elif defined(_M_IX86)
-  ((_JUMP_BUFFER *)&ctx->env)->Eip = (long)coro_init;
-  ((_JUMP_BUFFER *)&ctx->env)->Esp = (long)STACK_ADJUST_PTR (sptr, ssize);
-#elif defined(_M_AMD64)
-  ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64)coro_init;
-  ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64)STACK_ADJUST_PTR (sptr, ssize);
-#elif defined(_M_IA64)
-  ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64)coro_init;
-  ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64)STACK_ADJUST_PTR (sptr, ssize);
-#else
-# error "microsoft libc or architecture not supported"
-#endif
+  coro_setjmp (ctx->env);
+  #if __CYGWIN__ && __i386
+  ctx->env[8] = (long) coro_init;
+  ctx->env[7] = (long) ((char *)sptr + ssize) - sizeof (long);
+  #elif __CYGWIN__ && __x86_64
+  ctx->env[7] = (long) coro_init;
+  ctx->env[6] = (long) ((char *)sptr + ssize) - sizeof (long);
+  #elif defined(__MINGW32__)
+  ctx->env[5] = (long) coro_init;
+  ctx->env[4] = (long) ((char *)sptr + ssize) - sizeof (long);
+  #elif defined(_M_IX86)
+  ((_JUMP_BUFFER *)&ctx->env)->Eip = (long) coro_init;
+  ((_JUMP_BUFFER *)&ctx->env)->Esp = (long) STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
+  #elif defined(_M_AMD64)
+  ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64) coro_init;
+  ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64);
+  #elif defined(_M_IA64)
+  ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64) coro_init;
+  ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64);
+  #else
+  #error "microsoft libc or architecture not supported"
+  #endif
 
 # elif CORO_LINUX
 
-  _setjmp (ctx->env);
-#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (JB_PC) && defined (JB_SP)
-  ctx->env[0].__jmpbuf[JB_PC] = (long)coro_init;
-  ctx->env[0].__jmpbuf[JB_SP] = (long)STACK_ADJUST_PTR (sptr, ssize);
-#elif __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (__mc68000__)
-  ctx->env[0].__jmpbuf[0].__aregs[0] = (long int)coro_init;
-  ctx->env[0].__jmpbuf[0].__sp = (int *)((char *)sptr + ssize);
-#elif defined (__GNU_LIBRARY__) && defined (__i386__)
-  ctx->env[0].__jmpbuf[0].__pc = (char *)coro_init;
-  ctx->env[0].__jmpbuf[0].__sp = (void *)((char *)sptr + ssize);
-#elif defined (__GNU_LIBRARY__) && defined (__amd64__)
-  ctx->env[0].__jmpbuf[JB_PC] = (long)coro_init;
-  ctx->env[0].__jmpbuf[JB_RSP] = (long)STACK_ADJUST_PTR (sptr, ssize);
-#else
-# error "linux libc or architecture not supported"
-#endif
+  coro_setjmp (ctx->env);
+  #if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (JB_PC) && defined (JB_SP)
+  ctx->env[0].__jmpbuf[JB_PC] = (long) coro_init;
+  ctx->env[0].__jmpbuf[JB_SP] = (long) STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
+  #elif __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (__mc68000__)
+  ctx->env[0].__jmpbuf[0].__aregs[0] = (long int)coro_init;
+  ctx->env[0].__jmpbuf[0].__sp = (int *) ((char *)sptr + ssize) - sizeof (long);
+  #elif defined (__GNU_LIBRARY__) && defined (__i386__)
+  ctx->env[0].__jmpbuf[0].__pc = (char *) coro_init;
+  ctx->env[0].__jmpbuf[0].__sp = (void *) ((char *)sptr + ssize) - sizeof (long);
+  #elif defined (__GNU_LIBRARY__) && defined (__amd64__)
+  ctx->env[0].__jmpbuf[JB_PC] = (long) coro_init;
+  ctx->env[0].__jmpbuf[0].__sp = (void *) ((char *)sptr + ssize) - sizeof (long);
+  #else
+  #error "linux libc or architecture not supported"
+  #endif
 
 # elif CORO_IRIX
 
-  setjmp (ctx->env);
-  ctx->env[JB_PC] = (__uint64_t)coro_init;
-  ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize);
+  coro_setjmp (ctx->env, 0);
+  ctx->env[JB_PC] = (__uint64_t)coro_init;
+  ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long);
 
 # elif CORO_ASM
 
-  ctx->sp = (volatile void **)(ssize + (char *)sptr);
-  *--ctx->sp = (void *)coro_init;
-  *--ctx->sp = (void *)coro_init; // this is needed when the prologue saves ebp
-  ctx->sp -= NUM_CLOBBERED;
+  ctx->sp = (void **)(ssize + (char *)sptr);
+  *--ctx->sp = (void *)abort; /* needed for alignment only */
+  *--ctx->sp = (void *)coro_init;
+
+  #if CORO_WIN_TIB
+  *--ctx->sp = 0;                    /* ExceptionList */
+  *--ctx->sp = (char *)sptr + ssize; /* StackBase */
+  *--ctx->sp = sptr;                 /* StackLimit */
+  #endif
+
+  ctx->sp -= NUM_SAVED;
+
+# elif CORO_UCONTEXT
+
+  getcontext (&(ctx->uc));
+
+  ctx->uc.uc_link = 0;
+  ctx->uc.uc_stack.ss_sp = sptr;
+  ctx->uc.uc_stack.ss_size = (size_t)ssize;
+  ctx->uc.uc_stack.ss_flags = 0;
+
+  makecontext (&(ctx->uc), (void (*)())coro_init, 0);
 
 # endif
 
-  coro_transfer ((coro_context *)create_coro, (coro_context *)new_coro);
+  coro_transfer (create_coro, new_coro);
+}
 
-# elif CORO_PTHREAD
-
-  pthread_t id;
-  pthread_attr_t attr;
-  coro_context nctx;
-  struct coro_init_args args;
+/*****************************************************************************/
+/* pthread backend */
+/*****************************************************************************/
+#elif CORO_PTHREAD
+
+/* this mutex will be locked by the running coroutine */
+pthread_mutex_t coro_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+struct coro_init_args
+{
+  coro_func func;
+  void *arg;
+  coro_context *self, *main;
+};
+
+static pthread_t null_tid;
+
+/* I'd so love to cast pthread_mutex_unlock to void (*)(void *)... */
+static void
+mutex_unlock_wrapper (void *arg)
+{
+  pthread_mutex_unlock ((pthread_mutex_t *)arg);
+}
+
+static void *
+coro_init (void *args_)
+{
+  struct coro_init_args *args = (struct coro_init_args *)args_;
+  coro_func func = args->func;
+  void *arg = args->arg;
+
+  pthread_mutex_lock (&coro_mutex);
+
+  /* we try to be good citizens and use deferred cancellation and cleanup handlers */
+  pthread_cleanup_push (mutex_unlock_wrapper, &coro_mutex);
+  coro_transfer (args->self, args->main);
+  func (arg);
+  pthread_cleanup_pop (1);
+
+  return 0;
+}
+
+void
+coro_transfer (coro_context *prev, coro_context *next)
+{
+  pthread_cond_signal (&next->cv);
+  pthread_cond_wait (&prev->cv, &coro_mutex);
+#if __FreeBSD__ /* freebsd is of course broken and needs manual testcancel calls... yay... */
+  pthread_testcancel ();
+#endif
+}
+
+void
+coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, long ssize)
+{
+  static coro_context nctx;
   static int once;
 
   if (!once)
     {
+      once = 1;
+
       pthread_mutex_lock (&coro_mutex);
-      once = 1;
+      pthread_cond_init (&nctx.cv, 0);
+      null_tid = pthread_self ();
     }
 
-  args.func = coro;
-  args.arg = arg;
-  args.self = ctx;
-  args.main = &nctx;
-
-  pthread_attr_init (&attr);
-  pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
-  pthread_attr_setstack (&attr, sptr, (size_t)ssize);
-  pthread_create (&id, &attr, trampoline, &args);
-
-  pthread_cond_init (&args.self->c, 0);
-  coro_transfer (args.main, args.self);
+  pthread_cond_init (&ctx->cv, 0);
+
+  if (coro)
+    {
+      pthread_attr_t attr;
+      struct coro_init_args args;
+
+      args.func = coro;
+      args.arg = arg;
+      args.self = ctx;
+      args.main = &nctx;
+
+      pthread_attr_init (&attr);
+      pthread_attr_setstack (&attr, sptr, (size_t)ssize);
+      pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
+      pthread_create (&ctx->id, &attr, coro_init, &args);
+
+      coro_transfer (args.main, args.self);
+    }
+  else
+    ctx->id = null_tid;
+}
+
+void
+coro_destroy (coro_context *ctx)
+{
+  if (!pthread_equal (ctx->id, null_tid))
+    {
+      pthread_cancel (ctx->id);
+      pthread_mutex_unlock (&coro_mutex);
+      pthread_join (ctx->id, 0);
+      pthread_mutex_lock (&coro_mutex);
+    }
+
+  pthread_cond_destroy (&ctx->cv);
+}
 
 #else
 # error unsupported backend
 #endif
-}
 

Diff Legend

-  Removed lines (revision 1.31 only)
+  Added lines (revision 1.52 only)
   Unchanged (context) lines
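
Usage illustration (not part of coro.c above): a minimal sketch of how the coro_create/coro_transfer API shown in revision 1.52 can be driven, plus coro_destroy, which this revision defines only in the pthread backend. The names worker, mainctx and workerctx, the 256 KiB malloc'ed stack, and the CORO_PTHREAD guard are illustrative assumptions; stack size, alignment and teardown requirements depend on the backend selected at build time.

#include "coro.h"
#include <stdio.h>
#include <stdlib.h>

static coro_context mainctx;    /* context of the original flow of control */
static coro_context workerctx;  /* context of the coroutine created below  */

/* a coro_func receives the void *arg passed to coro_create and must never return */
static void
worker (void *arg)
{
  int i;

  for (i = 0; i < 3; ++i)
    {
      printf ("worker step %d, arg=%s\n", i, (char *)arg);
      coro_transfer (&workerctx, &mainctx);   /* yield back to the creator */
    }

  for (;;)                                    /* never fall off the end */
    coro_transfer (&workerctx, &mainctx);
}

int
main (void)
{
  long ssize = 256 * 1024;                    /* illustrative stack size        */
  void *sptr = malloc (ssize);                /* stack memory for the coroutine */
  int i;

  coro_create (&mainctx, 0, 0, 0, 0);         /* empty context for the caller   */
  coro_create (&workerctx, worker, "hello", sptr, ssize);

  for (i = 0; i < 3; ++i)
    coro_transfer (&mainctx, &workerctx);     /* resume worker until it yields  */

  /* coro_destroy exists in the pthread backend of this revision; for the other
     backends a suspended coroutine needs no teardown beyond freeing its stack */
#if CORO_PTHREAD
  coro_destroy (&workerctx);
#endif
  free (sptr);
  return 0;
}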