/cvs/liblzf/lzf_c.c

Comparing liblzf/lzf_c.c (file contents):
Revision 1.21 by root, Tue Nov 13 08:38:56 2007 UTC vs.
Revision 1.44 by root, Tue Jun 1 09:11:33 2010 UTC

@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2000-2007 Marc Alexander Lehmann <schmorp@schmorp.de>
+ * Copyright (c) 2000-2010 Marc Alexander Lehmann <schmorp@schmorp.de>
  *
  * Redistribution and use in source and binary forms, with or without modifica-
  * tion, are permitted provided that the following conditions are met:
  *
  * 1. Redistributions of source code must retain the above copyright notice,
@@ -38,19 +38,24 @@

 #define HSIZE (1 << (HLOG))

 /*
  * don't play with this unless you benchmark!
- * decompression is not dependent on the hash function
- * the hashing function might seem strange, just believe me
+ * the data format is not dependent on the hash function.
+ * the hash function might seem strange, just believe me,
  * it works ;)
  */
 #ifndef FRST
 # define FRST(p) (((p[0]) << 8) | p[1])
 # define NEXT(v,p) (((v) << 8) | p[2])
+# if ULTRA_FAST
+#  define IDX(h) ((( h >> (3*8 - HLOG)) - h  ) & (HSIZE - 1))
+# elif VERY_FAST
+#  define IDX(h) ((( h >> (3*8 - HLOG)) - h*5) & (HSIZE - 1))
+# else
 #  define IDX(h) ((((h ^ (h << 5)) >> (3*8 - HLOG)) - h*5) & (HSIZE - 1))
-/*# define IDX(h) ((ip[0] * 121 ^ ip[1] * 33 ^ ip[2] * 1) & (HSIZE-1))*/
+# endif
 #endif
 /*
  * IDX works because it is very similar to a multiplicative hash, e.g.
  * ((h * 57321 >> (3*8 - HLOG)) & (HSIZE - 1))
  * the latter is also quite fast on newer CPUs, and compresses similarly.
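(Not part of the patch: a standalone sketch of the default IDX variant from the hunk above, so the hash can be tried in isolation. HLOG = 16 is an assumption here; the real value comes from lzfP.h. It shows how FRST/NEXT build a 24-bit window value from three consecutive octets, which IDX then folds into a table index, so identical 3-octet windows always land in the same slot.)

    /* sketch only: default IDX as in the non-ULTRA/VERY_FAST build, HLOG assumed 16 */
    #include <stdio.h>

    #define HLOG  16
    #define HSIZE (1 << (HLOG))

    #define FRST(p)   (((p[0]) << 8) | p[1])
    #define NEXT(v,p) (((v) << 8) | p[2])
    #define IDX(h)    ((((h ^ (h << 5)) >> (3*8 - HLOG)) - h*5) & (HSIZE - 1))

    int main (void)
    {
      const unsigned char buf[] = "abcabcabc";
      unsigned int i;

      /* identical 3-octet windows hash to identical slots */
      for (i = 0; i + 2 < sizeof (buf) - 1; i++)
        {
          const unsigned char *p = buf + i;
          unsigned int hval = NEXT (FRST (p), p); /* 24-bit window value */

          printf ("window %u (\"%.3s\") -> slot %u\n", i, (const char *)p, IDX (hval));
        }

      return 0;
    }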
@@ -68,17 +73,10 @@

 #define MAX_LIT (1 << 5)
 #define MAX_OFF (1 << 13)
 #define MAX_REF ((1 << 8) + (1 << 3))

-#if (__i386 || __amd64) && __GNUC__ >= 3
-# define lzf_movsb(dst, src, len) \
-   asm ("rep movsb" \
-        : "=D" (dst), "=S" (src), "=c" (len) \
-        : "0" (dst), "1" (src), "2" (len));
-#endif
-
 #if __GNUC__ >= 3
 # define expect(expr,value) __builtin_expect ((expr),(value))
 # define inline inline
 #else
 # define expect(expr,value) (expr)
@@ -89,13 +87,13 @@
 #define expect_true(expr) expect ((expr) != 0, 1)

 /*
  * compressed format
  *
- * 000LLLLL <L+1> ; literal
- * LLLooooo oooooooo ; backref L
- * 111ooooo LLLLLLLL oooooooo ; backref L+7
+ * 000LLLLL <L+1> ; literal, L+1=1..33 octets
+ * LLLooooo oooooooo ; backref L+1=1..7 octets, o+1=1..4096 offset
+ * 111ooooo LLLLLLLL oooooooo ; backref L+8 octets, o+1=1..4096 offset
  *
  */

 unsigned int
 lzf_compress (const void *const in_data, unsigned int in_len,
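(To make the format comment above concrete, here is a minimal decoder sketch for those three tag forms. It follows the same scheme liblzf's lzf_d.c implements, but drops all bounds and error checking, so it is illustration only; lzf_decompress_sketch is a made-up name, not a liblzf function. The stored back-reference length is the match length minus 2, which is why the copy loop adds 2.)

    #include <string.h>

    static unsigned int
    lzf_decompress_sketch (const unsigned char *ip, unsigned int in_len,
                           unsigned char *op)
    {
      const unsigned char *in_end = ip + in_len;
      unsigned char *out_start = op;

      while (ip < in_end)
        {
          unsigned int ctrl = *ip++;

          if (ctrl < (1 << 5))          /* 000LLLLL: literal run, L+1 octets follow */
            {
              ctrl++;
              memcpy (op, ip, ctrl);
              op += ctrl; ip += ctrl;
            }
          else                          /* back reference into the output produced so far */
            {
              unsigned int len = ctrl >> 5;
              unsigned char *ref = op - ((ctrl & 0x1f) << 8) - 1;

              if (len == 7)             /* 111ooooo LLLLLLLL oooooooo: long form */
                len += *ip++;

              ref -= *ip++;             /* low 8 bits of the offset */

              len += 2;                 /* stored length is match length minus 2 */
              while (len--)
                *op++ = *ref++;         /* byte-wise copy, ranges may overlap */
            }
        }

      return op - out_start;
    }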
@@ -106,184 +104,188 @@
               )
 {
 #if !LZF_STATE_ARG
   LZF_STATE htab;
 #endif
-  const u8 **hslot;
   const u8 *ip = (const u8 *)in_data;
   u8 *op = (u8 *)out_data;
   const u8 *in_end = ip + in_len;
   u8 *out_end = op + out_len;
   const u8 *ref;

-  unsigned int hval = FRST (ip);
+  /* off requires a type wide enough to hold a general pointer difference.
+   * ISO C doesn't have that (size_t might not be enough and ptrdiff_t only
+   * works for differences within a single object). We also assume that no
+   * no bit pattern traps. Since the only platform that is both non-POSIX
+   * and fails to support both assumptions is windows 64 bit, we make a
+   * special workaround for it.
+   */
+#if defined (WIN32) && defined (_M_X64)
+  unsigned _int64 off; /* workaround for missing POSIX compliance */
+#else
   unsigned long off;
-  int lit = 0;
+#endif
+  unsigned int hval;
+  int lit;
+
+  if (!in_len || !out_len)
+    return 0;

 #if INIT_HTAB
   memset (htab, 0, sizeof (htab));
-# if 0
-  for (hslot = htab; hslot < htab + HSIZE; hslot++)
-    *hslot++ = ip;
+#endif
+
+  lit = 0; op++; /* start run */
+
+  hval = FRST (ip);
+  while (ip < in_end - 2)
+    {
+      LZF_HSLOT *hslot;
+
+      hval = NEXT (hval, ip);
+      hslot = htab + IDX (hval);
+      ref = *hslot + LZF_HSLOT_BIAS; *hslot = ip - LZF_HSLOT_BIAS;
+
+      if (1
+#if INIT_HTAB
+          && ref < ip /* the next test will actually take care of this, but this is faster */
+#endif
+          && (off = ip - ref - 1) < MAX_OFF
+          && ref > (u8 *)in_data
+          && ref[2] == ip[2]
+#if STRICT_ALIGN
+          && ((ref[1] << 8) | ref[0]) == ((ip[1] << 8) | ip[0])
+#else
+          && *(u16 *)ref == *(u16 *)ip
+#endif
+        )
+        {
+          /* match found at *ref++ */
+          unsigned int len = 2;
+          unsigned int maxlen = in_end - ip - len;
+          maxlen = maxlen > MAX_REF ? MAX_REF : maxlen;
+
+          if (expect_false (op + 3 + 1 >= out_end)) /* first a faster conservative test */
+            if (op - !lit + 3 + 1 >= out_end) /* second the exact but rare test */
+              return 0;
+
+          op [- lit - 1] = lit - 1; /* stop run */
+          op -= !lit; /* undo run if length is zero */
+
+          for (;;)
+            {
+              if (expect_true (maxlen > 16))
+                {
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+                }
+
+              do
+                len++;
+              while (len < maxlen && ref[len] == ip[len]);
+
+              break;
+            }
+
+          len -= 2; /* len is now #octets - 1 */
+          ip++;
+
+          if (len < 7)
+            {
+              *op++ = (off >> 8) + (len << 5);
+            }
+          else
+            {
+              *op++ = (off >> 8) + ( 7 << 5);
+              *op++ = len - 7;
+            }
+
+          *op++ = off;
+
+          lit = 0; op++; /* start run */
+
+          ip += len + 1;
+
+          if (expect_false (ip >= in_end - 2))
+            break;
+
+#if ULTRA_FAST || VERY_FAST
+          --ip;
+# if VERY_FAST && !ULTRA_FAST
+          --ip;
 # endif
+          hval = FRST (ip);
+
+          hval = NEXT (hval, ip);
+          htab[IDX (hval)] = ip - LZF_HSLOT_BIAS;
+          ip++;
+
+# if VERY_FAST && !ULTRA_FAST
+          hval = NEXT (hval, ip);
+          htab[IDX (hval)] = ip - LZF_HSLOT_BIAS;
+          ip++;
-#endif
+# endif
+#else
+          ip -= len + 1;

-  for (;;)
+          do
+            {
+              hval = NEXT (hval, ip);
+              htab[IDX (hval)] = ip - LZF_HSLOT_BIAS;
+              ip++;
+            }
+          while (len--);
+#endif
+        }
+      else
+        {
+          /* one more literal byte we must copy */
+          if (expect_false (op >= out_end))
+            return 0;
+
+          lit++; *op++ = *ip++;
+
+          if (expect_false (lit == MAX_LIT))
+            {
+              op [- lit - 1] = lit - 1; /* stop run */
+              lit = 0; op++; /* start run */
+            }
+        }
+    }
+
+  if (op + 3 > out_end) /* at most 3 bytes can be missing here */
+    return 0;
+
+  while (ip < in_end)
     {
-      if (expect_true (ip < in_end - 2))
+      lit++; *op++ = *ip++;
-        {
-          hval = NEXT (hval, ip);
-          hslot = htab + IDX (hval);
-          ref = *hslot; *hslot = ip;
-
-          if (1
-#if INIT_HTAB && !USE_MEMCPY
-              && ref < ip /* the next test will actually take care of this, but this is faster */
-#endif
-              && (off = ip - ref - 1) < MAX_OFF
-              && ip + 4 < in_end
-              && ref > (u8 *)in_data
-#if STRICT_ALIGN
-              && ref[0] == ip[0]
-              && ref[1] == ip[1]
-              && ref[2] == ip[2]
-#else
-              && *(u16 *)ref == *(u16 *)ip
-              && ref[2] == ip[2]
-#endif
-            )
-            {
-              /* match found at *ref++ */
-              unsigned int len = 2;
-              unsigned int maxlen = in_end - ip - len;
-              maxlen = maxlen > MAX_REF ? MAX_REF : maxlen;
-
-              if (expect_false (op + lit + 1 + 3 >= out_end))
-                return 0;
-
-              if (expect_false (lit))
-                {
-                  *op++ = lit - 1;
-                  lit = -lit;
-                  do
-                    *op++ = ip[lit];
-                  while (expect_false (++lit));
-                }
-
-              for (;;)
-                {
-                  if (expect_true (maxlen > 16))
-                    {
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                    }
-
-                  do
-                    len++;
-                  while (len < maxlen && ref[len] == ip[len]);
-
-                  break;
-                }
-
-              len -= 2;
-              ip++;
-
-              if (len < 7)
-                {
-                  *op++ = (off >> 8) + (len << 5);
-                }
-              else
-                {
-                  *op++ = (off >> 8) + ( 7 << 5);
-                  *op++ = len - 7;
-                }
-
-              *op++ = off;
-
-#if ULTRA_FAST || VERY_FAST
-              ip += len;
-#if VERY_FAST && !ULTRA_FAST
-              --ip;
-#endif
-              hval = FRST (ip);
-
-              hval = NEXT (hval, ip);
-              htab[IDX (hval)] = ip;
-              ip++;
-
-#if VERY_FAST && !ULTRA_FAST
-              hval = NEXT (hval, ip);
-              htab[IDX (hval)] = ip;
-              ip++;
-#endif
-#else
-              do
-                {
-                  hval = NEXT (hval, ip);
-                  htab[IDX (hval)] = ip;
-                  ip++;
-                }
-              while (len--);
-#endif
-              continue;
-            }
-        }
-      else if (expect_false (ip == in_end))
-        break;
-
-      /* one more literal byte we must copy */
-      lit++;
-      ip++;

       if (expect_false (lit == MAX_LIT))
         {
-          if (op + 1 + MAX_LIT >= out_end)
-            return 0;
-
-          *op++ = MAX_LIT - 1;
-
-#ifdef lzf_movsb
-          ip -= MAX_LIT;
-          lzf_movsb (op, ip, lit);
-#else
-          lit = -lit;
-          do
-            *op++ = ip[lit];
-          while (++lit);
-#endif
+          op [- lit - 1] = lit - 1; /* stop run */
+          lit = 0; op++; /* start run */
         }
     }

-  if (lit)
-    {
-      if (op + lit + 1 >= out_end)
-        return 0;
+  op [- lit - 1] = lit - 1; /* end run */
+  op -= !lit; /* undo run if length is zero */

-      *op++ = lit - 1;
-#ifdef lzf_movsb
-      ip -= lit;
-      lzf_movsb (op, ip, lit);
-#else
-      lit = -lit;
-      do
-        *op++ = ip[lit];
-      while (++lit);
-#endif
-    }
-
-  return op - (u8 *) out_data;
+  return op - (u8 *)out_data;
 }

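(One idiom worth calling out in revision 1.44 is the literal-run bookkeeping. Instead of buffering literals and flushing them later, as revision 1.21 did with lzf_movsb, the new code reserves one output byte per run up front, copies literals as it sees them, and patches the reserved byte with the run length when the run stops; "op -= !lit" drops the reservation again if the run turned out empty. A standalone sketch of just that pattern follows, using a hypothetical emit_literals() helper that is not part of liblzf.)

    #define MAX_LIT (1 << 5)

    /* copy n literal octets from ip to op using the reserve-and-patch run
     * encoding; returns the number of output octets written */
    static unsigned int
    emit_literals (const unsigned char *ip, unsigned int n, unsigned char *op)
    {
      unsigned char *out_start = op;
      int lit;

      lit = 0; op++;                    /* start run: reserve the length byte */

      while (n--)
        {
          lit++; *op++ = *ip++;

          if (lit == MAX_LIT)           /* run is full */
            {
              op [- lit - 1] = lit - 1; /* stop run: patch the reserved byte */
              lit = 0; op++;            /* start the next run */
            }
        }

      op [- lit - 1] = lit - 1;         /* end run */
      op -= !lit;                       /* undo the reservation if the run is empty */

      return op - out_start;
    }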

Diff Legend

- : removed in revision 1.44
+ : added in revision 1.44