
Comparing liblzf/lzf_c.c (file contents):
Revision 1.20 by root, Tue Nov 13 08:38:49 2007 UTC vs.
Revision 1.44 by root, Tue Jun 1 09:11:33 2010 UTC

@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2000-2007 Marc Alexander Lehmann <schmorp@schmorp.de>
+ * Copyright (c) 2000-2010 Marc Alexander Lehmann <schmorp@schmorp.de>
  *
  * Redistribution and use in source and binary forms, with or without modifica-
  * tion, are permitted provided that the following conditions are met:
  *
  * 1. Redistributions of source code must retain the above copyright notice,
@@ -38,19 +38,24 @@
 
 #define HSIZE (1 << (HLOG))
 
 /*
  * don't play with this unless you benchmark!
- * decompression is not dependent on the hash function
- * the hashing function might seem strange, just believe me
+ * the data format is not dependent on the hash function.
+ * the hash function might seem strange, just believe me,
  * it works ;)
  */
 #ifndef FRST
 # define FRST(p) (((p[0]) << 8) | p[1])
 # define NEXT(v,p) (((v) << 8) | p[2])
+# if ULTRA_FAST
+# define IDX(h) ((( h >> (3*8 - HLOG)) - h ) & (HSIZE - 1))
+# elif VERY_FAST
+# define IDX(h) ((( h >> (3*8 - HLOG)) - h*5) & (HSIZE - 1))
+# else
 # define IDX(h) ((((h ^ (h << 5)) >> (3*8 - HLOG)) - h*5) & (HSIZE - 1))
-/*# define IDX(h) ((ip[0] * 121 ^ ip[1] * 33 ^ ip[2] * 1) & (HSIZE-1))*/
+# endif
 #endif
 /*
  * IDX works because it is very similar to a multiplicative hash, e.g.
  * ((h * 57321 >> (3*8 - HLOG)) & (HSIZE - 1))
  * the latter is also quite fast on newer CPUs, and compresses similarly.
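For readers comparing the variants: the three IDX definitions selected by revision 1.44 can be written out as plain functions. This is only an illustrative sketch, not code from either revision; HLOG is assumed to be 16 here, while the real value is configured in lzfP.h.

    #define SKETCH_HLOG  16
    #define SKETCH_HSIZE (1 << SKETCH_HLOG)

    /* used when ULTRA_FAST is set: cheapest mixing */
    static unsigned int idx_ultra_fast (unsigned int h)
    {
      return ((h >> (3*8 - SKETCH_HLOG)) - h) & (SKETCH_HSIZE - 1);
    }

    /* used when VERY_FAST (but not ULTRA_FAST) is set: adds a small multiplier */
    static unsigned int idx_very_fast (unsigned int h)
    {
      return ((h >> (3*8 - SKETCH_HLOG)) - h*5) & (SKETCH_HSIZE - 1);
    }

    /* the default otherwise: extra xor-shift mixing */
    static unsigned int idx_default (unsigned int h)
    {
      return (((h ^ (h << 5)) >> (3*8 - SKETCH_HLOG)) - h*5) & (SKETCH_HSIZE - 1);
    }

In all three cases h is the 24-bit value built from the next three input octets by FRST/NEXT, and the result indexes the HSIZE-entry hash table.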
@@ -68,17 +73,10 @@
 
 #define MAX_LIT (1 << 5)
 #define MAX_OFF (1 << 13)
 #define MAX_REF ((1 << 8) + (1 << 3))
 
-#if (__i386 || __amd64) && __GNUC__ >= 3
-# define lzf_movsb(dst, src, len) \
-   asm ("rep movsb" \
-        : "=D" (dst), "=S" (src), "=c" (len) \
-        : "0" (dst), "1" (src), "2" (len));
-#endif
-
 #if __GNUC__ >= 3
 # define expect(expr,value) __builtin_expect ((expr),(value))
 # define inline inline
 #else
 # define expect(expr,value) (expr)
@@ -89,13 +87,13 @@
 #define expect_true(expr) expect ((expr) != 0, 1)
 
 /*
  * compressed format
  *
- * 000LLLLL <L+1> ; literal
- * LLLooooo oooooooo ; backref L
- * 111ooooo LLLLLLLL oooooooo ; backref L+7
+ * 000LLLLL <L+1> ; literal, L+1=1..33 octets
+ * LLLooooo oooooooo ; backref L+1=1..7 octets, o+1=1..4096 offset
+ * 111ooooo LLLLLLLL oooooooo ; backref L+8 octets, o+1=1..4096 offset
  *
  */
 
 unsigned int
 lzf_compress (const void *const in_data, unsigned int in_len,
@@ -106,185 +104,188 @@
               )
 {
 #if !LZF_STATE_ARG
   LZF_STATE htab;
 #endif
-  const u8 **hslot;
   const u8 *ip = (const u8 *)in_data;
   u8 *op = (u8 *)out_data;
   const u8 *in_end = ip + in_len;
   u8 *out_end = op + out_len;
   const u8 *ref;
 
-  unsigned int hval = FRST (ip);
+  /* off requires a type wide enough to hold a general pointer difference.
+   * ISO C doesn't have that (size_t might not be enough and ptrdiff_t only
+   * works for differences within a single object). We also assume that no
+   * no bit pattern traps. Since the only platform that is both non-POSIX
+   * and fails to support both assumptions is windows 64 bit, we make a
+   * special workaround for it.
+   */
+#if defined (WIN32) && defined (_M_X64)
+  unsigned _int64 off; /* workaround for missing POSIX compliance */
+#else
   unsigned long off;
-  int lit = 0;
+#endif
+  unsigned int hval;
+  int lit;
+
+  if (!in_len || !out_len)
+    return 0;
 
 #if INIT_HTAB
   memset (htab, 0, sizeof (htab));
-# if 0
-  for (hslot = htab; hslot < htab + HSIZE; hslot++)
-    *hslot++ = ip;
+#endif
+
+  lit = 0; op++; /* start run */
+
+  hval = FRST (ip);
+  while (ip < in_end - 2)
+    {
+      LZF_HSLOT *hslot;
+
+      hval = NEXT (hval, ip);
+      hslot = htab + IDX (hval);
+      ref = *hslot + LZF_HSLOT_BIAS; *hslot = ip - LZF_HSLOT_BIAS;
+
+      if (1
+#if INIT_HTAB
+          && ref < ip /* the next test will actually take care of this, but this is faster */
+#endif
+          && (off = ip - ref - 1) < MAX_OFF
+          && ref > (u8 *)in_data
+          && ref[2] == ip[2]
+#if STRICT_ALIGN
+          && ((ref[1] << 8) | ref[0]) == ((ip[1] << 8) | ip[0])
+#else
+          && *(u16 *)ref == *(u16 *)ip
+#endif
+        )
+        {
+          /* match found at *ref++ */
+          unsigned int len = 2;
+          unsigned int maxlen = in_end - ip - len;
+          maxlen = maxlen > MAX_REF ? MAX_REF : maxlen;
+
+          if (expect_false (op + 3 + 1 >= out_end)) /* first a faster conservative test */
+            if (op - !lit + 3 + 1 >= out_end) /* second the exact but rare test */
+              return 0;
+
+          op [- lit - 1] = lit - 1; /* stop run */
+          op -= !lit; /* undo run if length is zero */
+
+          for (;;)
+            {
+              if (expect_true (maxlen > 16))
+                {
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+                  len++; if (ref [len] != ip [len]) break;
+                }
+
+              do
+                len++;
+              while (len < maxlen && ref[len] == ip[len]);
+
+              break;
+            }
+
+          len -= 2; /* len is now #octets - 1 */
+          ip++;
+
+          if (len < 7)
+            {
+              *op++ = (off >> 8) + (len << 5);
+            }
+          else
+            {
+              *op++ = (off >> 8) + ( 7 << 5);
+              *op++ = len - 7;
+            }
+
+          *op++ = off;
+
+          lit = 0; op++; /* start run */
+
+          ip += len + 1;
+
+          if (expect_false (ip >= in_end - 2))
+            break;
+
+#if ULTRA_FAST || VERY_FAST
+          --ip;
+# if VERY_FAST && !ULTRA_FAST
+          --ip;
 # endif
+          hval = FRST (ip);
+
+          hval = NEXT (hval, ip);
+          htab[IDX (hval)] = ip - LZF_HSLOT_BIAS;
+          ip++;
+
+# if VERY_FAST && !ULTRA_FAST
+          hval = NEXT (hval, ip);
+          htab[IDX (hval)] = ip - LZF_HSLOT_BIAS;
+          ip++;
-#endif
+# endif
+#else
+          ip -= len + 1;
 
-  for (;;)
+          do
+            {
+              hval = NEXT (hval, ip);
+              htab[IDX (hval)] = ip - LZF_HSLOT_BIAS;
+              ip++;
+            }
+          while (len--);
+#endif
+        }
+      else
+        {
+          /* one more literal byte we must copy */
+          if (expect_false (op >= out_end))
+            return 0;
+
+          lit++; *op++ = *ip++;
+
+          if (expect_false (lit == MAX_LIT))
+            {
+              op [- lit - 1] = lit - 1; /* stop run */
+              lit = 0; op++; /* start run */
+            }
+        }
+    }
+
+  if (op + 3 > out_end) /* at most 3 bytes can be missing here */
+    return 0;
+
+  while (ip < in_end)
     {
-      if (expect_true (ip < in_end - 2))
+      lit++; *op++ = *ip++;
-        {
-          hval = NEXT (hval, ip);
-          hslot = htab + IDX (hval);
-          ref = *hslot; *hslot = ip;
-
-          if (1
-#if INIT_HTAB && !USE_MEMCPY
-              && ref < ip /* the next test will actually take care of this, but this is faster */
-#endif
-              && (off = ip - ref - 1) < MAX_OFF
-              && ip + 4 < in_end
-              && ref > (u8 *)in_data
-#if STRICT_ALIGN
-              x x x
-              && ref[0] == ip[0]
-              && ref[1] == ip[1]
-              && ref[2] == ip[2]
-#else
-              && *(u16 *)ref == *(u16 *)ip
-              && ref[2] == ip[2]
-#endif
-            )
-            {
-              /* match found at *ref++ */
-              unsigned int len = 2;
-              unsigned int maxlen = in_end - ip - len;
-              maxlen = maxlen > MAX_REF ? MAX_REF : maxlen;
-
-              if (expect_false (op + lit + 1 + 3 >= out_end))
-                return 0;
-
-              if (expect_false (lit))
-                {
-                  *op++ = lit - 1;
-                  lit = -lit;
-                  do
-                    *op++ = ip[lit];
-                  while (expect_false (++lit));
-                }
-
-              for (;;)
-                {
-                  if (expect_true (maxlen > 16))
-                    {
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                      len++; if (ref [len] != ip [len]) break;
-                    }
-
-                  do
-                    len++;
-                  while (len < maxlen && ref[len] == ip[len]);
-
-                  break;
-                }
-
-              len -= 2;
-              ip++;
-
-              if (len < 7)
-                {
-                  *op++ = (off >> 8) + (len << 5);
-                }
-              else
-                {
-                  *op++ = (off >> 8) + ( 7 << 5);
-                  *op++ = len - 7;
-                }
-
-              *op++ = off;
-
-#if ULTRA_FAST || VERY_FAST
-              ip += len;
-#if VERY_FAST && !ULTRA_FAST
-              --ip;
-#endif
-              hval = FRST (ip);
-
-              hval = NEXT (hval, ip);
-              htab[IDX (hval)] = ip;
-              ip++;
-
-#if VERY_FAST && !ULTRA_FAST
-              hval = NEXT (hval, ip);
-              htab[IDX (hval)] = ip;
-              ip++;
-#endif
-#else
-              do
-                {
-                  hval = NEXT (hval, ip);
-                  htab[IDX (hval)] = ip;
-                  ip++;
-                }
-              while (len--);
-#endif
-              continue;
-            }
-        }
-      else if (expect_false (ip == in_end))
-        break;
-
-      /* one more literal byte we must copy */
-      lit++;
-      ip++;
 
       if (expect_false (lit == MAX_LIT))
         {
-          if (op + 1 + MAX_LIT >= out_end)
-            return 0;
+          op [- lit - 1] = lit - 1; /* stop run */
+          lit = 0; op++; /* start run */
-
-          *op++ = MAX_LIT - 1;
-
-#ifdef lzf_movsb
-          ip -= MAX_LIT;
-          lzf_movsb (op, ip, lit);
-#else
-          lit = -lit;
-          do
-            *op++ = ip[lit];
-          while (++lit);
-#endif
         }
     }
 
-  if (lit)
-    {
+  op [- lit - 1] = lit - 1; /* end run */
+  op -= !lit; /* undo run if length is zero */
-      if (op + lit + 1 >= out_end)
-        return 0;
 
-      *op++ = lit - 1;
-#ifdef lzf_movsb
-      ip -= lit;
-      lzf_movsb (op, ip, lit);
-#else
-      lit = -lit;
-      do
-        *op++ = ip[lit];
-      while (++lit);
-#endif
-    }
-
-  return op - (u8 *) out_data;
+  return op - (u8 *)out_data;
 }
 
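The compressed format documented in the comment block of revision 1.44 (literal runs, short backrefs, long backrefs) can be illustrated with a small decoder. This is a reader's sketch that follows the token layout described above; it is not code from either revision, and liblzf's own lzf_decompress in lzf_d.c is the authoritative implementation.

    #include <string.h>

    /* Sketch: expand one LZF-format buffer, following the token layout in the
     * format comment.  Returns the number of octets written, or 0 on error. */
    static unsigned int
    lzf_format_sketch (const unsigned char *in,  unsigned int in_len,
                       unsigned char       *out, unsigned int out_len)
    {
      const unsigned char *ip = in, *in_end = in + in_len;
      unsigned char *op = out, *out_end = out + out_len;

      while (ip < in_end)
        {
          unsigned int ctrl = *ip++;

          if (ctrl < (1 << 5))                /* 000LLLLL: run of ctrl+1 literal octets */
            {
              ctrl++;
              if (ip + ctrl > in_end || op + ctrl > out_end)
                return 0;
              memcpy (op, ip, ctrl);
              op += ctrl; ip += ctrl;
            }
          else                                /* back reference into already-decoded output */
            {
              unsigned int len = ctrl >> 5;            /* LLL bits */
              unsigned int off = (ctrl & 0x1f) << 8;   /* ooooo: high offset bits */
              const unsigned char *ref;

              if (len == 7)                   /* 111ooooo LLLLLLLL oooooooo: long form */
                {
                  if (ip >= in_end) return 0;
                  len += *ip++;
                }

              if (ip >= in_end) return 0;
              off |= *ip++;                            /* low offset octet */

              len += 2;                       /* stored length is octets-2 */
              ref = op - off - 1;             /* stored offset is distance-1 */

              if (ref < out || op + len > out_end)
                return 0;

              while (len--)                   /* byte-wise: source and target may overlap */
                *op++ = *ref++;
            }
        }

      return (unsigned int)(op - out);
    }

The compressor side of the diff emits exactly these shapes: *op++ = (off >> 8) + (len << 5) plus *op++ = off for short backrefs, an extra length octet once len reaches 7, and a lit - 1 count byte in front of each literal run.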

Diff Legend

-  line appears only in revision 1.20 (removed or changed)
+  line appears only in revision 1.44 (added or changed)
   unmarked lines are common to both revisions