/cvs/liblzf/lzf_c.c

Comparing liblzf/lzf_c.c (file contents):
Revision 1.18 by root, Tue Nov 13 08:17:38 2007 UTC vs.
Revision 1.48 by root, Sat Jun 30 21:39:41 2012 UTC

1/* 1/*
2 * Copyright (c) 2000-2007 Marc Alexander Lehmann <schmorp@schmorp.de> 2 * Copyright (c) 2000-2010,2012 Marc Alexander Lehmann <schmorp@schmorp.de>
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without modifica- 4 * Redistribution and use in source and binary forms, with or without modifica-
5 * tion, are permitted provided that the following conditions are met: 5 * tion, are permitted provided that the following conditions are met:
6 * 6 *
7 * 1. Redistributions of source code must retain the above copyright notice, 7 * 1. Redistributions of source code must retain the above copyright notice,
38 38
39#define HSIZE (1 << (HLOG)) 39#define HSIZE (1 << (HLOG))
40 40
41/* 41/*
42 * don't play with this unless you benchmark! 42 * don't play with this unless you benchmark!
43 * decompression is not dependent on the hash function 43 * the data format is not dependent on the hash function.
44 * the hashing function might seem strange, just believe me 44 * the hash function might seem strange, just believe me,
45 * it works ;) 45 * it works ;)
46 */ 46 */
47#ifndef FRST 47#ifndef FRST
48# define FRST(p) (((p[0]) << 8) | p[1]) 48# define FRST(p) (((p[0]) << 8) | p[1])
49# define NEXT(v,p) (((v) << 8) | p[2]) 49# define NEXT(v,p) (((v) << 8) | p[2])
50# if MULTIPLICATION_IS_SLOW
51# if ULTRA_FAST
52# define IDX(h) ((( h >> (3*8 - HLOG)) - h ) & (HSIZE - 1))
53# elif VERY_FAST
54# define IDX(h) ((( h >> (3*8 - HLOG)) - h*5) & (HSIZE - 1))
55# else
50# define IDX(h) ((((h ^ (h << 5)) >> (3*8 - HLOG)) - h*5) & (HSIZE - 1)) 56# define IDX(h) ((((h ^ (h << 5)) >> (3*8 - HLOG)) - h*5) & (HSIZE - 1))
51/*# define IDX(h) ((ip[0] * 121 ^ ip[1] * 33 ^ ip[2] * 1) & (HSIZE-1))*/ 57# endif
58# else
59/* this one was developed with sesse,
60 * and is very similar to the one in snappy.
61 * it does need a modern enough cpu with a fast multiplication.
62 */
63# define IDX(h) (((h * 0x1e35a7bdU) >> (32 - HLOG - 8)) & (HSIZE - 1))
52#endif 64# endif
53/* 65#endif
54 * IDX works because it is very similar to a multiplicative hash, e.g.
55 * ((h * 57321 >> (3*8 - HLOG)) & (HSIZE - 1))
56 * the latter is also quite fast on newer CPUs, and compresses similarly.
57 *
58 * the next one is also quite good, albeit slow ;)
59 * (int)(cos(h & 0xffffff) * 1e6)
60 */
61 66
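The multiplicative IDX variant added on the right-hand side is, as its comment says, close to snappy's hash and relies on fast multiplication. A minimal standalone sketch (not part of the library) of how FRST, NEXT and that IDX fit together might look like the following; HLOG = 16 is an assumption purely for illustration, the real value comes from lzfP.h.

/* sketch only: pack three input bytes into a 24-bit value and map it
 * onto a hash slot with the multiplicative IDX shown above */
#include <stdio.h>

#define HLOG  16
#define HSIZE (1 << (HLOG))

#define FRST(p)   (((p[0]) << 8) | p[1])
#define NEXT(v,p) (((v) << 8) | p[2])
#define IDX(h)    (((h * 0x1e35a7bdU) >> (32 - HLOG - 8)) & (HSIZE - 1))

int main (void)
{
  const unsigned char p[3] = { 'l', 'z', 'f' };
  unsigned int h = FRST (p);      /* h = ('l' << 8) | 'z'                 */
  h = NEXT (h, p);                /* h = ('l' << 16) | ('z' << 8) | 'f'   */
  printf ("hash slot %u of %d\n", IDX (h), HSIZE);
  return 0;
}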
62#if 0 67#if 0
63/* original lzv-like hash function, much worse and thus slower */ 68/* original lzv-like hash function, much worse and thus slower */
64# define FRST(p) (p[0] << 5) ^ p[1] 69# define FRST(p) (p[0] << 5) ^ p[1]
65# define NEXT(v,p) ((v) << 5) ^ p[2] 70# define NEXT(v,p) ((v) << 5) ^ p[2]
68 73
69#define MAX_LIT (1 << 5) 74#define MAX_LIT (1 << 5)
70#define MAX_OFF (1 << 13) 75#define MAX_OFF (1 << 13)
71#define MAX_REF ((1 << 8) + (1 << 3)) 76#define MAX_REF ((1 << 8) + (1 << 3))
72 77
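MAX_REF looks arbitrary but falls straight out of the long-backref token documented below: its extra length byte stores len - 7, and len is the match length minus 2, so the longest expressible match is 255 + 7 + 2 = 264 octets, which is exactly (1 << 8) + (1 << 3). A throwaway arithmetic check, not part of the library:

/* sketch only: MAX_REF equals the longest match the long backref can encode */
#include <stdio.h>

int main (void)
{
  printf ("%d == %d\n", (1 << 8) + (1 << 3), 255 + 7 + 2);   /* 264 == 264 */
  return 0;
}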
73#if (__i386 || __amd64) && __GNUC__ >= 3
74# define lzf_movsb(dst, src, len) \
75 asm ("rep movsb" \
76 : "=D" (dst), "=S" (src), "=c" (len) \
77 : "0" (dst), "1" (src), "2" (len));
78#endif
79
80#if __GNUC__ >= 3 78#if __GNUC__ >= 3
81# define expect(expr,value) __builtin_expect ((expr),(value)) 79# define expect(expr,value) __builtin_expect ((expr),(value))
82# define inline inline 80# define inline inline
83#else 81#else
84# define expect(expr,value) (expr) 82# define expect(expr,value) (expr)
89#define expect_true(expr) expect ((expr) != 0, 1) 87#define expect_true(expr) expect ((expr) != 0, 1)
90 88
91/* 89/*
92 * compressed format 90 * compressed format
93 * 91 *
94 * 000LLLLL <L+1> ; literal 92 * 000LLLLL <L+1> ; literal, L+1=1..33 octets
95 * LLLooooo oooooooo ; backref L 93 * LLLooooo oooooooo ; backref L+1=1..7 octets, o+1=1..4096 offset
96 * 111ooooo LLLLLLLL oooooooo ; backref L+7 94 * 111ooooo LLLLLLLL oooooooo ; backref L+8 octets, o+1=1..4096 offset
97 * 95 *
98 */ 96 */
99 97
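For concreteness, here is a small sketch (not part of the library) that builds the two commonest token shapes by hand, following the layout in the comment above and the encoder below: a 4-octet literal run and a 5-octet backref at distance 10.

/* sketch only: literal header stores (run length - 1) in the low five
 * bits; a short backref stores (match length - 2) in the top three bits
 * and (distance - 1) split over 13 bits, as the encoder below emits it */
#include <stdio.h>

int main (void)
{
  /* literal run of 4 octets: 000LLLLL with L = 4 - 1 */
  unsigned char lit_ctrl = 4 - 1;                  /* 0x03 */

  /* short backref: copy 5 octets from distance 10 */
  unsigned int len = 5 - 2;                        /* L field */
  unsigned int off = 10 - 1;                       /* o field */
  unsigned char b0 = (len << 5) | (off >> 8);      /* 0x60    */
  unsigned char b1 = off & 0xff;                   /* 0x09    */

  printf ("literal ctrl %02x, backref %02x %02x\n", lit_ctrl, b0, b1);
  return 0;
}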
100unsigned int 98unsigned int
101lzf_compress (const void *const in_data, unsigned int in_len, 99lzf_compress (const void *const in_data, unsigned int in_len,
106 ) 104 )
107{ 105{
108#if !LZF_STATE_ARG 106#if !LZF_STATE_ARG
109 LZF_STATE htab; 107 LZF_STATE htab;
110#endif 108#endif
111 const u8 **hslot;
112 const u8 *ip = (const u8 *)in_data; 109 const u8 *ip = (const u8 *)in_data;
113 u8 *op = (u8 *)out_data; 110 u8 *op = (u8 *)out_data;
114 const u8 *in_end = ip + in_len; 111 const u8 *in_end = ip + in_len;
115 u8 *out_end = op + out_len; 112 u8 *out_end = op + out_len;
116 const u8 *ref; 113 const u8 *ref;
117 114
118 unsigned int hval = FRST (ip); 115 /* off requires a type wide enough to hold a general pointer difference.
116 * ISO C doesn't have that (size_t might not be enough and ptrdiff_t only
117 * works for differences within a single object). We also assume that
118 * no bit pattern traps. Since the only platform that is both non-POSIX
119 * and fails to support both assumptions is windows 64 bit, we make a
120 * special workaround for it.
121 */
122#if defined (_WIN32) && defined (_M_X64)
123 /* workaround for missing POSIX compliance */
124 #if __GNUC__
125 unsigned long long off;
126 #else
127 unsigned __int64 off;
128 #endif
129#else
119 unsigned long off; 130 unsigned long off;
120 int lit = 0; 131#endif
132 unsigned int hval;
133 int lit;
134
135 if (!in_len || !out_len)
136 return 0;
121 137
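The special case above exists because of differing 64-bit integer models; a tiny sketch (not from the library) makes the widths visible. On LLP64 Windows, unsigned long stays 32 bits while pointers are 64 bits, whereas LP64 Unix widens unsigned long to 64; uintptr_t is shown only for comparison, not as what upstream chose.

/* sketch only: print the type widths that motivate the workaround */
#include <stdint.h>
#include <stdio.h>

int main (void)
{
  printf ("unsigned long %zu, void * %zu, uintptr_t %zu\n",
          sizeof (unsigned long), sizeof (void *), sizeof (uintptr_t));
  return 0;   /* typically 4, 8, 8 on Win64; 8, 8, 8 on LP64 Unix */
}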
122#if INIT_HTAB 138#if INIT_HTAB
123 memset (htab, 0, sizeof (htab)); 139 memset (htab, 0, sizeof (htab));
124# if 0 140#endif
125 for (hslot = htab; hslot < htab + HSIZE; hslot++) 141
126 *hslot++ = ip; 142 lit = 0; op++; /* start run */
143
144 hval = FRST (ip);
145 while (ip < in_end - 2)
146 {
147 LZF_HSLOT *hslot;
148
149 hval = NEXT (hval, ip);
150 hslot = htab + IDX (hval);
151 ref = *hslot + LZF_HSLOT_BIAS; *hslot = ip - LZF_HSLOT_BIAS;
152
153 if (1
154#if INIT_HTAB
155 && ref < ip /* the next test will actually take care of this, but this is faster */
156#endif
157 && (off = ip - ref - 1) < MAX_OFF
158 && ref > (u8 *)in_data
159 && ref[2] == ip[2]
160#if STRICT_ALIGN
161 && ((ref[1] << 8) | ref[0]) == ((ip[1] << 8) | ip[0])
162#else
163 && *(u16 *)ref == *(u16 *)ip
164#endif
165 )
166 {
167 /* match found at *ref++ */
168 unsigned int len = 2;
169 unsigned int maxlen = in_end - ip - len;
170 maxlen = maxlen > MAX_REF ? MAX_REF : maxlen;
171
172 if (expect_false (op + 3 + 1 >= out_end)) /* first a faster conservative test */
173 if (op - !lit + 3 + 1 >= out_end) /* second the exact but rare test */
174 return 0;
175
176 op [- lit - 1] = lit - 1; /* stop run */
177 op -= !lit; /* undo run if length is zero */
178
179 for (;;)
180 {
181 if (expect_true (maxlen > 16))
182 {
183 len++; if (ref [len] != ip [len]) break;
184 len++; if (ref [len] != ip [len]) break;
185 len++; if (ref [len] != ip [len]) break;
186 len++; if (ref [len] != ip [len]) break;
187
188 len++; if (ref [len] != ip [len]) break;
189 len++; if (ref [len] != ip [len]) break;
190 len++; if (ref [len] != ip [len]) break;
191 len++; if (ref [len] != ip [len]) break;
192
193 len++; if (ref [len] != ip [len]) break;
194 len++; if (ref [len] != ip [len]) break;
195 len++; if (ref [len] != ip [len]) break;
196 len++; if (ref [len] != ip [len]) break;
197
198 len++; if (ref [len] != ip [len]) break;
199 len++; if (ref [len] != ip [len]) break;
200 len++; if (ref [len] != ip [len]) break;
201 len++; if (ref [len] != ip [len]) break;
202 }
203
204 do
205 len++;
206 while (len < maxlen && ref[len] == ip[len]);
207
208 break;
209 }
210
211 len -= 2; /* len is now #octets - 1 */
212 ip++;
213
214 if (len < 7)
215 {
216 *op++ = (off >> 8) + (len << 5);
217 }
218 else
219 {
220 *op++ = (off >> 8) + ( 7 << 5);
221 *op++ = len - 7;
222 }
223
224 *op++ = off;
225
226 lit = 0; op++; /* start run */
227
228 ip += len + 1;
229
230 if (expect_false (ip >= in_end - 2))
231 break;
232
233#if ULTRA_FAST || VERY_FAST
234 --ip;
235# if VERY_FAST && !ULTRA_FAST
236 --ip;
127# endif 237# endif
238 hval = FRST (ip);
239
240 hval = NEXT (hval, ip);
241 htab[IDX (hval)] = ip - LZF_HSLOT_BIAS;
242 ip++;
243
244# if VERY_FAST && !ULTRA_FAST
245 hval = NEXT (hval, ip);
246 htab[IDX (hval)] = ip - LZF_HSLOT_BIAS;
247 ip++;
128#endif 248# endif
249#else
250 ip -= len + 1;
129 251
130 for (;;) 252 do
253 {
254 hval = NEXT (hval, ip);
255 htab[IDX (hval)] = ip - LZF_HSLOT_BIAS;
256 ip++;
257 }
258 while (len--);
259#endif
260 }
261 else
262 {
263 /* one more literal byte we must copy */
264 if (expect_false (op >= out_end))
265 return 0;
266
267 lit++; *op++ = *ip++;
268
269 if (expect_false (lit == MAX_LIT))
270 {
271 op [- lit - 1] = lit - 1; /* stop run */
272 lit = 0; op++; /* start run */
273 }
274 }
275 }
276
277 if (op + 3 > out_end) /* at most 3 bytes can be missing here */
278 return 0;
279
280 while (ip < in_end)
131 { 281 {
132 if (expect_true (ip < in_end - 2)) 282 lit++; *op++ = *ip++;
133 {
134 hval = NEXT (hval, ip);
135 hslot = htab + IDX (hval);
136 ref = *hslot; *hslot = ip;
137
138 if (1
139#if INIT_HTAB && !USE_MEMCPY
140 && ref < ip /* the next test will actually take care of this, but this is faster */
141#endif
142 && (off = ip - ref - 1) < MAX_OFF
143 && ip + 4 < in_end
144 && ref > (u8 *)in_data
145#if STRICT_ALIGN
146 && ref[0] == ip[0]
147 && ref[1] == ip[1]
148 && ref[2] == ip[2]
149#else
150 && *(u16 *)ref == *(u16 *)ip
151 && ref[2] == ip[2]
152#endif
153 )
154 {
155 /* match found at *ref++ */
156 unsigned int len = 2;
157 unsigned int maxlen = in_end - ip - len;
158 maxlen = maxlen > MAX_REF ? MAX_REF : maxlen;
159
160 if (expect_false (op + lit + 1 + 3 >= out_end))
161 return 0;
162
163 if (lit)
164 {
165 *op++ = lit - 1;
166 lit = -lit;
167 do
168 *op++ = ip[lit];
169 while (++lit);
170 }
171
172 for (;;)
173 {
174 if (expect_true (ip < in_end - 2 - 8 && maxlen > 8))
175 {
176 len++; if (ref [len] != ip [len]) break;
177 len++; if (ref [len] != ip [len]) break;
178 len++; if (ref [len] != ip [len]) break;
179 len++; if (ref [len] != ip [len]) break;
180 len++; if (ref [len] != ip [len]) break;
181 len++; if (ref [len] != ip [len]) break;
182 len++; if (ref [len] != ip [len]) break;
183 len++; if (ref [len] != ip [len]) break;
184 }
185
186 do
187 len++;
188 while (len < maxlen && ref[len] == ip[len]);
189
190 break;
191 }
192
193 len -= 2;
194 ip++;
195
196 if (len < 7)
197 {
198 *op++ = (off >> 8) + (len << 5);
199 }
200 else
201 {
202 *op++ = (off >> 8) + ( 7 << 5);
203 *op++ = len - 7;
204 }
205
206 *op++ = off;
207
208#if ULTRA_FAST || VERY_FAST
209 ip += len;
210#if VERY_FAST && !ULTRA_FAST
211 --ip;
212#endif
213 hval = FRST (ip);
214
215 hval = NEXT (hval, ip);
216 htab[IDX (hval)] = ip;
217 ip++;
218
219#if VERY_FAST && !ULTRA_FAST
220 hval = NEXT (hval, ip);
221 htab[IDX (hval)] = ip;
222 ip++;
223#endif
224#else
225 do
226 {
227 hval = NEXT (hval, ip);
228 htab[IDX (hval)] = ip;
229 ip++;
230 }
231 while (len--);
232#endif
233 continue;
234 }
235 }
236 else if (expect_false (ip == in_end))
237 break;
238
239 /* one more literal byte we must copy */
240 lit++;
241 ip++;
242 283
243 if (expect_false (lit == MAX_LIT)) 284 if (expect_false (lit == MAX_LIT))
244 { 285 {
245 if (op + 1 + MAX_LIT >= out_end) 286 op [- lit - 1] = lit - 1; /* stop run */
246 return 0; 287 lit = 0; op++; /* start run */
247
248 *op++ = MAX_LIT - 1;
249
250#ifdef lzf_movsb
251 ip -= lit;
252 lzf_movsb (op, ip, lit);
253#else
254 lit = -lit;
255 do
256 *op++ = ip[lit];
257 while (++lit);
258#endif
259 } 288 }
260 } 289 }
261 290
262 if (lit) 291 op [- lit - 1] = lit - 1; /* end run */
263 { 292 op -= !lit; /* undo run if length is zero */
264 if (op + lit + 1 >= out_end)
265 return 0;
266 293
267 *op++ = lit - 1;
268#ifdef lzf_movsb
269 ip -= lit;
270 lzf_movsb (op, ip, lit);
271#else
272 lit = -lit;
273 do
274 *op++ = ip[lit];
275 while (++lit);
276#endif
277 }
278
279 return op - (u8 *) out_data; 294 return op - (u8 *)out_data;
280} 295}
281 296
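The "start run" / "stop run" bookkeeping used throughout the new revision (reserve one output byte, patch it with lit - 1 when the run ends, take it back if the run stayed empty) replaces the copy-back loops at the bottom of the left-hand version. A stripped-down sketch of just that idea, outside any LZF context; the real encoder additionally splits runs at MAX_LIT.

/* sketch only: emit a literal run using the reserve-and-patch scheme */
#include <stdio.h>

static unsigned int
copy_literals (const unsigned char *ip, unsigned int n, unsigned char *op_start)
{
  unsigned char *op = op_start;
  int lit = 0;

  op++;                          /* start run: reserve the header byte */

  while (n--)
    {
      lit++; *op++ = *ip++;      /* one more literal byte we must copy */
    }

  op [- lit - 1] = lit - 1;      /* stop run: patch the reserved byte  */
  op -= !lit;                    /* undo run if length is zero         */

  return op - op_start;
}

int main (void)
{
  unsigned char out[64];
  unsigned int n = copy_literals ((const unsigned char *)"abcd", 4, out);

  printf ("%u bytes: header %02x + \"%.4s\"\n", n, out[0], (const char *)(out + 1));
  return 0;
}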
