Comparing liblzf/lzf_c.c (file contents):
Revision 1.32 by root, Fri May 9 12:42:50 2008 UTC vs.
Revision 1.51 by root, Wed May 24 18:37:18 2017 UTC

@@ -1,18 +1,18 @@
 /*
- * Copyright (c) 2000-2007 Marc Alexander Lehmann <schmorp@schmorp.de>
+ * Copyright (c) 2000-2010,2012 Marc Alexander Lehmann <schmorp@schmorp.de>
  *
  * Redistribution and use in source and binary forms, with or without modifica-
  * tion, are permitted provided that the following conditions are met:
  *
  * 1. Redistributions of source code must retain the above copyright notice,
  *    this list of conditions and the following disclaimer.
  *
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
  * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
  * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
  * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
@@ -38,45 +38,41 @@
 
 #define HSIZE (1 << (HLOG))
 
 /*
  * don't play with this unless you benchmark!
- * decompression is not dependent on the hash function
- * the hashing function might seem strange, just believe me
+ * the data format is not dependent on the hash function.
+ * the hash function might seem strange, just believe me,
  * it works ;)
  */
 #ifndef FRST
 # define FRST(p) (((p[0]) << 8) | p[1])
 # define NEXT(v,p) (((v) << 8) | p[2])
+# if MULTIPLICATION_IS_SLOW
 # if ULTRA_FAST
 # define IDX(h) ((( h >> (3*8 - HLOG)) - h ) & (HSIZE - 1))
 # elif VERY_FAST
 # define IDX(h) ((( h >> (3*8 - HLOG)) - h*5) & (HSIZE - 1))
+# else
+# define IDX(h) ((((h ^ (h << 5)) >> (3*8 - HLOG)) - h*5) & (HSIZE - 1))
+# endif
 # else
-# define IDX(h) ((((h ^ (h << 5)) >> (3*8 - HLOG)) - h*5) & (HSIZE - 1))
+/* this one was developed with sesse,
+ * and is very similar to the one in snappy.
+ * it does need a modern enough cpu with a fast multiplication.
+ */
+# define IDX(h) (((h * 0x1e35a7bdU) >> (32 - HLOG - 8)) & (HSIZE - 1))
 # endif
 #endif
-/*
- * IDX works because it is very similar to a multiplicative hash, e.g.
- * ((h * 57321 >> (3*8 - HLOG)) & (HSIZE - 1))
- * the latter is also quite fast on newer CPUs, and compresses similarly.
- *
- * the next one is also quite good, albeit slow ;)
- * (int)(cos(h & 0xffffff) * 1e6)
- */
 
 #if 0
 /* original lzv-like hash function, much worse and thus slower */
 # define FRST(p) (p[0] << 5) ^ p[1]
 # define NEXT(v,p) ((v) << 5) ^ p[2]
 # define IDX(h) ((h) & (HSIZE - 1))
 #endif
 
-#define MAX_LIT (1 << 5)
-#define MAX_OFF (1 << 13)
-#define MAX_REF ((1 << 8) + (1 << 3))
-
 #if __GNUC__ >= 3
 # define expect(expr,value) __builtin_expect ((expr),(value))
 # define inline inline
 #else
 # define expect(expr,value) (expr)
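The replacement IDX above is a plain multiplicative hash over the 24-bit value that FRST/NEXT accumulate from the next three input bytes; when MULTIPLICATION_IS_SLOW is defined, the older shift/xor variants are kept instead. A standalone sketch (not part of liblzf) of how a hash-table slot is selected, with HLOG fixed at 16 purely for illustration (the real value comes from lzfP.h):

#include <stdio.h>

#define HLOG  16                    /* illustrative only; lzfP.h sets the real value */
#define HSIZE (1 << (HLOG))

#define FRST(p)   (((p[0]) << 8) | p[1])
#define NEXT(v,p) (((v) << 8) | p[2])
#define IDX(h)    (((h * 0x1e35a7bdU) >> (32 - HLOG - 8)) & (HSIZE - 1))

int main (void)
{
  const unsigned char p[3] = { 'a', 'b', 'c' };
  unsigned int hval = FRST (p);     /* 16 bits: p[0] p[1]      */
  hval = NEXT (hval, p);            /* 24 bits: p[0] p[1] p[2] */
  printf ("hval = 0x%06x -> slot %u of %u\n", hval, IDX (hval), (unsigned)HSIZE);
  return 0;
}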
@@ -87,13 +83,13 @@
 #define expect_true(expr) expect ((expr) != 0, 1)
 
 /*
  * compressed format
  *
- * 000LLLLL <L+1>    ; literal
- * LLLooooo oooooooo ; backref L
- * 111ooooo LLLLLLLL oooooooo ; backref L+7
+ * 000LLLLL <L+1>    ; literal, L+1=1..33 octets
+ * LLLooooo oooooooo ; backref L+1=1..7 octets, o+1=1..4096 offset
+ * 111ooooo LLLLLLLL oooooooo ; backref L+8 octets, o+1=1..4096 offset
  *
  */
 
 unsigned int
 lzf_compress (const void *const in_data, unsigned int in_len,
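The three token shapes in the comment above differ only in their leading control byte. A small sketch (not part of liblzf) that pulls the raw L and o fields back out of each shape; their meaning (L+1 literals, L+1 or L+8 backref octets at offset o+1) is exactly the one given by that comment:

#include <stdio.h>

/* fields() is illustrative only; it prints the raw bit fields of one token. */
static void fields (const unsigned char *t)
{
  unsigned int ctrl = t[0];

  if ((ctrl >> 5) == 0)                 /* 000LLLLL                   */
    printf ("literal run, L = %u\n", ctrl & 0x1f);
  else if ((ctrl >> 5) != 7)            /* LLLooooo oooooooo          */
    printf ("short backref, L = %u, o = %u\n",
            ctrl >> 5, ((ctrl & 0x1f) << 8) | t[1]);
  else                                  /* 111ooooo LLLLLLLL oooooooo */
    printf ("long backref, L = 7 + %u, o = %u\n",
            (unsigned) t[1], ((ctrl & 0x1f) << 8) | t[2]);
}

int main (void)
{
  const unsigned char lit[]  = { 0x02 };              /* L = 2               */
  const unsigned char shrt[] = { 0x45, 0x0f };        /* L = 2,    o = 0x50f */
  const unsigned char lng[]  = { 0xe3, 0x10, 0x0f };  /* L = 7+16, o = 0x30f */

  fields (lit); fields (shrt); fields (lng);
  return 0;
}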
@@ -104,79 +100,79 @@
               )
 {
 #if !LZF_STATE_ARG
   LZF_STATE htab;
 #endif
-  const u8 **hslot;
   const u8 *ip = (const u8 *)in_data;
   u8 *op = (u8 *)out_data;
   const u8 *in_end = ip + in_len;
   u8 *out_end = op + out_len;
   const u8 *ref;
 
   /* off requires a type wide enough to hold a general pointer difference.
    * ISO C doesn't have that (size_t might not be enough and ptrdiff_t only
-   * works for differences within a single object). We also assume that no
+   * works for differences within a single object). We also assume that
    * no bit pattern traps. Since the only platform that is both non-POSIX
    * and fails to support both assumptions is windows 64 bit, we make a
    * special workaround for it.
    */
-#if defined (WIN32) && defined (_M_X64)
-  unsigned _int64 off; /* workaround for missing POSIX compliance */
+#if defined (_WIN32) && defined (_M_X64)
+  /* workaround for missing POSIX compliance */
+  #if __GNUC__
+    unsigned long long off;
+  #else
+    unsigned __int64 off;
+  #endif
 #else
   unsigned long off;
 #endif
   unsigned int hval;
   int lit;
 
   if (!in_len || !out_len)
     return 0;
 
 #if INIT_HTAB
-  memset (htab, 0, sizeof (htab));
-# if 0
-  for (hslot = htab; hslot < htab + HSIZE; hslot++)
-    *hslot++ = ip;
-# endif
+  memset (htab, 0, sizeof (LZF_STATE));
 #endif
 
   lit = 0; op++; /* start run */
 
   hval = FRST (ip);
   while (ip < in_end - 2)
     {
+      LZF_HSLOT *hslot;
+
       hval = NEXT (hval, ip);
       hslot = htab + IDX (hval);
-      ref = *hslot; *hslot = ip;
+      ref = *hslot + LZF_HSLOT_BIAS; *hslot = ip - LZF_HSLOT_BIAS;
 
       if (1
 #if INIT_HTAB
           && ref < ip /* the next test will actually take care of this, but this is faster */
 #endif
-          && (off = ip - ref - 1) < MAX_OFF
-          && ip + 4 < in_end
+          && (off = ip - ref - 1) < LZF_MAX_OFF
           && ref > (u8 *)in_data
+          && ref[2] == ip[2]
 #if STRICT_ALIGN
-          && ref[0] == ip[0]
-          && ref[1] == ip[1]
-          && ref[2] == ip[2]
+          && ((ref[1] << 8) | ref[0]) == ((ip[1] << 8) | ip[0])
 #else
           && *(u16 *)ref == *(u16 *)ip
-          && ref[2] == ip[2]
 #endif
          )
         {
          /* match found at *ref++ */
           unsigned int len = 2;
           unsigned int maxlen = in_end - ip - len;
-          maxlen = maxlen > MAX_REF ? MAX_REF : maxlen;
+          maxlen = maxlen > LZF_MAX_REF ? LZF_MAX_REF : maxlen;
+
+          if (expect_false (op + 3 + 1 >= out_end)) /* first a faster conservative test */
+            if (op - !lit + 3 + 1 >= out_end) /* second the exact but rare test */
+              return 0;
 
           op [- lit - 1] = lit - 1; /* stop run */
           op -= !lit; /* undo run if length is zero */
-
-          if (expect_false (op + 3 + 1 >= out_end))
-            return 0;
 
           for (;;)
             {
               if (expect_true (maxlen > 16))
                 {
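The hunk above stops where the match-length scan begins and the next one resumes at its tail loop; the unchanged block in between (the unrolled byte comparisons guarded by maxlen > 16) is skipped by the diff. The overall shape of that scan, as a standalone sketch (scan() is a made-up helper, not a liblzf function):

#include <stdio.h>

/* On entry the caller has already verified ref[0..2] == ip[0..2], so len
 * starts at 2 and ends up as the number of matching octets, capped at
 * maxlen.  The real code unrolls the first 16 comparisons. */
static unsigned int
scan (const unsigned char *ref, const unsigned char *ip, unsigned int maxlen)
{
  unsigned int len = 2;

  do
    len++;
  while (len < maxlen && ref[len] == ip[len]);

  return len;
}

int main (void)
{
  const unsigned char buf[] = "abcdefabcdeX";

  /* ref = start of the earlier occurrence, ip = start of its repetition */
  printf ("match of %u octets\n", scan (buf, buf + 6, 6));
  return 0;
}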
@@ -206,11 +202,11 @@
               while (len < maxlen && ref[len] == ip[len]);
 
               break;
             }
 
-          len -= 2;
+          len -= 2; /* len is now #octets - 1 */
           ip++;
 
           if (len < 7)
             {
               *op++ = (off >> 8) + (len << 5);
@@ -221,47 +217,54 @@
               *op++ = len - 7;
             }
 
           *op++ = off;
 
+          lit = 0; op++; /* start run */
+
+          ip += len + 1;
+
+          if (expect_false (ip >= in_end - 2))
+            break;
+
 #if ULTRA_FAST || VERY_FAST
-          ip += len;
-#if VERY_FAST && !ULTRA_FAST
           --ip;
-#endif
+# if VERY_FAST && !ULTRA_FAST
+          --ip;
+# endif
           hval = FRST (ip);
 
           hval = NEXT (hval, ip);
-          htab[IDX (hval)] = ip;
+          htab[IDX (hval)] = ip - LZF_HSLOT_BIAS;
           ip++;
 
-#if VERY_FAST && !ULTRA_FAST
+# if VERY_FAST && !ULTRA_FAST
           hval = NEXT (hval, ip);
-          htab[IDX (hval)] = ip;
+          htab[IDX (hval)] = ip - LZF_HSLOT_BIAS;
           ip++;
-#endif
+# endif
 #else
+          ip -= len + 1;
+
           do
             {
               hval = NEXT (hval, ip);
-              htab[IDX (hval)] = ip;
+              htab[IDX (hval)] = ip - LZF_HSLOT_BIAS;
               ip++;
             }
           while (len--);
 #endif
-
-          lit = 0; op++; /* start run */
         }
       else
         {
           /* one more literal byte we must copy */
           if (expect_false (op >= out_end))
             return 0;
 
           lit++; *op++ = *ip++;
 
-          if (expect_false (lit == MAX_LIT))
+          if (expect_false (lit == LZF_MAX_LIT))
            {
              op [- lit - 1] = lit - 1; /* stop run */
              lit = 0; op++; /* start run */
            }
        }
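Both branches above rely on the same run bookkeeping: one output byte is reserved when a literal run starts and backpatched with the count when it stops. A minimal sketch of just that pattern (not from liblzf):

#include <stdio.h>

int main (void)
{
  unsigned char out[16], *op = out;
  const char *src = "abc";
  int lit;

  lit = 0; op++;                    /* start run: reserve the header byte    */

  while (*src)
    {
      lit++; *op++ = *src++;        /* copy one literal octet                */
    }

  op [- lit - 1] = lit - 1;         /* stop run: backpatch 000LLLLL = lit-1  */
  op -= !lit;                       /* drop the header if the run was empty  */

  printf ("emitted %d bytes, header 0x%02x\n", (int)(op - out), (unsigned)out[0]);
  return 0;
}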
@@ -272,11 +275,11 @@
 
   while (ip < in_end)
     {
       lit++; *op++ = *ip++;
 
-      if (expect_false (lit == MAX_LIT))
+      if (expect_false (lit == LZF_MAX_LIT))
         {
           op [- lit - 1] = lit - 1; /* stop run */
           lit = 0; op++; /* start run */
         }
     }
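At the call site, every return 0 path above looks the same: the caller passes an output buffer it considers big enough and treats a zero return as "did not fit or not compressible". A usage sketch against the public lzf.h entry points (buffer sizes here are arbitrary, not a recommendation):

#include <stdio.h>
#include <string.h>
#include "lzf.h"

int main (void)
{
  const char in[] = "hello hello hello hello hello hello";
  unsigned char packed[256], restored[256];
  unsigned int clen, dlen;

  clen = lzf_compress (in, sizeof (in), packed, sizeof (packed));
  if (!clen)
    return 1;                       /* output did not fit / no gain */

  dlen = lzf_decompress (packed, clen, restored, sizeof (restored));
  printf ("%u -> %u -> %u bytes\n", (unsigned)sizeof (in), clen, dlen);
  return memcmp (in, restored, sizeof (in)) != 0;
}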
