1 | /* |
1 | /* |
2 | * Copyright (c) 2000-2010 Marc Alexander Lehmann <schmorp@schmorp.de> |
2 | * Copyright (c) 2000-2010,2012 Marc Alexander Lehmann <schmorp@schmorp.de> |
3 | * |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without modifica- |
4 | * Redistribution and use in source and binary forms, with or without modifica- |
5 | * tion, are permitted provided that the following conditions are met: |
5 | * tion, are permitted provided that the following conditions are met: |
6 | * |
6 | * |
7 | * 1. Redistributions of source code must retain the above copyright notice, |
7 | * 1. Redistributions of source code must retain the above copyright notice, |
… | |
… | |
45 | * it works ;) |
45 | * it works ;) |
46 | */ |
46 | */ |
/* Hash-function building blocks for the LZF compressor.
 * FRST/NEXT compute a rolling value over a 3-byte window of input;
 * IDX maps that value into the hash table (HSIZE entries, HSIZE == 1 << HLOG —
 * assumed from the masks below; confirm against the surrounding file).
 * don't play with these unless you benchmark!
 * the data format is not dependent on the hash function.
 */
#ifndef FRST
# define FRST(p) (((p[0]) << 8) | p[1])
# define NEXT(v,p) (((v) << 8) | p[2])

# if MULTIPLICATION_IS_SLOW
#  if ULTRA_FAST
#   define IDX(h) ((( h >> (3*8 - HLOG)) - h  ) & (HSIZE - 1))
#  elif VERY_FAST
#   define IDX(h) ((( h >> (3*8 - HLOG)) - h*5) & (HSIZE - 1))
#  else
#   define IDX(h) ((((h ^ (h << 5)) >> (3*8 - HLOG)) - h*5) & (HSIZE - 1))
#  endif
# else
/* this one was developed with sesse,
 * and is very similar to the one in snappy.
 * it does need a modern enough cpu with a fast multiplication.
 */
#  define IDX(h) (((h * 0x1e35a7bdU) >> (32 - HLOG - 8)) & (HSIZE - 1))
# endif
#endif
/*
 * IDX works because it is very similar to a multiplicative hash, e.g.
 * ((h * 57321 >> (3*8 - HLOG)) & (HSIZE - 1))
 * the latter is also quite fast on newer CPUs, and compresses similarly.
 *
 * the next one is also quite good, albeit slow ;)
 * (int)(cos(h & 0xffffff) * 1e6)
 */
|
|
66 | |
66 | |
67 | #if 0 |
67 | #if 0 |
68 | /* original lzv-like hash function, much worse and thus slower */ |
68 | /* original lzv-like hash function, much worse and thus slower */ |
69 | # define FRST(p) (p[0] << 5) ^ p[1] |
69 | # define FRST(p) (p[0] << 5) ^ p[1] |
70 | # define NEXT(v,p) ((v) << 5) ^ p[2] |
70 | # define NEXT(v,p) ((v) << 5) ^ p[2] |
… | |
… | |
112 | u8 *out_end = op + out_len; |
112 | u8 *out_end = op + out_len; |
113 | const u8 *ref; |
113 | const u8 *ref; |
114 | |
114 | |
115 | /* off requires a type wide enough to hold a general pointer difference. |
115 | /* off requires a type wide enough to hold a general pointer difference. |
116 | * ISO C doesn't have that (size_t might not be enough and ptrdiff_t only |
116 | * ISO C doesn't have that (size_t might not be enough and ptrdiff_t only |
117 | * works for differences within a single object). We also assume that no |
117 | * works for differences within a single object). We also assume that |
118 | * no bit pattern traps. Since the only platform that is both non-POSIX |
118 | * no bit pattern traps. Since the only platform that is both non-POSIX |
119 | * and fails to support both assumptions is windows 64 bit, we make a |
119 | * and fails to support both assumptions is windows 64 bit, we make a |
120 | * special workaround for it. |
120 | * special workaround for it. |
121 | */ |
121 | */ |
122 | #if defined (WIN32) && defined (_M_X64) |
122 | #if defined (_WIN32) && defined (_M_X64) |
123 | unsigned _int64 off; /* workaround for missing POSIX compliance */ |
123 | /* workaround for missing POSIX compliance */ |
|
|
124 | #if __GNUC__ |
|
|
125 | unsigned long long off; |
|
|
126 | #else |
|
|
127 | unsigned __int64 off; |
|
|
128 | #endif |
124 | #else |
129 | #else |
125 | unsigned long off; |
130 | unsigned long off; |
126 | #endif |
131 | #endif |
127 | unsigned int hval; |
132 | unsigned int hval; |
128 | int lit; |
133 | int lit; |
… | |
… | |
148 | if (1 |
153 | if (1 |
149 | #if INIT_HTAB |
154 | #if INIT_HTAB |
150 | && ref < ip /* the next test will actually take care of this, but this is faster */ |
155 | && ref < ip /* the next test will actually take care of this, but this is faster */ |
151 | #endif |
156 | #endif |
152 | && (off = ip - ref - 1) < MAX_OFF |
157 | && (off = ip - ref - 1) < MAX_OFF |
153 | && ip + 4 < in_end |
|
|
154 | && ref > (u8 *)in_data |
158 | && ref > (u8 *)in_data |
155 | && ref[2] == ip[2] |
159 | && ref[2] == ip[2] |
156 | #if STRICT_ALIGN |
160 | #if STRICT_ALIGN |
157 | && (ref[1] << 8) | ref[0] == (ip[1] << 8) | ip[0] |
161 | && ((ref[1] << 8) | ref[0]) == ((ip[1] << 8) | ip[0]) |
158 | #else |
162 | #else |
159 | && *(u16 *)ref == *(u16 *)ip |
163 | && *(u16 *)ref == *(u16 *)ip |
160 | #endif |
164 | #endif |
161 | ) |
165 | ) |
162 | { |
166 | { |
… | |
… | |
216 | *op++ = (off >> 8) + ( 7 << 5); |
220 | *op++ = (off >> 8) + ( 7 << 5); |
217 | *op++ = len - 7; |
221 | *op++ = len - 7; |
218 | } |
222 | } |
219 | |
223 | |
220 | *op++ = off; |
224 | *op++ = off; |
|
|
225 | |
221 | lit = 0; op++; /* start run */ |
226 | lit = 0; op++; /* start run */ |
222 | |
227 | |
223 | ip += len + 1; |
228 | ip += len + 1; |
224 | |
229 | |
225 | if (expect_false (ip >= in_end - 2)) |
230 | if (expect_false (ip >= in_end - 2)) |