Dinesh Belwalkar | e63f7f9 | 2019-11-22 23:11:16 +0000 | [diff] [blame] | 1 | package lz4 |
| 2 | |
| 3 | import ( |
| 4 | "encoding/binary" |
| 5 | "fmt" |
| 6 | "math/bits" |
| 7 | ) |
| 8 | |
| 9 | // blockHash hashes the lower 6 bytes into a value < htSize. |
| 10 | func blockHash(x uint64) uint32 { |
| 11 | const prime6bytes = 227718039650203 |
| 12 | return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog)) |
| 13 | } |
| 14 | |
// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible.
//
// Worst case: the block is stored as literals, costing one extra length
// byte per 255 input bytes plus a small fixed header margin.
func CompressBlockBound(n int) int {
	const margin = 16
	return n + n/255 + margin
}
| 19 | |
| 20 | // UncompressBlock uncompresses the source buffer into the destination one, |
| 21 | // and returns the uncompressed size. |
| 22 | // |
| 23 | // The destination buffer must be sized appropriately. |
| 24 | // |
| 25 | // An error is returned if the source data is invalid or the destination buffer is too small. |
| 26 | func UncompressBlock(src, dst []byte) (int, error) { |
| 27 | if len(src) == 0 { |
| 28 | return 0, nil |
| 29 | } |
| 30 | if di := decodeBlock(dst, src); di >= 0 { |
| 31 | return di, nil |
| 32 | } |
| 33 | return 0, ErrInvalidSourceShortBuffer |
| 34 | } |
| 35 | |
// CompressBlock compresses the source buffer into the destination one.
// This is the fast version of LZ4 compression and also the default one.
// The size of hashTable must be at least 64Kb.
//
// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible.
//
// An error is returned if the destination buffer is too small.
func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
	// An out-of-bounds write into dst panics; recoverBlock (defined elsewhere
	// in this package) converts that panic into an error return — presumably
	// the "destination too small" error promised above.
	defer recoverBlock(&err)

	// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
	// This significantly speeds up incompressible data and usually has very small impact on compression.
	// bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
	const adaptSkipLog = 7
	// sn is the last position a match search may start from: the final
	// mfLimit bytes are reserved and always emitted as literals.
	sn, dn := len(src)-mfLimit, len(dst)
	if sn <= 0 || dn == 0 {
		// Input too short to ever contain a match, or no room in dst.
		return 0, nil
	}
	if len(hashTable) < htSize {
		return 0, fmt.Errorf("hash table too small, should be at least %d in size", htSize)
	}
	// Prove to the compiler the table has at least htSize elements.
	// The compiler can see that "uint32() >> hashShift" cannot be out of bounds.
	hashTable = hashTable[:htSize]

	// si: Current position of the search.
	// anchor: Position of the current literals.
	var si, anchor int

	// Fast scan strategy: the hash table only stores the last 4 bytes sequences.
	for si < sn {
		// Hash the next 6 bytes (sequence)...
		match := binary.LittleEndian.Uint64(src[si:])
		h := blockHash(match)
		h2 := blockHash(match >> 8)

		// We check a match at s, s+1 and s+2 and pick the first one we get.
		// Checking 3 only requires us to load the source one.
		ref := hashTable[h]
		ref2 := hashTable[h2]
		hashTable[h] = si
		hashTable[h2] = si + 1
		offset := si - ref

		// If offset <= 0 we got an old entry in the hash table.
		if offset <= 0 || offset >= winSize || // Out of window.
			uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches.
			// No match. Start calculating another hash.
			// The processor can usually do this out-of-order.
			h = blockHash(match >> 16)
			ref = hashTable[h]

			// Check the second match at si+1
			si += 1
			offset = si - ref2

			if offset <= 0 || offset >= winSize ||
				uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) {
				// No match. Check the third match at si+2
				si += 1
				offset = si - ref
				hashTable[h] = si

				if offset <= 0 || offset >= winSize ||
					uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) {
					// Skip one extra byte (at si+3) before we check 3 matches again.
					si += 2 + (si-anchor)>>adaptSkipLog
					continue
				}
			}
		}

		// Match found.
		lLen := si - anchor // Literal length.
		// We already matched 4 bytes.
		mLen := 4

		// Extend backwards if we can, reducing literals.
		// tOff tracks the corresponding position inside the earlier copy.
		tOff := si - offset - 1
		for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] {
			si--
			tOff--
			lLen--
			mLen++
		}

		// Add the match length, so we continue search at the end.
		// Use mLen to store the offset base.
		// (After this line mLen holds a position, not a length; it is turned
		// back into a length below via "mLen = si - mLen". The minMatch offset
		// accounts for the 4 bytes the LZ4 format implies in every match.)
		si, mLen = si+mLen, si+minMatch

		// Find the longest match by looking by batches of 8 bytes.
		for si < sn {
			x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:])
			if x == 0 {
				si += 8
			} else {
				// Stop at the first non-zero byte: each differing byte sets at
				// least one bit, so trailing-zero-bits / 8 = matching bytes.
				si += bits.TrailingZeros64(x) >> 3
				break
			}
		}

		// Token byte: low nibble = match length (capped at 0xF, extra bytes
		// follow later), high nibble = literal length (same scheme).
		mLen = si - mLen
		if mLen < 0xF {
			dst[di] = byte(mLen)
		} else {
			dst[di] = 0xF
		}

		// Encode literals length.
		if lLen < 0xF {
			dst[di] |= byte(lLen << 4)
		} else {
			dst[di] |= 0xF0
			di++
			l := lLen - 0xF
			// Emit 0xFF continuation bytes, then the remainder.
			for ; l >= 0xFF; l -= 0xFF {
				dst[di] = 0xFF
				di++
			}
			dst[di] = byte(l)
		}
		di++

		// Literals.
		copy(dst[di:di+lLen], src[anchor:anchor+lLen])
		// The +2 reserves room for the 2-byte offset written just below.
		di += lLen + 2
		anchor = si

		// Encode offset (little-endian, into the 2 reserved bytes).
		_ = dst[di] // Bound check elimination.
		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)

		// Encode match length part 2.
		if mLen >= 0xF {
			for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
				dst[di] = 0xFF
				di++
			}
			dst[di] = byte(mLen)
			di++
		}
		// Check if we can load next values.
		if si >= sn {
			break
		}
		// Hash match end-2 so the tail of this match is findable next time.
		h = blockHash(binary.LittleEndian.Uint64(src[si-2:]))
		hashTable[h] = si - 2
	}

	if anchor == 0 {
		// Incompressible.
		return 0, nil
	}

	// Last literals.
	lLen := len(src) - anchor
	if lLen < 0xF {
		dst[di] = byte(lLen << 4)
	} else {
		dst[di] = 0xF0
		di++
		for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF {
			dst[di] = 0xFF
			di++
		}
		dst[di] = byte(lLen)
	}
	di++

	// Write the last literals.
	if di >= anchor {
		// Compressed output would be no smaller than the input: report
		// incompressible.
		return 0, nil
	}
	di += copy(dst[di:di+len(src)-anchor], src[anchor:])
	return di, nil
}
| 215 | |
| 216 | // blockHash hashes 4 bytes into a value < winSize. |
| 217 | func blockHashHC(x uint32) uint32 { |
| 218 | const hasher uint32 = 2654435761 // Knuth multiplicative hash. |
| 219 | return x * hasher >> (32 - winSizeLog) |
| 220 | } |
| 221 | |
// CompressBlockHC compresses the source buffer src into the destination dst
// with max search depth (use 0 or negative value for no max).
//
// CompressBlockHC compression ratio is better than CompressBlock but it is also slower.
//
// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible.
//
// An error is returned if the destination buffer is too small.
func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
	// An out-of-bounds write into dst panics; recoverBlock (defined elsewhere
	// in this package) converts that panic into an error return.
	defer recoverBlock(&err)

	// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
	// This significantly speeds up incompressible data and usually has very small impact on compression.
	// bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
	const adaptSkipLog = 7

	// sn: last position a match search may start from (mfLimit tail reserved).
	sn, dn := len(src)-mfLimit, len(dst)
	if sn <= 0 || dn == 0 {
		return 0, nil
	}
	var si int

	// hashTable: stores the last position found for a given hash
	// chainTable: stores previous positions for a given hash
	var hashTable, chainTable [winSize]int

	if depth <= 0 {
		depth = winSize
	}

	anchor := si
	for si < sn {
		// Hash the next 4 bytes (sequence).
		match := binary.LittleEndian.Uint32(src[si:])
		h := blockHashHC(match)

		// Follow the chain until out of window and give the longest match.
		mLen := 0
		offset := 0
		// NOTE(review): "next > 0" means position 0 can never be used as a
		// match source, and "try" is only decremented below when a longer
		// match is found, so depth bounds improvements rather than chain
		// links visited — confirm both are intended.
		for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] {
			// The first (mLen==0) or next byte (mLen>=minMatch) at current match length
			// must match to improve on the match length.
			if src[next+mLen] != src[si+mLen] {
				continue
			}
			ml := 0
			// Compare the current position with a previous with the same hash,
			// 8 bytes at a time.
			for ml < sn-si {
				x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:])
				if x == 0 {
					ml += 8
				} else {
					// Stop at the first non-zero byte: trailing-zero-bits / 8
					// gives the number of matching bytes.
					ml += bits.TrailingZeros64(x) >> 3
					break
				}
			}
			if ml < minMatch || ml <= mLen {
				// Match too small (<minMatch) or smaller than the current match.
				continue
			}
			// Found a longer match, keep its position and length.
			mLen = ml
			offset = si - next
			// Try another previous position with the same hash.
			try--
		}
		// Record si as the newest position for this hash, chaining to the
		// previous one.
		chainTable[si&winMask] = hashTable[h]
		hashTable[h] = si

		// No match found.
		if mLen == 0 {
			si += 1 + (si-anchor)>>adaptSkipLog
			continue
		}

		// Match found.
		// Update hash/chain tables with overlapping bytes:
		// si already hashed, add everything from si+1 up to the match length.
		// (winStart clips to the last winSize positions of the match, since
		// older entries would be out of window anyway.)
		winStart := si + 1
		if ws := si + mLen - winSize; ws > winStart {
			winStart = ws
		}
		for si, ml := winStart, si+mLen; si < ml; {
			// Roll the 4-byte window forward one byte at a time.
			match >>= 8
			match |= uint32(src[si+3]) << 24
			h := blockHashHC(match)
			chainTable[si&winMask] = hashTable[h]
			hashTable[h] = si
			si++
		}

		lLen := si - anchor
		si += mLen
		mLen -= minMatch // Match length does not include minMatch.

		// Token byte: low nibble = match length (capped at 0xF), high nibble
		// = literal length (same scheme); extra length bytes follow.
		if mLen < 0xF {
			dst[di] = byte(mLen)
		} else {
			dst[di] = 0xF
		}

		// Encode literals length.
		if lLen < 0xF {
			dst[di] |= byte(lLen << 4)
		} else {
			dst[di] |= 0xF0
			di++
			l := lLen - 0xF
			for ; l >= 0xFF; l -= 0xFF {
				dst[di] = 0xFF
				di++
			}
			dst[di] = byte(l)
		}
		di++

		// Literals.
		copy(dst[di:di+lLen], src[anchor:anchor+lLen])
		di += lLen
		anchor = si

		// Encode offset (2 bytes, little-endian).
		di += 2
		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)

		// Encode match length part 2.
		if mLen >= 0xF {
			for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
				dst[di] = 0xFF
				di++
			}
			dst[di] = byte(mLen)
			di++
		}
	}

	if anchor == 0 {
		// Incompressible.
		return 0, nil
	}

	// Last literals.
	lLen := len(src) - anchor
	if lLen < 0xF {
		dst[di] = byte(lLen << 4)
	} else {
		dst[di] = 0xF0
		di++
		lLen -= 0xF
		for ; lLen >= 0xFF; lLen -= 0xFF {
			dst[di] = 0xFF
			di++
		}
		dst[di] = byte(lLen)
	}
	di++

	// Write the last literals.
	if di >= anchor {
		// Compressed output would be no smaller than the input: report
		// incompressible.
		return 0, nil
	}
	di += copy(dst[di:di+len(src)-anchor], src[anchor:])
	return di, nil
}