// Package huff0 provides fast huffman encoding as used in zstd.
//
// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details.
package huff0

import (
	"errors"
	"fmt"
	"math"
	"math/bits"

	"github.com/klauspost/compress/fse"
)

const (
	maxSymbolValue = 255

	// zstandard limits tablelog to 11, see:
	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description
	tableLogMax     = 11
	tableLogDefault = 11
	minTablelog     = 5
	huffNodesLen    = 512

	// BlockSizeMax is the maximum uncompressed input size for a single block.
	BlockSizeMax = 1<<18 - 1
)

var (
	// ErrIncompressible is returned when the input is judged to be too hard to compress.
	ErrIncompressible = errors.New("input is not compressible")

	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
	ErrUseRLE = errors.New("input is single value repeated")

	// ErrTooBig is returned if the input is too large for a single block.
	ErrTooBig = errors.New("input too big")

	// ErrMaxDecodedSizeExceeded is returned if the decoded output exceeds MaxDecodedSize.
	ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
)
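
// exampleHandleCompressErrors is an illustrative sketch, not part of the huff0
// API: it shows how a caller might treat the sentinel errors above, assuming
// Compress1X as declared elsewhere in this package. ErrIncompressible and
// ErrUseRLE signal that the caller should emit a raw or RLE block instead of a
// compressed one; anything else is a real failure. The "kind" labels are
// hypothetical names for the caller's own framing.
func exampleHandleCompressErrors(in []byte, s *Scratch) (payload []byte, kind string, err error) {
	out, _, err := Compress1X(in, s)
	switch err {
	case nil:
		return out, "compressed", nil
	case ErrIncompressible:
		// Store the input unchanged; Huffman coding would not have helped.
		return in, "raw", nil
	case ErrUseRLE:
		// The input is a single byte repeated; store one byte plus the length.
		return in[:1], "rle", nil
	default:
		// For example ErrTooBig when len(in) > BlockSizeMax.
		return nil, "", err
	}
}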

// ReusePolicy controls whether a Huffman table from a previous block may be re-used.
type ReusePolicy uint8

const (
	// ReusePolicyAllow will allow reuse if it produces smaller output.
	ReusePolicyAllow ReusePolicy = iota

	// ReusePolicyPrefer will re-use aggressively if possible.
	// This will not check if a new table will produce smaller output,
	// except if the current table is impossible to use or
	// the compressed output is bigger than the input.
	ReusePolicyPrefer

	// ReusePolicyNone will disable re-use of tables.
	// This is slightly faster than ReusePolicyAllow but may produce larger output.
	ReusePolicyNone

	// ReusePolicyMust requires that the previous table is re-used and that it produces smaller output.
	ReusePolicyMust
)

// Scratch provides temporary storage and per-block parameters for compression
// and decompression. It can be re-used between blocks to avoid allocations.
type Scratch struct {
	count [maxSymbolValue + 1]uint32

	// Per block parameters.
	// These can be used to override compression parameters of the block.
	// Do not touch, unless you know what you are doing.

	// Out is the output buffer.
	// If the scratch is re-used before the caller is done processing the output,
	// set this field to nil.
	// Otherwise the output buffer will be re-used for the next compression/decompression step
	// and allocation will be avoided.
	Out []byte

	// OutTable will contain only the table data, if a new table has been generated.
	// It is a slice of the returned data.
	OutTable []byte

	// OutData will contain the compressed data.
	// It is a slice of the returned data.
	OutData []byte

	// MaxDecodedSize will set the maximum allowed output size.
	// This value will automatically be set to BlockSizeMax if not set.
	// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
	MaxDecodedSize int

	br byteReader

	// MaxSymbolValue will override the maximum symbol value of the next block.
	MaxSymbolValue uint8

	// TableLog will attempt to override the tablelog for the next block.
	// Must be <= tableLogMax (11) and >= minTablelog (5).
	TableLog uint8

	// Reuse will specify the reuse policy.
	Reuse ReusePolicy

	// WantLogLess allows specifying a log2 reduction that should at least be achieved,
	// otherwise the block will be returned as incompressible.
	// The reduction must then be at least (input size >> WantLogLess).
	// If WantLogLess == 0 any improvement will do.
	WantLogLess uint8

	symbolLen      uint16 // Length of active part of the symbol table.
	maxCount       int    // count of the most probable symbol
	clearCount     bool   // clear count
	actualTableLog uint8  // Selected tablelog.
	prevTableLog   uint8  // Tablelog for previous table
	prevTable      cTable // Table used for previous compression.
	cTable         cTable // compression table
	dt             dTable // decompression table
	nodes          []nodeElt
	tmpOut         [4][]byte
	fse            *fse.Scratch
	huffWeight     [maxSymbolValue + 1]byte
}
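
// exampleConfigureScratch is an illustrative sketch, not part of the huff0 API,
// showing how the exported per-block fields above might be set before a block
// is compressed. It assumes Compress1X as declared elsewhere in this package;
// the chosen values are arbitrary.
func exampleConfigureScratch(in []byte) ([]byte, error) {
	s := &Scratch{}
	s.Reuse = ReusePolicyAllow // only re-use a previous table when it produces smaller output
	s.WantLogLess = 4          // require a saving of at least len(in)>>4 bytes
	s.TableLog = 8             // cap code lengths for this block at 8 bits
	s.MaxDecodedSize = len(in) // decoders using s will refuse to emit more than this
	out, _, err := Compress1X(in, s)
	return out, err
}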

// TransferCTable will transfer the previously used compression table from src to s.
// The table is copied, so src remains usable on its own.
func (s *Scratch) TransferCTable(src *Scratch) {
	if cap(s.prevTable) < len(src.prevTable) {
		s.prevTable = make(cTable, 0, maxSymbolValue+1)
	}
	s.prevTable = s.prevTable[:len(src.prevTable)]
	copy(s.prevTable, src.prevTable)
	s.prevTableLog = src.prevTableLog
}
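
// exampleTransferTable is an illustrative sketch, not part of the huff0 API,
// showing the intended use of TransferCTable: seed a fresh Scratch with the
// table from a previous one so that ReusePolicyPrefer can skip rebuilding it
// when consecutive blocks have similar statistics. It assumes Compress1X as
// declared elsewhere in this package; reUsed reports whether the transferred
// table was actually used.
func exampleTransferTable(prev *Scratch, nextBlock []byte) (out []byte, reUsed bool, err error) {
	s := &Scratch{Reuse: ReusePolicyPrefer}
	s.TransferCTable(prev)
	return Compress1X(nextBlock, s)
}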

// prepare validates the input and per-block parameters,
// applies defaults and resets the scratch buffers.
func (s *Scratch) prepare(in []byte) (*Scratch, error) {
	if len(in) > BlockSizeMax {
		return nil, ErrTooBig
	}
	if s == nil {
		s = &Scratch{}
	}
	if s.MaxSymbolValue == 0 {
		s.MaxSymbolValue = maxSymbolValue
	}
	if s.TableLog == 0 {
		s.TableLog = tableLogDefault
	}
	if s.TableLog > tableLogMax || s.TableLog < minTablelog {
		return nil, fmt.Errorf("invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax)
	}
	if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax {
		s.MaxDecodedSize = BlockSizeMax
	}
	if s.clearCount && s.maxCount == 0 {
		for i := range s.count {
			s.count[i] = 0
		}
		s.clearCount = false
	}
	if cap(s.Out) == 0 {
		s.Out = make([]byte, 0, len(in))
	}
	s.Out = s.Out[:0]

	s.OutTable = nil
	s.OutData = nil
	if cap(s.nodes) < huffNodesLen+1 {
		s.nodes = make([]nodeElt, 0, huffNodesLen+1)
	}
	s.nodes = s.nodes[:0]
	if s.fse == nil {
		s.fse = &fse.Scratch{}
	}
	s.br.init(in)

	return s, nil
}

// cTable is a Huffman compression table, indexed by symbol value.
type cTable []cTableEntry

// write appends the table description for c to s.Out:
// either FSE-compressed weights, or the weights packed directly as 4 bits each.
func (c cTable) write(s *Scratch) error {
	var (
		// precomputed conversion table
		bitsToWeight [tableLogMax + 1]byte
		huffLog      = s.actualTableLog
		// last weight is not saved.
		maxSymbolValue = uint8(s.symbolLen - 1)
		huffWeight     = s.huffWeight[:256]
	)
	const (
		maxFSETableLog = 6
	)
	// convert to weight
	bitsToWeight[0] = 0
	for n := uint8(1); n < huffLog+1; n++ {
		bitsToWeight[n] = huffLog + 1 - n
	}

	// Acquire histogram for FSE.
	hist := s.fse.Histogram()
	hist = hist[:256]
	for i := range hist[:16] {
		hist[i] = 0
	}
	for n := uint8(0); n < maxSymbolValue; n++ {
		v := bitsToWeight[c[n].nBits] & 15
		huffWeight[n] = v
		hist[v]++
	}

	// FSE compress if feasible.
	if maxSymbolValue >= 2 {
		huffMaxCnt := uint32(0)
		huffMax := uint8(0)
		for i, v := range hist[:16] {
			if v == 0 {
				continue
			}
			huffMax = byte(i)
			if v > huffMaxCnt {
				huffMaxCnt = v
			}
		}
		s.fse.HistogramFinished(huffMax, int(huffMaxCnt))
		s.fse.TableLog = maxFSETableLog
		b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse)
		if err == nil && len(b) < int(s.symbolLen>>1) {
			s.Out = append(s.Out, uint8(len(b)))
			s.Out = append(s.Out, b...)
			return nil
		}
		// Unable to compress (RLE/incompressible)
	}
	// write raw values as 4 bits each (max: 15)
	if maxSymbolValue > (256 - 128) {
		// should not happen: likely means source cannot be compressed
		return ErrIncompressible
	}
	op := s.Out
	// special case, pack weights 4 bits/weight.
	op = append(op, 128|(maxSymbolValue-1))
	// be sure it doesn't cause msan issue in final combination
	huffWeight[maxSymbolValue] = 0
	for n := uint16(0); n < uint16(maxSymbolValue); n += 2 {
		op = append(op, (huffWeight[n]<<4)|huffWeight[n+1])
	}
	s.Out = op
	return nil
}
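
// The header byte written above mirrors the zstd Huffman tree description:
// values below 128 give the byte length of an FSE-compressed weight stream,
// while 128 or above encodes the number of directly stored 4-bit weights (the
// last weight is always implicit). exampleParseTableHeader is an illustrative
// sketch of reading that byte back, not the package's decoder; ReadTable in
// decompress.go does the real work, including decoding the weights themselves.
func exampleParseTableHeader(src []byte) (fseCompressed bool, payload []byte, numWeights int, err error) {
	if len(src) == 0 {
		return false, nil, 0, errors.New("table description too small")
	}
	hdr := int(src[0])
	if hdr < 128 {
		// Weights are FSE-compressed; hdr is the byte length of that stream.
		if len(src) < 1+hdr {
			return false, nil, 0, errors.New("table description truncated")
		}
		return true, src[1 : 1+hdr], 0, nil
	}
	// Weights are stored directly, two per byte.
	numWeights = hdr - 127
	packed := (numWeights + 1) / 2
	if len(src) < 1+packed {
		return false, nil, 0, errors.New("table description truncated")
	}
	return false, src[1 : 1+packed], numWeights, nil
}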

// estimateSize returns the estimated compressed size in bytes of the input
// represented by the supplied histogram, when coded with this table.
func (c cTable) estimateSize(hist []uint32) int {
	nbBits := uint32(7)
	for i, v := range c[:len(hist)] {
		nbBits += uint32(v.nBits) * hist[i]
	}
	return int(nbBits >> 3)
}
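
// A small worked example of estimateSize, illustrative only: two symbols coded
// in 1 and 2 bits with counts 6 and 2 cost 6*1 + 2*2 = 10 bits, and the
// initial 7 makes the shift round up to whole bytes: (10+7)>>3 = 2.
func exampleEstimateSize() int {
	c := cTable{{nBits: 1}, {nBits: 2}}
	return c.estimateSize([]uint32{6, 2}) // == 2 bytes
}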

// minSize returns the minimum possible compressed size considering the Shannon limit.
func (s *Scratch) minSize(total int) int {
	nbBits := float64(7)
	fTotal := float64(total)
	for _, v := range s.count[:s.symbolLen] {
		n := float64(v)
		if n > 0 {
			nbBits += math.Log2(fTotal/n) * n
		}
	}
	return int(nbBits) >> 3
}
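
// A small worked example of minSize, illustrative only: with counts 96 and 32
// out of 128 bytes, the Shannon limit is 96*log2(128/96) + 32*log2(128/32)
// ≈ 103.8 bits, which with the 7-bit rounding allowance reports as 13 bytes.
func exampleMinSize() int {
	var s Scratch
	s.count[0], s.count[1] = 96, 32
	s.symbolLen = 2
	return s.minSize(128) // == 13 bytes
}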

// highBit32 returns the index of the highest set bit in val.
// val must not be zero.
func highBit32(val uint32) (n uint32) {
	return uint32(bits.Len32(val) - 1)
}