// Package huff0 provides fast huffman encoding as used in zstd.
//
// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details.
package huff0

import (
	"errors"
	"fmt"
	"math"
	"math/bits"

	"github.com/klauspost/compress/fse"
)
14
const (
	// maxSymbolValue is the highest symbol value a block can contain.
	maxSymbolValue = 255

	// zstandard limits tablelog to 11, see:
	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description
	tableLogMax     = 11
	tableLogDefault = 11
	minTablelog     = 5
	huffNodesLen    = 512

	// BlockSizeMax is maximum input size for a single block uncompressed.
	BlockSizeMax = 1<<18 - 1
)
28
var (
	// ErrIncompressible is returned when input is judged to be too hard to compress.
	ErrIncompressible = errors.New("input is not compressible")

	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
	ErrUseRLE = errors.New("input is single value repeated")

	// ErrTooBig is returned if input is too large for a single block.
	ErrTooBig = errors.New("input too big")

	// ErrMaxDecodedSizeExceeded is returned when decoded output would exceed
	// the configured maximum output size.
	ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
)
42
// ReusePolicy controls how compression tables are re-used between blocks.
type ReusePolicy uint8

const (
	// ReusePolicyAllow will allow reuse if it produces smaller output.
	ReusePolicyAllow ReusePolicy = iota

	// ReusePolicyPrefer will re-use aggressively if possible.
	// This will not check if a new table will produce smaller output,
	// except if the current table is impossible to use or
	// compressed output is bigger than input.
	ReusePolicyPrefer

	// ReusePolicyNone will disable re-use of tables.
	// This is slightly faster than ReusePolicyAllow but may produce larger output.
	ReusePolicyNone
)
59
60type Scratch struct {
61 count [maxSymbolValue + 1]uint32
62
63 // Per block parameters.
64 // These can be used to override compression parameters of the block.
65 // Do not touch, unless you know what you are doing.
66
67 // Out is output buffer.
68 // If the scratch is re-used before the caller is done processing the output,
69 // set this field to nil.
70 // Otherwise the output buffer will be re-used for next Compression/Decompression step
71 // and allocation will be avoided.
72 Out []byte
73
74 // OutTable will contain the table data only, if a new table has been generated.
75 // Slice of the returned data.
76 OutTable []byte
77
78 // OutData will contain the compressed data.
79 // Slice of the returned data.
80 OutData []byte
81
82 // MaxSymbolValue will override the maximum symbol value of the next block.
83 MaxSymbolValue uint8
84
85 // TableLog will attempt to override the tablelog for the next block.
86 // Must be <= 11.
87 TableLog uint8
88
89 // Reuse will specify the reuse policy
90 Reuse ReusePolicy
91
92 // MaxDecodedSize will set the maximum allowed output size.
93 // This value will automatically be set to BlockSizeMax if not set.
94 // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded.
95 MaxDecodedSize int
96
97 br byteReader
98 symbolLen uint16 // Length of active part of the symbol table.
99 maxCount int // count of the most probable symbol
100 clearCount bool // clear count
101 actualTableLog uint8 // Selected tablelog.
102 prevTable cTable // Table used for previous compression.
103 cTable cTable // compression table
104 dt dTable // decompression table
105 nodes []nodeElt
106 tmpOut [4][]byte
107 fse *fse.Scratch
108 huffWeight [maxSymbolValue + 1]byte
109}
110
111func (s *Scratch) prepare(in []byte) (*Scratch, error) {
112 if len(in) > BlockSizeMax {
113 return nil, ErrTooBig
114 }
115 if s == nil {
116 s = &Scratch{}
117 }
118 if s.MaxSymbolValue == 0 {
119 s.MaxSymbolValue = maxSymbolValue
120 }
121 if s.TableLog == 0 {
122 s.TableLog = tableLogDefault
123 }
124 if s.TableLog > tableLogMax {
125 return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, tableLogMax)
126 }
127 if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax {
128 s.MaxDecodedSize = BlockSizeMax
129 }
130 if s.clearCount && s.maxCount == 0 {
131 for i := range s.count {
132 s.count[i] = 0
133 }
134 s.clearCount = false
135 }
136 if cap(s.Out) == 0 {
137 s.Out = make([]byte, 0, len(in))
138 }
139 s.Out = s.Out[:0]
140
141 s.OutTable = nil
142 s.OutData = nil
143 if cap(s.nodes) < huffNodesLen+1 {
144 s.nodes = make([]nodeElt, 0, huffNodesLen+1)
145 }
146 s.nodes = s.nodes[:0]
147 if s.fse == nil {
148 s.fse = &fse.Scratch{}
149 }
150 s.br.init(in)
151
152 return s, nil
153}
154
155type cTable []cTableEntry
156
157func (c cTable) write(s *Scratch) error {
158 var (
159 // precomputed conversion table
160 bitsToWeight [tableLogMax + 1]byte
161 huffLog = s.actualTableLog
162 // last weight is not saved.
163 maxSymbolValue = uint8(s.symbolLen - 1)
164 huffWeight = s.huffWeight[:256]
165 )
166 const (
167 maxFSETableLog = 6
168 )
169 // convert to weight
170 bitsToWeight[0] = 0
171 for n := uint8(1); n < huffLog+1; n++ {
172 bitsToWeight[n] = huffLog + 1 - n
173 }
174
175 // Acquire histogram for FSE.
176 hist := s.fse.Histogram()
177 hist = hist[:256]
178 for i := range hist[:16] {
179 hist[i] = 0
180 }
181 for n := uint8(0); n < maxSymbolValue; n++ {
182 v := bitsToWeight[c[n].nBits] & 15
183 huffWeight[n] = v
184 hist[v]++
185 }
186
187 // FSE compress if feasible.
188 if maxSymbolValue >= 2 {
189 huffMaxCnt := uint32(0)
190 huffMax := uint8(0)
191 for i, v := range hist[:16] {
192 if v == 0 {
193 continue
194 }
195 huffMax = byte(i)
196 if v > huffMaxCnt {
197 huffMaxCnt = v
198 }
199 }
200 s.fse.HistogramFinished(huffMax, int(huffMaxCnt))
201 s.fse.TableLog = maxFSETableLog
202 b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse)
203 if err == nil && len(b) < int(s.symbolLen>>1) {
204 s.Out = append(s.Out, uint8(len(b)))
205 s.Out = append(s.Out, b...)
206 return nil
207 }
208 // Unable to compress (RLE/uncompressible)
209 }
210 // write raw values as 4-bits (max : 15)
211 if maxSymbolValue > (256 - 128) {
212 // should not happen : likely means source cannot be compressed
213 return ErrIncompressible
214 }
215 op := s.Out
216 // special case, pack weights 4 bits/weight.
217 op = append(op, 128|(maxSymbolValue-1))
218 // be sure it doesn't cause msan issue in final combination
219 huffWeight[maxSymbolValue] = 0
220 for n := uint16(0); n < uint16(maxSymbolValue); n += 2 {
221 op = append(op, (huffWeight[n]<<4)|huffWeight[n+1])
222 }
223 s.Out = op
224 return nil
225}
226
227// estimateSize returns the estimated size in bytes of the input represented in the
228// histogram supplied.
229func (c cTable) estimateSize(hist []uint32) int {
230 nbBits := uint32(7)
231 for i, v := range c[:len(hist)] {
232 nbBits += uint32(v.nBits) * hist[i]
233 }
234 return int(nbBits >> 3)
235}
236
237// minSize returns the minimum possible size considering the shannon limit.
238func (s *Scratch) minSize(total int) int {
239 nbBits := float64(7)
240 fTotal := float64(total)
241 for _, v := range s.count[:s.symbolLen] {
242 n := float64(v)
243 if n > 0 {
244 nbBits += math.Log2(fTotal/n) * n
245 }
246 }
247 return int(nbBits) >> 3
248}
249
// highBit32 returns the position of the highest set bit in val.
// For val == 0 the subtraction underflows and the result wraps to MaxUint32.
func highBit32(val uint32) (n uint32) {
	return uint32(31 - bits.LeadingZeros32(val))
}