blob: 4d14542facf383c98798c29e239d603bca3aee75 [file] [log] [blame]
khenaidoo7d3c5582021-08-11 18:09:44 -04001package huff0
2
3import (
4 "fmt"
Akash Reddy Kankanalac28f0e22025-06-16 11:00:55 +05305 "math"
khenaidoo7d3c5582021-08-11 18:09:44 -04006 "runtime"
7 "sync"
8)
9
10// Compress1X will compress the input.
11// The output can be decoded using Decompress1X.
12// Supply a Scratch object. The scratch object contains state about re-use,
13// So when sharing across independent encodes, be sure to set the re-use policy.
14func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
15 s, err = s.prepare(in)
16 if err != nil {
17 return nil, false, err
18 }
19 return compress(in, s, s.compress1X)
20}
21
// Compress4X will compress the input. The input is split into 4 independent blocks
// and compressed similar to Compress1X.
// The output can be decoded using Decompress4X.
// Supply a Scratch object. The scratch object contains state about re-use,
// So when sharing across independent encodes, be sure to set the re-use policy.
func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
	s, err = s.prepare(in)
	if err != nil {
		return nil, false, err
	}
	// Deliberately disabled: the parallel path (compress4Xp) is kept for
	// reference but gated off because it is only marginally faster.
	if false {
		// TODO: compress4Xp only slightly faster.
		const parallelThreshold = 8 << 10
		if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 {
			return compress(in, s, s.compress4X)
		}
		return compress(in, s, s.compress4Xp)
	}
	return compress(in, s, s.compress4X)
}
42
// compress is the shared driver behind Compress1X and Compress4X.
// It obtains a histogram, applies the configured re-use policy (possibly
// encoding with the previous table), otherwise builds and serializes a new
// table, then invokes compressor to produce the entropy-coded payload.
// Returns ErrIncompressible or ErrUseRLE when entropy coding will not pay off.
func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) {
	// Nuke previous table if we cannot reuse anyway.
	if s.Reuse == ReusePolicyNone {
		s.prevTable = s.prevTable[:0]
	}

	// Create histogram, if none was provided.
	maxCount := s.maxCount
	var canReuse = false
	if maxCount == 0 {
		maxCount, canReuse = s.countSimple(in)
	} else {
		canReuse = s.canUseTable(s.prevTable)
	}

	// We want the output size to be less than this:
	wantSize := len(in)
	if s.WantLogLess > 0 {
		wantSize -= wantSize >> s.WantLogLess
	}

	// Reset for next run.
	s.clearCount = true
	s.maxCount = 0
	if maxCount >= len(in) {
		if maxCount > len(in) {
			// A count larger than the input means the caller supplied a
			// histogram that does not match the input.
			return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in))
		}
		if len(in) == 1 {
			return nil, false, ErrIncompressible
		}
		// One symbol, use RLE
		return nil, false, ErrUseRLE
	}
	if maxCount == 1 || maxCount < (len(in)>>7) {
		// Each symbol present maximum once or too well distributed.
		return nil, false, ErrIncompressible
	}
	if s.Reuse == ReusePolicyMust && !canReuse {
		// We must reuse, but we can't.
		return nil, false, ErrIncompressible
	}
	if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse {
		// Try the previous table first: temporarily swap it in, encode,
		// then restore the current table regardless of outcome.
		keepTable := s.cTable
		keepTL := s.actualTableLog
		s.cTable = s.prevTable
		s.actualTableLog = s.prevTableLog
		s.Out, err = compressor(in)
		s.cTable = keepTable
		s.actualTableLog = keepTL
		if err == nil && len(s.Out) < wantSize {
			s.OutData = s.Out
			return s.Out, true, nil
		}
		if s.Reuse == ReusePolicyMust {
			return nil, false, ErrIncompressible
		}
		// Do not attempt to re-use later.
		s.prevTable = s.prevTable[:0]
	}

	// Calculate new table.
	err = s.buildCTable()
	if err != nil {
		return nil, false, err
	}

	// Debug-only sanity check, intentionally disabled.
	if false && !s.canUseTable(s.cTable) {
		panic("invalid table generated")
	}

	if s.Reuse == ReusePolicyAllow && canReuse {
		// Re-use the old table when its estimated payload is no worse than
		// the new table's payload plus hSize (current output length —
		// NOTE(review): appears to stand in for the table header cost; confirm).
		hSize := len(s.Out)
		oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen])
		newSize := s.cTable.estimateSize(s.count[:s.symbolLen])
		if oldSize <= hSize+newSize || hSize+12 >= wantSize {
			// Retain cTable even if we re-use.
			keepTable := s.cTable
			keepTL := s.actualTableLog

			s.cTable = s.prevTable
			s.actualTableLog = s.prevTableLog
			s.Out, err = compressor(in)

			// Restore ctable.
			s.cTable = keepTable
			s.actualTableLog = keepTL
			if err != nil {
				return nil, false, err
			}
			if len(s.Out) >= wantSize {
				return nil, false, ErrIncompressible
			}
			s.OutData = s.Out
			return s.Out, true, nil
		}
	}

	// Use new table
	err = s.cTable.write(s)
	if err != nil {
		s.OutTable = nil
		return nil, false, err
	}
	s.OutTable = s.Out

	// Compress using new table
	s.Out, err = compressor(in)
	if err != nil {
		s.OutTable = nil
		return nil, false, err
	}
	if len(s.Out) >= wantSize {
		s.OutTable = nil
		return nil, false, ErrIncompressible
	}
	// Move current table into previous.
	s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0]
	// OutData is the payload portion following the serialized table.
	s.OutData = s.Out[len(s.OutTable):]
	return s.Out, false, nil
}
164
// EstimateSizes will estimate the data sizes.
// It returns the estimated serialized-table size, the estimated payload size
// using a freshly built table, and the estimated payload size when re-using
// the previous table (-1 when re-use is not possible). No compressed output
// is produced. Returns ErrIncompressible/ErrUseRLE like compress does.
func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) {
	s, err = s.prepare(in)
	if err != nil {
		return 0, 0, 0, err
	}

	// Create histogram, if none was provided.
	tableSz, dataSz, reuseSz = -1, -1, -1
	maxCount := s.maxCount
	var canReuse = false
	if maxCount == 0 {
		maxCount, canReuse = s.countSimple(in)
	} else {
		canReuse = s.canUseTable(s.prevTable)
	}

	// We want the output size to be less than this:
	wantSize := len(in)
	if s.WantLogLess > 0 {
		wantSize -= wantSize >> s.WantLogLess
	}

	// Reset for next run.
	s.clearCount = true
	s.maxCount = 0
	if maxCount >= len(in) {
		if maxCount > len(in) {
			// Histogram does not match the input.
			return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in))
		}
		if len(in) == 1 {
			return 0, 0, 0, ErrIncompressible
		}
		// One symbol, use RLE
		return 0, 0, 0, ErrUseRLE
	}
	if maxCount == 1 || maxCount < (len(in)>>7) {
		// Each symbol present maximum once or too well distributed.
		return 0, 0, 0, ErrIncompressible
	}

	// Calculate new table.
	err = s.buildCTable()
	if err != nil {
		return 0, 0, 0, err
	}

	// Debug-only sanity check, intentionally disabled.
	if false && !s.canUseTable(s.cTable) {
		panic("invalid table generated")
	}

	tableSz, err = s.cTable.estTableSize(s)
	if err != nil {
		return 0, 0, 0, err
	}
	if canReuse {
		reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen])
	}
	dataSz = s.cTable.estimateSize(s.count[:s.symbolLen])

	// Estimates only; nothing was written, nothing to restore.
	return tableSz, dataSz, reuseSz, nil
}
228
// compress1X appends a single-stream Huffman encoding of src to s.Out.
func (s *Scratch) compress1X(src []byte) ([]byte, error) {
	return s.compress1xDo(s.Out, src)
}
232
// compress1xDo encodes src as one Huffman bitstream, appending to dst.
// Symbols are emitted back-to-front (the trailing 1-3 odd bytes first, then
// 4-byte groups from the end of src toward the start) — presumably so the
// decoder can consume them in forward order; confirm against Decompress1X.
func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
	var bw = bitWriter{out: dst}

	// N is length divisible by 4.
	n := len(src)
	n -= n & 3
	cTable := s.cTable[:256]

	// Encode last bytes.
	for i := len(src) & 3; i > 0; i-- {
		bw.encSymbol(cTable, src[n+i-1])
	}
	n -= 4
	if s.actualTableLog <= 8 {
		// With codes of <=8 bits, four symbols fit in 32 bits, so one
		// flush per group suffices.
		for ; n >= 0; n -= 4 {
			tmp := src[n : n+4]
			// tmp should be len 4
			bw.flush32()
			bw.encTwoSymbols(cTable, tmp[3], tmp[2])
			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
		}
	} else {
		// Wider codes: flush between each symbol pair as well.
		for ; n >= 0; n -= 4 {
			tmp := src[n : n+4]
			// tmp should be len 4
			bw.flush32()
			bw.encTwoSymbols(cTable, tmp[3], tmp[2])
			bw.flush32()
			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
		}
	}
	err := bw.close()
	return bw.out, err
}
267
268var sixZeros [6]byte
269
// compress4X splits src into 4 roughly equal segments and compresses each as
// an independent 1X stream, preceded by a 6-byte jump table holding the
// little-endian lengths of the first three streams (the last is implied).
// Inputs shorter than 12 bytes are rejected as incompressible.
func (s *Scratch) compress4X(src []byte) ([]byte, error) {
	if len(src) < 12 {
		return nil, ErrIncompressible
	}
	// Round up so all four segments cover the input.
	segmentSize := (len(src) + 3) / 4

	// Add placeholder for output length
	offsetIdx := len(s.Out)
	s.Out = append(s.Out, sixZeros[:]...)

	for i := 0; i < 4; i++ {
		toDo := src
		if len(toDo) > segmentSize {
			toDo = toDo[:segmentSize]
		}
		src = src[len(toDo):]

		var err error
		idx := len(s.Out)
		s.Out, err = s.compress1xDo(s.Out, toDo)
		if err != nil {
			return nil, err
		}
		if len(s.Out)-idx > math.MaxUint16 {
			// We cannot store the size in the jump table
			return nil, ErrIncompressible
		}
		// Write compressed length as little endian before block.
		if i < 3 {
			// Last length is not written.
			length := len(s.Out) - idx
			s.Out[i*2+offsetIdx] = byte(length)
			s.Out[i*2+offsetIdx+1] = byte(length >> 8)
		}
	}

	return s.Out, nil
}
308
// compress4Xp will compress 4 streams using separate goroutines.
// Output layout matches compress4X: a 6-byte jump table of three
// little-endian uint16 lengths, followed by the four streams. Each goroutine
// writes into its own s.tmpOut[i] scratch buffer; results are stitched
// together in order afterwards.
func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
	if len(src) < 12 {
		return nil, ErrIncompressible
	}
	// Add placeholder for output length
	s.Out = s.Out[:6]

	segmentSize := (len(src) + 3) / 4
	var wg sync.WaitGroup
	var errs [4]error
	wg.Add(4)
	for i := 0; i < 4; i++ {
		toDo := src
		if len(toDo) > segmentSize {
			toDo = toDo[:segmentSize]
		}
		src = src[len(toDo):]

		// Separate goroutine for each block.
		// toDo is declared per iteration, so capturing it is safe; i is
		// passed explicitly to avoid sharing the loop variable.
		go func(i int) {
			s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
			wg.Done()
		}(i)
	}
	wg.Wait()
	for i := 0; i < 4; i++ {
		if errs[i] != nil {
			return nil, errs[i]
		}
		o := s.tmpOut[i]
		if len(o) > math.MaxUint16 {
			// We cannot store the size in the jump table
			return nil, ErrIncompressible
		}
		// Write compressed length as little endian before block.
		if i < 3 {
			// Last length is not written.
			s.Out[i*2] = byte(len(o))
			s.Out[i*2+1] = byte(len(o) >> 8)
		}

		// Write output.
		s.Out = append(s.Out, o...)
	}
	return s.Out, nil
}
356
357// countSimple will create a simple histogram in s.count.
358// Returns the biggest count.
359// Does not update s.clearCount.
360func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
361 reuse = true
362 for _, v := range in {
363 s.count[v]++
364 }
365 m := uint32(0)
366 if len(s.prevTable) > 0 {
367 for i, v := range s.count[:] {
368 if v > m {
369 m = v
370 }
371 if v > 0 {
372 s.symbolLen = uint16(i) + 1
373 if i >= len(s.prevTable) {
374 reuse = false
375 } else {
376 if s.prevTable[i].nBits == 0 {
377 reuse = false
378 }
379 }
380 }
381 }
382 return int(m), reuse
383 }
384 for i, v := range s.count[:] {
385 if v > m {
386 m = v
387 }
388 if v > 0 {
389 s.symbolLen = uint16(i) + 1
390 }
391 }
392 return int(m), false
393}
394
395func (s *Scratch) canUseTable(c cTable) bool {
396 if len(c) < int(s.symbolLen) {
397 return false
398 }
399 for i, v := range s.count[:s.symbolLen] {
400 if v != 0 && c[i].nBits == 0 {
401 return false
402 }
403 }
404 return true
405}
406
Akash Reddy Kankanalac28f0e22025-06-16 11:00:55 +0530407//lint:ignore U1000 used for debugging
khenaidoo7d3c5582021-08-11 18:09:44 -0400408func (s *Scratch) validateTable(c cTable) bool {
409 if len(c) < int(s.symbolLen) {
410 return false
411 }
412 for i, v := range s.count[:s.symbolLen] {
413 if v != 0 {
414 if c[i].nBits == 0 {
415 return false
416 }
417 if c[i].nBits > s.actualTableLog {
418 return false
419 }
420 }
421 }
422 return true
423}
424
425// minTableLog provides the minimum logSize to safely represent a distribution.
426func (s *Scratch) minTableLog() uint8 {
427 minBitsSrc := highBit32(uint32(s.br.remain())) + 1
428 minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2
429 if minBitsSrc < minBitsSymbols {
430 return uint8(minBitsSrc)
431 }
432 return uint8(minBitsSymbols)
433}
434
435// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
436func (s *Scratch) optimalTableLog() {
437 tableLog := s.TableLog
438 minBits := s.minTableLog()
439 maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1
440 if maxBitsSrc < tableLog {
441 // Accuracy can be reduced
442 tableLog = maxBitsSrc
443 }
444 if minBits > tableLog {
445 tableLog = minBits
446 }
447 // Need a minimum to safely represent all symbol values
448 if tableLog < minTablelog {
449 tableLog = minTablelog
450 }
451 if tableLog > tableLogMax {
452 tableLog = tableLogMax
453 }
454 s.actualTableLog = tableLog
455}
456
// cTableEntry holds one symbol's code in the compression table.
type cTableEntry struct {
	val   uint16 // code value assigned within the symbol's rank
	nBits uint8  // code length in bits; 0 means the symbol has no code
	// We have 8 bits extra
}

// huffNodesMask wraps indexes into the nodes scratch space.
const huffNodesMask = huffNodesLen - 1
464
// buildCTable builds a canonical Huffman table from the histogram in s.count:
// it sorts symbols, constructs the tree, limits its height via setMaxHeight,
// and fills s.cTable with per-symbol code lengths and values. The final table
// depth ends up in s.actualTableLog.
func (s *Scratch) buildCTable() error {
	s.optimalTableLog()
	s.huffSort()
	if cap(s.cTable) < maxSymbolValue+1 {
		s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1)
	} else {
		// Reuse the existing backing array, cleared.
		s.cTable = s.cTable[:s.symbolLen]
		for i := range s.cTable {
			s.cTable[i] = cTableEntry{}
		}
	}

	// Leaves occupy nodes [0, symbolLen); internal nodes start after them.
	var startNode = int16(s.symbolLen)
	nonNullRank := s.symbolLen - 1

	nodeNb := startNode
	huffNode := s.nodes[1 : huffNodesLen+1]

	// This overlays the slice above, but allows "-1" index lookups.
	// Different from reference implementation.
	huffNode0 := s.nodes[0 : huffNodesLen+1]

	// Skip trailing zero-count entries left by huffSort.
	for huffNode[nonNullRank].count == 0 {
		nonNullRank--
	}

	// Merge the two smallest leaves into the first internal node.
	lowS := int16(nonNullRank)
	nodeRoot := nodeNb + lowS - 1
	lowN := nodeNb
	huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count
	huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb)
	nodeNb++
	lowS -= 2
	for n := nodeNb; n <= nodeRoot; n++ {
		huffNode[n].count = 1 << 30
	}
	// fake entry, strong barrier
	huffNode0[0].count = 1 << 31

	// create parents
	for nodeNb <= nodeRoot {
		// Pick the two smallest remaining nodes (leaf or internal).
		var n1, n2 int16
		if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
			n1 = lowS
			lowS--
		} else {
			n1 = lowN
			lowN++
		}
		if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
			n2 = lowS
			lowS--
		} else {
			n2 = lowN
			lowN++
		}

		huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count
		huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb)
		nodeNb++
	}

	// distribute weights (unlimited tree height)
	huffNode[nodeRoot].nbBits = 0
	for n := nodeRoot - 1; n >= startNode; n-- {
		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
	}
	for n := uint16(0); n <= nonNullRank; n++ {
		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
	}
	// Enforce the maximum code length, possibly lowering actualTableLog.
	s.actualTableLog = s.setMaxHeight(int(nonNullRank))
	maxNbBits := s.actualTableLog

	// fill result into tree (val, nbBits)
	if maxNbBits > tableLogMax {
		return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax)
	}
	var nbPerRank [tableLogMax + 1]uint16
	var valPerRank [16]uint16
	for _, v := range huffNode[:nonNullRank+1] {
		nbPerRank[v.nbBits]++
	}
	// determine starting value per rank
	{
		min := uint16(0)
		for n := maxNbBits; n > 0; n-- {
			// get starting value within each rank
			valPerRank[n] = min
			min += nbPerRank[n]
			min >>= 1
		}
	}

	// push nbBits per symbol, symbol order
	for _, v := range huffNode[:nonNullRank+1] {
		s.cTable[v.symbol].nBits = v.nbBits
	}

	// assign value within rank, symbol order
	t := s.cTable[:s.symbolLen]
	for n, val := range t {
		nbits := val.nBits & 15
		v := valPerRank[nbits]
		t[n].val = v
		valPerRank[nbits] = v + 1
	}

	return nil
}
574
// huffSort will sort symbols, decreasing order.
// Symbols are bucketed by the bit length of their count, then insertion-
// sorted within each bucket, writing the result into s.nodes (offset by one
// so index -1 lookups work elsewhere).
func (s *Scratch) huffSort() {
	type rankPos struct {
		base    uint32 // first slot of this bucket (after prefix-summing)
		current uint32 // next free slot within the bucket
	}

	// Clear nodes
	nodes := s.nodes[:huffNodesLen+1]
	s.nodes = nodes
	nodes = nodes[1 : huffNodesLen+1]

	// Sort into buckets based on length of symbol count.
	var rank [32]rankPos
	for _, v := range s.count[:s.symbolLen] {
		r := highBit32(v+1) & 31
		rank[r].base++
	}
	// maxBitLength is log2(BlockSizeMax) + 1
	const maxBitLength = 18 + 1
	// Suffix-sum so higher-count buckets come first.
	for n := maxBitLength; n > 0; n-- {
		rank[n-1].base += rank[n].base
	}
	for n := range rank[:maxBitLength] {
		rank[n].current = rank[n].base
	}
	for n, c := range s.count[:s.symbolLen] {
		r := (highBit32(c+1) + 1) & 31
		pos := rank[r].current
		rank[r].current++
		// Insertion sort within the bucket, keeping larger counts first.
		prev := nodes[(pos-1)&huffNodesMask]
		for pos > rank[r].base && c > prev.count {
			nodes[pos&huffNodesMask] = prev
			pos--
			prev = nodes[(pos-1)&huffNodesMask]
		}
		nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)}
	}
}
614
// setMaxHeight caps the Huffman tree so no code exceeds s.actualTableLog
// bits, then redistributes the resulting "cost" among shorter codes so the
// code remains complete (Kraft sum preserved). lastNonNull is the index of
// the least-frequent used symbol in s.nodes. Returns the final max bit count.
func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
	maxNbBits := s.actualTableLog
	huffNode := s.nodes[1 : huffNodesLen+1]
	//huffNode = huffNode[: huffNodesLen]

	largestBits := huffNode[lastNonNull].nbBits

	// early exit : no elt > maxNbBits
	if largestBits <= maxNbBits {
		return largestBits
	}
	totalCost := int(0)
	baseCost := int(1) << (largestBits - maxNbBits)
	n := uint32(lastNonNull)

	// Truncate over-long codes, accumulating the cost of each truncation.
	for huffNode[n].nbBits > maxNbBits {
		totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits))
		huffNode[n].nbBits = maxNbBits
		n--
	}
	// n stops at huffNode[n].nbBits <= maxNbBits

	for huffNode[n].nbBits == maxNbBits {
		n--
	}
	// n end at index of smallest symbol using < maxNbBits

	// renorm totalCost
	totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */

	// repay normalized cost
	{
		const noSymbol = 0xF0F0F0F0
		var rankLast [tableLogMax + 2]uint32

		for i := range rankLast[:] {
			rankLast[i] = noSymbol
		}

		// Get pos of last (smallest) symbol per rank
		{
			currentNbBits := maxNbBits
			for pos := int(n); pos >= 0; pos-- {
				if huffNode[pos].nbBits >= currentNbBits {
					continue
				}
				currentNbBits = huffNode[pos].nbBits // < maxNbBits
				rankLast[maxNbBits-currentNbBits] = uint32(pos)
			}
		}

		// Lengthen selected short codes until the cost is repaid.
		for totalCost > 0 {
			nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1

			for ; nBitsToDecrease > 1; nBitsToDecrease-- {
				highPos := rankLast[nBitsToDecrease]
				lowPos := rankLast[nBitsToDecrease-1]
				if highPos == noSymbol {
					continue
				}
				if lowPos == noSymbol {
					break
				}
				highTotal := huffNode[highPos].count
				lowTotal := 2 * huffNode[lowPos].count
				if highTotal <= lowTotal {
					break
				}
			}
			// only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !)
			// HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary
			// FIXME: try to remove
			for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) {
				nBitsToDecrease++
			}
			totalCost -= 1 << (nBitsToDecrease - 1)
			if rankLast[nBitsToDecrease-1] == noSymbol {
				// this rank is no longer empty
				rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]
			}
			huffNode[rankLast[nBitsToDecrease]].nbBits++
			if rankLast[nBitsToDecrease] == 0 {
				/* special case, reached largest symbol */
				rankLast[nBitsToDecrease] = noSymbol
			} else {
				rankLast[nBitsToDecrease]--
				if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease {
					rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */
				}
			}
		}

		for totalCost < 0 { /* Sometimes, cost correction overshoot */
			if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
				for huffNode[n].nbBits == maxNbBits {
					n--
				}
				huffNode[n+1].nbBits--
				rankLast[1] = n + 1
				totalCost++
				continue
			}
			huffNode[rankLast[1]+1].nbBits--
			rankLast[1]++
			totalCost++
		}
	}
	return maxNbBits
}
724
// nodeElt is a single node in the Huffman construction tree.
type nodeElt struct {
	count  uint32 // symbol frequency; combined frequency for internal nodes
	parent uint16 // index of the parent node within the node slice
	symbol byte   // symbol value (leaves only)
	nbBits uint8  // assigned code length in bits
}