/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */


/******************************************
* Includes
******************************************/
#include <stddef.h> /* size_t, ptrdiff_t */
#include <string.h> /* memcpy */

#include "zstd_v04.h"
#include "error_private.h"


/* ******************************************************************
*  mem.h
*******************************************************************/
#ifndef MEM_H_MODULE
#define MEM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif


/******************************************
* Compiler-specific
******************************************/
36#if defined(_MSC_VER) /* Visual Studio */
37# include <stdlib.h> /* _byteswap_ulong */
38# include <intrin.h> /* _byteswap_* */
39#endif
40#if defined(__GNUC__)
41# define MEM_STATIC static __attribute__((unused))
42#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
43# define MEM_STATIC static inline
44#elif defined(_MSC_VER)
45# define MEM_STATIC static __inline
46#else
47# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
48#endif
49
50
51/****************************************************************
52* Basic Types
53*****************************************************************/
54#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
55# include <stdint.h>
56 typedef uint8_t BYTE;
57 typedef uint16_t U16;
58 typedef int16_t S16;
59 typedef uint32_t U32;
60 typedef int32_t S32;
61 typedef uint64_t U64;
62 typedef int64_t S64;
63#else
64 typedef unsigned char BYTE;
65 typedef unsigned short U16;
66 typedef signed short S16;
67 typedef unsigned int U32;
68 typedef signed int S32;
69 typedef unsigned long long U64;
70 typedef signed long long S64;
71#endif
72
73
74/*-*************************************
75* Debug
76***************************************/
#include "debug.h"
#ifndef assert
# define assert(condition) ((void)0)
#endif
81
82
83/****************************************************************
84* Memory I/O
85*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method is portable but violates the C standard.
 *            It can generate buggy code on targets whose generated assembly depends on alignment.
 *            But in some circumstances, it's the only known way to get the best performance (e.g. GCC + ARMv6).
 * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
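/* Example (added for illustration, not part of the original source) : since the macro can be
 * defined externally, a build can force a specific method, e.g. `cc -DMEM_FORCE_MEMORY_ACCESS=1 ...`,
 * which bypasses the auto-detection below. */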
99#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
100# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
101# define MEM_FORCE_MEMORY_ACCESS 2
102# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
103 (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
104# define MEM_FORCE_MEMORY_ACCESS 1
105# endif
106#endif
107
108MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }
109MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }
110
111MEM_STATIC unsigned MEM_isLittleEndian(void)
112{
113 const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
114 return one.c[0];
115}
116
117#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
118
119/* violates C standard on structure alignment.
120Only use if no other choice to achieve best performance on target platform */
121MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
122MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
123MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
124
125MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
126
127#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
128
/* __packed accesses are safer, but compiler-specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
131typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;
132
133MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
134MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
135MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
136
137MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
138
139#else
140
141/* default method, safe and standard.
142 can sometimes prove slower */
143
144MEM_STATIC U16 MEM_read16(const void* memPtr)
145{
146 U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
147}
148
149MEM_STATIC U32 MEM_read32(const void* memPtr)
150{
151 U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
152}
153
154MEM_STATIC U64 MEM_read64(const void* memPtr)
155{
156 U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
157}
158
159MEM_STATIC void MEM_write16(void* memPtr, U16 value)
160{
161 memcpy(memPtr, &value, sizeof(value));
162}
163
164#endif // MEM_FORCE_MEMORY_ACCESS
165
166
167MEM_STATIC U16 MEM_readLE16(const void* memPtr)
168{
169 if (MEM_isLittleEndian())
170 return MEM_read16(memPtr);
171 else
172 {
173 const BYTE* p = (const BYTE*)memPtr;
174 return (U16)(p[0] + (p[1]<<8));
175 }
176}
177
178MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
179{
180 if (MEM_isLittleEndian())
181 {
182 MEM_write16(memPtr, val);
183 }
184 else
185 {
186 BYTE* p = (BYTE*)memPtr;
187 p[0] = (BYTE)val;
188 p[1] = (BYTE)(val>>8);
189 }
190}
191
MEM_STATIC U32 MEM_readLE24(const void* memPtr)
{
    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
}

MEM_STATIC U32 MEM_readLE32(const void* memPtr)
198{
199 if (MEM_isLittleEndian())
200 return MEM_read32(memPtr);
201 else
202 {
203 const BYTE* p = (const BYTE*)memPtr;
204 return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
205 }
206}
207
208
209MEM_STATIC U64 MEM_readLE64(const void* memPtr)
210{
211 if (MEM_isLittleEndian())
212 return MEM_read64(memPtr);
213 else
214 {
215 const BYTE* p = (const BYTE*)memPtr;
216 return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
217 + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
218 }
219}
220
221
222MEM_STATIC size_t MEM_readLEST(const void* memPtr)
223{
224 if (MEM_32bits())
225 return (size_t)MEM_readLE32(memPtr);
226 else
227 return (size_t)MEM_readLE64(memPtr);
228}
229
230
231#if defined (__cplusplus)
232}
233#endif
234
235#endif /* MEM_H_MODULE */
236
/*
    zstd - standard compression library
    Header File for static linking only
*/
#ifndef ZSTD_STATIC_H
#define ZSTD_STATIC_H


/* *************************************
*  Types
***************************************/
#define ZSTD_WINDOWLOG_ABSOLUTEMIN 11

/** from faster to stronger */
typedef enum { ZSTD_fast, ZSTD_greedy, ZSTD_lazy, ZSTD_lazy2, ZSTD_btlazy2 } ZSTD_strategy;
252
typedef struct
{
    U64 srcSize;       /* optional : tells how many bytes are present in the frame. Use 0 if not known. */
    U32 windowLog;     /* largest match distance : larger == more compression, more memory needed during decompression */
    U32 contentLog;    /* full search segment : larger == more compression, slower, more memory (useless for fast) */
    U32 hashLog;       /* dispatch table : larger == more memory, faster */
    U32 searchLog;     /* nb of searches : larger == more compression, slower */
    U32 searchLength;  /* size of matches : larger == faster decompression, sometimes less compression */
    ZSTD_strategy strategy;
} ZSTD_parameters;
263
264typedef ZSTDv04_Dctx ZSTD_DCtx;
265
266/* *************************************
267* Advanced functions
268***************************************/
/** ZSTD_decompress_usingDict
*   Same as ZSTD_decompressDCtx(), but using dictionary content as a prefix.
*   Note : dict can be NULL, in which case it's equivalent to ZSTD_decompressDCtx() */
static size_t ZSTD_decompress_usingDict(ZSTD_DCtx* ctx,
                                        void* dst, size_t maxDstSize,
                                        const void* src, size_t srcSize,
                                        const void* dict, size_t dictSize);
276
277
278/* **************************************
279* Streaming functions (direct mode)
280****************************************/
281static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx);
282static size_t ZSTD_getFrameParams(ZSTD_parameters* params, const void* src, size_t srcSize);
283static void ZSTD_decompress_insertDictionary(ZSTD_DCtx* ctx, const void* src, size_t srcSize);
284
285static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
286static size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
287
/**
  Streaming decompression, bufferless mode

  A ZSTD_DCtx object is required to track streaming operations.
  Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
  A ZSTD_DCtx object can be re-used multiple times. Use ZSTD_resetDCtx() to return to a fresh state.

  The first operation is to retrieve the frame parameters, using ZSTD_getFrameParams().
  This function doesn't consume its input. It needs enough input data to properly decode the frame header.
  The objective is to retrieve params->windowLog, to know the minimum amount of memory required during decoding.
  Result : 0 when successful; it means the ZSTD_parameters structure has been filled.
           >0 : not enough data in src. The value is the expected size needed to successfully decode the header.
           an errorCode, which can be tested using ZSTD_isError() (for example, if src is not a ZSTD header)

  Then, you can optionally insert a dictionary.
  This operation must mimic the compressor's behavior, otherwise decompression will fail or be corrupted.

  Then it's possible to start decompression.
  Use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternately.
  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
  ZSTD_decompressContinue() requires this exact amount of bytes, or it will fail.
  ZSTD_decompressContinue() needs previous data blocks during decompression, up to (1 << windowLog) bytes.
  They should preferably be located contiguously, prior to the current block. Alternatively, a ring buffer is also possible.

  @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst'.
  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.

  A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
  The context can then be reset to start a new decompression.
*/
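/* Illustrative sketch (added for clarity, not part of the original source) : one way to drive
   the bufferless streaming API documented above. `dctx` comes from ZSTD_createDCtx();
   `readMore()`, `header`, `headerSize`, `srcBuffer`, `dst`, `dstCapacity` and `dstPos` are
   hypothetical caller-provided pieces, and error handling is reduced to early returns.

    ZSTD_parameters params;
    size_t const hErr = ZSTD_getFrameParams(&params, header, headerSize);
    if (hErr != 0) return hErr;   // simplification : >0 really means "provide more header bytes"
    // params.windowLog tells how much history (up to 1 << windowLog bytes) must stay addressable

    ZSTD_resetDCtx(dctx);
    // optional : ZSTD_decompress_insertDictionary(dctx, dictBuffer, dictSize);

    size_t toRead = ZSTD_nextSrcSizeToDecompress(dctx);
    while (toRead != 0) {
        readMore(srcBuffer, toRead);                      // provide exactly 'toRead' bytes
        size_t const decoded = ZSTD_decompressContinue(dctx, dst + dstPos, dstCapacity - dstPos,
                                                       srcBuffer, toRead);
        if (ZSTD_isError(decoded)) return decoded;        // e.g. corrupted data, dst too small
        dstPos += decoded;                                // may be 0 right after a header block
        toRead  = ZSTD_nextSrcSizeToDecompress(dctx);     // 0 => frame fully decoded
    }
*/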
318
319
#endif  /* ZSTD_STATIC_H */
323
324
325/*
326 zstd_internal - common functions to include
327 Header File for include
328*/
329#ifndef ZSTD_CCOMMON_H_MODULE
330#define ZSTD_CCOMMON_H_MODULE
331
/* *************************************
*  Common macros
***************************************/
335#define MIN(a,b) ((a)<(b) ? (a) : (b))
336#define MAX(a,b) ((a)>(b) ? (a) : (b))
337
338
339/* *************************************
340* Common constants
341***************************************/
342#define ZSTD_MAGICNUMBER 0xFD2FB524 /* v0.4 */
343
344#define KB *(1 <<10)
345#define MB *(1 <<20)
346#define GB *(1U<<30)
347
348#define BLOCKSIZE (128 KB) /* define, for static allocation */
349
350static const size_t ZSTD_blockHeaderSize = 3;
351static const size_t ZSTD_frameHeaderSize_min = 5;
352#define ZSTD_frameHeaderSize_max 5 /* define, for static allocation */
353
354#define BIT7 128
355#define BIT6 64
356#define BIT5 32
357#define BIT4 16
358#define BIT1 2
359#define BIT0 1
360
361#define IS_RAW BIT0
362#define IS_RLE BIT1
363
364#define MINMATCH 4
365#define REPCODE_STARTVALUE 4
366
367#define MLbits 7
368#define LLbits 6
369#define Offbits 5
370#define MaxML ((1<<MLbits) - 1)
371#define MaxLL ((1<<LLbits) - 1)
372#define MaxOff ((1<<Offbits)- 1)
373#define MLFSELog 10
374#define LLFSELog 10
375#define OffFSELog 9
376#define MaxSeq MAX(MaxLL, MaxML)
377
378#define MIN_SEQUENCES_SIZE (2 /*seqNb*/ + 2 /*dumps*/ + 3 /*seqTables*/ + 1 /*bitStream*/)
379#define MIN_CBLOCK_SIZE (3 /*litCSize*/ + MIN_SEQUENCES_SIZE)
380
#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)

typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
384
385
386/* ******************************************
387* Shared functions to include for inlining
388********************************************/
389static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
390
391#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
392
393/*! ZSTD_wildcopy : custom version of memcpy(), can copy up to 7-8 bytes too many */
394static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
395{
396 const BYTE* ip = (const BYTE*)src;
397 BYTE* op = (BYTE*)dst;
398 BYTE* const oend = op + length;
399 do
400 COPY8(op, ip)
401 while (op < oend);
402}
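/* Note (added for clarity, not part of the original source) : because ZSTD_wildcopy() rounds the
   copy up to whole 8-byte steps, callers must guarantee a few bytes of slack (up to 8) in the
   destination beyond 'length'. */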
403
404
/* ******************************************************************
407 FSE : Finite State Entropy coder
408 header file
409****************************************************************** */
410#ifndef FSE_H
411#define FSE_H
412
413#if defined (__cplusplus)
414extern "C" {
415#endif
416
417
418/* *****************************************
419* Includes
420******************************************/
421#include <stddef.h> /* size_t, ptrdiff_t */
422
423
424/* *****************************************
425* FSE simple functions
426******************************************/
427static size_t FSE_decompress(void* dst, size_t maxDstSize,
428 const void* cSrc, size_t cSrcSize);
429/*!
430FSE_decompress():
431 Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
432 into already allocated destination buffer 'dst', of size 'maxDstSize'.
433 return : size of regenerated data (<= maxDstSize)
434 or an error code, which can be tested using FSE_isError()
435
436 ** Important ** : FSE_decompress() doesn't decompress non-compressible nor RLE data !!!
437 Why ? : making this distinction requires a header.
438 Header management is intentionally delegated to the user layer, which can better manage special cases.
439*/
440
441
442/* *****************************************
443* Tool functions
444******************************************/
445/* Error Management */
446static unsigned FSE_isError(size_t code); /* tells if a return value is an error code */
447
448
449
450/* *****************************************
451* FSE detailed API
452******************************************/
/*!
FSE_compress() does the following:
1. count symbol occurrences from source[] into table count[]
2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
3. save normalized counters to memory buffer using writeNCount()
4. build encoding table 'CTable' from normalized counters
5. encode the data stream using encoding table 'CTable'

FSE_decompress() does the following:
1. read normalized counters with readNCount()
2. build decoding table 'DTable' from normalized counters
3. decode the data stream using decoding table 'DTable'

The following API allows targeting specific sub-functions for advanced tasks.
For example, it's possible to compress several blocks using the same 'CTable',
or to save and provide the normalized distribution using an external method.
*/
470
471
472/* *** DECOMPRESSION *** */
473
474/*!
475FSE_readNCount():
476 Read compactly saved 'normalizedCounter' from 'rBuffer'.
477 return : size read from 'rBuffer'
478 or an errorCode, which can be tested using FSE_isError()
479 maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
480static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
481
482/*!
483Constructor and Destructor of type FSE_DTable
484 Note that its size depends on 'tableLog' */
485typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
486
487/*!
488FSE_buildDTable():
489 Builds 'dt', which must be already allocated, using FSE_createDTable()
490 return : 0,
491 or an errorCode, which can be tested using FSE_isError() */
492static size_t FSE_buildDTable ( FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
493
494/*!
495FSE_decompress_usingDTable():
496 Decompress compressed source 'cSrc' of size 'cSrcSize' using 'dt'
497 into 'dst' which must be already allocated.
498 return : size of regenerated data (necessarily <= maxDstSize)
499 or an errorCode, which can be tested using FSE_isError() */
500static size_t FSE_decompress_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
501
502/*!
503Tutorial :
504----------
505(Note : these functions only decompress FSE-compressed blocks.
506 If block is uncompressed, use memcpy() instead
507 If block is a single repeated byte, use memset() instead )
508
509The first step is to obtain the normalized frequencies of symbols.
510This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
511'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
512In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
513or size the table to handle worst case situations (typically 256).
514FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
515The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
516Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.
517If there is an error, the function will return an error code, which can be tested using FSE_isError().
518
519The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
520This is performed by the function FSE_buildDTable().
521The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
522If there is an error, the function will return an error code, which can be tested using FSE_isError().
523
524'FSE_DTable' can then be used to decompress 'cSrc', with FSE_decompress_usingDTable().
525'cSrcSize' must be strictly correct, otherwise decompression will fail.
526FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=maxDstSize).
527If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
528*/
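/* Illustrative sketch (added for clarity, not part of the original source) : the three tutorial
   steps above chained together, essentially what FSE_decompress() (defined further down in this
   file, as are DTable_max_t and FSE_MAX_SYMBOL_VALUE) does. `cSrc`/`cSrcSize` and
   `dst`/`dstCapacity` are hypothetical caller-provided buffers.

    short counting[FSE_MAX_SYMBOL_VALUE+1];
    DTable_max_t dt;                                    // statically-sized DTable
    unsigned tableLog;
    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;

    size_t const hSize = FSE_readNCount(counting, &maxSymbolValue, &tableLog, cSrc, cSrcSize);
    if (FSE_isError(hSize)) return hSize;               // malformed counter header

    size_t const bErr = FSE_buildDTable(dt, counting, maxSymbolValue, tableLog);
    if (FSE_isError(bErr)) return bErr;

    return FSE_decompress_usingDTable(dst, dstCapacity, cSrc + hSize, cSrcSize - hSize, dt);
*/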
529
530
531#if defined (__cplusplus)
532}
533#endif
534
535#endif /* FSE_H */
536
537
538/* ******************************************************************
539 bitstream
540 Part of NewGen Entropy library
541 header file (to include)
542 Copyright (C) 2013-2015, Yann Collet.
543
544 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
545
546 Redistribution and use in source and binary forms, with or without
547 modification, are permitted provided that the following conditions are
548 met:
549
550 * Redistributions of source code must retain the above copyright
551 notice, this list of conditions and the following disclaimer.
552 * Redistributions in binary form must reproduce the above
553 copyright notice, this list of conditions and the following disclaimer
554 in the documentation and/or other materials provided with the
555 distribution.
556
557 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
558 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
559 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
560 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
561 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
562 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
563 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
564 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
565 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
566 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
567 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
568
569 You can contact the author at :
570 - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
571 - Public forum : https://groups.google.com/forum/#!forum/lz4c
572****************************************************************** */
573#ifndef BITSTREAM_H_MODULE
574#define BITSTREAM_H_MODULE
575
576#if defined (__cplusplus)
577extern "C" {
578#endif
579
580
581/*
582* This API consists of small unitary functions, which highly benefit from being inlined.
583* Since link-time-optimization is not available for all compilers,
584* these functions are defined into a .h to be included.
585*/
586
587/**********************************************
588* bitStream decompression API (read backward)
589**********************************************/
590typedef struct
591{
592 size_t bitContainer;
593 unsigned bitsConsumed;
594 const char* ptr;
595 const char* start;
596} BIT_DStream_t;
597
598typedef enum { BIT_DStream_unfinished = 0,
599 BIT_DStream_endOfBuffer = 1,
600 BIT_DStream_completed = 2,
601 BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */
602 /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
603
604MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
605MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
606MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
607MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
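/* Illustrative sketch (added for clarity, not part of the original source) : a typical
   read-backward loop over this API. `srcBuffer`/`srcSize` stand for a complete bitstream
   produced by the matching encoder; `nbBits` comes from whatever entropy table the caller uses.

    BIT_DStream_t bitD;
    size_t const initErr = BIT_initDStream(&bitD, srcBuffer, srcSize);
    if (FSE_isError(initErr)) return initErr;

    while (BIT_reloadDStream(&bitD) == BIT_DStream_unfinished) {
        size_t const value = BIT_readBits(&bitD, nbBits);   // consumes nbBits from the stream
        // ... use value ...
    }
    // finish remaining symbols, then check BIT_endOfDStream(&bitD) for exact consumption
*/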
608
609
610
611
612/******************************************
613* unsafe API
614******************************************/
615MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
616/* faster, but works only if nbBits >= 1 */
617
618
619
620/****************************************************************
621* Helper functions
622****************************************************************/
623MEM_STATIC unsigned BIT_highbit32 (U32 val)
624{
625# if defined(_MSC_VER) /* Visual */
626 unsigned long r=0;
627 _BitScanReverse ( &r, val );
628 return (unsigned) r;
629# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
630 return 31 - __builtin_clz (val);
631# else /* Software version */
632 static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
633 U32 v = val;
634 unsigned r;
635 v |= v >> 1;
636 v |= v >> 2;
637 v |= v >> 4;
638 v |= v >> 8;
639 v |= v >> 16;
640 r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
641 return r;
642# endif
643}
644
645
646/**********************************************************
647* bitStream decoding
648**********************************************************/
649
650/*!BIT_initDStream
651* Initialize a BIT_DStream_t.
652* @bitD : a pointer to an already allocated BIT_DStream_t structure
653* @srcBuffer must point at the beginning of a bitStream
654* @srcSize must be the exact size of the bitStream
655* @result : size of stream (== srcSize) or an errorCode if a problem is detected
656*/
657MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
658{
659 if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
660
661 if (srcSize >= sizeof(size_t)) /* normal case */
662 {
663 U32 contain32;
664 bitD->start = (const char*)srcBuffer;
665 bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t);
666 bitD->bitContainer = MEM_readLEST(bitD->ptr);
667 contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
668 if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
669 bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
670 }
671 else
672 {
673 U32 contain32;
674 bitD->start = (const char*)srcBuffer;
675 bitD->ptr = bitD->start;
676 bitD->bitContainer = *(const BYTE*)(bitD->start);
677 switch(srcSize)
678 {
679 case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);/* fall-through */
680 case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);/* fall-through */
681 case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);/* fall-through */
682 case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; /* fall-through */
683 case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; /* fall-through */
684 case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; /* fall-through */
685 default: break;
686 }
687 contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
688 if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
689 bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
690 bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
691 }
692
693 return srcSize;
694}
695
696MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits)
697{
698 const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
699 return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
700}
701
702/*! BIT_lookBitsFast :
*  unsafe version; works only if nbBits >= 1 */
704MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits)
705{
706 const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
707 return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
708}
709
710MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
711{
712 bitD->bitsConsumed += nbBits;
713}
714
715MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
716{
717 size_t value = BIT_lookBits(bitD, nbBits);
718 BIT_skipBits(bitD, nbBits);
719 return value;
720}
721
722/*!BIT_readBitsFast :
*  unsafe version; works only if nbBits >= 1 */
724MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
725{
726 size_t value = BIT_lookBitsFast(bitD, nbBits);
727 BIT_skipBits(bitD, nbBits);
728 return value;
729}
730
731MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
732{
733 if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
734 return BIT_DStream_overflow;
735
736 if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
737 {
738 bitD->ptr -= bitD->bitsConsumed >> 3;
739 bitD->bitsConsumed &= 7;
740 bitD->bitContainer = MEM_readLEST(bitD->ptr);
741 return BIT_DStream_unfinished;
742 }
743 if (bitD->ptr == bitD->start)
744 {
745 if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
746 return BIT_DStream_completed;
747 }
748 {
749 U32 nbBytes = bitD->bitsConsumed >> 3;
750 BIT_DStream_status result = BIT_DStream_unfinished;
751 if (bitD->ptr - nbBytes < bitD->start)
752 {
753 nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
754 result = BIT_DStream_endOfBuffer;
755 }
756 bitD->ptr -= nbBytes;
757 bitD->bitsConsumed -= nbBytes*8;
758 bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
759 return result;
760 }
761}
762
763/*! BIT_endOfDStream
764* @return Tells if DStream has reached its exact end
765*/
766MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
767{
768 return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
769}
770
771#if defined (__cplusplus)
772}
773#endif
774
775#endif /* BITSTREAM_H_MODULE */
776
777
778
779/* ******************************************************************
780 FSE : Finite State Entropy coder
781 header file for static linking (only)
782 Copyright (C) 2013-2015, Yann Collet
783
784 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
785
786 Redistribution and use in source and binary forms, with or without
787 modification, are permitted provided that the following conditions are
788 met:
789
790 * Redistributions of source code must retain the above copyright
791 notice, this list of conditions and the following disclaimer.
792 * Redistributions in binary form must reproduce the above
793 copyright notice, this list of conditions and the following disclaimer
794 in the documentation and/or other materials provided with the
795 distribution.
796
797 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
798 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
799 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
800 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
801 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
802 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
803 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
804 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
805 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
806 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
807 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
808
809 You can contact the author at :
810 - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
811 - Public forum : https://groups.google.com/forum/#!forum/lz4c
812****************************************************************** */
813#ifndef FSE_STATIC_H
814#define FSE_STATIC_H
815
816#if defined (__cplusplus)
817extern "C" {
818#endif
819
820
821/* *****************************************
822* Static allocation
823*******************************************/
824/* FSE buffer bounds */
825#define FSE_NCOUNTBOUND 512
826#define FSE_BLOCKBOUND(size) (size + (size>>7))
827#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
828
829/* It is possible to statically allocate FSE CTable/DTable as a table of unsigned using below macros */
830#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
831#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog))
832
833
834/* *****************************************
835* FSE advanced API
836*******************************************/
837static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
838/* build a fake FSE_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
839
840static size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
841/* build a fake FSE_DTable, designed to always generate the same symbolValue */
842
843
844
845/* *****************************************
846* FSE symbol decompression API
847*******************************************/
848typedef struct
849{
850 size_t state;
851 const void* table; /* precise table may vary, depending on U16 */
852} FSE_DState_t;
853
854
855static void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
856
857static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
858
859static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
860
861
862/* *****************************************
863* FSE unsafe API
864*******************************************/
865static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
866/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
867
868
869/* *****************************************
870* Implementation of inlined functions
871*******************************************/
872/* decompression */
873
874typedef struct {
875 U16 tableLog;
876 U16 fastMode;
877} FSE_DTableHeader; /* sizeof U32 */
878
879typedef struct
880{
881 unsigned short newState;
882 unsigned char symbol;
883 unsigned char nbBits;
884} FSE_decode_t; /* size == U32 */
885
886MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
887{
888 FSE_DTableHeader DTableH;
889 memcpy(&DTableH, dt, sizeof(DTableH));
890 DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog);
891 BIT_reloadDStream(bitD);
892 DStatePtr->table = dt + 1;
893}
894
895MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
896{
897 const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
898 const U32 nbBits = DInfo.nbBits;
899 BYTE symbol = DInfo.symbol;
900 size_t lowBits = BIT_readBits(bitD, nbBits);
901
902 DStatePtr->state = DInfo.newState + lowBits;
903 return symbol;
904}
905
906MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
907{
908 const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
909 const U32 nbBits = DInfo.nbBits;
910 BYTE symbol = DInfo.symbol;
911 size_t lowBits = BIT_readBitsFast(bitD, nbBits);
912
913 DStatePtr->state = DInfo.newState + lowBits;
914 return symbol;
915}
916
917MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
918{
919 return DStatePtr->state == 0;
920}
921
922
923#if defined (__cplusplus)
924}
925#endif
926
927#endif /* FSE_STATIC_H */
928
929/* ******************************************************************
930 FSE : Finite State Entropy coder
931 Copyright (C) 2013-2015, Yann Collet.
932
933 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
934
935 Redistribution and use in source and binary forms, with or without
936 modification, are permitted provided that the following conditions are
937 met:
938
939 * Redistributions of source code must retain the above copyright
940 notice, this list of conditions and the following disclaimer.
941 * Redistributions in binary form must reproduce the above
942 copyright notice, this list of conditions and the following disclaimer
943 in the documentation and/or other materials provided with the
944 distribution.
945
946 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
947 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
948 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
949 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
950 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
951 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
952 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
953 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
954 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
955 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
956 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
957
958 You can contact the author at :
959 - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
960 - Public forum : https://groups.google.com/forum/#!forum/lz4c
961****************************************************************** */
962
963#ifndef FSE_COMMONDEFS_ONLY
964
965/* **************************************************************
966* Tuning parameters
967****************************************************************/
968/*!MEMORY_USAGE :
969* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
970* Increasing memory usage improves compression ratio
971* Reduced memory usage can improve speed, due to cache effect
972* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
973#define FSE_MAX_MEMORY_USAGE 14
974#define FSE_DEFAULT_MEMORY_USAGE 13
975
976/*!FSE_MAX_SYMBOL_VALUE :
977* Maximum symbol value authorized.
978* Required for proper stack allocation */
979#define FSE_MAX_SYMBOL_VALUE 255
980
981
982/* **************************************************************
983* template functions type & suffix
984****************************************************************/
985#define FSE_FUNCTION_TYPE BYTE
986#define FSE_FUNCTION_EXTENSION
987#define FSE_DECODE_TYPE FSE_decode_t
988
989
990#endif /* !FSE_COMMONDEFS_ONLY */
991
992/* **************************************************************
993* Compiler specifics
994****************************************************************/
995#ifdef _MSC_VER /* Visual Studio */
996# define FORCE_INLINE static __forceinline
997# include <intrin.h> /* For Visual 2005 */
998# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
999# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
1000#else
1001# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
1002# ifdef __GNUC__
1003# define FORCE_INLINE static inline __attribute__((always_inline))
1004# else
1005# define FORCE_INLINE static inline
1006# endif
1007# else
1008# define FORCE_INLINE static
1009# endif /* __STDC_VERSION__ */
1010#endif
1011
1012
1013/* **************************************************************
1014* Dependencies
1015****************************************************************/
1016#include <stdlib.h> /* malloc, free, qsort */
1017#include <string.h> /* memcpy, memset */
1018#include <stdio.h> /* printf (debug) */
1019
1020
1021/* ***************************************************************
1022* Constants
1023*****************************************************************/
1024#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2)
1025#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
1026#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
1027#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
1028#define FSE_MIN_TABLELOG 5
1029
1030#define FSE_TABLELOG_ABSOLUTE_MAX 15
1031#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
1032#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
1033#endif
1034
1035
1036/* **************************************************************
1037* Error Management
1038****************************************************************/
1039#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
1040
1041
1042/* **************************************************************
1043* Complex types
1044****************************************************************/
1045typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
1046
1047
1048/*-**************************************************************
1049* Templates
1050****************************************************************/
1051/*
1052 designed to be included
1053 for type-specific functions (template emulation in C)
1054 Objective is to write these functions only once, for improved maintenance
1055*/
1056
1057/* safety checks */
1058#ifndef FSE_FUNCTION_EXTENSION
1059# error "FSE_FUNCTION_EXTENSION must be defined"
1060#endif
1061#ifndef FSE_FUNCTION_TYPE
1062# error "FSE_FUNCTION_TYPE must be defined"
1063#endif
1064
1065/* Function names */
1066#define FSE_CAT(X,Y) X##Y
1067#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
1068#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
1069
1070static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }
1071
1072
1073static size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
1074{
1075 FSE_DTableHeader DTableH;
1076 void* const tdPtr = dt+1; /* because dt is unsigned, 32-bits aligned on 32-bits */
1077 FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
1078 const U32 tableSize = 1 << tableLog;
1079 const U32 tableMask = tableSize-1;
1080 const U32 step = FSE_tableStep(tableSize);
1081 U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
1082 U32 position = 0;
1083 U32 highThreshold = tableSize-1;
1084 const S16 largeLimit= (S16)(1 << (tableLog-1));
1085 U32 noLarge = 1;
1086 U32 s;
1087
1088 /* Sanity Checks */
1089 if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
1090 if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
1091
1092 /* Init, lay down lowprob symbols */
    memset(tableDecode, 0, sizeof(FSE_DECODE_TYPE) * (maxSymbolValue+1) );   /* useless init, but keep static analyzer happy, and we don't need to performance optimize legacy decoders */
    DTableH.tableLog = (U16)tableLog;
1095 for (s=0; s<=maxSymbolValue; s++)
1096 {
1097 if (normalizedCounter[s]==-1)
1098 {
1099 tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
1100 symbolNext[s] = 1;
1101 }
1102 else
1103 {
1104 if (normalizedCounter[s] >= largeLimit) noLarge=0;
1105 symbolNext[s] = normalizedCounter[s];
1106 }
1107 }
1108
1109 /* Spread symbols */
1110 for (s=0; s<=maxSymbolValue; s++)
1111 {
1112 int i;
1113 for (i=0; i<normalizedCounter[s]; i++)
1114 {
1115 tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
1116 position = (position + step) & tableMask;
1117 while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
1118 }
1119 }
1120
1121 if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
1122
1123 /* Build Decoding table */
1124 {
1125 U32 i;
1126 for (i=0; i<tableSize; i++)
1127 {
1128 FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);
1129 U16 nextState = symbolNext[symbol]++;
1130 tableDecode[i].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) );
1131 tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
1132 }
1133 }
1134
1135 DTableH.fastMode = (U16)noLarge;
1136 memcpy(dt, &DTableH, sizeof(DTableH));
1137 return 0;
1138}
1139
1140
1141#ifndef FSE_COMMONDEFS_ONLY
1142/******************************************
1143* FSE helper functions
1144******************************************/
1145static unsigned FSE_isError(size_t code) { return ERR_isError(code); }
1146
1147
1148/****************************************************************
1149* FSE NCount encoding-decoding
1150****************************************************************/
1151static short FSE_abs(short a)
1152{
1153 return a<0 ? -a : a;
1154}
1155
1156static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
1157 const void* headerBuffer, size_t hbSize)
1158{
1159 const BYTE* const istart = (const BYTE*) headerBuffer;
1160 const BYTE* const iend = istart + hbSize;
1161 const BYTE* ip = istart;
1162 int nbBits;
1163 int remaining;
1164 int threshold;
1165 U32 bitStream;
1166 int bitCount;
1167 unsigned charnum = 0;
1168 int previous0 = 0;
1169
1170 if (hbSize < 4) return ERROR(srcSize_wrong);
1171 bitStream = MEM_readLE32(ip);
1172 nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
1173 if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
1174 bitStream >>= 4;
1175 bitCount = 4;
1176 *tableLogPtr = nbBits;
1177 remaining = (1<<nbBits)+1;
1178 threshold = 1<<nbBits;
1179 nbBits++;
1180
1181 while ((remaining>1) && (charnum<=*maxSVPtr))
1182 {
1183 if (previous0)
1184 {
1185 unsigned n0 = charnum;
1186 while ((bitStream & 0xFFFF) == 0xFFFF)
1187 {
1188 n0+=24;
1189 if (ip < iend-5)
1190 {
1191 ip+=2;
1192 bitStream = MEM_readLE32(ip) >> bitCount;
1193 }
1194 else
1195 {
1196 bitStream >>= 16;
1197 bitCount+=16;
1198 }
1199 }
1200 while ((bitStream & 3) == 3)
1201 {
1202 n0+=3;
1203 bitStream>>=2;
1204 bitCount+=2;
1205 }
1206 n0 += bitStream & 3;
1207 bitCount += 2;
1208 if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
1209 while (charnum < n0) normalizedCounter[charnum++] = 0;
1210 if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
1211 {
1212 ip += bitCount>>3;
1213 bitCount &= 7;
1214 bitStream = MEM_readLE32(ip) >> bitCount;
1215 }
1216 else
1217 bitStream >>= 2;
1218 }
1219 {
1220 const short max = (short)((2*threshold-1)-remaining);
1221 short count;
1222
1223 if ((bitStream & (threshold-1)) < (U32)max)
1224 {
1225 count = (short)(bitStream & (threshold-1));
1226 bitCount += nbBits-1;
1227 }
1228 else
1229 {
1230 count = (short)(bitStream & (2*threshold-1));
1231 if (count >= threshold) count -= max;
1232 bitCount += nbBits;
1233 }
1234
1235 count--; /* extra accuracy */
1236 remaining -= FSE_abs(count);
1237 normalizedCounter[charnum++] = count;
1238 previous0 = !count;
1239 while (remaining < threshold)
1240 {
1241 nbBits--;
1242 threshold >>= 1;
1243 }
1244
1245 {
1246 if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
1247 {
1248 ip += bitCount>>3;
1249 bitCount &= 7;
1250 }
1251 else
1252 {
1253 bitCount -= (int)(8 * (iend - 4 - ip));
1254 ip = iend - 4;
1255 }
1256 bitStream = MEM_readLE32(ip) >> (bitCount & 31);
1257 }
1258 }
1259 }
1260 if (remaining != 1) return ERROR(GENERIC);
1261 *maxSVPtr = charnum-1;
1262
1263 ip += (bitCount+7)>>3;
1264 if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
1265 return ip-istart;
1266}
1267
1268
1269/*********************************************************
1270* Decompression (Byte symbols)
1271*********************************************************/
1272static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
1273{
1274 void* ptr = dt;
1275 FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
1276 void* dPtr = dt + 1;
1277 FSE_decode_t* const cell = (FSE_decode_t*)dPtr;
1278
1279 DTableH->tableLog = 0;
1280 DTableH->fastMode = 0;
1281
1282 cell->newState = 0;
1283 cell->symbol = symbolValue;
1284 cell->nbBits = 0;
1285
1286 return 0;
1287}
1288
1289
1290static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
1291{
1292 void* ptr = dt;
1293 FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
1294 void* dPtr = dt + 1;
1295 FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
1296 const unsigned tableSize = 1 << nbBits;
1297 const unsigned tableMask = tableSize - 1;
1298 const unsigned maxSymbolValue = tableMask;
1299 unsigned s;
1300
1301 /* Sanity checks */
1302 if (nbBits < 1) return ERROR(GENERIC); /* min size */
1303
1304 /* Build Decoding Table */
1305 DTableH->tableLog = (U16)nbBits;
1306 DTableH->fastMode = 1;
1307 for (s=0; s<=maxSymbolValue; s++)
1308 {
1309 dinfo[s].newState = 0;
1310 dinfo[s].symbol = (BYTE)s;
1311 dinfo[s].nbBits = (BYTE)nbBits;
1312 }
1313
1314 return 0;
1315}
1316
1317FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
1318 void* dst, size_t maxDstSize,
1319 const void* cSrc, size_t cSrcSize,
1320 const FSE_DTable* dt, const unsigned fast)
1321{
1322 BYTE* const ostart = (BYTE*) dst;
1323 BYTE* op = ostart;
1324 BYTE* const omax = op + maxDstSize;
1325 BYTE* const olimit = omax-3;
1326
1327 BIT_DStream_t bitD;
1328 FSE_DState_t state1;
1329 FSE_DState_t state2;
1330 size_t errorCode;
1331
1332 /* Init */
1333 errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressed Size */
1334 if (FSE_isError(errorCode)) return errorCode;
1335
1336 FSE_initDState(&state1, &bitD, dt);
1337 FSE_initDState(&state2, &bitD, dt);
1338
1339#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
1340
1341 /* 4 symbols per loop */
1342 for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op<olimit) ; op+=4)
1343 {
1344 op[0] = FSE_GETSYMBOL(&state1);
1345
1346 if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
1347 BIT_reloadDStream(&bitD);
1348
1349 op[1] = FSE_GETSYMBOL(&state2);
1350
1351 if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
1352 { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
1353
1354 op[2] = FSE_GETSYMBOL(&state1);
1355
1356 if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
1357 BIT_reloadDStream(&bitD);
1358
1359 op[3] = FSE_GETSYMBOL(&state2);
1360 }
1361
1362 /* tail */
1363 /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
1364 while (1)
1365 {
1366 if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )
1367 break;
1368
1369 *op++ = FSE_GETSYMBOL(&state1);
1370
1371 if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )
1372 break;
1373
1374 *op++ = FSE_GETSYMBOL(&state2);
1375 }
1376
1377 /* end ? */
1378 if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))
1379 return op-ostart;
1380
1381 if (op==omax) return ERROR(dstSize_tooSmall); /* dst buffer is full, but cSrc unfinished */
1382
1383 return ERROR(corruption_detected);
1384}
1385
1386
1387static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
1388 const void* cSrc, size_t cSrcSize,
1389 const FSE_DTable* dt)
1390{
1391 FSE_DTableHeader DTableH;
1392 U32 fastMode;
1393
1394 memcpy(&DTableH, dt, sizeof(DTableH));
1395 fastMode = DTableH.fastMode;
1396
1397 /* select fast mode (static) */
1398 if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
1399 return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
1400}
1401
1402
1403static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
1404{
1405 const BYTE* const istart = (const BYTE*)cSrc;
1406 const BYTE* ip = istart;
1407 short counting[FSE_MAX_SYMBOL_VALUE+1];
1408 DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */
1409 unsigned tableLog;
1410 unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
1411 size_t errorCode;
1412
1413 if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */
1414
1415 /* normal FSE decoding mode */
1416 errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
1417 if (FSE_isError(errorCode)) return errorCode;
1418 if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */
1419 ip += errorCode;
1420 cSrcSize -= errorCode;
1421
1422 errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);
1423 if (FSE_isError(errorCode)) return errorCode;
1424
1425 /* always return, even if it is an error code */
1426 return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
1427}
1428
1429
1430
1431#endif /* FSE_COMMONDEFS_ONLY */
1432
1433
1434/* ******************************************************************
1435 Huff0 : Huffman coder, part of New Generation Entropy library
1436 header file
1437 Copyright (C) 2013-2015, Yann Collet.
1438
1439 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
1440
1441 Redistribution and use in source and binary forms, with or without
1442 modification, are permitted provided that the following conditions are
1443 met:
1444
1445 * Redistributions of source code must retain the above copyright
1446 notice, this list of conditions and the following disclaimer.
1447 * Redistributions in binary form must reproduce the above
1448 copyright notice, this list of conditions and the following disclaimer
1449 in the documentation and/or other materials provided with the
1450 distribution.
1451
1452 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1453 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1454 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1455 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1456 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1457 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1458 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1459 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1460 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1461 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1462 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1463
1464 You can contact the author at :
1465 - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
1466 - Public forum : https://groups.google.com/forum/#!forum/lz4c
1467****************************************************************** */
1468#ifndef HUFF0_H
1469#define HUFF0_H
1470
1471#if defined (__cplusplus)
1472extern "C" {
1473#endif
1474
1475
1476/* ****************************************
1477* Dependency
1478******************************************/
1479#include <stddef.h> /* size_t */
1480
1481
1482/* ****************************************
1483* Huff0 simple functions
1484******************************************/
1485static size_t HUF_decompress(void* dst, size_t dstSize,
1486 const void* cSrc, size_t cSrcSize);
/*!
HUF_decompress():
    Decompress Huff0 data from buffer 'cSrc', of size 'cSrcSize',
    into already allocated destination buffer 'dst', of size 'dstSize'.
    'dstSize' must be the exact size of the original (uncompressed) data.
    Note : in contrast with FSE, HUF_decompress can regenerate RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, because it knows the size to regenerate.
    @return : size of regenerated data (== dstSize)
              or an error code, which can be tested using HUF_isError()
*/
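/* Illustrative sketch (added for clarity, not part of the original source) : decompressing one
   Huff0-compressed block when the regenerated size is known. `cSrc`, `cSrcSize`, `dst` and
   `originalSize` are hypothetical caller-provided values.

    size_t const result = HUF_decompress(dst, originalSize, cSrc, cSrcSize);
    if (HUF_isError(result)) return result;   // e.g. corrupted input
    // on success, result == originalSize
*/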
1496
1497
1498/* ****************************************
1499* Tool functions
1500******************************************/
1501/* Error Management */
1502static unsigned HUF_isError(size_t code); /* tells if a return value is an error code */
1503
1504
1505#if defined (__cplusplus)
1506}
1507#endif
1508
1509#endif /* HUFF0_H */
1510
1511
1512/* ******************************************************************
1513 Huff0 : Huffman coder, part of New Generation Entropy library
1514 header file for static linking (only)
1515 Copyright (C) 2013-2015, Yann Collet
1516
1517 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
1518
1519 Redistribution and use in source and binary forms, with or without
1520 modification, are permitted provided that the following conditions are
1521 met:
1522
1523 * Redistributions of source code must retain the above copyright
1524 notice, this list of conditions and the following disclaimer.
1525 * Redistributions in binary form must reproduce the above
1526 copyright notice, this list of conditions and the following disclaimer
1527 in the documentation and/or other materials provided with the
1528 distribution.
1529
1530 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1531 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1532 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1533 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1534 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1535 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1536 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1537 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1538 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1539 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1540 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1541
1542 You can contact the author at :
1543 - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
1544 - Public forum : https://groups.google.com/forum/#!forum/lz4c
1545****************************************************************** */
1546#ifndef HUFF0_STATIC_H
1547#define HUFF0_STATIC_H
1548
1549#if defined (__cplusplus)
1550extern "C" {
1551#endif
1552
1553
1554
1555/* ****************************************
1556* Static allocation macros
1557******************************************/
1558/* static allocation of Huff0's DTable */
1559#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<maxTableLog)) /* nb Cells; use unsigned short for X2, unsigned int for X4 */
1560#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
1561 unsigned short DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
1562#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
1563 unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
1564#define HUF_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
1565 unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
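/* Note : cell 0 of a DTable created by these macros stores its capacity (maxTableLog).
   HUF_readDTableX2() / HUF_readDTableX4() compare the incoming tree's tableLog against it,
   and return ERROR(tableLog_tooLarge) when the table is too small for the tree. */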
1566
1567
1568/* ****************************************
1569* Advanced decompression functions
1570******************************************/
1571static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
1572static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbols decoder */
1573
1574
1575/* ****************************************
1576* Huff0 detailed API
1577******************************************/
1578/*!
1579HUF_decompress() does the following:
15801. select the decompression algorithm (X2, X4, X6) based on pre-computed heuristics
15812. build the Huffman table from its saved, compact representation, using HUF_readDTableXn()
15823. decode 1 or 4 segments in parallel using HUF_decompressSXn_usingDTable
1583
1584*/
1585static size_t HUF_readDTableX2 (unsigned short* DTable, const void* src, size_t srcSize);
1586static size_t HUF_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize);
1587
1588static size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);
1589static size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);
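/* Usage sketch of the detailed API (illustrative only; it mirrors what HUF_decompress4X2()
 * does below, with `cSrc`, `cSrcSize`, `dst`, `dstSize` as hypothetical caller buffers) :
 *
 *     HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);
 *     size_t hSize = HUF_readDTableX2(DTable, cSrc, cSrcSize);      // step 2 : build DTable
 *     if (HUF_isError(hSize)) ...
 *     size_t dSize = HUF_decompress4X2_usingDTable(dst, dstSize,    // step 3 : decode 4 streams
 *                                                  (const BYTE*)cSrc + hSize,
 *                                                  cSrcSize - hSize, DTable);
 *     if (HUF_isError(dSize)) ...
 */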
1590
1591
1592#if defined (__cplusplus)
1593}
1594#endif
1595
1596#endif /* HUFF0_STATIC_H */
1597
1598
1599
1600/* ******************************************************************
1601 Huff0 : Huffman coder, part of New Generation Entropy library
1602 Copyright (C) 2013-2015, Yann Collet.
1603
1604 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
1605
1606 Redistribution and use in source and binary forms, with or without
1607 modification, are permitted provided that the following conditions are
1608 met:
1609
1610 * Redistributions of source code must retain the above copyright
1611 notice, this list of conditions and the following disclaimer.
1612 * Redistributions in binary form must reproduce the above
1613 copyright notice, this list of conditions and the following disclaimer
1614 in the documentation and/or other materials provided with the
1615 distribution.
1616
1617 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1618 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1619 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1620 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1621 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1622 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1623 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1624 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1625 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1626 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1627 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1628
1629 You can contact the author at :
1630 - FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy
1631****************************************************************** */
1632
1633/* **************************************************************
1634* Compiler specifics
1635****************************************************************/
1636#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
1637/* inline is defined */
1638#elif defined(_MSC_VER)
1639# define inline __inline
1640#else
1641# define inline /* disable inline */
1642#endif
1643
1644
1645#ifdef _MSC_VER /* Visual Studio */
1646# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
1647#endif
1648
1649
1650/* **************************************************************
1651* Includes
1652****************************************************************/
1653#include <stdlib.h> /* malloc, free, qsort */
1654#include <string.h> /* memcpy, memset */
1655#include <stdio.h> /* printf (debug) */
1656
1657
1658/* **************************************************************
1659* Constants
1660****************************************************************/
1661#define HUF_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
1662#define HUF_MAX_TABLELOG 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
1663#define HUF_DEFAULT_TABLELOG HUF_MAX_TABLELOG /* tableLog by default, when not specified */
1664#define HUF_MAX_SYMBOL_VALUE 255
1665#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)
1666# error "HUF_MAX_TABLELOG is too large !"
1667#endif
1668
1669
1670/* **************************************************************
1671* Error Management
1672****************************************************************/
1673static unsigned HUF_isError(size_t code) { return ERR_isError(code); }
1674#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
1675
1676
1677
1678/*-*******************************************************
1679* Huff0 : Huffman block decompression
1680*********************************************************/
1681typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2; /* single-symbol decoding */
1682
1683typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* double-symbols decoding */
1684
1685typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
1686
1687/*! HUF_readStats
1688 Read compact Huffman tree, saved by HUF_writeCTable
1689 @huffWeight : destination buffer
1690 @return : size read from `src`
1691*/
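/* Note : the first byte of `src` selects among three layouts handled below :
   values >= 242 describe an RLE run of weight-1 symbols (run length taken from a small table),
   values 128..241 announce (iSize-127) raw weights packed as 4-bit nibbles,
   and values < 128 give the byte size of an FSE-compressed weight stream. */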
1692static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
1693 U32* nbSymbolsPtr, U32* tableLogPtr,
1694 const void* src, size_t srcSize)
1695{
1696 U32 weightTotal;
1697 U32 tableLog;
1698 const BYTE* ip = (const BYTE*) src;
1699 size_t iSize;
1700 size_t oSize;
1701 U32 n;
1702
1703 if (!srcSize) return ERROR(srcSize_wrong);
1704 iSize = ip[0];
1705    //memset(huffWeight, 0, hwSize);   /* is not necessary, even though some analyzers complain ... */
1706
1707 if (iSize >= 128) /* special header */
1708 {
1709 if (iSize >= (242)) /* RLE */
1710 {
1711 static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
1712 oSize = l[iSize-242];
1713 memset(huffWeight, 1, hwSize);
1714 iSize = 0;
1715 }
1716 else /* Incompressible */
1717 {
1718 oSize = iSize - 127;
1719 iSize = ((oSize+1)/2);
1720 if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
1721 if (oSize >= hwSize) return ERROR(corruption_detected);
1722 ip += 1;
1723 for (n=0; n<oSize; n+=2)
1724 {
1725 huffWeight[n] = ip[n/2] >> 4;
1726 huffWeight[n+1] = ip[n/2] & 15;
1727 }
1728 }
1729 }
1730 else /* header compressed with FSE (normal case) */
1731 {
1732 if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
1733 oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */
1734 if (FSE_isError(oSize)) return oSize;
1735 }
1736
1737 /* collect weight stats */
1738 memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
1739 weightTotal = 0;
1740 for (n=0; n<oSize; n++)
1741 {
1742 if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
1743 rankStats[huffWeight[n]]++;
1744 weightTotal += (1 << huffWeight[n]) >> 1;
1745 }
1746 if (weightTotal == 0) return ERROR(corruption_detected);
1747
1748 /* get last non-null symbol weight (implied, total must be 2^n) */
1749 tableLog = BIT_highbit32(weightTotal) + 1;
1750 if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
1751 {
1752 U32 total = 1 << tableLog;
1753 U32 rest = total - weightTotal;
1754 U32 verif = 1 << BIT_highbit32(rest);
1755 U32 lastWeight = BIT_highbit32(rest) + 1;
1756 if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
1757 huffWeight[oSize] = (BYTE)lastWeight;
1758 rankStats[lastWeight]++;
1759 }
1760
1761 /* check tree construction validity */
1762 if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
1763
1764 /* results */
1765 *nbSymbolsPtr = (U32)(oSize+1);
1766 *tableLogPtr = tableLog;
1767 return iSize+1;
1768}
1769
1770
1771/**************************/
1772/* single-symbol decoding */
1773/**************************/
1774
1775static size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
1776{
1777 BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];
1778 U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */
1779 U32 tableLog = 0;
1780 size_t iSize;
1781 U32 nbSymbols = 0;
1782 U32 n;
1783 U32 nextRankStart;
1784 void* const dtPtr = DTable + 1;
1785 HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
1786
1787 HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */
1788    //memset(huffWeight, 0, sizeof(huffWeight));   /* is not necessary, even though some analyzers complain ... */
1789
1790 iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
1791 if (HUF_isError(iSize)) return iSize;
1792
1793 /* check result */
1794 if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge); /* DTable is too small */
1795 DTable[0] = (U16)tableLog; /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */
1796
1797 /* Prepare ranks */
1798 nextRankStart = 0;
1799 for (n=1; n<=tableLog; n++)
1800 {
1801 U32 current = nextRankStart;
1802 nextRankStart += (rankVal[n] << (n-1));
1803 rankVal[n] = current;
1804 }
1805
1806 /* fill DTable */
1807 for (n=0; n<nbSymbols; n++)
1808 {
1809 const U32 w = huffWeight[n];
1810 const U32 length = (1 << w) >> 1;
1811 U32 i;
1812 HUF_DEltX2 D;
1813 D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
1814 for (i = rankVal[w]; i < rankVal[w] + length; i++)
1815 dt[i] = D;
1816 rankVal[w] += length;
1817 }
1818
1819 return iSize;
1820}
1821
1822static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
1823{
1824 const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
1825 const BYTE c = dt[val].byte;
1826 BIT_skipBits(Dstream, dt[val].nbBits);
1827 return c;
1828}
1829
1830#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
1831 *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
1832
1833#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
1834 if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
1835 HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
1836
1837#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
1838 if (MEM_64bits()) \
1839 HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
1840
1841static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
1842{
1843 BYTE* const pStart = p;
1844
1845 /* up to 4 symbols at a time */
1846 while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4))
1847 {
1848 HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
1849 HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
1850 HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
1851 HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
1852 }
1853
1854 /* closer to the end */
1855 while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
1856 HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
1857
1858 /* no more data to retrieve from bitstream, hence no need to reload */
1859 while (p < pEnd)
1860 HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
1861
1862 return pEnd-pStart;
1863}
1864
1865
1866static size_t HUF_decompress4X2_usingDTable(
1867 void* dst, size_t dstSize,
1868 const void* cSrc, size_t cSrcSize,
1869 const U16* DTable)
1870{
1871 if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
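    /* Note : the compressed input holds 4 independent bitstreams, decoded in parallel below.
       A 6-byte jump table (three little-endian 16-bit lengths) gives the sizes of streams 1-3 ;
       the 4th size is implied by cSrcSize. Each stream regenerates one quarter (rounded up,
       see segmentSize) of the destination buffer. */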
1872
1873 {
1874 const BYTE* const istart = (const BYTE*) cSrc;
1875 BYTE* const ostart = (BYTE*) dst;
1876 BYTE* const oend = ostart + dstSize;
1877 const void* const dtPtr = DTable;
1878 const HUF_DEltX2* const dt = ((const HUF_DEltX2*)dtPtr) +1;
1879 const U32 dtLog = DTable[0];
1880 size_t errorCode;
1881
1882 /* Init */
1883 BIT_DStream_t bitD1;
1884 BIT_DStream_t bitD2;
1885 BIT_DStream_t bitD3;
1886 BIT_DStream_t bitD4;
1887 const size_t length1 = MEM_readLE16(istart);
1888 const size_t length2 = MEM_readLE16(istart+2);
1889 const size_t length3 = MEM_readLE16(istart+4);
1890 size_t length4;
1891 const BYTE* const istart1 = istart + 6; /* jumpTable */
1892 const BYTE* const istart2 = istart1 + length1;
1893 const BYTE* const istart3 = istart2 + length2;
1894 const BYTE* const istart4 = istart3 + length3;
1895 const size_t segmentSize = (dstSize+3) / 4;
1896 BYTE* const opStart2 = ostart + segmentSize;
1897 BYTE* const opStart3 = opStart2 + segmentSize;
1898 BYTE* const opStart4 = opStart3 + segmentSize;
1899 BYTE* op1 = ostart;
1900 BYTE* op2 = opStart2;
1901 BYTE* op3 = opStart3;
1902 BYTE* op4 = opStart4;
1903 U32 endSignal;
1904
1905 length4 = cSrcSize - (length1 + length2 + length3 + 6);
1906 if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
1907 errorCode = BIT_initDStream(&bitD1, istart1, length1);
1908 if (HUF_isError(errorCode)) return errorCode;
1909 errorCode = BIT_initDStream(&bitD2, istart2, length2);
1910 if (HUF_isError(errorCode)) return errorCode;
1911 errorCode = BIT_initDStream(&bitD3, istart3, length3);
1912 if (HUF_isError(errorCode)) return errorCode;
1913 errorCode = BIT_initDStream(&bitD4, istart4, length4);
1914 if (HUF_isError(errorCode)) return errorCode;
1915
1916 /* 16-32 symbols per loop (4-8 symbols per stream) */
1917 endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
1918 for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
1919 {
1920 HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
1921 HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
1922 HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
1923 HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
1924 HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
1925 HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
1926 HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
1927 HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
1928 HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
1929 HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
1930 HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
1931 HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
1932 HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
1933 HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
1934 HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
1935 HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
1936
1937 endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
1938 }
1939
1940 /* check corruption */
1941 if (op1 > opStart2) return ERROR(corruption_detected);
1942 if (op2 > opStart3) return ERROR(corruption_detected);
1943 if (op3 > opStart4) return ERROR(corruption_detected);
1944 /* note : op4 supposed already verified within main loop */
1945
1946 /* finish bitStreams one by one */
1947 HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
1948 HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
1949 HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
1950 HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
1951
1952 /* check */
1953 endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
1954 if (!endSignal) return ERROR(corruption_detected);
1955
1956 /* decoded size */
1957 return dstSize;
1958 }
1959}
1960
1961
1962static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
1963{
1964 HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);
1965 const BYTE* ip = (const BYTE*) cSrc;
1966 size_t errorCode;
1967
1968 errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize);
1969 if (HUF_isError(errorCode)) return errorCode;
1970 if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
1971 ip += errorCode;
1972 cSrcSize -= errorCode;
1973
1974 return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
1975}
1976
1977
1978/***************************/
1979/* double-symbols decoding */
1980/***************************/
1981
1982static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
1983 const U32* rankValOrigin, const int minWeight,
1984 const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
1985 U32 nbBitsBaseline, U16 baseSeq)
1986{
1987 HUF_DEltX4 DElt;
1988 U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
1989 U32 s;
1990
1991 /* get pre-calculated rankVal */
1992 memcpy(rankVal, rankValOrigin, sizeof(rankVal));
1993
1994 /* fill skipped values */
1995 if (minWeight>1)
1996 {
1997 U32 i, skipSize = rankVal[minWeight];
1998 MEM_writeLE16(&(DElt.sequence), baseSeq);
1999 DElt.nbBits = (BYTE)(consumed);
2000 DElt.length = 1;
2001 for (i = 0; i < skipSize; i++)
2002 DTable[i] = DElt;
2003 }
2004
2005 /* fill DTable */
2006 for (s=0; s<sortedListSize; s++) /* note : sortedSymbols already skipped */
2007 {
2008 const U32 symbol = sortedSymbols[s].symbol;
2009 const U32 weight = sortedSymbols[s].weight;
2010 const U32 nbBits = nbBitsBaseline - weight;
2011 const U32 length = 1 << (sizeLog-nbBits);
2012 const U32 start = rankVal[weight];
2013 U32 i = start;
2014 const U32 end = start + length;
2015
2016 MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
2017 DElt.nbBits = (BYTE)(nbBits + consumed);
2018 DElt.length = 2;
2019 do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */
2020
2021 rankVal[weight] += length;
2022 }
2023}
2024
2025typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1];
2026
2027static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
2028 const sortedSymbol_t* sortedList, const U32 sortedListSize,
2029 const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
2030 const U32 nbBitsBaseline)
2031{
2032 U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
2033 const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
2034 const U32 minBits = nbBitsBaseline - maxWeight;
2035 U32 s;
2036
2037 memcpy(rankVal, rankValOrigin, sizeof(rankVal));
2038
2039 /* fill DTable */
2040 for (s=0; s<sortedListSize; s++)
2041 {
2042 const U16 symbol = sortedList[s].symbol;
2043 const U32 weight = sortedList[s].weight;
2044 const U32 nbBits = nbBitsBaseline - weight;
2045 const U32 start = rankVal[weight];
2046 const U32 length = 1 << (targetLog-nbBits);
2047
2048 if (targetLog-nbBits >= minBits) /* enough room for a second symbol */
2049 {
2050 U32 sortedRank;
2051 int minWeight = nbBits + scaleLog;
2052 if (minWeight < 1) minWeight = 1;
2053 sortedRank = rankStart[minWeight];
2054 HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
2055 rankValOrigin[nbBits], minWeight,
2056 sortedList+sortedRank, sortedListSize-sortedRank,
2057 nbBitsBaseline, symbol);
2058 }
2059 else
2060 {
2061 U32 i;
2062 const U32 end = start + length;
2063 HUF_DEltX4 DElt;
2064
2065 MEM_writeLE16(&(DElt.sequence), symbol);
2066 DElt.nbBits = (BYTE)(nbBits);
2067 DElt.length = 1;
2068 for (i = start; i < end; i++)
2069 DTable[i] = DElt;
2070 }
2071 rankVal[weight] += length;
2072 }
2073}
2074
2075static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
2076{
2077 BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];
2078 sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];
2079 U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
2080 U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
2081 U32* const rankStart = rankStart0+1;
2082 rankVal_t rankVal;
2083 U32 tableLog, maxW, sizeOfSort, nbSymbols;
2084 const U32 memLog = DTable[0];
2085 size_t iSize;
2086 void* dtPtr = DTable;
2087 HUF_DEltX4* const dt = ((HUF_DEltX4*)dtPtr) + 1;
2088
2089 HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32)); /* if compilation fails here, assertion is false */
2090 if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
2091    //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzers complain ... */
2092
2093 iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
2094 if (HUF_isError(iSize)) return iSize;
2095
2096 /* check result */
2097 if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
2098
2099 /* find maxWeight */
2100 for (maxW = tableLog; rankStats[maxW]==0; maxW--)
2101 { if (!maxW) return ERROR(GENERIC); } /* necessarily finds a solution before maxW==0 */
2102
2103 /* Get start index of each weight */
2104 {
2105 U32 w, nextRankStart = 0;
2106 for (w=1; w<=maxW; w++)
2107 {
2108 U32 current = nextRankStart;
2109 nextRankStart += rankStats[w];
2110 rankStart[w] = current;
2111 }
2112 rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
2113 sizeOfSort = nextRankStart;
2114 }
2115
2116 /* sort symbols by weight */
2117 {
2118 U32 s;
2119 for (s=0; s<nbSymbols; s++)
2120 {
2121 U32 w = weightList[s];
2122 U32 r = rankStart[w]++;
2123 sortedSymbol[r].symbol = (BYTE)s;
2124 sortedSymbol[r].weight = (BYTE)w;
2125 }
2126 rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
2127 }
2128
2129 /* Build rankVal */
2130 {
2131 const U32 minBits = tableLog+1 - maxW;
2132 U32 nextRankVal = 0;
2133 U32 w, consumed;
2134 const int rescale = (memLog-tableLog) - 1; /* tableLog <= memLog */
2135 U32* rankVal0 = rankVal[0];
2136 for (w=1; w<=maxW; w++)
2137 {
2138 U32 current = nextRankVal;
2139 nextRankVal += rankStats[w] << (w+rescale);
2140 rankVal0[w] = current;
2141 }
2142 for (consumed = minBits; consumed <= memLog - minBits; consumed++)
2143 {
2144 U32* rankValPtr = rankVal[consumed];
2145 for (w = 1; w <= maxW; w++)
2146 {
2147 rankValPtr[w] = rankVal0[w] >> consumed;
2148 }
2149 }
2150 }
2151
2152 HUF_fillDTableX4(dt, memLog,
2153 sortedSymbol, sizeOfSort,
2154 rankStart0, rankVal, maxW,
2155 tableLog+1);
2156
2157 return iSize;
2158}
2159
2160
2161static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
2162{
2163 const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
2164 memcpy(op, dt+val, 2);
2165 BIT_skipBits(DStream, dt[val].nbBits);
2166 return dt[val].length;
2167}
2168
2169static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
2170{
2171 const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
2172 memcpy(op, dt+val, 1);
2173 if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
2174 else
2175 {
2176 if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))
2177 {
2178 BIT_skipBits(DStream, dt[val].nbBits);
2179 if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
2180 DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
2181 }
2182 }
2183 return 1;
2184}
2185
2186
2187#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
2188 ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
2189
2190#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
2191 if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
2192 ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
2193
2194#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
2195 if (MEM_64bits()) \
2196 ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
2197
2198static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
2199{
2200 BYTE* const pStart = p;
2201
2202 /* up to 8 symbols at a time */
2203 while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7))
2204 {
2205 HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
2206 HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
2207 HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
2208 HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
2209 }
2210
2211 /* closer to the end */
2212 while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2))
2213 HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
2214
2215 while (p <= pEnd-2)
2216 HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
2217
2218 if (p < pEnd)
2219 p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
2220
2221 return p-pStart;
2222}
2223
2224static size_t HUF_decompress4X4_usingDTable(
2225 void* dst, size_t dstSize,
2226 const void* cSrc, size_t cSrcSize,
2227 const U32* DTable)
2228{
2229 if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
2230
2231 {
2232 const BYTE* const istart = (const BYTE*) cSrc;
2233 BYTE* const ostart = (BYTE*) dst;
2234 BYTE* const oend = ostart + dstSize;
2235 const void* const dtPtr = DTable;
2236 const HUF_DEltX4* const dt = ((const HUF_DEltX4*)dtPtr) +1;
2237 const U32 dtLog = DTable[0];
2238 size_t errorCode;
2239
2240 /* Init */
2241 BIT_DStream_t bitD1;
2242 BIT_DStream_t bitD2;
2243 BIT_DStream_t bitD3;
2244 BIT_DStream_t bitD4;
2245 const size_t length1 = MEM_readLE16(istart);
2246 const size_t length2 = MEM_readLE16(istart+2);
2247 const size_t length3 = MEM_readLE16(istart+4);
2248 size_t length4;
2249 const BYTE* const istart1 = istart + 6; /* jumpTable */
2250 const BYTE* const istart2 = istart1 + length1;
2251 const BYTE* const istart3 = istart2 + length2;
2252 const BYTE* const istart4 = istart3 + length3;
2253 const size_t segmentSize = (dstSize+3) / 4;
2254 BYTE* const opStart2 = ostart + segmentSize;
2255 BYTE* const opStart3 = opStart2 + segmentSize;
2256 BYTE* const opStart4 = opStart3 + segmentSize;
2257 BYTE* op1 = ostart;
2258 BYTE* op2 = opStart2;
2259 BYTE* op3 = opStart3;
2260 BYTE* op4 = opStart4;
2261 U32 endSignal;
2262
2263 length4 = cSrcSize - (length1 + length2 + length3 + 6);
2264 if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
2265 errorCode = BIT_initDStream(&bitD1, istart1, length1);
2266 if (HUF_isError(errorCode)) return errorCode;
2267 errorCode = BIT_initDStream(&bitD2, istart2, length2);
2268 if (HUF_isError(errorCode)) return errorCode;
2269 errorCode = BIT_initDStream(&bitD3, istart3, length3);
2270 if (HUF_isError(errorCode)) return errorCode;
2271 errorCode = BIT_initDStream(&bitD4, istart4, length4);
2272 if (HUF_isError(errorCode)) return errorCode;
2273
2274 /* 16-32 symbols per loop (4-8 symbols per stream) */
2275 endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
2276 for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
2277 {
2278 HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
2279 HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
2280 HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
2281 HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
2282 HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
2283 HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
2284 HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
2285 HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
2286 HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
2287 HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
2288 HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
2289 HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
2290 HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
2291 HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
2292 HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
2293 HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
2294
2295 endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
2296 }
2297
2298 /* check corruption */
2299 if (op1 > opStart2) return ERROR(corruption_detected);
2300 if (op2 > opStart3) return ERROR(corruption_detected);
2301 if (op3 > opStart4) return ERROR(corruption_detected);
2302 /* note : op4 supposed already verified within main loop */
2303
2304 /* finish bitStreams one by one */
2305 HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
2306 HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
2307 HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
2308 HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
2309
2310 /* check */
2311 endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
2312 if (!endSignal) return ERROR(corruption_detected);
2313
2314 /* decoded size */
2315 return dstSize;
2316 }
2317}
2318
2319
2320static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
2321{
2322 HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG);
2323 const BYTE* ip = (const BYTE*) cSrc;
2324
2325 size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize);
2326 if (HUF_isError(hSize)) return hSize;
2327 if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
2328 ip += hSize;
2329 cSrcSize -= hSize;
2330
2331 return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
2332}
2333
2334
2335/**********************************/
2336/* Generic decompression selector */
2337/**********************************/
2338
2339typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
2340static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
2341{
2342 /* single, double, quad */
2343 {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */
2344 {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */
2345 {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */
2346 {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */
2347 {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */
2348 {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */
2349 {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */
2350 {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */
2351 {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */
2352 {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */
2353 {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */
2354 {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */
2355 {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */
2356 {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */
2357 {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */
2358 {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */
2359};
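/* Note : algoTime[Q][n] is a small cost model. Q = cSrcSize*16/dstSize (a 4-bit quantization
   of the compression ratio, computed in HUF_decompress() below) selects the row ; column n
   is the decoder (0 = single-symbol X2, 1 = double-symbol X4, 2 = quad-symbol, unused here
   since decompress[2] is NULL). Estimated cost = tableTime + decode256Time * (dstSize>>8) ;
   the cheaper of X2 and X4 is selected. */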
2360
2361typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
2362
2363static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
2364{
2365 static const decompressionAlgo decompress[3] = { HUF_decompress4X2, HUF_decompress4X4, NULL };
2366 /* estimate decompression time */
2367 U32 Q;
2368 const U32 D256 = (U32)(dstSize >> 8);
2369 U32 Dtime[3];
2370 U32 algoNb = 0;
2371 int n;
2372
2373 /* validation checks */
2374 if (dstSize == 0) return ERROR(dstSize_tooSmall);
2375 if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
2376 if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
2377 if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
2378
2379 /* decoder timing evaluation */
2380 Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
2381 for (n=0; n<3; n++)
2382 Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);
2383
2384 Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */
2385
2386 if (Dtime[1] < Dtime[0]) algoNb = 1;
2387
2388 return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
2389
2390 //return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize); /* multi-streams single-symbol decoding */
2391 //return HUF_decompress4X4(dst, dstSize, cSrc, cSrcSize); /* multi-streams double-symbols decoding */
2392 //return HUF_decompress4X6(dst, dstSize, cSrc, cSrcSize); /* multi-streams quad-symbols decoding */
2393}
2394
2395
2396
2397#endif /* ZSTD_CCOMMON_H_MODULE */
2398
2399
2400/*
2401    zstd - decompression module for v0.4 legacy format
2402 Copyright (C) 2015-2016, Yann Collet.
2403
2404 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
2405
2406 Redistribution and use in source and binary forms, with or without
2407 modification, are permitted provided that the following conditions are
2408 met:
2409 * Redistributions of source code must retain the above copyright
2410 notice, this list of conditions and the following disclaimer.
2411 * Redistributions in binary form must reproduce the above
2412 copyright notice, this list of conditions and the following disclaimer
2413 in the documentation and/or other materials provided with the
2414 distribution.
2415 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
2416 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
2417 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
2418 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
2419 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
2420 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
2421 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
2422 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
2423 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2424 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
2425 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2426
2427 You can contact the author at :
2428 - zstd source repository : https://github.com/Cyan4973/zstd
2429    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
2430*/
2431
2432/* ***************************************************************
2433* Tuning parameters
2434*****************************************************************/
2435/*!
2436 * HEAPMODE :
2437 * Select how the default decompression function ZSTD_decompress() will allocate memory,
2438 * on the memory stack (0) or on the memory heap (1, requires malloc())
2439 */
2440#ifndef ZSTD_HEAPMODE
2441# define ZSTD_HEAPMODE 1
2442#endif
2443
2444
2445/* *******************************************************
2446* Includes
2447*********************************************************/
2448#include <stdlib.h> /* calloc */
2449#include <string.h> /* memcpy, memmove */
2450#include <stdio.h> /* debug : printf */
2451
2452
2453/* *******************************************************
2454* Compiler specifics
2455*********************************************************/
2456#ifdef _MSC_VER /* Visual Studio */
2457# include <intrin.h> /* For Visual 2005 */
2458# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
2459# pragma warning(disable : 4324) /* disable: C4324: padded structure */
2460#endif
2461
2462
2463/* *************************************
2464* Local types
2465***************************************/
2466typedef struct
2467{
2468 blockType_t blockType;
2469 U32 origSize;
2470} blockProperties_t;
2471
2472
2473/* *******************************************************
2474* Memory operations
2475**********************************************************/
2476static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
2477
2478
2479/* *************************************
2480* Error Management
2481***************************************/
2482
2483/*! ZSTD_isError
2484* tells if a return value is an error code */
2485static unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
2486
2487
2488/* *************************************************************
2489* Context management
2490***************************************************************/
2491typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
2492 ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock } ZSTD_dStage;
2493
2494struct ZSTDv04_Dctx_s
2495{
2496 U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
2497 U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
2498 U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
2499 const void* previousDstEnd;
2500 const void* base;
2501 const void* vBase;
2502 const void* dictEnd;
2503 size_t expected;
2504 size_t headerSize;
2505 ZSTD_parameters params;
2506 blockType_t bType;
2507 ZSTD_dStage stage;
2508 const BYTE* litPtr;
2509 size_t litSize;
2510 BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */];
2511 BYTE headerBuffer[ZSTD_frameHeaderSize_max];
2512}; /* typedef'd to ZSTD_DCtx within "zstd_static.h" */
2513
2514static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx)
2515{
2516 dctx->expected = ZSTD_frameHeaderSize_min;
2517 dctx->stage = ZSTDds_getFrameHeaderSize;
2518 dctx->previousDstEnd = NULL;
2519 dctx->base = NULL;
2520 dctx->vBase = NULL;
2521 dctx->dictEnd = NULL;
2522 return 0;
2523}
2524
2525static ZSTD_DCtx* ZSTD_createDCtx(void)
2526{
2527 ZSTD_DCtx* dctx = (ZSTD_DCtx*)malloc(sizeof(ZSTD_DCtx));
2528 if (dctx==NULL) return NULL;
2529 ZSTD_resetDCtx(dctx);
2530 return dctx;
2531}
2532
2533static size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
2534{
2535 free(dctx);
2536 return 0;
2537}
2538
2539
2540/* *************************************************************
2541* Decompression section
2542***************************************************************/
2543/** ZSTD_decodeFrameHeader_Part1
2544* decode the 1st part of the Frame Header, which tells the full Frame Header size.
2545* srcSize must be == ZSTD_frameHeaderSize_min
2546* @return : the full size of the Frame Header */
2547static size_t ZSTD_decodeFrameHeader_Part1(ZSTD_DCtx* zc, const void* src, size_t srcSize)
2548{
2549 U32 magicNumber;
2550 if (srcSize != ZSTD_frameHeaderSize_min) return ERROR(srcSize_wrong);
2551 magicNumber = MEM_readLE32(src);
2552 if (magicNumber != ZSTD_MAGICNUMBER) return ERROR(prefix_unknown);
2553 zc->headerSize = ZSTD_frameHeaderSize_min;
2554 return zc->headerSize;
2555}
2556
2557
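/* Note : the v0.4 frame header parsed below is minimal : a 4-byte little-endian magic number,
   followed by one byte whose low 4 bits encode (windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) and
   whose high 4 bits are reserved and must be zero. */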
2558static size_t ZSTD_getFrameParams(ZSTD_parameters* params, const void* src, size_t srcSize)
2559{
2560 U32 magicNumber;
2561 if (srcSize < ZSTD_frameHeaderSize_min) return ZSTD_frameHeaderSize_max;
2562 magicNumber = MEM_readLE32(src);
2563 if (magicNumber != ZSTD_MAGICNUMBER) return ERROR(prefix_unknown);
2564 memset(params, 0, sizeof(*params));
2565 params->windowLog = (((const BYTE*)src)[4] & 15) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
2566 if ((((const BYTE*)src)[4] >> 4) != 0) return ERROR(frameParameter_unsupported); /* reserved bits */
2567 return 0;
2568}
2569
2570/** ZSTD_decodeFrameHeader_Part2
2571* decode the full Frame Header
2572* srcSize must be the size provided by ZSTD_decodeFrameHeader_Part1
2573* @return : 0, or an error code, which can be tested using ZSTD_isError() */
2574static size_t ZSTD_decodeFrameHeader_Part2(ZSTD_DCtx* zc, const void* src, size_t srcSize)
2575{
2576 size_t result;
2577 if (srcSize != zc->headerSize) return ERROR(srcSize_wrong);
2578 result = ZSTD_getFrameParams(&(zc->params), src, srcSize);
2579 if ((MEM_32bits()) && (zc->params.windowLog > 25)) return ERROR(frameParameter_unsupported);
2580 return result;
2581}
2582
2583
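/* Note : each block starts with a 3-byte header, parsed below : the top 2 bits of byte 0 give
   the blockType (bt_compressed, bt_raw, bt_rle or bt_end), while the low 3 bits of byte 0
   combined with bytes 1-2 give the compressed block size (re-interpreted as the regenerated
   size for an RLE block). */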
2584static size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
2585{
2586 const BYTE* const in = (const BYTE* const)src;
2587 BYTE headerFlags;
2588 U32 cSize;
2589
2590 if (srcSize < 3) return ERROR(srcSize_wrong);
2591
2592 headerFlags = *in;
2593 cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
2594
2595 bpPtr->blockType = (blockType_t)(headerFlags >> 6);
2596 bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
2597
2598 if (bpPtr->blockType == bt_end) return 0;
2599 if (bpPtr->blockType == bt_rle) return 1;
2600 return cSize;
2601}
2602
2603static size_t ZSTD_copyRawBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
2604{
2605 if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
2606 memcpy(dst, src, srcSize);
2607 return srcSize;
2608}
2609
2610
2611/** ZSTD_decompressLiterals
2612 @return : nb of bytes read from src, or an error code*/
2613static size_t ZSTD_decompressLiterals(void* dst, size_t* maxDstSizePtr,
2614 const void* src, size_t srcSize)
2615{
2616 const BYTE* ip = (const BYTE*)src;
2617
2618 const size_t litSize = (MEM_readLE32(src) & 0x1FFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
2619 const size_t litCSize = (MEM_readLE32(ip+2) & 0xFFFFFF) >> 5; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
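    /* Note : both sizes above are bit-packed within the first 5 bytes of the literals section ;
       the low 2 bits of byte 0 carry the literals block type tested by ZSTD_decodeLiteralsBlock(),
       which is why the masks and shifts differ between the two reads. */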
2620
2621 if (litSize > *maxDstSizePtr) return ERROR(corruption_detected);
2622 if (litCSize + 5 > srcSize) return ERROR(corruption_detected);
2623
2624 if (HUF_isError(HUF_decompress(dst, litSize, ip+5, litCSize))) return ERROR(corruption_detected);
2625
2626 *maxDstSizePtr = litSize;
2627 return litCSize + 5;
2628}
2629
2630
2631/** ZSTD_decodeLiteralsBlock
2632 @return : nb of bytes read from src (< srcSize ) */
2633static size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
2634 const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
2635{
2636 const BYTE* const istart = (const BYTE*) src;
2637
2638 /* any compressed block with literals segment must be at least this size */
2639 if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
2640
2641 switch(*istart & 3)
2642 {
2643 /* compressed */
2644 case 0:
2645 {
2646 size_t litSize = BLOCKSIZE;
2647 const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize);
2648 dctx->litPtr = dctx->litBuffer;
2649 dctx->litSize = litSize;
2650 memset(dctx->litBuffer + dctx->litSize, 0, 8);
2651 return readSize; /* works if it's an error too */
2652 }
2653 case IS_RAW:
2654 {
2655 const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
2656 if (litSize > srcSize-11) /* risk of reading too far with wildcopy */
2657 {
2658 if (litSize > srcSize-3) return ERROR(corruption_detected);
2659 memcpy(dctx->litBuffer, istart, litSize);
2660 dctx->litPtr = dctx->litBuffer;
2661 dctx->litSize = litSize;
2662 memset(dctx->litBuffer + dctx->litSize, 0, 8);
2663 return litSize+3;
2664 }
2665 /* direct reference into compressed stream */
2666 dctx->litPtr = istart+3;
2667 dctx->litSize = litSize;
2668 return litSize+3; }
2669 case IS_RLE:
2670 {
2671 const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
2672 if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
2673 memset(dctx->litBuffer, istart[3], litSize + 8);
2674 dctx->litPtr = dctx->litBuffer;
2675 dctx->litSize = litSize;
2676 return 4;
2677 }
2678 default:
2679 return ERROR(corruption_detected); /* forbidden nominal case */
2680 }
2681}
2682
2683
2684static size_t ZSTD_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
2685 FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,
2686 const void* src, size_t srcSize)
2687{
2688 const BYTE* const istart = (const BYTE* const)src;
2689 const BYTE* ip = istart;
2690 const BYTE* const iend = istart + srcSize;
2691 U32 LLtype, Offtype, MLtype;
2692 U32 LLlog, Offlog, MLlog;
2693 size_t dumpsLength;
2694
2695 /* check */
2696 if (srcSize < 5) return ERROR(srcSize_wrong);
2697
2698 /* SeqHead */
2699 *nbSeq = MEM_readLE16(ip); ip+=2;
2700 LLtype = *ip >> 6;
2701 Offtype = (*ip >> 4) & 3;
2702 MLtype = (*ip >> 2) & 3;
2703 if (*ip & 2)
2704 {
2705 dumpsLength = ip[2];
2706 dumpsLength += ip[1] << 8;
2707 ip += 3;
2708 }
2709 else
2710 {
2711 dumpsLength = ip[1];
2712 dumpsLength += (ip[0] & 1) << 8;
2713 ip += 2;
2714 }
2715 *dumpsPtr = ip;
2716 ip += dumpsLength;
2717 *dumpsLengthPtr = dumpsLength;
2718
2719 /* check */
2720 if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
2721
2722 /* sequences */
2723 {
2724 S16 norm[MaxML+1]; /* assumption : MaxML >= MaxLL >= MaxOff */
2725 size_t headerSize;
2726
2727 /* Build DTables */
2728 switch(LLtype)
2729 {
2730 case bt_rle :
2731 LLlog = 0;
2732 FSE_buildDTable_rle(DTableLL, *ip++); break;
2733 case bt_raw :
2734 LLlog = LLbits;
2735 FSE_buildDTable_raw(DTableLL, LLbits); break;
2736 default :
2737 { U32 max = MaxLL;
2738 headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);
2739 if (FSE_isError(headerSize)) return ERROR(GENERIC);
2740 if (LLlog > LLFSELog) return ERROR(corruption_detected);
2741 ip += headerSize;
2742 FSE_buildDTable(DTableLL, norm, max, LLlog);
2743 } }
2744
2745 switch(Offtype)
2746 {
2747 case bt_rle :
2748 Offlog = 0;
2749 if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
2750 FSE_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */
2751 break;
2752 case bt_raw :
2753 Offlog = Offbits;
2754 FSE_buildDTable_raw(DTableOffb, Offbits); break;
2755 default :
2756 { U32 max = MaxOff;
2757 headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);
2758 if (FSE_isError(headerSize)) return ERROR(GENERIC);
2759 if (Offlog > OffFSELog) return ERROR(corruption_detected);
2760 ip += headerSize;
2761 FSE_buildDTable(DTableOffb, norm, max, Offlog);
2762 } }
2763
2764 switch(MLtype)
2765 {
2766 case bt_rle :
2767 MLlog = 0;
2768 if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
2769 FSE_buildDTable_rle(DTableML, *ip++); break;
2770 case bt_raw :
2771 MLlog = MLbits;
2772 FSE_buildDTable_raw(DTableML, MLbits); break;
2773 default :
2774 { U32 max = MaxML;
2775 headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);
2776 if (FSE_isError(headerSize)) return ERROR(GENERIC);
2777 if (MLlog > MLFSELog) return ERROR(corruption_detected);
2778 ip += headerSize;
2779 FSE_buildDTable(DTableML, norm, max, MLlog);
2780 } } }
2781
2782 return ip-istart;
2783}
2784
2785
2786typedef struct {
2787 size_t litLength;
2788 size_t offset;
2789 size_t matchLength;
2790} seq_t;
2791
2792typedef struct {
2793 BIT_DStream_t DStream;
2794 FSE_DState_t stateLL;
2795 FSE_DState_t stateOffb;
2796 FSE_DState_t stateML;
2797 size_t prevOffset;
2798 const BYTE* dumps;
2799 const BYTE* dumpsEnd;
2800} seqState_t;
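/* Note : seqState_t groups the shared bitstream with one FSE state per field (literal length,
   offset code, match length). `dumps` points into the side buffer located by
   ZSTD_decodeSeqHeaders() ; ZSTD_decodeSequence() reads extra bytes from it whenever a decoded
   symbol equals MaxLL or MaxML, i.e. when the real length did not fit the FSE alphabet. */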
2801
2802
2803static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
2804{
2805 size_t litLength;
2806 size_t prevOffset;
2807 size_t offset;
2808 size_t matchLength;
2809 const BYTE* dumps = seqState->dumps;
2810 const BYTE* const de = seqState->dumpsEnd;
2811
2812 /* Literal length */
2813 litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
2814 prevOffset = litLength ? seq->offset : seqState->prevOffset;
2815 if (litLength == MaxLL) {
2816        const U32 add = dumps<de ? *dumps++ : 0;
2817        if (add < 255) litLength += add;
2818        else if (dumps + 3 <= de) {
2819            litLength = MEM_readLE24(dumps);
2820            dumps += 3;
2821        }
2822        if (dumps >= de) { dumps = de-1; }   /* late correction, to avoid read overflow (data is now corrupted anyway) */
2823 }
2824
2825 /* Offset */
2826 { static const U32 offsetPrefix[MaxOff+1] = {
2827 1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,
2828 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,
2829 524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };
2830 U32 offsetCode, nbBits;
2831 offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream)); /* <= maxOff, by table construction */
2832 if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
2833 nbBits = offsetCode - 1;
2834 if (offsetCode==0) nbBits = 0; /* cmove */
2835 offset = offsetPrefix[offsetCode] + BIT_readBits(&(seqState->DStream), nbBits);
2836 if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
2837 if (offsetCode==0) offset = prevOffset; /* cmove */
2838 if (offsetCode | !litLength) seqState->prevOffset = seq->offset; /* cmove */
2839 }
2840
2841 /* MatchLength */
2842 matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
2843 if (matchLength == MaxML) {
2844        const U32 add = dumps<de ? *dumps++ : 0;
2845        if (add < 255) matchLength += add;
2846        else if (dumps + 3 <= de) {
2847            matchLength = MEM_readLE24(dumps);
2848            dumps += 3;
2849        }
2850        if (dumps >= de) { dumps = de-1; }   /* late correction, to avoid read overflow (data is now corrupted anyway) */
2851 }
2852 matchLength += MINMATCH;
2853
2854 /* save result */
2855 seq->litLength = litLength;
2856 seq->offset = offset;
2857 seq->matchLength = matchLength;
2858 seqState->dumps = dumps;
2859}
2860
2861
2862static size_t ZSTD_execSequence(BYTE* op,
2863 BYTE* const oend, seq_t sequence,
2864 const BYTE** litPtr, const BYTE* const litLimit,
2865 const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
2866{
2867 static const int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
2868    static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
2869    BYTE* const oLitEnd = op + sequence.litLength;
2870 const size_t sequenceLength = sequence.litLength + sequence.matchLength;
2871 BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
2872 BYTE* const oend_8 = oend-8;
2873 const BYTE* const litEnd = *litPtr + sequence.litLength;
2874 const BYTE* match = oLitEnd - sequence.offset;
2875
2876 /* check */
2877 if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */
2878 if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */
2879 if (litEnd > litLimit) return ERROR(corruption_detected); /* risk read beyond lit buffer */
2880
2881 /* copy Literals */
2882 ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
2883 op = oLitEnd;
2884 *litPtr = litEnd; /* update for next sequence */
2885
2886 /* copy Match */
2887 if (sequence.offset > (size_t)(oLitEnd - base))
2888 {
2889 /* offset beyond prefix */
2890 if (sequence.offset > (size_t)(oLitEnd - vBase))
2891 return ERROR(corruption_detected);
2892 match = dictEnd - (base-match);
2893 if (match + sequence.matchLength <= dictEnd)
2894 {
2895 memmove(oLitEnd, match, sequence.matchLength);
2896 return sequenceLength;
2897 }
2898 /* span extDict & currentPrefixSegment */
2899 {
2900 size_t length1 = dictEnd - match;
2901 memmove(oLitEnd, match, length1);
2902 op = oLitEnd + length1;
2903 sequence.matchLength -= length1;
2904 match = base;
2905 if (op > oend_8 || sequence.matchLength < MINMATCH) {
2906 while (op < oMatchEnd) *op++ = *match++;
2907 return sequenceLength;
2908 }
2909 }
2910 }
2911 /* Requirement: op <= oend_8 */
2912
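    /* Note : when offset < 8, the match overlaps the bytes currently being written, so a plain
       8-byte copy would read bytes not generated yet. The branch below copies the first 4 bytes
       one by one, then uses dec32table/dec64table to re-position `match` so that subsequent
       copies remain correct despite the overlap. */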
2913 /* match within prefix */
2914 if (sequence.offset < 8) {
2915 /* close range match, overlap */
2916 const int sub2 = dec64table[sequence.offset];
2917 op[0] = match[0];
2918 op[1] = match[1];
2919 op[2] = match[2];
2920 op[3] = match[3];
2921 match += dec32table[sequence.offset];
2922 ZSTD_copy4(op+4, match);
2923 match -= sub2;
2924 } else {
2925 ZSTD_copy8(op, match);
2926 }
2927 op += 8; match += 8;
2928
2929 if (oMatchEnd > oend-(16-MINMATCH))
2930 {
2931 if (op < oend_8)
2932 {
2933 ZSTD_wildcopy(op, match, oend_8 - op);
2934 match += oend_8 - op;
2935 op = oend_8;
2936 }
2937 while (op < oMatchEnd) *op++ = *match++;
2938 }
2939 else
2940 {
2941        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8, but must be signed */
2942    }
2943 return sequenceLength;
2944}
2945
2946
2947static size_t ZSTD_decompressSequences(
2948 ZSTD_DCtx* dctx,
2949 void* dst, size_t maxDstSize,
2950 const void* seqStart, size_t seqSize)
2951{
2952 const BYTE* ip = (const BYTE*)seqStart;
2953 const BYTE* const iend = ip + seqSize;
2954 BYTE* const ostart = (BYTE* const)dst;
2955 BYTE* op = ostart;
2956 BYTE* const oend = ostart + maxDstSize;
2957 size_t errorCode, dumpsLength;
2958 const BYTE* litPtr = dctx->litPtr;
2959 const BYTE* const litEnd = litPtr + dctx->litSize;
2960 int nbSeq;
2961 const BYTE* dumps;
2962 U32* DTableLL = dctx->LLTable;
2963 U32* DTableML = dctx->MLTable;
2964 U32* DTableOffb = dctx->OffTable;
2965 const BYTE* const base = (const BYTE*) (dctx->base);
2966 const BYTE* const vBase = (const BYTE*) (dctx->vBase);
2967 const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
2968
2969 /* Build Decoding Tables */
2970 errorCode = ZSTD_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
2971 DTableLL, DTableML, DTableOffb,
2972 ip, iend-ip);
2973 if (ZSTD_isError(errorCode)) return errorCode;
2974 ip += errorCode;
2975
2976 /* Regen sequences */
2977 {
2978 seq_t sequence;
2979 seqState_t seqState;
2980
2981 memset(&sequence, 0, sizeof(sequence));
2982 sequence.offset = 4;
2983 seqState.dumps = dumps;
2984 seqState.dumpsEnd = dumps + dumpsLength;
2985 seqState.prevOffset = 4;
2986 errorCode = BIT_initDStream(&(seqState.DStream), ip, iend-ip);
2987 if (ERR_isError(errorCode)) return ERROR(corruption_detected);
2988 FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
2989 FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
2990 FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
2991
2992 for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; )
2993 {
2994 size_t oneSeqSize;
2995 nbSeq--;
2996 ZSTD_decodeSequence(&sequence, &seqState);
2997 oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
2998 if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
2999 op += oneSeqSize;
3000 }
3001
3002 /* check if reached exact end */
3003 if ( !BIT_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected); /* DStream should be entirely and exactly consumed; otherwise data is corrupted */
3004
3005 /* last literal segment */
3006 {
3007 size_t lastLLSize = litEnd - litPtr;
3008 if (litPtr > litEnd) return ERROR(corruption_detected);
3009 if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
3010 if (op != litPtr) memcpy(op, litPtr, lastLLSize);
3011 op += lastLLSize;
3012 }
3013 }
3014
3015 return op-ostart;
3016}
3017
3018
3019static void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
3020{
3021 if (dst != dctx->previousDstEnd) /* not contiguous */
3022 {
3023 dctx->dictEnd = dctx->previousDstEnd;
3024 dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
3025 dctx->base = dst;
3026 dctx->previousDstEnd = dst;
3027 }
3028}
3029
3030
3031static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
3032 void* dst, size_t maxDstSize,
3033 const void* src, size_t srcSize)
3034{
3035 /* blockType == blockCompressed */
3036 const BYTE* ip = (const BYTE*)src;
3037
3038 /* Decode literals sub-block */
3039 size_t litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
3040 if (ZSTD_isError(litCSize)) return litCSize;
3041 ip += litCSize;
3042 srcSize -= litCSize;
3043
3044 return ZSTD_decompressSequences(dctx, dst, maxDstSize, ip, srcSize);
3045}
3046
3047
3048static size_t ZSTD_decompress_usingDict(ZSTD_DCtx* ctx,
3049 void* dst, size_t maxDstSize,
3050 const void* src, size_t srcSize,
3051 const void* dict, size_t dictSize)
3052{
3053 const BYTE* ip = (const BYTE*)src;
3054 const BYTE* iend = ip + srcSize;
3055 BYTE* const ostart = (BYTE* const)dst;
3056 BYTE* op = ostart;
3057 BYTE* const oend = ostart + maxDstSize;
3058 size_t remainingSize = srcSize;
3059 blockProperties_t blockProperties;
3060
3061 /* init */
3062 ZSTD_resetDCtx(ctx);
3063 if (dict)
3064 {
3065 ZSTD_decompress_insertDictionary(ctx, dict, dictSize);
3066 ctx->dictEnd = ctx->previousDstEnd;
3067 ctx->vBase = (const char*)dst - ((const char*)(ctx->previousDstEnd) - (const char*)(ctx->base));
3068 ctx->base = dst;
3069 }
3070 else
3071 {
3072 ctx->vBase = ctx->base = ctx->dictEnd = dst;
3073 }
3074
3075 /* Frame Header */
3076 {
3077 size_t frameHeaderSize;
3078 if (srcSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
3079 frameHeaderSize = ZSTD_decodeFrameHeader_Part1(ctx, src, ZSTD_frameHeaderSize_min);
3080 if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
3081 if (srcSize < frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
3082 ip += frameHeaderSize; remainingSize -= frameHeaderSize;
3083 frameHeaderSize = ZSTD_decodeFrameHeader_Part2(ctx, src, frameHeaderSize);
3084 if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
3085 }
3086
3087 /* Loop on each block */
3088 while (1)
3089 {
3090 size_t decodedSize=0;
3091 size_t cBlockSize = ZSTD_getcBlockSize(ip, iend-ip, &blockProperties);
3092 if (ZSTD_isError(cBlockSize)) return cBlockSize;
3093
3094 ip += ZSTD_blockHeaderSize;
3095 remainingSize -= ZSTD_blockHeaderSize;
3096 if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
3097
3098 switch(blockProperties.blockType)
3099 {
3100 case bt_compressed:
3101 decodedSize = ZSTD_decompressBlock_internal(ctx, op, oend-op, ip, cBlockSize);
3102 break;
3103 case bt_raw :
3104 decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize);
3105 break;
3106 case bt_rle :
3107 return ERROR(GENERIC); /* not yet supported */
3108 break;
3109 case bt_end :
3110 /* end of frame */
3111 if (remainingSize) return ERROR(srcSize_wrong);
3112 break;
3113 default:
3114 return ERROR(GENERIC); /* impossible */
3115 }
3116 if (cBlockSize == 0) break; /* bt_end */
3117
3118 if (ZSTD_isError(decodedSize)) return decodedSize;
3119 op += decodedSize;
3120 ip += cBlockSize;
3121 remainingSize -= cBlockSize;
3122 }
3123
3124 return op-ostart;
3125}
3126
3127/* ZSTD_errorFrameSizeInfoLegacy() :
3128 assumes `cSize` and `dBound` are _not_ NULL */
3129static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
3130{
3131 *cSize = ret;
3132 *dBound = ZSTD_CONTENTSIZE_ERROR;
3133}
3134
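/* ZSTDv04_findFrameSizeInfoLegacy() :
 * Walks the frame header and each block header, without decompressing block contents,
 * to report the compressed size of the frame into *cSize
 * and an upper bound of its decompressed size (nbBlocks * BLOCKSIZE) into *dBound.
 * On error, *cSize receives the error code and *dBound is set to ZSTD_CONTENTSIZE_ERROR. */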
3135void ZSTDv04_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
3136{
3137 const BYTE* ip = (const BYTE*)src;
3138 size_t remainingSize = srcSize;
3139    size_t nbBlocks = 0;
3140    blockProperties_t blockProperties;
3141
3142 /* Frame Header */
3143    if (srcSize < ZSTD_frameHeaderSize_min) {
3144 ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
3145 return;
3146 }
3147 if (MEM_readLE32(src) != ZSTD_MAGICNUMBER) {
3148 ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
3149 return;
3150 }
3151    ip += ZSTD_frameHeaderSize_min; remainingSize -= ZSTD_frameHeaderSize_min;
3152
3153 /* Loop on each block */
3154 while (1)
3155 {
3156 size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
3157        if (ZSTD_isError(cBlockSize)) {
3158 ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
3159 return;
3160 }
3161
3162 ip += ZSTD_blockHeaderSize;
3163 remainingSize -= ZSTD_blockHeaderSize;
3164        if (cBlockSize > remainingSize) {
3165 ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
3166 return;
3167 }
3168
3169 if (cBlockSize == 0) break; /* bt_end */
3170
3171 ip += cBlockSize;
3172 remainingSize -= cBlockSize;
3173        nbBlocks++;
3174    }
3175
3176    *cSize = ip - (const BYTE*)src;
3177 *dBound = nbBlocks * BLOCKSIZE;
3178}
3179
3180/* ******************************
3181* Streaming Decompression API
3182********************************/
3183static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx)
3184{
3185 return dctx->expected;
3186}
3187
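/* ZSTD_decompressContinue() :
 * Streaming entry point : the caller must provide exactly the nb of bytes announced
 * by ZSTD_nextSrcSizeToDecompress(). The context then advances through its stages
 * (ZSTDds_getFrameHeaderSize -> ZSTDds_decodeFrameHeader -> ZSTDds_decodeBlockHeader -> ZSTDds_decompressBlock)
 * and returns the nb of bytes written into dst for the current step. */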
3188static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
3189{
3190 /* Sanity check */
3191 if (srcSize != ctx->expected) return ERROR(srcSize_wrong);
3192 ZSTD_checkContinuity(ctx, dst);
3193
3194 /* Decompress : frame header; part 1 */
3195 switch (ctx->stage)
3196 {
3197 case ZSTDds_getFrameHeaderSize :
3198 /* get frame header size */
3199 if (srcSize != ZSTD_frameHeaderSize_min) return ERROR(srcSize_wrong); /* impossible */
3200 ctx->headerSize = ZSTD_decodeFrameHeader_Part1(ctx, src, ZSTD_frameHeaderSize_min);
3201 if (ZSTD_isError(ctx->headerSize)) return ctx->headerSize;
3202 memcpy(ctx->headerBuffer, src, ZSTD_frameHeaderSize_min);
3203 if (ctx->headerSize > ZSTD_frameHeaderSize_min) return ERROR(GENERIC); /* impossible */
3204 ctx->expected = 0; /* not necessary to copy more */
3205 /* fallthrough */
3206 case ZSTDds_decodeFrameHeader:
3207 /* get frame header */
3208 { size_t const result = ZSTD_decodeFrameHeader_Part2(ctx, ctx->headerBuffer, ctx->headerSize);
3209 if (ZSTD_isError(result)) return result;
3210 ctx->expected = ZSTD_blockHeaderSize;
3211 ctx->stage = ZSTDds_decodeBlockHeader;
3212 return 0;
3213 }
3214 case ZSTDds_decodeBlockHeader:
3215 /* Decode block header */
3216 { blockProperties_t bp;
3217 size_t const blockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
3218 if (ZSTD_isError(blockSize)) return blockSize;
3219 if (bp.blockType == bt_end)
3220 {
3221 ctx->expected = 0;
3222 ctx->stage = ZSTDds_getFrameHeaderSize;
3223 }
3224 else
3225 {
3226 ctx->expected = blockSize;
3227 ctx->bType = bp.blockType;
3228 ctx->stage = ZSTDds_decompressBlock;
3229 }
3230 return 0;
3231 }
3232 case ZSTDds_decompressBlock:
3233 {
3234 /* Decompress : block content */
3235 size_t rSize;
3236 switch(ctx->bType)
3237 {
3238 case bt_compressed:
3239 rSize = ZSTD_decompressBlock_internal(ctx, dst, maxDstSize, src, srcSize);
3240 break;
3241 case bt_raw :
3242 rSize = ZSTD_copyRawBlock(dst, maxDstSize, src, srcSize);
3243 break;
3244 case bt_rle :
3245 return ERROR(GENERIC); /* not yet handled */
3246 break;
3247 case bt_end : /* should never happen (filtered at phase 1) */
3248 rSize = 0;
3249 break;
3250 default:
3251 return ERROR(GENERIC);
3252 }
3253 ctx->stage = ZSTDds_decodeBlockHeader;
3254 ctx->expected = ZSTD_blockHeaderSize;
3255 ctx->previousDstEnd = (char*)dst + rSize;
3256 return rSize;
3257 }
3258 default:
3259 return ERROR(GENERIC); /* impossible */
3260 }
3261}
3262
3263
3264static void ZSTD_decompress_insertDictionary(ZSTD_DCtx* ctx, const void* dict, size_t dictSize)
3265{
3266 ctx->dictEnd = ctx->previousDstEnd;
3267 ctx->vBase = (const char*)dict - ((const char*)(ctx->previousDstEnd) - (const char*)(ctx->base));
3268 ctx->base = dict;
3269 ctx->previousDstEnd = (const char*)dict + dictSize;
3270}
3271
3272
3273
3274/*
3275 Buffered version of Zstd compression library
3276 Copyright (C) 2015, Yann Collet.
3277
3278 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
3279
3280 Redistribution and use in source and binary forms, with or without
3281 modification, are permitted provided that the following conditions are
3282 met:
3283 * Redistributions of source code must retain the above copyright
3284 notice, this list of conditions and the following disclaimer.
3285 * Redistributions in binary form must reproduce the above
3286 copyright notice, this list of conditions and the following disclaimer
3287 in the documentation and/or other materials provided with the
3288 distribution.
3289 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
3290 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
3291 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
3292 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
3293 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
3294 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
3295 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
3296 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
3297 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3298 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
3299 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3300
3301 You can contact the author at :
3302 - zstd source repository : https://github.com/Cyan4973/zstd
3303 - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
3304*/
3305
3306/* The objects defined in this file should be considered experimental.
3307 * They are not labelled stable, as their prototypes may change in the future.
3308 * You can use them for tests and feedback, provided you can accept the risk of future changes.
3309 */
3310
3311/* *************************************
3312* Includes
3313***************************************/
3314#include <stdlib.h>
3315
3316
3317/** ************************************************
3318* Streaming decompression
3319*
3320* A ZBUFF_DCtx object is required to track streaming operation.
3321* Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources.
3322* Use ZBUFF_decompressInit() to start a new decompression operation.
3323* ZBUFF_DCtx objects can be reused multiple times.
3324*
3325* Use ZBUFF_decompressContinue() repetitively to consume your input.
3326* *srcSizePtr and *maxDstSizePtr can be any size.
3327* The function will report how many bytes were read or written by modifying *srcSizePtr and *maxDstSizePtr.
3328* Note that it may not consume the entire input; in that case, the caller must call the function again with the remaining input.
3329* The content of dst will be overwritten (up to *maxDstSizePtr) at each function call, so save its content if it matters, or change dst.
3330* return : a hint of the preferred nb of bytes to use as input for the next function call (it's only a hint, to improve latency)
3331* or 0 when a frame is completely decoded
3332* or an error code, which can be tested using ZBUFF_isError().
3333*
3334* Hint : recommended buffer sizes (not compulsory)
3335* output : 128 KB is the internal block size; using it ensures it's always possible to write a full block as soon as it's decoded.
3336* input : just follow the indications from ZBUFF_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
3337* **************************************************/
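/* Illustrative usage sketch (not part of the original API surface) :
 * a minimal single-buffer decompression loop following the contract described above.
 * It assumes the public ZBUFFv04_* prototypes from "zstd_v04.h" are visible at this point;
 * the helper name ZBUFF_exampleDecompress is purely hypothetical.
 * Error handling is intentionally abbreviated. */
MEM_STATIC size_t ZBUFF_exampleDecompress(void* dst, size_t dstCapacity,
                                          const void* src, size_t srcSize)
{
    ZBUFFv04_DCtx* const zbd = ZBUFFv04_createDCtx();
    const char* ip = (const char*)src;
    const char* const iend = ip + srcSize;
    char* op = (char*)dst;
    char* const oend = op + dstCapacity;
    size_t result = 0;
    if (zbd == NULL) return ERROR(memory_allocation);
    ZBUFFv04_decompressInit(zbd);
    while (ip < iend)
    {
        size_t srcChunk = (size_t)(iend - ip);   /* offer all remaining input */
        size_t dstChunk = (size_t)(oend - op);   /* remaining room in output buffer */
        size_t const hint = ZBUFFv04_decompressContinue(zbd, op, &dstChunk, ip, &srcChunk);
        if (ZBUFFv04_isError(hint)) { result = hint; break; }
        ip += srcChunk;                          /* nb of bytes actually read */
        op += dstChunk;                          /* nb of bytes actually written */
        if (hint == 0) break;                    /* frame completely decoded */
        if ((srcChunk == 0) && (dstChunk == 0)) break;   /* no progress possible (dst likely too small) */
    }
    ZBUFFv04_freeDCtx(zbd);
    if (ZBUFFv04_isError(result)) return result;
    return (size_t)(op - (char*)dst);
}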
3338
3339typedef enum { ZBUFFds_init, ZBUFFds_readHeader, ZBUFFds_loadHeader, ZBUFFds_decodeHeader,
3340 ZBUFFds_read, ZBUFFds_load, ZBUFFds_flush } ZBUFF_dStage;
3341
3342/* *** Resource management *** */
3343
3344#define ZSTD_frameHeaderSize_max 5 /* too magical, should come from reference */
3345struct ZBUFFv04_DCtx_s {
3346 ZSTD_DCtx* zc;
3347 ZSTD_parameters params;
3348 char* inBuff;
3349 size_t inBuffSize;
3350 size_t inPos;
3351 char* outBuff;
3352 size_t outBuffSize;
3353 size_t outStart;
3354 size_t outEnd;
3355 size_t hPos;
3356 const char* dict;
3357 size_t dictSize;
3358 ZBUFF_dStage stage;
3359 unsigned char headerBuffer[ZSTD_frameHeaderSize_max];
3360}; /* typedef'd to ZBUFF_DCtx within "zstd_buffered.h" */
3361
3362typedef ZBUFFv04_DCtx ZBUFF_DCtx;
3363
3364
3365static ZBUFF_DCtx* ZBUFF_createDCtx(void)
3366{
3367 ZBUFF_DCtx* zbc = (ZBUFF_DCtx*)malloc(sizeof(ZBUFF_DCtx));
3368 if (zbc==NULL) return NULL;
3369 memset(zbc, 0, sizeof(*zbc));
3370 zbc->zc = ZSTD_createDCtx();
3371 zbc->stage = ZBUFFds_init;
3372 return zbc;
3373}
3374
3375static size_t ZBUFF_freeDCtx(ZBUFF_DCtx* zbc)
3376{
3377 if (zbc==NULL) return 0; /* support free on null */
3378 ZSTD_freeDCtx(zbc->zc);
3379 free(zbc->inBuff);
3380 free(zbc->outBuff);
3381 free(zbc);
3382 return 0;
3383}
3384
3385
3386/* *** Initialization *** */
3387
3388static size_t ZBUFF_decompressInit(ZBUFF_DCtx* zbc)
3389{
3390 zbc->stage = ZBUFFds_readHeader;
3391 zbc->hPos = zbc->inPos = zbc->outStart = zbc->outEnd = zbc->dictSize = 0;
3392 return ZSTD_resetDCtx(zbc->zc);
3393}
3394
3395
3396static size_t ZBUFF_decompressWithDictionary(ZBUFF_DCtx* zbc, const void* src, size_t srcSize)
3397{
3398 zbc->dict = (const char*)src;
3399 zbc->dictSize = srcSize;
3400 return 0;
3401}
3402
3403static size_t ZBUFF_limitCopy(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
3404{
3405 size_t length = MIN(maxDstSize, srcSize);
3406 memcpy(dst, src, length);
3407 return length;
3408}
3409
3410/* *** Decompression *** */
3411
3412static size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbc, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr)
3413{
3414 const char* const istart = (const char*)src;
3415 const char* ip = istart;
3416 const char* const iend = istart + *srcSizePtr;
3417 char* const ostart = (char*)dst;
3418 char* op = ostart;
3419 char* const oend = ostart + *maxDstSizePtr;
3420 U32 notDone = 1;
3421
3422 DEBUGLOG(5, "ZBUFF_decompressContinue");
3423 while (notDone)
3424 {
3425 switch(zbc->stage)
3426 {
3427
3428 case ZBUFFds_init :
3429 DEBUGLOG(5, "ZBUFF_decompressContinue: stage==ZBUFFds_init => ERROR(init_missing)");
3430 return ERROR(init_missing);
3431
3432 case ZBUFFds_readHeader :
3433 /* read header from src */
3434 { size_t const headerSize = ZSTD_getFrameParams(&(zbc->params), src, *srcSizePtr);
3435 if (ZSTD_isError(headerSize)) return headerSize;
3436 if (headerSize) {
3437 /* not enough input to decode header : tell how many bytes would be necessary */
3438 memcpy(zbc->headerBuffer+zbc->hPos, src, *srcSizePtr);
3439 zbc->hPos += *srcSizePtr;
3440 *maxDstSizePtr = 0;
3441 zbc->stage = ZBUFFds_loadHeader;
3442 return headerSize - zbc->hPos;
3443 }
3444 zbc->stage = ZBUFFds_decodeHeader;
3445 break;
3446 }
3447
3448 case ZBUFFds_loadHeader:
3449 /* complete header from src */
3450 { size_t headerSize = ZBUFF_limitCopy(
3451 zbc->headerBuffer + zbc->hPos, ZSTD_frameHeaderSize_max - zbc->hPos,
3452 src, *srcSizePtr);
3453 zbc->hPos += headerSize;
3454 ip += headerSize;
3455 headerSize = ZSTD_getFrameParams(&(zbc->params), zbc->headerBuffer, zbc->hPos);
3456 if (ZSTD_isError(headerSize)) return headerSize;
3457 if (headerSize) {
3458 /* not enough input to decode header : tell how many bytes would be necessary */
3459 *maxDstSizePtr = 0;
3460 return headerSize - zbc->hPos;
3461 } }
3462 /* intentional fallthrough */
3463
3464 case ZBUFFds_decodeHeader:
3465 /* apply header to create / resize buffers */
3466 { size_t const neededOutSize = (size_t)1 << zbc->params.windowLog;
3467 size_t const neededInSize = BLOCKSIZE; /* a block is never > BLOCKSIZE */
3468 if (zbc->inBuffSize < neededInSize) {
3469 free(zbc->inBuff);
3470 zbc->inBuffSize = neededInSize;
3471 zbc->inBuff = (char*)malloc(neededInSize);
3472 if (zbc->inBuff == NULL) return ERROR(memory_allocation);
3473 }
3474 if (zbc->outBuffSize < neededOutSize) {
3475 free(zbc->outBuff);
3476 zbc->outBuffSize = neededOutSize;
3477 zbc->outBuff = (char*)malloc(neededOutSize);
3478 if (zbc->outBuff == NULL) return ERROR(memory_allocation);
3479 } }
3480 if (zbc->dictSize)
3481 ZSTD_decompress_insertDictionary(zbc->zc, zbc->dict, zbc->dictSize);
3482 if (zbc->hPos) {
3483 /* some data already loaded into headerBuffer : transfer into inBuff */
3484 memcpy(zbc->inBuff, zbc->headerBuffer, zbc->hPos);
3485 zbc->inPos = zbc->hPos;
3486 zbc->hPos = 0;
3487 zbc->stage = ZBUFFds_load;
3488 break;
3489 }
3490 zbc->stage = ZBUFFds_read;
3491 /* fall-through */
3492 case ZBUFFds_read:
3493 {
3494 size_t neededInSize = ZSTD_nextSrcSizeToDecompress(zbc->zc);
3495 if (neededInSize==0) /* end of frame */
3496 {
3497 zbc->stage = ZBUFFds_init;
3498 notDone = 0;
3499 break;
3500 }
3501 if ((size_t)(iend-ip) >= neededInSize)
3502 {
3503 /* directly decode from src */
3504 size_t decodedSize = ZSTD_decompressContinue(zbc->zc,
3505 zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart,
3506 ip, neededInSize);
3507 if (ZSTD_isError(decodedSize)) return decodedSize;
3508 ip += neededInSize;
3509 if (!decodedSize) break; /* this was just a header */
3510 zbc->outEnd = zbc->outStart + decodedSize;
3511 zbc->stage = ZBUFFds_flush;
3512 break;
3513 }
3514 if (ip==iend) { notDone = 0; break; } /* no more input */
3515 zbc->stage = ZBUFFds_load;
3516 }
3517 /* fall-through */
3518 case ZBUFFds_load:
3519 {
3520 size_t neededInSize = ZSTD_nextSrcSizeToDecompress(zbc->zc);
3521 size_t toLoad = neededInSize - zbc->inPos; /* should always be <= remaining space within inBuff */
3522 size_t loadedSize;
3523 if (toLoad > zbc->inBuffSize - zbc->inPos) return ERROR(corruption_detected); /* should never happen */
3524 loadedSize = ZBUFF_limitCopy(zbc->inBuff + zbc->inPos, toLoad, ip, iend-ip);
3525 ip += loadedSize;
3526 zbc->inPos += loadedSize;
3527 if (loadedSize < toLoad) { notDone = 0; break; } /* not enough input, wait for more */
3528 {
3529 size_t decodedSize = ZSTD_decompressContinue(zbc->zc,
3530 zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart,
3531 zbc->inBuff, neededInSize);
3532 if (ZSTD_isError(decodedSize)) return decodedSize;
3533 zbc->inPos = 0; /* input is consumed */
3534 if (!decodedSize) { zbc->stage = ZBUFFds_read; break; } /* this was just a header */
3535 zbc->outEnd = zbc->outStart + decodedSize;
3536 zbc->stage = ZBUFFds_flush;
3537 /* ZBUFFds_flush follows */
3538 }
3539 }
3540 /* fall-through */
3541 case ZBUFFds_flush:
3542 {
3543 size_t toFlushSize = zbc->outEnd - zbc->outStart;
3544 size_t flushedSize = ZBUFF_limitCopy(op, oend-op, zbc->outBuff + zbc->outStart, toFlushSize);
3545 op += flushedSize;
3546 zbc->outStart += flushedSize;
3547 if (flushedSize == toFlushSize)
3548 {
3549 zbc->stage = ZBUFFds_read;
3550 if (zbc->outStart + BLOCKSIZE > zbc->outBuffSize)
3551 zbc->outStart = zbc->outEnd = 0;
3552 break;
3553 }
3554 /* cannot flush everything */
3555 notDone = 0;
3556 break;
3557 }
3558 default: return ERROR(GENERIC); /* impossible */
3559 }
3560 }
3561
3562 *srcSizePtr = ip-istart;
3563 *maxDstSizePtr = op-ostart;
3564
3565 {
3566 size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zbc->zc);
3567 if (nextSrcSizeHint > 3) nextSrcSizeHint+= 3; /* get the next block header while at it */
3568 nextSrcSizeHint -= zbc->inPos; /* already loaded*/
3569 return nextSrcSizeHint;
3570 }
3571}
3572
3573
3574/* *************************************
3575* Tool functions
3576***************************************/
3577unsigned ZBUFFv04_isError(size_t errorCode) { return ERR_isError(errorCode); }
3578const char* ZBUFFv04_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
3579
3580size_t ZBUFFv04_recommendedDInSize() { return BLOCKSIZE + 3; }
3581size_t ZBUFFv04_recommendedDOutSize() { return BLOCKSIZE; }
3582
3583
3584
3585/*- ========================================================================= -*/
3586
3587/* final wrapping stage */
3588
3589size_t ZSTDv04_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
3590{
3591 return ZSTD_decompress_usingDict(dctx, dst, maxDstSize, src, srcSize, NULL, 0);
3592}
3593
3594size_t ZSTDv04_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
3595{
3596#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE==1)
3597 size_t regenSize;
3598 ZSTD_DCtx* dctx = ZSTD_createDCtx();
3599 if (dctx==NULL) return ERROR(memory_allocation);
3600 regenSize = ZSTDv04_decompressDCtx(dctx, dst, maxDstSize, src, srcSize);
3601 ZSTD_freeDCtx(dctx);
3602 return regenSize;
3603#else
3604 ZSTD_DCtx dctx;
3605 return ZSTDv04_decompressDCtx(&dctx, dst, maxDstSize, src, srcSize);
3606#endif
3607}
3608
3609size_t ZSTDv04_resetDCtx(ZSTDv04_Dctx* dctx) { return ZSTD_resetDCtx(dctx); }
3610
3611size_t ZSTDv04_nextSrcSizeToDecompress(ZSTDv04_Dctx* dctx)
3612{
3613 return ZSTD_nextSrcSizeToDecompress(dctx);
3614}
3615
3616size_t ZSTDv04_decompressContinue(ZSTDv04_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
3617{
3618 return ZSTD_decompressContinue(dctx, dst, maxDstSize, src, srcSize);
3619}
3620
3621
3622
3623ZBUFFv04_DCtx* ZBUFFv04_createDCtx(void) { return ZBUFF_createDCtx(); }
3624size_t ZBUFFv04_freeDCtx(ZBUFFv04_DCtx* dctx) { return ZBUFF_freeDCtx(dctx); }
3625
3626size_t ZBUFFv04_decompressInit(ZBUFFv04_DCtx* dctx) { return ZBUFF_decompressInit(dctx); }
3627size_t ZBUFFv04_decompressWithDictionary(ZBUFFv04_DCtx* dctx, const void* src, size_t srcSize)
3628{ return ZBUFF_decompressWithDictionary(dctx, src, srcSize); }
3629
3630size_t ZBUFFv04_decompressContinue(ZBUFFv04_DCtx* dctx, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr)
3631{
3632 DEBUGLOG(5, "ZBUFFv04_decompressContinue");
3633 return ZBUFF_decompressContinue(dctx, dst, maxDstSizePtr, src, srcSizePtr);
3634}
3635
3636ZSTD_DCtx* ZSTDv04_createDCtx(void) { return ZSTD_createDCtx(); }
3637size_t ZSTDv04_freeDCtx(ZSTD_DCtx* dctx) { return ZSTD_freeDCtx(dctx); }