/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* This header contains definitions
 * that shall **only** be used by modules within lib/compress.
 */

#ifndef ZSTD_COMPRESS_H
#define ZSTD_COMPRESS_H

/*-*************************************
*  Dependencies
***************************************/
#include "zstd_internal.h"
#ifdef ZSTD_MULTITHREAD
#  include "zstdmt_compress.h"
#endif

#if defined (__cplusplus)
extern "C" {
#endif


/*-*************************************
*  Constants
***************************************/
#define kSearchStrength      8
#define HASH_READ_SIZE       8
#define ZSTD_DUBT_UNSORTED_MARK 1   /* For btlazy2 strategy, index 1 now means "unsorted".
                                       It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
                                       It's not a big deal though : the candidate will just be sorted again.
                                       Additionally, candidate position 1 will be lost.
                                       But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
                                       The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy.
                                       This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */


/*-*************************************
*  Context memory management
***************************************/
typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;

typedef struct ZSTD_prefixDict_s {
    const void* dict;
    size_t dictSize;
    ZSTD_dictContentType_e dictContentType;
} ZSTD_prefixDict;

typedef struct {
    void* dictBuffer;
    void const* dict;
    size_t dictSize;
    ZSTD_dictContentType_e dictContentType;
    ZSTD_CDict* cdict;
} ZSTD_localDict;

typedef struct {
    U32 CTable[HUF_CTABLE_SIZE_U32(255)];
    HUF_repeat repeatMode;
} ZSTD_hufCTables_t;

typedef struct {
    FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
    FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
    FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
    FSE_repeat offcode_repeatMode;
    FSE_repeat matchlength_repeatMode;
    FSE_repeat litlength_repeatMode;
} ZSTD_fseCTables_t;

typedef struct {
    ZSTD_hufCTables_t huf;
    ZSTD_fseCTables_t fse;
} ZSTD_entropyCTables_t;

typedef struct {
    U32 off;
    U32 len;
} ZSTD_match_t;

typedef struct {
    int price;
    U32 off;
    U32 mlen;
    U32 litlen;
    U32 rep[ZSTD_REP_NUM];
} ZSTD_optimal_t;

typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e;

typedef struct {
    /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
    unsigned* litFreq;                /* table of literals statistics, of size 256 */
    unsigned* litLengthFreq;          /* table of litLength statistics, of size (MaxLL+1) */
    unsigned* matchLengthFreq;        /* table of matchLength statistics, of size (MaxML+1) */
    unsigned* offCodeFreq;            /* table of offCode statistics, of size (MaxOff+1) */
    ZSTD_match_t* matchTable;         /* list of found matches, of size ZSTD_OPT_NUM+1 */
    ZSTD_optimal_t* priceTable;       /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */

    U32  litSum;                      /* nb of literals */
    U32  litLengthSum;                /* nb of litLength codes */
    U32  matchLengthSum;              /* nb of matchLength codes */
    U32  offCodeSum;                  /* nb of offset codes */
    U32  litSumBasePrice;             /* to compare to log2(litfreq) */
    U32  litLengthSumBasePrice;       /* to compare to log2(llfreq)  */
    U32  matchLengthSumBasePrice;     /* to compare to log2(mlfreq)  */
    U32  offCodeSumBasePrice;         /* to compare to log2(offreq)  */
    ZSTD_OptPrice_e priceType;        /* prices can be determined dynamically, or follow a pre-defined cost structure */
    const ZSTD_entropyCTables_t* symbolCosts;  /* pre-calculated dictionary statistics */
    ZSTD_literalCompressionMode_e literalCompressionMode;
} optState_t;

typedef struct {
    ZSTD_entropyCTables_t entropy;
    U32 rep[ZSTD_REP_NUM];
} ZSTD_compressedBlockState_t;

typedef struct {
    BYTE const* nextSrc;    /* next block here to continue on current prefix */
    BYTE const* base;       /* All regular indexes relative to this position */
    BYTE const* dictBase;   /* extDict indexes relative to this position */
    U32 dictLimit;          /* below that point, need extDict */
    U32 lowLimit;           /* below that point, no more data */
} ZSTD_window_t;

typedef struct ZSTD_matchState_t ZSTD_matchState_t;
struct ZSTD_matchState_t {
    ZSTD_window_t window;   /* State for window round buffer management */
    U32 loadedDictEnd;      /* index of end of dictionary */
    U32 nextToUpdate;       /* index from which to continue table update */
    U32 nextToUpdate3;      /* index from which to continue table update */
    U32 hashLog3;           /* dispatch table : larger == faster, more memory */
    U32* hashTable;
    U32* hashTable3;
    U32* chainTable;
    optState_t opt;         /* optimal parser state */
    const ZSTD_matchState_t* dictMatchState;
    ZSTD_compressionParameters cParams;
};

typedef struct {
    ZSTD_compressedBlockState_t* prevCBlock;
    ZSTD_compressedBlockState_t* nextCBlock;
    ZSTD_matchState_t matchState;
} ZSTD_blockState_t;

typedef struct {
    U32 offset;
    U32 checksum;
} ldmEntry_t;

typedef struct {
    ZSTD_window_t window;   /* State for the window round buffer management */
    ldmEntry_t* hashTable;
    BYTE* bucketOffsets;    /* Next position in bucket to insert entry */
    U64 hashPower;          /* Used to compute the rolling hash.
                             * Depends on ldmParams.minMatchLength */
} ldmState_t;

typedef struct {
    U32 enableLdm;          /* 1 if long distance matching is enabled */
    U32 hashLog;            /* Log size of hashTable */
    U32 bucketSizeLog;      /* Log bucket size for collision resolution, at most 8 */
    U32 minMatchLength;     /* Minimum match length */
    U32 hashRateLog;        /* Log number of entries to skip */
    U32 windowLog;          /* Window log for the LDM */
} ldmParams_t;

typedef struct {
    U32 offset;
    U32 litLength;
    U32 matchLength;
} rawSeq;

typedef struct {
    rawSeq* seq;            /* The start of the sequences */
    size_t pos;             /* The position where reading stopped. <= size. */
    size_t size;            /* The number of sequences. <= capacity. */
    size_t capacity;        /* The capacity starting from `seq` pointer */
} rawSeqStore_t;

struct ZSTD_CCtx_params_s {
    ZSTD_format_e format;
    ZSTD_compressionParameters cParams;
    ZSTD_frameParameters fParams;

    int compressionLevel;
    int forceWindow;        /* force back-references to respect limit of
                             * 1<<wLog, even for dictionary */

    ZSTD_dictAttachPref_e attachDictPref;
    ZSTD_literalCompressionMode_e literalCompressionMode;

    /* Multithreading: used to pass parameters to mtctx */
    int nbWorkers;
    size_t jobSize;
    int overlapLog;
    int rsyncable;

    /* Long distance matching parameters */
    ldmParams_t ldmParams;

    /* Internal use, for createCCtxParams() and freeCCtxParams() only */
    ZSTD_customMem customMem;
};  /* typedef'd to ZSTD_CCtx_params within "zstd.h" */

struct ZSTD_CCtx_s {
    ZSTD_compressionStage_e stage;
    int cParamsChanged;     /* == 1 if cParams (except wlog) or compression level have changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available), then is reset to 0. */
    int bmi2;               /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
    ZSTD_CCtx_params requestedParams;
    ZSTD_CCtx_params appliedParams;
    U32 dictID;

    int workSpaceOversizedDuration;
    void* workSpace;
    size_t workSpaceSize;
    size_t blockSize;
    unsigned long long pledgedSrcSizePlusOne;  /* this way, 0 (default) == unknown */
    unsigned long long consumedSrcSize;
    unsigned long long producedCSize;
    XXH64_state_t xxhState;
    ZSTD_customMem customMem;
    size_t staticSize;

    seqStore_t seqStore;            /* sequences storage ptrs */
    ldmState_t ldmState;            /* long distance matching state */
    rawSeq* ldmSequences;           /* Storage for the ldm output sequences */
    size_t maxNbLdmSequences;
    rawSeqStore_t externSeqStore;   /* Mutable reference to external sequences */
    ZSTD_blockState_t blockState;
    U32* entropyWorkspace;          /* entropy workspace of HUF_WORKSPACE_SIZE bytes */

    /* streaming */
    char*  inBuff;
    size_t inBuffSize;
    size_t inToCompress;
    size_t inBuffPos;
    size_t inBuffTarget;
    char*  outBuff;
    size_t outBuffSize;
    size_t outBuffContentSize;
    size_t outBuffFlushedSize;
    ZSTD_cStreamStage streamStage;
    U32 frameEnded;

    /* Dictionary */
    ZSTD_localDict localDict;
    const ZSTD_CDict* cdict;
    ZSTD_prefixDict prefixDict;     /* single-usage dictionary */

    /* Multi-threading */
#ifdef ZSTD_MULTITHREAD
    ZSTDMT_CCtx* mtctx;
#endif
};

typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;

typedef enum { ZSTD_noDict = 0, ZSTD_extDict = 1, ZSTD_dictMatchState = 2 } ZSTD_dictMode_e;


typedef size_t (*ZSTD_blockCompressor) (
        ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode);


MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
{
    static const BYTE LL_Code[64] = {  0,  1,  2,  3,  4,  5,  6,  7,
                                       8,  9, 10, 11, 12, 13, 14, 15,
                                      16, 16, 17, 17, 18, 18, 19, 19,
                                      20, 20, 20, 20, 21, 21, 21, 21,
                                      22, 22, 22, 22, 22, 22, 22, 22,
                                      23, 23, 23, 23, 23, 23, 23, 23,
                                      24, 24, 24, 24, 24, 24, 24, 24,
                                      24, 24, 24, 24, 24, 24, 24, 24 };
    static const U32 LL_deltaCode = 19;
    return (litLength > 63) ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
}
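
/* Worked example (illustrative, derived from the table above):
 *   ZSTD_LLcode(12)  == 12   (lengths 0-15 map to themselves)
 *   ZSTD_LLcode(32)  == 22   (table lookup in the shared-code region)
 *   ZSTD_LLcode(100) == ZSTD_highbit32(100) + 19 == 6 + 19 == 25
 *                            (computed branch for lengths > 63)
 */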

/* ZSTD_MLcode() :
 * note : mlBase = matchLength - MINMATCH;
 * because that's the format in which it's stored in seqStore->sequences */
MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
{
    static const BYTE ML_Code[128] = { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
                                      16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                                      32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
                                      38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
                                      40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
                                      41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
    static const U32 ML_deltaCode = 36;
    return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
}
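
/* Worked example (illustrative): with MINMATCH == 3, a match of length 10 is
 * stored as mlBase = 10 - 3 = 7, and ZSTD_MLcode(7) == 7 (direct lookup).
 * For a long match, e.g. mlBase = 1000:
 *   ZSTD_MLcode(1000) == ZSTD_highbit32(1000) + 36 == 9 + 36 == 45
 */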

/*! ZSTD_storeSeq() :
 *  Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
 *  `offsetCode` : distance to match + 3 (values 1-3 are repCodes).
 *  `mlBase` : matchLength - MINMATCH
 */
MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t mlBase)
{
#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6)
    static const BYTE* g_start = NULL;
    if (g_start==NULL) g_start = (const BYTE*)literals;  /* note : index only works for compression within a single segment */
    {   U32 const pos = (U32)((const BYTE*)literals - g_start);
        DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
                 pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offsetCode);
    }
#endif
    assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
    /* copy Literals */
    assert(seqStorePtr->maxNbLit <= 128 KB);
    assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
    ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
    seqStorePtr->lit += litLength;

    /* literal Length */
    if (litLength>0xFFFF) {
        assert(seqStorePtr->longLengthID == 0);  /* there can only be a single long length */
        seqStorePtr->longLengthID = 1;
        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    }
    seqStorePtr->sequences[0].litLength = (U16)litLength;

    /* match offset */
    seqStorePtr->sequences[0].offset = offsetCode + 1;

    /* match Length */
    if (mlBase>0xFFFF) {
        assert(seqStorePtr->longLengthID == 0);  /* there can only be a single long length */
        seqStorePtr->longLengthID = 2;
        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    }
    seqStorePtr->sequences[0].matchLength = (U16)mlBase;

    seqStorePtr->sequences++;
}
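
/* Usage sketch (illustrative only, not a prescribed call site): a block
 * compressor that finds a match of length `mLength` at distance `matchDist`,
 * preceded by `litLen` literals starting at `anchor`, would typically record
 * it as
 *
 *     ZSTD_storeSeq(seqStore, litLen, anchor, matchDist + ZSTD_REP_MOVE, mLength - MINMATCH);
 *
 * while a repeat-offset match re-using the most recent offset passes a small
 * repCode (e.g. 0) instead of `matchDist + ZSTD_REP_MOVE`.
 */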


/*-*************************************
*  Match length counter
***************************************/
static unsigned ZSTD_NbCommonBytes (size_t val)
{
    if (MEM_isLittleEndian()) {
        if (MEM_64bits()) {
#     if defined(_MSC_VER) && defined(_WIN64)
            unsigned long r = 0;
            _BitScanForward64( &r, (U64)val );
            return (unsigned)(r>>3);
#     elif defined(__GNUC__) && (__GNUC__ >= 4)
            return (__builtin_ctzll((U64)val) >> 3);
#     else
            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
                                                     0, 3, 1, 3, 1, 4, 2, 7,
                                                     0, 2, 3, 6, 1, 5, 3, 5,
                                                     1, 3, 4, 4, 2, 5, 6, 7,
                                                     7, 0, 1, 2, 3, 3, 4, 6,
                                                     2, 6, 5, 5, 3, 4, 5, 6,
                                                     7, 1, 2, 4, 6, 4, 4, 5,
                                                     7, 2, 6, 5, 7, 6, 7, 7 };
            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
#     endif
        } else { /* 32 bits */
#     if defined(_MSC_VER)
            unsigned long r=0;
            _BitScanForward( &r, (U32)val );
            return (unsigned)(r>>3);
#     elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_ctz((U32)val) >> 3);
#     else
            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
                                                     3, 2, 2, 1, 3, 2, 0, 1,
                                                     3, 3, 1, 2, 2, 2, 2, 0,
                                                     3, 1, 2, 0, 1, 0, 1, 1 };
            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
#     endif
        }
    } else {  /* Big Endian CPU */
        if (MEM_64bits()) {
#     if defined(_MSC_VER) && defined(_WIN64)
            unsigned long r = 0;
            _BitScanReverse64( &r, val );
            return (unsigned)(r>>3);
#     elif defined(__GNUC__) && (__GNUC__ >= 4)
            return (__builtin_clzll(val) >> 3);
#     else
            unsigned r;
            const unsigned n32 = sizeof(size_t)*4;   /* calculate this way due to compiler complaining in 32-bits mode */
            if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
            r += (!val);
            return r;
#     endif
        } else { /* 32 bits */
#     if defined(_MSC_VER)
            unsigned long r = 0;
            _BitScanReverse( &r, (unsigned long)val );
            return (unsigned)(r>>3);
#     elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_clz((U32)val) >> 3);
#     else
            unsigned r;
            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
            r += (!val);
            return r;
#     endif
    }   }
}
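
/* Worked example (illustrative): on a little-endian 64-bit machine, comparing
 * the 8-byte words "abcdXXXX" and "abcdYYYY" gives
 *     diff = MEM_readST("abcdXXXX") ^ MEM_readST("abcdYYYY")
 * whose low 4 bytes are zero and whose lowest set bit lies in byte 4, so
 * ZSTD_NbCommonBytes(diff) == 4 : the two words share exactly 4 leading bytes.
 */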


MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
{
    const BYTE* const pStart = pIn;
    const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);

    if (pIn < pInLoopLimit) {
        { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
          if (diff) return ZSTD_NbCommonBytes(diff); }
        pIn+=sizeof(size_t); pMatch+=sizeof(size_t);
        while (pIn < pInLoopLimit) {
            size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
            if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
            pIn += ZSTD_NbCommonBytes(diff);
            return (size_t)(pIn - pStart);
    }   }
    if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
    if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
    return (size_t)(pIn - pStart);
}
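
/* Usage sketch (illustrative): given a candidate match position found through
 * a hash table, the match length is simply
 *
 *     size_t const mLength = ZSTD_count(ip, match, iend);
 *
 * where `ip` is the current position, `match` the candidate, and `iend` the
 * end of input (names illustrative). The word-at-a-time XOR loop makes this
 * roughly O(matchLength / sizeof(size_t)), with a byte-granular tail.
 */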

/** ZSTD_count_2segments() :
 *  can count match length with `ip` & `match` in 2 different segments.
 *  convention : on reaching mEnd, match count continues, starting again from iStart
 */
MEM_STATIC size_t
ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
                     const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
{
    const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
    size_t const matchLength = ZSTD_count(ip, match, vEnd);
    if (match + matchLength != mEnd) return matchLength;
    DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength);
    DEBUGLOG(7, "distance from match beginning to end dictionary = %zi", mEnd - match);
    DEBUGLOG(7, "distance from current pos to end buffer = %zi", iEnd - ip);
    DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart);
    DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd));
    return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
}
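
/* Illustrative scenario: in extDict mode, a match may start in the old
 * segment and run past its end. If the dictionary segment ends with "abc"
 * and the current segment starts with "def", a match against input "abcdef"
 * first counts "abc" up to mEnd; because the entire first part matched, the
 * count continues from iStart ("def") inside the current segment, returning
 * the combined length 6.
 */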


/*-*************************************
 *  Hashes
 ***************************************/
static const U32 prime3bytes = 506832829U;
static U32    ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes)  >> (32-h) ; }
MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */

static const U32 prime4bytes = 2654435761U;
static U32    ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }

static const U64 prime5bytes = 889523592379ULL;
static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; }
static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }

static const U64 prime6bytes = 227718039650203ULL;
static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; }
static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }

static const U64 prime7bytes = 58295818150454627ULL;
static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; }
static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }

static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }

MEM_STATIC size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
{
    switch(mls)
    {
    default:
    case 4: return ZSTD_hash4Ptr(p, hBits);
    case 5: return ZSTD_hash5Ptr(p, hBits);
    case 6: return ZSTD_hash6Ptr(p, hBits);
    case 7: return ZSTD_hash7Ptr(p, hBits);
    case 8: return ZSTD_hash8Ptr(p, hBits);
    }
}
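
/* Usage sketch (illustrative; `hLog`, `hashTable`, `base` stand in for the
 * caller's own state): hash-based searchers index positions into a
 * power-of-two table with these functions, e.g.
 *
 *     size_t const h = ZSTD_hashPtr(ip, hLog, mls);   /- mls = minimum match length -/
 *     U32 const matchIndex = hashTable[h];            /- previous position with same hash -/
 *     hashTable[h] = (U32)(ip - base);                /- record current position -/
 *
 * Each ZSTD_hashN() variant hashes exactly N input bytes: the left shift
 * keeps only N bytes before the multiply, and `>> (64-h)` (or `>> (32-h)`)
 * keeps the top h bits of the product.
 */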

/** ZSTD_ipow() :
 *  Return base^exponent.
 */
static U64 ZSTD_ipow(U64 base, U64 exponent)
{
    U64 power = 1;
    while (exponent) {
        if (exponent & 1) power *= base;
        exponent >>= 1;
        base *= base;
    }
    return power;
}
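
/* Worked example (illustrative): this is exponentiation by squaring, so the
 * loop runs O(log2(exponent)) times. For base=3, exponent=5 (binary 101):
 *   bit 0 set   : power = 1*3  = 3;    base = 3*3  = 9
 *   bit 1 clear :                      base = 9*9  = 81
 *   bit 2 set   : power = 3*81 = 243
 * giving 3^5 == 243. All arithmetic wraps modulo 2^64, which is exactly what
 * the rolling hash below relies on.
 */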

#define ZSTD_ROLL_HASH_CHAR_OFFSET 10

/** ZSTD_rollingHash_append() :
 *  Add the buffer to the hash value.
 */
static U64 ZSTD_rollingHash_append(U64 hash, void const* buf, size_t size)
{
    BYTE const* istart = (BYTE const*)buf;
    size_t pos;
    for (pos = 0; pos < size; ++pos) {
        hash *= prime8bytes;
        hash += istart[pos] + ZSTD_ROLL_HASH_CHAR_OFFSET;
    }
    return hash;
}

/** ZSTD_rollingHash_compute() :
 *  Compute the rolling hash value of the buffer.
 */
MEM_STATIC U64 ZSTD_rollingHash_compute(void const* buf, size_t size)
{
    return ZSTD_rollingHash_append(0, buf, size);
}

/** ZSTD_rollingHash_primePower() :
 *  Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash
 *  over a window of `length` bytes.
 */
MEM_STATIC U64 ZSTD_rollingHash_primePower(U32 length)
{
    return ZSTD_ipow(prime8bytes, length - 1);
}

/** ZSTD_rollingHash_rotate() :
 *  Rotate the rolling hash by one byte.
 */
MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower)
{
    hash -= (toRemove + ZSTD_ROLL_HASH_CHAR_OFFSET) * primePower;
    hash *= prime8bytes;
    hash += toAdd + ZSTD_ROLL_HASH_CHAR_OFFSET;
    return hash;
}
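
/* Illustrative invariant (assumes `buf` holds at least windowSize+1 bytes):
 * rotating is equivalent to recomputing over the shifted window.
 *
 *     U64 const pp = ZSTD_rollingHash_primePower(windowSize);
 *     U64 h = ZSTD_rollingHash_compute(buf, windowSize);
 *     h = ZSTD_rollingHash_rotate(h, buf[0], buf[windowSize], pp);
 *     assert(h == ZSTD_rollingHash_compute(buf + 1, windowSize));
 *
 * This identity is what lets the long-distance matcher hash every window
 * position in O(1) each, instead of O(windowSize).
 */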

/*-*************************************
*  Round buffer management
***************************************/
/* Max current allowed */
#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
/* Maximum chunk size before overflow correction needs to be called again */
#define ZSTD_CHUNKSIZE_MAX                                                     \
    ( ((U32)-1)                  /* Maximum ending current index */            \
    - ZSTD_CURRENT_MAX)          /* Maximum beginning lowLimit */

/**
 * ZSTD_window_clear():
 * Clears the window containing the history by simply setting it to empty.
 */
MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)
{
    size_t const endT = (size_t)(window->nextSrc - window->base);
    U32 const end = (U32)endT;

    window->lowLimit = end;
    window->dictLimit = end;
}

/**
 * ZSTD_window_hasExtDict():
 * Returns non-zero if the window has a non-empty extDict.
 */
MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
{
    return window.lowLimit < window.dictLimit;
}

/**
 * ZSTD_matchState_dictMode():
 * Inspects the provided matchState and figures out what dictMode should be
 * passed to the compressor.
 */
MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)
{
    return ZSTD_window_hasExtDict(ms->window) ?
        ZSTD_extDict :
        ms->dictMatchState != NULL ?
            ZSTD_dictMatchState :
            ZSTD_noDict;
}

/**
 * ZSTD_window_needOverflowCorrection():
 * Returns non-zero if the indices are getting too large and need overflow
 * protection.
 */
MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
                                                  void const* srcEnd)
{
    U32 const current = (U32)((BYTE const*)srcEnd - window.base);
    return current > ZSTD_CURRENT_MAX;
}

/**
 * ZSTD_window_correctOverflow():
 * Reduces the indices to protect from index overflow.
 * Returns the correction made to the indices, which must be applied to every
 * stored index.
 *
 * The least significant cycleLog bits of the indices must remain the same,
 * which may be 0. Every index up to maxDist in the past must be valid.
 * NOTE: (maxDist & cycleMask) must be zero.
 */
MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
                                           U32 maxDist, void const* src)
{
    /* preemptive overflow correction:
     * 1. correction is large enough:
     *    lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog
     *    1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog
     *
     *    current - newCurrent
     *    > (3<<29 + 1<<windowLog) - (1<<windowLog + 1<<chainLog)
     *    > (3<<29) - (1<<chainLog)
     *    > (3<<29) - (1<<30)             (NOTE: chainLog <= 30)
     *    > 1<<29
     *
     * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow:
     *    After correction, current is less than (1<<chainLog + 1<<windowLog).
     *    In 64-bit mode we are safe, because we have 64-bit ptrdiff_t.
     *    In 32-bit mode we are safe, because (chainLog <= 29), so
     *    ip+ZSTD_CHUNKSIZE_MAX - cctx->base < 1<<32.
     * 3. (cctx->lowLimit + 1<<windowLog) < 1<<32:
     *    windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
     */
    U32 const cycleMask = (1U << cycleLog) - 1;
    U32 const current = (U32)((BYTE const*)src - window->base);
    U32 const newCurrent = (current & cycleMask) + maxDist;
    U32 const correction = current - newCurrent;
    assert((maxDist & cycleMask) == 0);
    assert(current > newCurrent);
    /* Loose bound, should be around 1<<29 (see above) */
    assert(correction > 1<<28);

    window->base += correction;
    window->dictBase += correction;
    window->lowLimit -= correction;
    window->dictLimit -= correction;

    DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction,
             window->lowLimit);
    return correction;
}
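
/* Worked example (illustrative numbers): with cycleLog = 20 and
 * maxDist = 1<<20, a current index of 0xC0000123 gives
 *   newCurrent = (0xC0000123 & 0xFFFFF) + (1<<20) = 0x123 + 0x100000 = 0x100123
 *   correction = 0xC0000123 - 0x100123 = 0xBFF00000
 * base/dictBase advance by `correction` while lowLimit/dictLimit shrink by it,
 * so every still-reachable index keeps its low 20 bits and remains valid.
 */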

/**
 * ZSTD_window_enforceMaxDist():
 * Updates lowLimit so that:
 *    (srcEnd - base) - lowLimit == maxDist + loadedDictEnd
 *
 * This allows a simple check that index >= lowLimit to see if index is valid.
 * This must be called before a block compression call, with srcEnd as the block
 * source end.
 *
 * If loadedDictEndPtr is not NULL, we set it to zero once we update lowLimit.
 * This is because dictionaries are allowed to be referenced as long as the last
 * byte of the dictionary is in the window, but once they are out of range,
 * they cannot be referenced. If loadedDictEndPtr is NULL, we use
 * loadedDictEnd == 0.
 *
 * In normal dict mode, the dict is between lowLimit and dictLimit. In
 * dictMatchState mode, lowLimit and dictLimit are the same, and the dictionary
 * is below them. forceWindow and dictMatchState are therefore incompatible.
 */
MEM_STATIC void
ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
                           void const* srcEnd,
                           U32 maxDist,
                           U32* loadedDictEndPtr,
                           const ZSTD_matchState_t** dictMatchStatePtr)
{
    U32 const blockEndIdx = (U32)((BYTE const*)srcEnd - window->base);
    U32 loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
    DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u",
                (unsigned)blockEndIdx, (unsigned)maxDist);
    if (blockEndIdx > maxDist + loadedDictEnd) {
        U32 const newLowLimit = blockEndIdx - maxDist;
        if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
        if (window->dictLimit < window->lowLimit) {
            DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u",
                        (unsigned)window->dictLimit, (unsigned)window->lowLimit);
            window->dictLimit = window->lowLimit;
        }
        if (loadedDictEndPtr)
            *loadedDictEndPtr = 0;
        if (dictMatchStatePtr)
            *dictMatchStatePtr = NULL;
    }
}

/**
 * ZSTD_window_update():
 * Updates the window by appending [src, src + srcSize) to the window.
 * If it is not contiguous, the current prefix becomes the extDict, and we
 * forget about the old extDict. Handles overlap of the prefix and extDict.
 * Returns non-zero if the segment is contiguous.
 */
MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
                                  void const* src, size_t srcSize)
{
    BYTE const* const ip = (BYTE const*)src;
    U32 contiguous = 1;
    DEBUGLOG(5, "ZSTD_window_update");
    /* Check if blocks follow each other */
    if (src != window->nextSrc) {
        /* not contiguous */
        size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
        DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit);
        window->lowLimit = window->dictLimit;
        assert(distanceFromBase == (size_t)(U32)distanceFromBase);  /* should never overflow */
        window->dictLimit = (U32)distanceFromBase;
        window->dictBase = window->base;
        window->base = ip - distanceFromBase;
        /* ms->nextToUpdate = window->dictLimit; */
        if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit;   /* too small extDict */
        contiguous = 0;
    }
    window->nextSrc = ip + srcSize;
    /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
    if ( (ip+srcSize > window->dictBase + window->lowLimit)
       & (ip < window->dictBase + window->dictLimit)) {
        ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
        U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
        window->lowLimit = lowLimitMax;
        DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit);
    }
    return contiguous;
}
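
/* Usage sketch (illustrative, not a prescribed call site): block compression
 * calls this once per input buffer, before searching for matches:
 *
 *     U32 const contiguous = ZSTD_window_update(&ms->window, src, srcSize);
 *     /- fast paths may use `contiguous` to skip extDict handling -/
 *
 * When the new buffer is not contiguous with the previous one, the old prefix
 * becomes the new extDict and indices keep increasing monotonically, so
 * previously stored table entries remain comparable.
 */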


/* debug functions */
#if (DEBUGLEVEL>=2)

MEM_STATIC double ZSTD_fWeight(U32 rawStat)
{
    U32 const fp_accuracy = 8;
    U32 const fp_multiplier = (1 << fp_accuracy);
    U32 const newStat = rawStat + 1;
    U32 const hb = ZSTD_highbit32(newStat);
    U32 const BWeight = hb * fp_multiplier;
    U32 const FWeight = (newStat << fp_accuracy) >> hb;
    U32 const weight = BWeight + FWeight;
    assert(hb + fp_accuracy < 31);
    return (double)weight / fp_multiplier;
}
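
/* Worked example (illustrative): ZSTD_fWeight approximates log2 in fixed
 * point. For rawStat = 99 : newStat = 100, hb = 6, BWeight = 6*256 = 1536,
 * FWeight = (100<<8)>>6 = 400, so the result is (1536+400)/256 = 7.5625,
 * close to log2(100)+1 = 7.64. The difference of two such weights below thus
 * estimates a symbol's bit cost, -log2(freq/total).
 */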

/* display a table content,
 * listing each element, its frequency, and its predicted bit cost */
MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
{
    unsigned u, sum;
    for (u=0, sum=0; u<=max; u++) sum += table[u];
    DEBUGLOG(2, "total nb elts: %u", sum);
    for (u=0; u<=max; u++) {
        DEBUGLOG(2, "%2u: %5u  (%.2f)",
                 u, table[u], ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) );
    }
}

#endif


#if defined (__cplusplus)
}
#endif


/* ==============================================================
 * Private declarations
 * These prototypes shall only be called from within lib/compress
 * ============================================================== */

/* ZSTD_getCParamsFromCCtxParams() :
 * cParams are built depending on compressionLevel, src size hints,
 * LDM and manually set compression parameters.
 */
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize);

/*! ZSTD_initCStream_internal() :
 *  Private use only. Init streaming operation.
 *  expects params to be valid.
 *  may receive `dict` or `cdict`, or neither, but not both at once.
 * @return : 0, or an error code */
size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
                     const void* dict, size_t dictSize,
                     const ZSTD_CDict* cdict,
                     ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);

void ZSTD_resetSeqStore(seqStore_t* ssPtr);

/*! ZSTD_getCParamsFromCDict() :
 *  as the name implies */
ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);

/* ZSTD_compressBegin_advanced_internal() :
 * Private use only. To be called from zstdmt_compress.c. */
size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
                                    const void* dict, size_t dictSize,
                                    ZSTD_dictContentType_e dictContentType,
                                    ZSTD_dictTableLoadMethod_e dtlm,
                                    const ZSTD_CDict* cdict,
                                    ZSTD_CCtx_params params,
                                    unsigned long long pledgedSrcSize);

/* ZSTD_compress_advanced_internal() :
 * Private use only. To be called from zstdmt_compress.c. */
size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
                                       void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize,
                                       const void* dict, size_t dictSize,
                                       ZSTD_CCtx_params params);


/* ZSTD_writeLastEmptyBlock() :
 * output an empty Block with end-of-frame mark to complete a frame
 * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
 *           or an error code if `dstCapacity` is too small (< ZSTD_blockHeaderSize)
 */
size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);


/* ZSTD_referenceExternalSequences() :
 * Must be called before starting a compression operation.
 * seqs must parse a prefix of the source.
 * This cannot be used when long range matching is enabled.
 * Zstd will use these sequences, and pass the literals to a secondary block
 * compressor.
 * @return : An error code on failure.
 * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory
 * access and data corruption.
 */
size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);


#endif /* ZSTD_COMPRESS_H */