/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"
#include "zstd_lazy.h"


/*-*************************************
* Binary Tree search
***************************************/

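/* ZSTD_updateDUBT() :
 * inserts every position from ms->nextToUpdate up to ip (excluded) into the
 * hash table, and links each new position at the head of its candidate list
 * inside the binary-tree table, tagged with ZSTD_DUBT_UNSORTED_MARK.
 * Sorting is deferred : it happens lazily inside ZSTD_DUBT_findBestMatch(),
 * the first time the candidate is actually visited during a search. */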
void ZSTD_updateDUBT(
                ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                const BYTE* ip, const BYTE* iend,
                U32 mls)
{
    U32* const hashTable = ms->hashTable;
    U32  const hashLog = cParams->hashLog;

    U32* const bt = ms->chainTable;
    U32  const btLog  = cParams->chainLog - 1;
    U32  const btMask = (1 << btLog) - 1;

    const BYTE* const base = ms->window.base;
    U32 const target = (U32)(ip - base);
    U32 idx = ms->nextToUpdate;

    if (idx != target)
        DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)",
                    idx, target, ms->window.dictLimit);
    assert(ip + 8 <= iend);   /* condition for ZSTD_hashPtr */
    (void)iend;

    assert(idx >= ms->window.dictLimit);   /* condition for valid base+idx */
    for ( ; idx < target ; idx++) {
        size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls);   /* assumption : ip + 8 <= iend */
        U32    const matchIndex = hashTable[h];

        U32* const nextCandidatePtr = bt + 2*(idx&btMask);
        U32* const sortMarkPtr = nextCandidatePtr + 1;

        DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx);
        hashTable[h] = idx;   /* Update Hash Table */
        *nextCandidatePtr = matchIndex;   /* update BT like a chain */
        *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK;
    }
    ms->nextToUpdate = target;
}


/** ZSTD_insertDUBT1() :
 *  sort one already inserted but unsorted position
 *  assumption : current >= btlow == (current - btmask)
 *  doesn't fail */
static void ZSTD_insertDUBT1(
                 ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                 U32 current, const BYTE* inputEnd,
                 U32 nbCompares, U32 btLow, int extDict)
{
    U32* const bt = ms->chainTable;
    U32  const btLog  = cParams->chainLog - 1;
    U32  const btMask = (1 << btLog) - 1;
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const ip = (current>=dictLimit) ? base + current : dictBase + current;
    const BYTE* const iend = (current>=dictLimit) ? inputEnd : dictBase + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* match;
    U32* smallerPtr = bt + 2*(current&btMask);
    U32* largerPtr  = smallerPtr + 1;
    U32 matchIndex = *smallerPtr;
    U32 dummy32;   /* to be nullified at the end */
    U32 const windowLow = ms->window.lowLimit;

    DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
                current, dictLimit, windowLow);
    assert(current >= btLow);
    assert(ip < iend);   /* condition for ZSTD_count */

    while (nbCompares-- && (matchIndex > windowLow)) {
        U32* const nextPtr = bt + 2*(matchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        assert(matchIndex < current);

        if ( (!extDict)
          || (matchIndex+matchLength >= dictLimit)  /* both in current segment */
          || (current < dictLimit) /* both in extDict */) {
            const BYTE* const mBase = !extDict || ((matchIndex+matchLength) >= dictLimit) ? base : dictBase;
            assert( (matchIndex+matchLength >= dictLimit)   /* might be wrong if extDict is incorrectly set to 0 */
                 || (current < dictLimit) );
            match = mBase + matchIndex;
            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
        } else {
            match = dictBase + matchIndex;
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
            if (matchIndex+matchLength >= dictLimit)
                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
        }

        DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
                    current, matchIndex, (U32)matchLength);

        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
            break;   /* drop, to guarantee consistency; we miss a bit of compression, but other solutions can corrupt the tree */
        }

        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
            /* match is smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u",
                        matchIndex, btLow, nextPtr[1]);
            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
        } else {
            /* match is larger than current */
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u",
                        matchIndex, btLow, nextPtr[0]);
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
    }   }

    *smallerPtr = *largerPtr = 0;
}


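/* ZSTD_DUBT_findBestMatch() :
 * works in three stages :
 * 1. walk the list of still-unsorted candidates hanging from the hash head,
 *    remembering them (up to nbCompares) and stopping at the first sorted node ;
 * 2. batch-sort the stacked candidates, from oldest to most recent,
 *    using ZSTD_insertDUBT1() ;
 * 3. descend the (now sorted) binary tree from the hash head to find the
 *    longest match, inserting the current position on the way.
 * When a match is retained, *offsetPtr receives (current - matchIndex) + ZSTD_REP_MOVE ;
 * the returned value is the best match length found (callers below only use lengths >= 4). */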
static size_t ZSTD_DUBT_findBestMatch (
                            ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                            const BYTE* const ip, const BYTE* const iend,
                            size_t* offsetPtr,
                            U32 const mls,
                            U32 const extDict)
{
    U32*   const hashTable = ms->hashTable;
    U32    const hashLog = cParams->hashLog;
    size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
    U32          matchIndex = hashTable[h];

    const BYTE* const base = ms->window.base;
    U32    const current = (U32)(ip-base);
    U32    const windowLow = ms->window.lowLimit;

    U32*   const bt = ms->chainTable;
    U32    const btLog  = cParams->chainLog - 1;
    U32    const btMask = (1 << btLog) - 1;
    U32    const btLow = (btMask >= current) ? 0 : current - btMask;
    U32    const unsortLimit = MAX(btLow, windowLow);

    U32*         nextCandidate = bt + 2*(matchIndex&btMask);
    U32*         unsortedMark = bt + 2*(matchIndex&btMask) + 1;
    U32          nbCompares = 1U << cParams->searchLog;
    U32          nbCandidates = nbCompares;
    U32          previousCandidate = 0;

    DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", current);
    assert(ip <= iend-8);   /* required for h calculation */

    /* reach end of unsorted candidates list */
    while ( (matchIndex > unsortLimit)
         && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)
         && (nbCandidates > 1) ) {
        DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
                    matchIndex);
        *unsortedMark = previousCandidate;
        previousCandidate = matchIndex;
        matchIndex = *nextCandidate;
        nextCandidate = bt + 2*(matchIndex&btMask);
        unsortedMark = bt + 2*(matchIndex&btMask) + 1;
        nbCandidates --;
    }

    if ( (matchIndex > unsortLimit)
      && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
        DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
                    matchIndex);
        *nextCandidate = *unsortedMark = 0;   /* nullify next candidate if it's still unsorted (note : simplification, detrimental to compression ratio, beneficial for speed) */
    }

    /* batch sort stacked candidates */
    matchIndex = previousCandidate;
    while (matchIndex) {  /* will end on matchIndex == 0 */
        U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
        U32 const nextCandidateIdx = *nextCandidateIdxPtr;
        ZSTD_insertDUBT1(ms, cParams, matchIndex, iend,
                         nbCandidates, unsortLimit, extDict);
        matchIndex = nextCandidateIdx;
        nbCandidates++;
    }

    /* find longest match */
    {   size_t commonLengthSmaller=0, commonLengthLarger=0;
        const BYTE* const dictBase = ms->window.dictBase;
        const U32 dictLimit = ms->window.dictLimit;
        const BYTE* const dictEnd = dictBase + dictLimit;
        const BYTE* const prefixStart = base + dictLimit;
        U32* smallerPtr = bt + 2*(current&btMask);
        U32* largerPtr  = bt + 2*(current&btMask) + 1;
        U32 matchEndIdx = current+8+1;
        U32 dummy32;   /* to be nullified at the end */
        size_t bestLength = 0;

        matchIndex = hashTable[h];
        hashTable[h] = current;   /* Update Hash Table */

        while (nbCompares-- && (matchIndex > windowLow)) {
            U32* const nextPtr = bt + 2*(matchIndex & btMask);
            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
            const BYTE* match;

            if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
                match = base + matchIndex;
                matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
            } else {
                match = dictBase + matchIndex;
                matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
                if (matchIndex+matchLength >= dictLimit)
                    match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
            }

            if (matchLength > bestLength) {
                if (matchLength > matchEndIdx - matchIndex)
                    matchEndIdx = matchIndex + (U32)matchLength;
                if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
                    bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
                if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
                }
            }

            if (match[matchLength] < ip[matchLength]) {
                /* match is smaller than current */
                *smallerPtr = matchIndex;             /* update smaller idx */
                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
                if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
                smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
                matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
            } else {
                /* match is larger than current */
                *largerPtr = matchIndex;
                commonLengthLarger = matchLength;
                if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
                largerPtr = nextPtr;
                matchIndex = nextPtr[0];
        }   }

        *smallerPtr = *largerPtr = 0;

        assert(matchEndIdx > current+8);   /* ensure nextToUpdate is increased */
        ms->nextToUpdate = matchEndIdx - 8;   /* skip repetitive patterns */
        if (bestLength >= MINMATCH) {
            U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
            DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
                        current, (U32)bestLength, (U32)*offsetPtr, mIndex);
        }
        return bestLength;
    }
}


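/* note : all match finders in this file report offsets through *offsetPtr
 * as (current - matchIndex) + ZSTD_REP_MOVE.
 * The parsers below use offset==0 to mean "repeat offset_1", pass the value
 * unchanged to ZSTD_storeSeq(), and subtract ZSTD_REP_MOVE again when
 * refreshing the repcode history offset_1 / offset_2. */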
/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
static size_t ZSTD_BtFindBestMatch (
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 mls /* template */)
{
    DEBUGLOG(7, "ZSTD_BtFindBestMatch");
    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
    ZSTD_updateDUBT(ms, cParams, ip, iLimit, mls);
    return ZSTD_DUBT_findBestMatch(ms, cParams, ip, iLimit, offsetPtr, mls, 0);
}


static size_t ZSTD_BtFindBestMatch_selectMLS (
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* offsetPtr)
{
    switch(cParams->searchLength)
    {
    default : /* includes case 3 */
    case 4 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 4);
    case 5 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 5);
    case 7 :
    case 6 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 6);
    }
}


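/* note : the switch above hard-wires the match-length parameter (mls) as a
 * compile-time constant (the "template" hint above), so the searcher is
 * specialized per useful value ; searchLength 3 (and any other out-of-range
 * value) falls back to 4, and 7 is handled as 6. */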
/** ZSTD_BtFindBestMatch_extDict() : Tree updater, providing best match */
static size_t ZSTD_BtFindBestMatch_extDict (
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 mls)
{
    DEBUGLOG(7, "ZSTD_BtFindBestMatch_extDict");
    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
    ZSTD_updateDUBT(ms, cParams, ip, iLimit, mls);
    return ZSTD_DUBT_findBestMatch(ms, cParams, ip, iLimit, offsetPtr, mls, 1);
}


static size_t ZSTD_BtFindBestMatch_selectMLS_extDict (
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* offsetPtr)
{
    switch(cParams->searchLength)
    {
    default : /* includes case 3 */
    case 4 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 4);
    case 5 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 5);
    case 7 :
    case 6 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 6);
    }
}



/* *********************************
* Hash Chain
***********************************/
#define NEXT_IN_CHAIN(d, mask) chainTable[(d) & mask]

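/* chainTable[], indexed by (position & chainMask), stores the previous
 * position that hashed to the same bucket ; following NEXT_IN_CHAIN()
 * therefore walks candidates from most recent to older ones. */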
/* Update chains up to ip (excluded)
   Assumption : always within prefix (i.e. not within extDict) */
static U32 ZSTD_insertAndFindFirstIndex_internal(
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* ip, U32 const mls)
{
    U32* const hashTable = ms->hashTable;
    const U32 hashLog = cParams->hashLog;
    U32* const chainTable = ms->chainTable;
    const U32 chainMask = (1 << cParams->chainLog) - 1;
    const BYTE* const base = ms->window.base;
    const U32 target = (U32)(ip - base);
    U32 idx = ms->nextToUpdate;

    while(idx < target) { /* catch up */
        size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
        NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
        hashTable[h] = idx;
        idx++;
    }

    ms->nextToUpdate = target;
    return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
}

U32 ZSTD_insertAndFindFirstIndex(
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* ip)
{
    return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, cParams->searchLength);
}


/* inlining is important to hardwire a hot branch (template emulation) */
FORCE_INLINE_TEMPLATE
size_t ZSTD_HcFindBestMatch_generic (
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 mls, const U32 extDict)
{
    U32* const chainTable = ms->chainTable;
    const U32 chainSize = (1 << cParams->chainLog);
    const U32 chainMask = chainSize-1;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const U32 lowLimit = ms->window.lowLimit;
    const U32 current = (U32)(ip-base);
    const U32 minChain = current > chainSize ? current - chainSize : 0;
    U32 nbAttempts = 1U << cParams->searchLog;
    size_t ml=4-1;

    /* HC4 match finder */
    U32 matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);

    for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {
        size_t currentMl=0;
        if ((!extDict) || matchIndex >= dictLimit) {
            const BYTE* const match = base + matchIndex;
            if (match[ml] == ip[ml])   /* potentially better */
                currentMl = ZSTD_count(ip, match, iLimit);
        } else {
            const BYTE* const match = dictBase + matchIndex;
            assert(match+4 <= dictEnd);
            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
        }

        /* save best solution */
        if (currentMl > ml) {
            ml = currentMl;
            *offsetPtr = current - matchIndex + ZSTD_REP_MOVE;
            if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
        }

        if (matchIndex <= minChain) break;
        matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
    }

    return ml;
}


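/* note : ml starts at 3 (4-1), so only matches of length 4 or more can
 * improve on it and be reported ; offsets are returned with the same
 * ZSTD_REP_MOVE shift as the binary-tree searchers. */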
FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* offsetPtr)
{
    switch(cParams->searchLength)
    {
    default : /* includes case 3 */
    case 4 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 4, 0);
    case 5 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 5, 0);
    case 7 :
    case 6 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 6, 0);
    }
}


FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* const offsetPtr)
{
    switch(cParams->searchLength)
    {
    default : /* includes case 3 */
    case 4 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 4, 1);
    case 5 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 5, 1);
    case 7 :
    case 6 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 6, 1);
    }
}


/* *******************************
* Common parser - lazy strategy
*********************************/
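/* ZSTD_compressBlock_lazy_generic() :
 * generic lazy parser, templated on (searchMethod, depth) :
 * - searchMethod != 0 selects the binary-tree searcher, 0 the hash-chain one ;
 * - depth 0 is greedy (take the first acceptable match) ; depth 1 and 2
 *   re-search at ip+1 (and ip+2) and only keep the later match when its
 *   estimated gain is higher, repeating as long as improvements are found.
 * Gain comparisons weigh match length against ZSTD_highbit32(offset+1),
 * a rough estimate of the offset's encoding cost ; repcode candidates are
 * charged no offset cost. */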
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_lazy_generic(
                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
                        U32 rep[ZSTD_REP_NUM],
                        ZSTD_compressionParameters const* cParams,
                        const void* src, size_t srcSize,
                        const U32 searchMethod, const U32 depth)
{
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    const BYTE* const base = ms->window.base + ms->window.dictLimit;

    typedef size_t (*searchMax_f)(
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
    searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
    U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;

    /* init */
    ip += (ip==base);
    ms->nextToUpdate3 = ms->nextToUpdate;
    {   U32 const maxRep = (U32)(ip-base);
        if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
        if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
    }

    /* Match Loop */
    while (ip < ilimit) {
        size_t matchLength=0;
        size_t offset=0;
        const BYTE* start=ip+1;

        /* check repCode */
        if ((offset_1>0) & (MEM_read32(ip+1) == MEM_read32(ip+1 - offset_1))) {
            /* repcode : we take it */
            matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
            if (depth==0) goto _storeSequence;
        }

        /* first search (depth 0) */
        {   size_t offsetFound = 99999999;
            size_t const ml2 = searchMax(ms, cParams, ip, iend, &offsetFound);
            if (ml2 > matchLength)
                matchLength = ml2, start = ip, offset=offsetFound;
        }

        if (matchLength < 4) {
            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
            continue;
        }

        /* let's try to find a better solution */
        if (depth>=1)
        while (ip<ilimit) {
            ip ++;
            if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
                size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
                int const gain2 = (int)(mlRep * 3);
                int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
                if ((mlRep >= 4) && (gain2 > gain1))
                    matchLength = mlRep, offset = 0, start = ip;
            }
            {   size_t offset2=99999999;
                size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
                if ((ml2 >= 4) && (gain2 > gain1)) {
                    matchLength = ml2, offset = offset2, start = ip;
                    continue;   /* search a better one */
            }   }

            /* let's find an even better one */
            if ((depth==2) && (ip<ilimit)) {
                ip ++;
                if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
                    size_t const ml2 = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
                    int const gain2 = (int)(ml2 * 4);
                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
                    if ((ml2 >= 4) && (gain2 > gain1))
                        matchLength = ml2, offset = 0, start = ip;
                }
                {   size_t offset2=99999999;
                    size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
                    if ((ml2 >= 4) && (gain2 > gain1)) {
                        matchLength = ml2, offset = offset2, start = ip;
                        continue;
            }   }   }
            break;  /* nothing found : store previous solution */
        }

        /* NOTE:
         * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
         * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
         * overflows the pointer, which is undefined behavior.
         */
        /* catch up */
        if (offset) {
            while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > base))
                 && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) )  /* only search for offset within prefix */
                { start--; matchLength++; }
            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
        }
        /* store sequence */
_storeSequence:
        {   size_t const litLength = start - anchor;
            ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
            anchor = ip = start + matchLength;
        }

        /* check immediate repcode */
        while ( ((ip <= ilimit) & (offset_2>0))
             && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
            /* store sequence */
            matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
            offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap repcodes */
            ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
            ip += matchLength;
            anchor = ip;
            continue;   /* faster when present ... (?) */
    }   }

    /* Save reps for next block */
    rep[0] = offset_1 ? offset_1 : savedOffset;
    rep[1] = offset_2 ? offset_2 : savedOffset;

    /* Return the last literals size */
    return iend - anchor;
}


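/* block compressors for the prefix (non-extDict) case :
 * btlazy2 = (searchMethod 1, depth 2), lazy2 = (0, 2),
 * lazy = (0, 1), greedy = (0, 0). */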
size_t ZSTD_compressBlock_btlazy2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 1, 2);
}

size_t ZSTD_compressBlock_lazy2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 2);
}

size_t ZSTD_compressBlock_lazy(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 1);
}

size_t ZSTD_compressBlock_greedy(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 0);
}


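/* ZSTD_compressBlock_lazy_extDict_generic() :
 * same parser as above, for the case where part of the window lives in an
 * external segment (dictBase..dictEnd) : matches and repcodes may start in
 * either segment, so their lengths are measured with ZSTD_count_2segments().
 * Repcode candidates are validated with the
 * `(U32)((dictLimit-1) - repIndex) >= 3` test (intentional overflow), which
 * accepts any prefix index and extDict indices only when at least 4 bytes
 * remain before dictLimit. */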
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_lazy_extDict_generic(
                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
                        U32 rep[ZSTD_REP_NUM],
                        ZSTD_compressionParameters const* cParams,
                        const void* src, size_t srcSize,
                        const U32 searchMethod, const U32 depth)
{
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    const BYTE* const base = ms->window.base;
    const U32 dictLimit = ms->window.dictLimit;
    const U32 lowestIndex = ms->window.lowLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const dictStart = dictBase + lowestIndex;

    typedef size_t (*searchMax_f)(
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
    searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;

    U32 offset_1 = rep[0], offset_2 = rep[1];

    /* init */
    ms->nextToUpdate3 = ms->nextToUpdate;
    ip += (ip == prefixStart);

    /* Match Loop */
    while (ip < ilimit) {
        size_t matchLength=0;
        size_t offset=0;
        const BYTE* start=ip+1;
        U32 current = (U32)(ip-base);

        /* check repCode */
        {   const U32 repIndex = (U32)(current+1 - offset_1);
            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
            const BYTE* const repMatch = repBase + repIndex;
            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))   /* intentional overflow */
            if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
                /* repcode detected, we should take it */
                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                if (depth==0) goto _storeSequence;
        }   }

        /* first search (depth 0) */
        {   size_t offsetFound = 99999999;
            size_t const ml2 = searchMax(ms, cParams, ip, iend, &offsetFound);
            if (ml2 > matchLength)
                matchLength = ml2, start = ip, offset=offsetFound;
        }

        if (matchLength < 4) {
            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
            continue;
        }

        /* let's try to find a better solution */
        if (depth>=1)
        while (ip<ilimit) {
            ip ++;
            current++;
            /* check repCode */
            if (offset) {
                const U32 repIndex = (U32)(current - offset_1);
                const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
                const BYTE* const repMatch = repBase + repIndex;
                if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
                if (MEM_read32(ip) == MEM_read32(repMatch)) {
                    /* repcode detected */
                    const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                    size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                    int const gain2 = (int)(repLength * 3);
                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
                    if ((repLength >= 4) && (gain2 > gain1))
                        matchLength = repLength, offset = 0, start = ip;
            }   }

            /* search match, depth 1 */
            {   size_t offset2=99999999;
                size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
                if ((ml2 >= 4) && (gain2 > gain1)) {
                    matchLength = ml2, offset = offset2, start = ip;
                    continue;   /* search a better one */
            }   }

            /* let's find an even better one */
            if ((depth==2) && (ip<ilimit)) {
                ip ++;
                current++;
                /* check repCode */
                if (offset) {
                    const U32 repIndex = (U32)(current - offset_1);
                    const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
                    const BYTE* const repMatch = repBase + repIndex;
                    if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
                    if (MEM_read32(ip) == MEM_read32(repMatch)) {
                        /* repcode detected */
                        const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                        size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                        int const gain2 = (int)(repLength * 4);
                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
                        if ((repLength >= 4) && (gain2 > gain1))
                            matchLength = repLength, offset = 0, start = ip;
                }   }

                /* search match, depth 2 */
                {   size_t offset2=99999999;
                    size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
                    if ((ml2 >= 4) && (gain2 > gain1)) {
                        matchLength = ml2, offset = offset2, start = ip;
                        continue;
            }   }   }
            break;  /* nothing found : store previous solution */
        }

        /* catch up */
        if (offset) {
            U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
            const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
            const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
            while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
        }

        /* store sequence */
_storeSequence:
        {   size_t const litLength = start - anchor;
            ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
            anchor = ip = start + matchLength;
        }

        /* check immediate repcode */
        while (ip <= ilimit) {
            const U32 repIndex = (U32)((ip-base) - offset_2);
            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
            const BYTE* const repMatch = repBase + repIndex;
            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
            if (MEM_read32(ip) == MEM_read32(repMatch)) {
                /* repcode detected, we should take it */
                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset history */
                ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
                ip += matchLength;
                anchor = ip;
                continue;   /* faster when present ... (?) */
            }
            break;
    }   }

    /* Save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return iend - anchor;
}


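/* block compressors for the extDict case, same (searchMethod, depth)
 * mapping as the prefix variants above. */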
size_t ZSTD_compressBlock_greedy_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 0);
}

size_t ZSTD_compressBlock_lazy_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 1);
}

size_t ZSTD_compressBlock_lazy2_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 2);
}

size_t ZSTD_compressBlock_btlazy2_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 1, 2);
}