diff --git a/contrib/linux-kernel/zstd.diff b/contrib/linux-kernel/zstd.diff index c2775bbb..7d7bf553 100644 --- a/contrib/linux-kernel/zstd.diff +++ b/contrib/linux-kernel/zstd.diff @@ -1203,10 +1203,10 @@ index 0000000..aa5eb4d + huf_decompress.o decompress.o diff --git a/lib/zstd/bitstream.h b/lib/zstd/bitstream.h new file mode 100644 -index 0000000..9d21540 +index 0000000..9b5d2bc --- /dev/null +++ b/lib/zstd/bitstream.h -@@ -0,0 +1,391 @@ +@@ -0,0 +1,376 @@ +/* ****************************************************************** + bitstream + Part of FSE library @@ -1356,22 +1356,7 @@ index 0000000..9d21540 +****************************************************************/ +MEM_STATIC unsigned BIT_highbit32 (register U32 val) +{ -+# if defined(_MSC_VER) /* Visual */ -+ unsigned long r=0; -+ _BitScanReverse ( &r, val ); -+ return (unsigned) r; -+# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ -+ return 31 - __builtin_clz (val); -+# else /* Software version */ -+ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; -+ U32 v = val; -+ v |= v >> 1; -+ v |= v >> 2; -+ v |= v >> 4; -+ v |= v >> 8; -+ v |= v >> 16; -+ return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; -+# endif ++ return 31 - __builtin_clz(val); +} + +/*===== Local Constants =====*/ @@ -1576,7 +1561,7 @@ index 0000000..9d21540 + if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer; + return BIT_DStream_completed; + } -+ { U32 nbBytes = bitD->bitsConsumed >> 3; ++ { U32 nbBytes = bitD->bitsConsumed >> 3; + BIT_DStream_status result = BIT_DStream_unfinished; + if (bitD->ptr - nbBytes < bitD->start) { + nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ @@ -1600,10 +1585,10 @@ index 0000000..9d21540 +#endif /* BITSTREAM_H_MODULE */ diff --git a/lib/zstd/compress.c b/lib/zstd/compress.c new file mode 100644 -index 0000000..79c3207 +index 0000000..4f1e184 --- /dev/null +++ b/lib/zstd/compress.c -@@ -0,0 +1,3384 @@ +@@ -0,0 +1,3297 @@ +/** + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. @@ -1625,10 +1610,6 @@ index 0000000..79c3207 +#include "huf.h" +#include "zstd_internal.h" /* includes zstd.h */ + -+#ifdef current -+# undef current -+#endif -+ +/*-************************************* +* Constants +***************************************/ @@ -1659,7 +1640,7 @@ index 0000000..79c3207 +* Context memory management +***************************************/ +struct ZSTD_CCtx_s { -+ const BYTE* nextSrc; /* next block here to continue on current prefix */ ++ const BYTE* nextSrc; /* next block here to continue on curr prefix */ + const BYTE* base; /* All regular indexes relative to this position */ + const BYTE* dictBase; /* extDict indexes relative to this position */ + U32 dictLimit; /* below that point, need extDict */ @@ -1787,14 +1768,14 @@ index 0000000..79c3207 + if (srcSize+dictSize == 0) return cPar; /* no size information available : no adjustment */ + + /* resize params, to use less memory when necessary */ -+ { U32 const minSrcSize = (srcSize==0) ? 500 : 0; ++ { U32 const minSrcSize = (srcSize==0) ? 
500 : 0;
++ { U32 const minSrcSize = (srcSize==0) ? 500 : 0;
+ U64 const rSize = srcSize + dictSize + minSrcSize;
+ if (rSize < ((U64)1<<ZSTD_WINDOWLOG_MAX)) {
+ U32 const srcLog = MAX(ZSTD_HASHLOG_MIN, ZSTD_highbit32((U32)(rSize)-1) + 1);
+ if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
+ } }
+ if (cPar.hashLog > cPar.windowLog) cPar.hashLog = cPar.windowLog;
-+ { U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
++ { U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
+ if (cycleLog > cPar.windowLog) cPar.chainLog -= (cycleLog - cPar.windowLog);
+ }
+
@@ -1846,7 +1827,7 @@ index 0000000..79c3207
+ return ZSTD_continueCCtx(zc, params, frameContentSize);
+ }
+
-+ { size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog);
++ { size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog);
+ U32 const divider = (params.cParams.searchLength==3) ? 3 : 4;
+ size_t const maxNbSeq = blockSize / divider;
+ size_t const tokenSpace = blockSize + 11*maxNbSeq;
@@ -1858,7 +1839,7 @@ index 0000000..79c3207
+ void* ptr;
+
+ /* Check if workSpace is large enough, alloc a new one if needed */
-+ { size_t const optSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
-+ { ZSTD_parameters params = srcCCtx->params;
++ { ZSTD_parameters params = srcCCtx->params;
+ params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
+ ZSTD_resetCCtx_advanced(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset);
+ }
+
+ /* copy tables */
-+ { size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog);
++ { size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog);
+ size_t const hSize = ((size_t)1) << srcCCtx->params.cParams.hashLog;
+ size_t const h3Size = (size_t)1 << srcCCtx->hashLog3;
+ size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
@@ -2086,12 +2067,12 @@ index 0000000..79c3207
+
+ /* small ? don't even attempt compression (speed opt) */
+# define LITERAL_NOENTROPY 63
-+ { size_t const minLitSize = zc->flagStaticHufTable == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
++ { size_t const minLitSize = zc->flagStaticHufTable == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
+ if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+ }
+
+ if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall); /* not enough space for compression */
-+ { HUF_repeat repeat = zc->flagStaticHufTable;
++ { HUF_repeat repeat = zc->flagStaticHufTable;
+ int const preferRepeat = zc->params.cParams.strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
+ if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
+ cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, zc->tmpCounters, sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat)
@@ -2113,18 +2094,18 @@ index 0000000..79c3207
+ switch(lhSize)
+ {
+ case 3: /* 2 - 2 - 10 - 10 */
-+ { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
++ { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
+ MEM_writeLE24(ostart, lhc);
+ break;
+ }
+ case 4: /* 2 - 2 - 14 - 14 */
-+ { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
++ { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
+ MEM_writeLE32(ostart, lhc);
+ break;
+ }
+ default: /* should not be necessary, lhSize is only {3,4,5} */
+ case 5: /* 2 - 2 - 18 - 18 */
-+ { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
++ { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
+ MEM_writeLE32(ostart, lhc);
+ ostart[4] = (BYTE)(cLitSize >> 10);
+ break;
@@ -2199,7 +2180,7 @@ index 0000000..79c3207
+ BYTE scratchBuffer[1<<MAX(MLFSELog,LLFSELog)];
+
+ /* Compress literals */
-+ { const BYTE* const literals = seqStorePtr->litStart;
++ { const BYTE* const literals = seqStorePtr->litStart;
+ size_t const litSize = seqStorePtr->lit - literals;
+ size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize);
+ if (ZSTD_isError(cSize)) return cSize;
@@ -2223,7 +2204,7 @@ index 0000000..79c3207
+ ZSTD_seqToCodes(seqStorePtr);
+
+ /* CTable for Literal Lengths */
-+ { U32 max = MaxLL;
++ { U32 max = MaxLL;
+ size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, zc->tmpCounters);
+ if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
+ *op++ = llCodeTable[0];
@@ -2247,7 +2228,7 @@ index 0000000..79c3207
+ } }
+
+ /* CTable for Offsets */
-+ { U32 max = MaxOff;
++ { U32 max = MaxOff;
+ size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, zc->tmpCounters);
+ if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
+ *op++ = ofCodeTable[0];
@@ -2271,7 +2252,7 @@ index 0000000..79c3207
+ } }
+
+ /* CTable for MatchLengths */
-+ { U32 max = MaxML;
++ { U32 max = MaxML;
+ size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, zc->tmpCounters);
+ if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
+ *op++ = *mlCodeTable;
@@ -2298,7 +2279,7 @@ index 0000000..79c3207
+ zc->flagStaticTables = 0;
+
+ /* Encoding Sequences */
-+ { BIT_CStream_t blockStream;
++ { BIT_CStream_t blockStream;
+ FSE_CState_t stateMatchLength;
+ FSE_CState_t stateOffsetBits;
+ FSE_CState_t stateLitLength;
+
@@ -2327,7 +2308,7 @@ index 0000000..79c3207
+ }
+ BIT_flushBits(&blockStream);
+
-+ { size_t n;
++ { size_t n;
+ for (n=nbSeq-2 ; n= maxCSize) {
+ zc->flagStaticHufTable = HUF_repeat_none;
@@ -2384,12 +2365,6 @@ index 0000000..79c3207
+ return op - ostart;
+}
+
-+#if 0 /* for debug */
-+# define STORESEQ_DEBUG
-+U32 g_startDebug = 0;
-+const BYTE* g_start = NULL;
-+#endif
-+
+/*! ZSTD_storeSeq() :
+ Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
+ `offsetCode` : distance to match, or 0 == repCode.
@@ -2397,15 +2372,6 @@ index 0000000..79c3207 +*/ +MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t matchCode) +{ -+#ifdef STORESEQ_DEBUG -+ if (g_startDebug) { -+ const U32 pos = (U32)((const BYTE*)literals - g_start); -+ if (g_start==NULL) g_start = (const BYTE*)literals; -+ if ((pos > 1895000) && (pos < 1895300)) -+ fprintf(stderr, "Cpos %6u :%5u literals & match %3u bytes at distance %6u \n", -+ pos, (U32)litLength, (U32)matchCode+MINMATCH, (U32)offsetCode); -+ } -+#endif + /* copy Literals */ + ZSTD_wildcopy(seqStorePtr->lit, literals, litLength); + seqStorePtr->lit += litLength; @@ -2432,57 +2398,15 @@ index 0000000..79c3207 +{ + if (MEM_isLittleEndian()) { + if (MEM_64bits()) { -+# if defined(_MSC_VER) && defined(_WIN64) -+ unsigned long r = 0; -+ _BitScanForward64( &r, (U64)val ); -+ return (unsigned)(r>>3); -+# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_ctzll((U64)val) >> 3); -+# else -+ static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 }; -+ return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; -+# endif + } else { /* 32 bits */ -+# if defined(_MSC_VER) -+ unsigned long r=0; -+ _BitScanForward( &r, (U32)val ); -+ return (unsigned)(r>>3); -+# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_ctz((U32)val) >> 3); -+# else -+ static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; -+ return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; -+# endif + } + } else { /* Big Endian CPU */ + if (MEM_64bits()) { -+# if defined(_MSC_VER) && defined(_WIN64) -+ unsigned long r = 0; -+ _BitScanReverse64( &r, val ); -+ return (unsigned)(r>>3); -+# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_clzll(val) >> 3); -+# else -+ unsigned r; -+ const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */ -+ if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; } -+ if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } -+ r += (!val); -+ return r; -+# endif + } else { /* 32 bits */ -+# if defined(_MSC_VER) -+ unsigned long r = 0; -+ _BitScanReverse( &r, (unsigned long)val ); -+ return (unsigned)(r>>3); -+# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_clz((U32)val) >> 3); -+# else -+ unsigned r; -+ if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } -+ r += (!val); -+ return r; -+# endif + } } +} + @@ -2599,7 +2523,7 @@ index 0000000..79c3207 + + /* init */ + ip += (ip==lowest); -+ { U32 const maxRep = (U32)(ip-lowest); ++ { U32 const maxRep = (U32)(ip-lowest); + if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; + if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; + } @@ -2608,10 +2532,10 @@ index 0000000..79c3207 + while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ + size_t mLength; + size_t const h = ZSTD_hashPtr(ip, hBits, mls); -+ U32 const current = (U32)(ip-base); ++ U32 const curr = (U32)(ip-base); + U32 const matchIndex = hashTable[h]; + const BYTE* match = base + matchIndex; -+ hashTable[h] = current; /* update hash table */ ++ hashTable[h] = curr; /* update hash table */ + + if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { + mLength = 
ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; @@ -2638,7 +2562,7 @@ index 0000000..79c3207 + + if (ip <= ilimit) { + /* Fill Table */ -+ hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2; /* here because current+2 could be > iend-8 */ ++ hashTable[ZSTD_hashPtr(base+curr+2, hBits, mls)] = curr+2; /* here because curr+2 could be > iend-8 */ + hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base); + /* check immediate repcode */ + while ( (ip <= ilimit) @@ -2659,7 +2583,7 @@ index 0000000..79c3207 + cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved; + + /* Last Literals */ -+ { size_t const lastLLSize = iend - anchor; ++ { size_t const lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } @@ -2712,12 +2636,12 @@ index 0000000..79c3207 + const U32 matchIndex = hashTable[h]; + const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base; + const BYTE* match = matchBase + matchIndex; -+ const U32 current = (U32)(ip-base); -+ const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */ ++ const U32 curr = (U32)(ip-base); ++ const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */ + const BYTE* repBase = repIndex < dictLimit ? dictBase : base; + const BYTE* repMatch = repBase + repIndex; + size_t mLength; -+ hashTable[h] = current; /* update hash table */ ++ hashTable[h] = curr; /* update hash table */ + + if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) + && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { @@ -2731,12 +2655,12 @@ index 0000000..79c3207 + ip += ((ip-anchor) >> g_searchStrength) + 1; + continue; + } -+ { const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend; ++ { const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend; + const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr; + U32 offset; + mLength = ZSTD_count_2segments(ip+EQUAL_READ32, match+EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32; + while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ -+ offset = current - matchIndex; ++ offset = curr - matchIndex; + offset_2 = offset_1; + offset_1 = offset; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); @@ -2748,12 +2672,12 @@ index 0000000..79c3207 + + if (ip <= ilimit) { + /* Fill Table */ -+ hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2; ++ hashTable[ZSTD_hashPtr(base+curr+2, hBits, mls)] = curr+2; + hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base); + /* check immediate repcode */ + while (ip <= ilimit) { -+ U32 const current2 = (U32)(ip-base); -+ U32 const repIndex2 = current2 - offset_2; ++ U32 const curr2 = (U32)(ip-base); ++ U32 const repIndex2 = curr2 - offset_2; + const BYTE* repMatch2 = repIndex2 < dictLimit ? 
dictBase + repIndex2 : base + repIndex2; + if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */ + && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { @@ -2761,7 +2685,7 @@ index 0000000..79c3207 + size_t repLength2 = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch2+EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32; + U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH); -+ hashTable[ZSTD_hashPtr(ip, hBits, mls)] = current2; ++ hashTable[ZSTD_hashPtr(ip, hBits, mls)] = curr2; + ip += repLength2; + anchor = ip; + continue; @@ -2773,7 +2697,7 @@ index 0000000..79c3207 + ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2; + + /* Last Literals */ -+ { size_t const lastLLSize = iend - anchor; ++ { size_t const lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } @@ -2844,7 +2768,7 @@ index 0000000..79c3207 + + /* init */ + ip += (ip==lowest); -+ { U32 const maxRep = (U32)(ip-lowest); ++ { U32 const maxRep = (U32)(ip-lowest); + if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; + if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; + } @@ -2854,14 +2778,14 @@ index 0000000..79c3207 + size_t mLength; + size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8); + size_t const h = ZSTD_hashPtr(ip, hBitsS, mls); -+ U32 const current = (U32)(ip-base); ++ U32 const curr = (U32)(ip-base); + U32 const matchIndexL = hashLong[h2]; + U32 const matchIndexS = hashSmall[h]; + const BYTE* matchLong = base + matchIndexL; + const BYTE* match = base + matchIndexS; -+ hashLong[h2] = hashSmall[h] = current; /* update hash tables */ ++ hashLong[h2] = hashSmall[h] = curr; /* update hash tables */ + -+ if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { /* note : by construction, offset_1 <= current */ ++ if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { /* note : by construction, offset_1 <= curr */ + mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; + ip++; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH); @@ -2875,7 +2799,7 @@ index 0000000..79c3207 + size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8); + U32 const matchIndex3 = hashLong[h3]; + const BYTE* match3 = base + matchIndex3; -+ hashLong[h3] = current + 1; ++ hashLong[h3] = curr + 1; + if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) { + mLength = ZSTD_count(ip+9, match3+8, iend) + 8; + ip++; @@ -2903,8 +2827,8 @@ index 0000000..79c3207 + + if (ip <= ilimit) { + /* Fill Table */ -+ hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = -+ hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2; /* here because current+2 could be > iend-8 */ ++ hashLong[ZSTD_hashPtr(base+curr+2, hBitsL, 8)] = ++ hashSmall[ZSTD_hashPtr(base+curr+2, hBitsS, mls)] = curr+2; /* here because curr+2 could be > iend-8 */ + hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = + hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base); + @@ -2928,7 +2852,7 @@ index 0000000..79c3207 + cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved; + + /* Last Literals */ -+ { size_t const lastLLSize = iend - anchor; ++ { size_t const lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } @@ -2988,12 +2912,12 @@ index 0000000..79c3207 + const BYTE* matchLongBase = matchLongIndex < dictLimit ? 
dictBase : base; + const BYTE* matchLong = matchLongBase + matchLongIndex; + -+ const U32 current = (U32)(ip-base); -+ const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */ ++ const U32 curr = (U32)(ip-base); ++ const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */ + const BYTE* repBase = repIndex < dictLimit ? dictBase : base; + const BYTE* repMatch = repBase + repIndex; + size_t mLength; -+ hashSmall[hSmall] = hashLong[hLong] = current; /* update hash table */ ++ hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */ + + if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) + && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { @@ -3007,7 +2931,7 @@ index 0000000..79c3207 + const BYTE* lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr; + U32 offset; + mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, lowPrefixPtr) + 8; -+ offset = current - matchLongIndex; ++ offset = curr - matchLongIndex; + while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ + offset_2 = offset_1; + offset_1 = offset; @@ -3019,19 +2943,19 @@ index 0000000..79c3207 + const BYTE* const match3Base = matchIndex3 < dictLimit ? dictBase : base; + const BYTE* match3 = match3Base + matchIndex3; + U32 offset; -+ hashLong[h3] = current + 1; ++ hashLong[h3] = curr + 1; + if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) { + const BYTE* matchEnd = matchIndex3 < dictLimit ? dictEnd : iend; + const BYTE* lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr; + mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, lowPrefixPtr) + 8; + ip++; -+ offset = current+1 - matchIndex3; ++ offset = curr+1 - matchIndex3; + while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */ + } else { + const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend; + const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr; + mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4; -+ offset = current - matchIndex; ++ offset = curr - matchIndex; + while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ + } + offset_2 = offset_1; @@ -3049,14 +2973,14 @@ index 0000000..79c3207 + + if (ip <= ilimit) { + /* Fill Table */ -+ hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2; -+ hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = current+2; ++ hashSmall[ZSTD_hashPtr(base+curr+2, hBitsS, mls)] = curr+2; ++ hashLong[ZSTD_hashPtr(base+curr+2, hBitsL, 8)] = curr+2; + hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base); + hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base); + /* check immediate repcode */ + while (ip <= ilimit) { -+ U32 const current2 = (U32)(ip-base); -+ U32 const repIndex2 = current2 - offset_2; ++ U32 const curr2 = (U32)(ip-base); ++ U32 const repIndex2 = curr2 - offset_2; + const BYTE* repMatch2 = repIndex2 < dictLimit ? 
dictBase + repIndex2 : base + repIndex2; + if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */ + && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { @@ -3064,8 +2988,8 @@ index 0000000..79c3207 + size_t const repLength2 = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch2+EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32; + U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH); -+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; -+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; ++ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = curr2; ++ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = curr2; + ip += repLength2; + anchor = ip; + continue; @@ -3077,7 +3001,7 @@ index 0000000..79c3207 + ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2; + + /* Last Literals */ -+ { size_t const lastLLSize = iend - anchor; ++ { size_t const lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } @@ -3126,47 +3050,21 @@ index 0000000..79c3207 + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const BYTE* match; -+ const U32 current = (U32)(ip-base); -+ const U32 btLow = btMask >= current ? 0 : current - btMask; -+ U32* smallerPtr = bt + 2*(current&btMask); ++ const U32 curr = (U32)(ip-base); ++ const U32 btLow = btMask >= curr ? 0 : curr - btMask; ++ U32* smallerPtr = bt + 2*(curr&btMask); + U32* largerPtr = smallerPtr + 1; + U32 dummy32; /* to be nullified at the end */ + U32 const windowLow = zc->lowLimit; -+ U32 matchEndIdx = current+8; ++ U32 matchEndIdx = curr+8; + size_t bestLength = 8; -+#ifdef ZSTD_C_PREDICT -+ U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0); -+ U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1); -+ predictedSmall += (predictedSmall>0); -+ predictedLarge += (predictedLarge>0); -+#endif /* ZSTD_C_PREDICT */ + -+ hashTable[h] = current; /* Update Hash Table */ ++ hashTable[h] = curr; /* Update Hash Table */ + + while (nbCompares-- && (matchIndex > windowLow)) { + U32* const nextPtr = bt + 2*(matchIndex & btMask); + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ + -+#ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */ -+ const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */ -+ if (matchIndex == predictedSmall) { -+ /* no need to check length, result known */ -+ *smallerPtr = matchIndex; -+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ -+ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ -+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ -+ predictedSmall = predictPtr[1] + (predictPtr[1]>0); -+ continue; -+ } -+ if (matchIndex == predictedLarge) { -+ *largerPtr = matchIndex; -+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ -+ largerPtr = nextPtr; -+ matchIndex = nextPtr[0]; -+ predictedLarge = predictPtr[0] + (predictPtr[0]>0); -+ continue; -+ } -+#endif + if ((!extDict) || (matchIndex+matchLength >= dictLimit)) { + match = base + matchIndex; + if (match[matchLength] == ip[matchLength]) @@ -3188,14 +3086,14 @@ index 0000000..79c3207 + break; /* drop , to guarantee consistency ; miss a bit of compression, but 
other solutions can corrupt the tree */ + + if (match[matchLength] < ip[matchLength]) { /* necessarily within correct buffer */ -+ /* match is smaller than current */ ++ /* match is smaller than curr */ + *smallerPtr = matchIndex; /* update smaller idx */ + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ -+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ ++ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to curr) */ + } else { -+ /* match is larger than current */ ++ /* match is larger than curr */ + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ @@ -3205,7 +3103,7 @@ index 0000000..79c3207 + + *smallerPtr = *largerPtr = 0; + if (bestLength > 384) return MIN(192, (U32)(bestLength - 384)); /* speed optimization */ -+ if (matchEndIdx > current + 8) return matchEndIdx - current - 8; ++ if (matchEndIdx > curr + 8) return matchEndIdx - curr - 8; + return 1; +} + @@ -3230,16 +3128,16 @@ index 0000000..79c3207 + const U32 dictLimit = zc->dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const prefixStart = base + dictLimit; -+ const U32 current = (U32)(ip-base); -+ const U32 btLow = btMask >= current ? 0 : current - btMask; ++ const U32 curr = (U32)(ip-base); ++ const U32 btLow = btMask >= curr ? 0 : curr - btMask; + const U32 windowLow = zc->lowLimit; -+ U32* smallerPtr = bt + 2*(current&btMask); -+ U32* largerPtr = bt + 2*(current&btMask) + 1; -+ U32 matchEndIdx = current+8; ++ U32* smallerPtr = bt + 2*(curr&btMask); ++ U32* largerPtr = bt + 2*(curr&btMask) + 1; ++ U32 matchEndIdx = curr+8; + U32 dummy32; /* to be nullified at the end */ + size_t bestLength = 0; + -+ hashTable[h] = current; /* Update Hash Table */ ++ hashTable[h] = curr; /* Update Hash Table */ + + while (nbCompares-- && (matchIndex > windowLow)) { + U32* const nextPtr = bt + 2*(matchIndex & btMask); @@ -3260,21 +3158,21 @@ index 0000000..79c3207 + if (matchLength > bestLength) { + if (matchLength > matchEndIdx - matchIndex) + matchEndIdx = matchIndex + (U32)matchLength; -+ if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) -+ bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex; ++ if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) ++ bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex; + if (ip+matchLength == iend) /* equal : no way to know if inf or sup */ + break; /* drop, to guarantee consistency (miss a little bit of compression) */ + } + + if (match[matchLength] < ip[matchLength]) { -+ /* match is smaller than current */ ++ /* match is smaller than curr */ + *smallerPtr = matchIndex; /* update smaller idx */ + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ -+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ ++ matchIndex = nextPtr[1]; /* new matchIndex larger than 
previous (closer to curr) */ + } else { -+ /* match is larger than current */ ++ /* match is larger than curr */ + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ @@ -3284,7 +3182,7 @@ index 0000000..79c3207 + + *smallerPtr = *largerPtr = 0; + -+ zc->nextToUpdate = (matchEndIdx > current + 8) ? matchEndIdx - 8 : current+1; ++ zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr+1; + return bestLength; +} + @@ -3417,8 +3315,8 @@ index 0000000..79c3207 + const BYTE* const prefixStart = base + dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const U32 lowLimit = zc->lowLimit; -+ const U32 current = (U32)(ip-base); -+ const U32 minChain = current > chainSize ? current - chainSize : 0; ++ const U32 curr = (U32)(ip-base); ++ const U32 minChain = curr > chainSize ? curr - chainSize : 0; + int nbAttempts=maxNbAttempts; + size_t ml=EQUAL_READ32-1; + @@ -3427,19 +3325,19 @@ index 0000000..79c3207 + + for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) { + const BYTE* match; -+ size_t currentMl=0; ++ size_t currMl=0; + if ((!extDict) || matchIndex >= dictLimit) { + match = base + matchIndex; + if (match[ml] == ip[ml]) /* potentially better */ -+ currentMl = ZSTD_count(ip, match, iLimit); ++ currMl = ZSTD_count(ip, match, iLimit); + } else { + match = dictBase + matchIndex; + if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */ -+ currentMl = ZSTD_count_2segments(ip+EQUAL_READ32, match+EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32; ++ currMl = ZSTD_count_2segments(ip+EQUAL_READ32, match+EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32; + } + + /* save best solution */ -+ if (currentMl > ml) { ml = currentMl; *offsetPtr = current - matchIndex + ZSTD_REP_MOVE; if (ip+currentMl == iLimit) break; /* best possible, and avoid read overflow*/ } ++ if (currMl > ml) { ml = currMl; *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE; if (ip+currMl == iLimit) break; /* best possible, and avoid read overflow*/ } + + if (matchIndex <= minChain) break; + matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask); @@ -3511,7 +3409,7 @@ index 0000000..79c3207 + /* init */ + ip += (ip==base); + ctx->nextToUpdate3 = ctx->nextToUpdate; -+ { U32 const maxRep = (U32)(ip-base); ++ { U32 const maxRep = (U32)(ip-base); + if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0; + if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0; + } @@ -3530,7 +3428,7 @@ index 0000000..79c3207 + } + + /* first search (depth 0) */ -+ { size_t offsetFound = 99999999; ++ { size_t offsetFound = 99999999; + size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls); + if (ml2 > matchLength) + matchLength = ml2, start = ip, offset=offsetFound; @@ -3552,7 +3450,7 @@ index 0000000..79c3207 + if ((mlRep >= EQUAL_READ32) && (gain2 > gain1)) + matchLength = mlRep, offset = 0, start = ip; + } -+ { size_t offset2=99999999; ++ { size_t offset2=99999999; + size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); @@ -3571,7 +3469,7 @@ index 0000000..79c3207 + if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) + matchLength = ml2, offset = 0, start = ip; + } -+ { size_t offset2=99999999; ++ { size_t offset2=99999999; + size_t const ml2 = 
searchMax(ctx, ip, iend, &offset2, maxSearches, mls); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); @@ -3591,7 +3489,7 @@ index 0000000..79c3207 + + /* store sequence */ +_storeSequence: -+ { size_t const litLength = start - anchor; ++ { size_t const litLength = start - anchor; + ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH); + anchor = ip = start + matchLength; + } @@ -3614,7 +3512,7 @@ index 0000000..79c3207 + ctx->repToConfirm[1] = offset_2 ? offset_2 : savedOffset; + + /* Last Literals */ -+ { size_t const lastLLSize = iend - anchor; ++ { size_t const lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } @@ -3680,10 +3578,10 @@ index 0000000..79c3207 + size_t matchLength=0; + size_t offset=0; + const BYTE* start=ip+1; -+ U32 current = (U32)(ip-base); ++ U32 curr = (U32)(ip-base); + + /* check repCode */ -+ { const U32 repIndex = (U32)(current+1 - offset_1); ++ { const U32 repIndex = (U32)(curr+1 - offset_1); + const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; + const BYTE* const repMatch = repBase + repIndex; + if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ @@ -3695,7 +3593,7 @@ index 0000000..79c3207 + } } + + /* first search (depth 0) */ -+ { size_t offsetFound = 99999999; ++ { size_t offsetFound = 99999999; + size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls); + if (ml2 > matchLength) + matchLength = ml2, start = ip, offset=offsetFound; @@ -3710,10 +3608,10 @@ index 0000000..79c3207 + if (depth>=1) + while (ip= 3) & (repIndex > lowestIndex)) /* intentional overflow */ @@ -3728,7 +3626,7 @@ index 0000000..79c3207 + } } + + /* search match, depth 1 */ -+ { size_t offset2=99999999; ++ { size_t offset2=99999999; + size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); @@ -3740,10 +3638,10 @@ index 0000000..79c3207 + /* let's find an even better one */ + if ((depth==2) && (ip= 3) & (repIndex > lowestIndex)) /* intentional overflow */ @@ -3758,7 +3656,7 @@ index 0000000..79c3207 + } } + + /* search match, depth 2 */ -+ { size_t offset2=99999999; ++ { size_t offset2=99999999; + size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); @@ -3780,7 +3678,7 @@ index 0000000..79c3207 + + /* store sequence */ +_storeSequence: -+ { size_t const litLength = start - anchor; ++ { size_t const litLength = start - anchor; + ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH); + anchor = ip = start + matchLength; + } @@ -3808,7 +3706,7 @@ index 0000000..79c3207 + ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2; + + /* Last Literals */ -+ { size_t const lastLLSize = iend - anchor; ++ { size_t const lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } @@ -3898,11 +3796,11 @@ index 0000000..79c3207 + ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->params.cParams.strategy, zc->lowLimit < zc->dictLimit); + const BYTE* const 
base = zc->base; + const BYTE* const istart = (const BYTE*)src; -+ const U32 current = (U32)(istart-base); ++ const U32 curr = (U32)(istart-base); + if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) return 0; /* don't even attempt compression below a certain srcSize */ + ZSTD_resetSeqStore(&(zc->seqStore)); -+ if (current > zc->nextToUpdate + 384) -+ zc->nextToUpdate = current - MIN(192, (U32)(current - zc->nextToUpdate - 384)); /* update tree not updated after finding very long rep matches */ ++ if (curr > zc->nextToUpdate + 384) ++ zc->nextToUpdate = curr - MIN(192, (U32)(curr - zc->nextToUpdate - 384)); /* update tree not updated after finding very long rep matches */ + blockCompressor(zc, src, srcSize); + return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize); +} @@ -3940,9 +3838,9 @@ index 0000000..79c3207 + /* preemptive overflow correction */ + if (cctx->lowLimit > (3U<<29)) { + U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->params.cParams.hashLog, cctx->params.cParams.strategy)) - 1; -+ U32 const current = (U32)(ip - cctx->base); -+ U32 const newCurrent = (current & cycleMask) + (1 << cctx->params.cParams.windowLog); -+ U32 const correction = current - newCurrent; ++ U32 const curr = (U32)(ip - cctx->base); ++ U32 const newCurr = (curr & cycleMask) + (1 << cctx->params.cParams.windowLog); ++ U32 const correction = curr - newCurr; + ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30); + ZSTD_reduceIndex(cctx, correction); + cctx->base += correction; @@ -4103,7 +4001,7 @@ index 0000000..79c3207 + const BYTE* const ip = (const BYTE*) src; + const BYTE* const iend = ip + srcSize; + -+ /* input becomes current prefix */ ++ /* input becomes curr prefix */ + zc->lowLimit = zc->dictLimit; + zc->dictLimit = (U32)(zc->nextSrc - zc->base); + zc->dictBase = zc->base; @@ -4182,12 +4080,12 @@ index 0000000..79c3207 + cctx->dictID = cctx->params.fParams.noDictIDFlag ? 
0 : MEM_readLE32(dictPtr); + dictPtr += 4; + -+ { size_t const hufHeaderSize = HUF_readCTable(cctx->hufTable, 255, dictPtr, dictEnd-dictPtr); ++ { size_t const hufHeaderSize = HUF_readCTable(cctx->hufTable, 255, dictPtr, dictEnd-dictPtr); + if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted); + dictPtr += hufHeaderSize; + } + -+ { unsigned offcodeLog; ++ { unsigned offcodeLog; + size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); + if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted); + if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted); @@ -4196,7 +4094,7 @@ index 0000000..79c3207 + dictPtr += offcodeHeaderSize; + } + -+ { short matchlengthNCount[MaxML+1]; ++ { short matchlengthNCount[MaxML+1]; + unsigned matchlengthMaxValue = MaxML, matchlengthLog; + size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); + if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted); @@ -4207,7 +4105,7 @@ index 0000000..79c3207 + dictPtr += matchlengthHeaderSize; + } + -+ { short litlengthNCount[MaxLL+1]; ++ { short litlengthNCount[MaxLL+1]; + unsigned litlengthMaxValue = MaxLL, litlengthLog; + size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); + if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted); @@ -4224,7 +4122,7 @@ index 0000000..79c3207 + cctx->rep[2] = MEM_readLE32(dictPtr+8); + dictPtr += 12; + -+ { size_t const dictContentSize = (size_t)(dictEnd - dictPtr); ++ { size_t const dictContentSize = (size_t)(dictEnd - dictPtr); + U32 offcodeMax = MaxOff; + if (dictContentSize <= ((U32)-1) - 128 KB) { + U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */ @@ -4233,7 +4131,7 @@ index 0000000..79c3207 + /* All offset values <= dictContentSize + 128 KB must be representable */ + CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff))); + /* All repCodes must be <= dictContentSize and != 0*/ -+ { U32 u; ++ { U32 u; + for (u=0; u<3; u++) { + if (cctx->rep[u] == 0) return ERROR(dictionary_corrupted); + if (cctx->rep[u] > dictContentSize) return ERROR(dictionary_corrupted); @@ -4391,7 +4289,7 @@ index 0000000..79c3207 +{ + if (!customMem.customAlloc || !customMem.customFree) return NULL; + -+ { ZSTD_CDict* const cdict = (ZSTD_CDict*) ZSTD_malloc(sizeof(ZSTD_CDict), customMem); ++ { ZSTD_CDict* const cdict = (ZSTD_CDict*) ZSTD_malloc(sizeof(ZSTD_CDict), customMem); + ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(customMem); + + if (!cdict || !cctx) { @@ -4411,7 +4309,7 @@ index 0000000..79c3207 + cdict->dictContent = internalBuffer; + } + -+ { size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0); ++ { size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0); + if (ZSTD_isError(errorCode)) { + ZSTD_free(cdict->dictBuffer, customMem); + ZSTD_free(cdict, customMem); @@ -4434,7 +4332,7 @@ index 0000000..79c3207 +size_t ZSTD_freeCDict(ZSTD_CDict* cdict) +{ + if (cdict==NULL) return 0; /* support free on NULL */ -+ { ZSTD_customMem const cMem = cdict->refContext->customMem; ++ { ZSTD_customMem const cMem = cdict->refContext->customMem; + ZSTD_freeCCtx(cdict->refContext); + ZSTD_free(cdict->dictBuffer, cMem); + ZSTD_free(cdict, cMem); @@ -4536,7 
+4434,7 @@ index 0000000..79c3207 +size_t ZSTD_freeCStream(ZSTD_CStream* zcs) +{ + if (zcs==NULL) return 0; /* support free on NULL */ -+ { ZSTD_customMem const cMem = zcs->customMem; ++ { ZSTD_customMem const cMem = zcs->customMem; + ZSTD_freeCCtx(zcs->cctx); + zcs->cctx = NULL; + ZSTD_freeCDict(zcs->cdictLocal); @@ -4587,7 +4485,7 @@ index 0000000..79c3207 + ZSTD_parameters params, unsigned long long pledgedSrcSize) +{ + /* allocate buffers */ -+ { size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog; ++ { size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog; + if (zcs->inBuffSize < neededInBuffSize) { + zcs->inBuffSize = neededInBuffSize; + ZSTD_free(zcs->inBuff, zcs->customMem); @@ -4671,15 +4569,15 @@ index 0000000..79c3207 + + case zcss_load: + /* complete inBuffer */ -+ { size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos; ++ { size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos; + size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend-ip); + zcs->inBuffPos += loaded; + ip += loaded; + if ( (zcs->inBuffPos==zcs->inToCompress) || (!flush && (toLoad != loaded)) ) { + someMoreWork = 0; break; /* not enough input to get a full block : stop there, wait for more */ + } } -+ /* compress current block (note : this stage cannot be stopped in the middle) */ -+ { void* cDst; ++ /* compress curr block (note : this stage cannot be stopped in the middle) */ ++ { void* cDst; + size_t cSize; + size_t const iSize = zcs->inBuffPos - zcs->inToCompress; + size_t oSize = oend-op; @@ -4704,7 +4602,7 @@ index 0000000..79c3207 + } + + case zcss_flush: -+ { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; ++ { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; + size_t const flushed = ZSTD_limitCopy(op, oend-op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush); + op += flushed; + zcs->outBuffFlushedSize += flushed; @@ -4727,7 +4625,7 @@ index 0000000..79c3207 + *dstCapacityPtr = op - ostart; + zcs->inputProcessed += *srcSizePtr; + if (zcs->frameEnded) return 0; -+ { size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos; ++ { size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos; + if (hintInSize==0) hintInSize = zcs->blockSize; + return hintInSize; + } @@ -4791,7 +4689,7 @@ index 0000000..79c3207 + } + + /* flush epilogue */ -+ { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; ++ { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; + size_t const flushed = ZSTD_limitCopy(op, oend-op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush); + op += flushed; + zcs->outBuffFlushedSize += flushed; @@ -4990,10 +4888,10 @@ index 0000000..79c3207 +MODULE_DESCRIPTION("Zstd Compressor"); diff --git a/lib/zstd/decompress.c b/lib/zstd/decompress.c new file mode 100644 -index 0000000..98508b1 +index 0000000..378d2c5 --- /dev/null +++ b/lib/zstd/decompress.c -@@ -0,0 +1,2377 @@ +@@ -0,0 +1,2349 @@ +/** + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. 
@@ -5068,8 +4966,8 @@ index 0000000..98508b1 + const HUF_DTable* HUFptr; + ZSTD_entropyTables_t entropy; + const void* previousDstEnd; /* detect continuity */ -+ const void* base; /* start of current segment */ -+ const void* vBase; /* virtual start of previous segment if it was just before current one */ ++ const void* base; /* start of curr segment */ ++ const void* vBase; /* virtual start of previous segment if it was just before curr one */ + const void* dictEnd; /* end of previous segment */ + size_t expected; + ZSTD_frameParams fParams; @@ -5145,30 +5043,6 @@ index 0000000..98508b1 + memcpy(dstDCtx, srcDCtx, sizeof(ZSTD_DCtx) - workSpaceSize); /* no need to copy workspace */ +} + -+#if 0 -+/* deprecated */ -+static void ZSTD_refDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx) -+{ -+ ZSTD_decompressBegin(dstDCtx); /* init */ -+ if (srcDCtx) { /* support refDCtx on NULL */ -+ dstDCtx->dictEnd = srcDCtx->dictEnd; -+ dstDCtx->vBase = srcDCtx->vBase; -+ dstDCtx->base = srcDCtx->base; -+ dstDCtx->previousDstEnd = srcDCtx->previousDstEnd; -+ dstDCtx->dictID = srcDCtx->dictID; -+ dstDCtx->litEntropy = srcDCtx->litEntropy; -+ dstDCtx->fseEntropy = srcDCtx->fseEntropy; -+ dstDCtx->LLTptr = srcDCtx->entropy.LLTable; -+ dstDCtx->MLTptr = srcDCtx->entropy.MLTable; -+ dstDCtx->OFTptr = srcDCtx->entropy.OFTable; -+ dstDCtx->HUFptr = srcDCtx->entropy.hufTable; -+ dstDCtx->entropy.rep[0] = srcDCtx->entropy.rep[0]; -+ dstDCtx->entropy.rep[1] = srcDCtx->entropy.rep[1]; -+ dstDCtx->entropy.rep[2] = srcDCtx->entropy.rep[2]; -+ } -+} -+#endif -+ +static void ZSTD_refDDict(ZSTD_DCtx* dstDCtx, const ZSTD_DDict* ddict); + + @@ -5184,7 +5058,7 @@ index 0000000..98508b1 +unsigned ZSTD_isFrame(const void* buffer, size_t size) +{ + if (size < 4) return 0; -+ { U32 const magic = MEM_readLE32(buffer); ++ { U32 const magic = MEM_readLE32(buffer); + if (magic == ZSTD_MAGICNUMBER) return 1; + if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) return 1; + } @@ -5198,7 +5072,7 @@ index 0000000..98508b1 +static size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize) +{ + if (srcSize < ZSTD_frameHeaderSize_prefix) return ERROR(srcSize_wrong); -+ { BYTE const fhd = ((const BYTE*)src)[4]; ++ { BYTE const fhd = ((const BYTE*)src)[4]; + U32 const dictID= fhd & 3; + U32 const singleSegment = (fhd >> 5) & 1; + U32 const fcsId = fhd >> 6; @@ -5233,7 +5107,7 @@ index 0000000..98508b1 + { size_t const fhsize = ZSTD_frameHeaderSize(src, srcSize); + if (srcSize < fhsize) return fhsize; } + -+ { BYTE const fhdByte = ip[4]; ++ { BYTE const fhdByte = ip[4]; + size_t pos = 5; + U32 const dictIDSizeCode = fhdByte&3; + U32 const checksumFlag = (fhdByte>>2)&1; @@ -5379,7 +5253,7 @@ index 0000000..98508b1 +size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr) +{ + if (srcSize < ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); -+ { U32 const cBlockHeader = MEM_readLE24(src); ++ { U32 const cBlockHeader = MEM_readLE24(src); + U32 const cSize = cBlockHeader >> 3; + bpPtr->lastBlock = cBlockHeader & 1; + bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3); @@ -5414,7 +5288,7 @@ index 0000000..98508b1 +{ + if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected); + -+ { const BYTE* const istart = (const BYTE*) src; ++ { const BYTE* const istart = (const BYTE*) src; + symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3); + + switch(litEncType) @@ -5424,7 +5298,7 @@ index 0000000..98508b1 + /* fall-through */ + case set_compressed: + if (srcSize < 5) return 
ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */ -+ { size_t lhSize, litSize, litCSize; ++ { size_t lhSize, litSize, litCSize; + U32 singleStream=0; + U32 const lhlCode = (istart[0] >> 2) & 3; + U32 const lhc = MEM_readLE32(istart); @@ -5471,7 +5345,7 @@ index 0000000..98508b1 + } + + case set_basic: -+ { size_t litSize, lhSize; ++ { size_t litSize, lhSize; + U32 const lhlCode = ((istart[0]) >> 2) & 3; + switch(lhlCode) + { @@ -5504,7 +5378,7 @@ index 0000000..98508b1 + } + + case set_rle: -+ { U32 const lhlCode = ((istart[0]) >> 2) & 3; ++ { U32 const lhlCode = ((istart[0]) >> 2) & 3; + size_t litSize, lhSize; + switch(lhlCode) + { @@ -5738,7 +5612,7 @@ index 0000000..98508b1 + return 0; + default : /* impossible */ + case set_compressed : -+ { U32 tableLog; ++ { U32 tableLog; + S16 norm[MaxSeq+1]; + size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize); + if (FSE_isError(headerSize)) return ERROR(corruption_detected); @@ -5760,7 +5634,7 @@ index 0000000..98508b1 + if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong); + + /* SeqHead */ -+ { int nbSeq = *ip++; ++ { int nbSeq = *ip++; + if (!nbSeq) { *nbSeqPtr=0; return 1; } + if (nbSeq > 0x7F) { + if (nbSeq == 0xFF) { @@ -5776,25 +5650,25 @@ index 0000000..98508b1 + + /* FSE table descriptors */ + if (ip+4 > iend) return ERROR(srcSize_wrong); /* minimum possible size */ -+ { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6); ++ { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6); + symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3); + symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3); + ip++; + + /* Build DTables */ -+ { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, ++ { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, + LLtype, MaxLL, LLFSELog, + ip, iend-ip, LL_defaultDTable, dctx->fseEntropy); + if (ZSTD_isError(llhSize)) return ERROR(corruption_detected); + ip += llhSize; + } -+ { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, ++ { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, + OFtype, MaxOff, OffFSELog, + ip, iend-ip, OF_defaultDTable, dctx->fseEntropy); + if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected); + ip += ofhSize; + } -+ { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, ++ { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, + MLtype, MaxML, MLFSELog, + ip, iend-ip, ML_defaultDTable, dctx->fseEntropy); + if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected); @@ -5860,8 +5734,8 @@ index 0000000..98508b1 + memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; + } -+ /* span extDict & currentPrefixSegment */ -+ { size_t const length1 = dictEnd - match; ++ /* span extDict & currPrefixSegment */ ++ { size_t const length1 = dictEnd - match; + memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; @@ -5905,7 +5779,7 @@ index 0000000..98508b1 + 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD }; + + /* sequence */ -+ { size_t offset; ++ { size_t offset; + if (!ofCode) + offset = 0; + else { @@ -5983,8 +5857,8 @@ index 0000000..98508b1 + memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; + } -+ /* span extDict & currentPrefixSegment */ -+ { size_t const length1 = dictEnd - 
match; ++ /* span extDict & currPrefixSegment */ ++ { size_t const length1 = dictEnd - match; + memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; @@ -6047,7 +5921,7 @@ index 0000000..98508b1 + int nbSeq; + + /* Build Decoding Tables */ -+ { size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize); ++ { size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize); + if (ZSTD_isError(seqHSize)) return seqHSize; + ip += seqHSize; + } @@ -6064,7 +5938,7 @@ index 0000000..98508b1 + + for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) { + nbSeq--; -+ { seq_t const sequence = ZSTD_decodeSequence(&seqState); ++ { seq_t const sequence = ZSTD_decodeSequence(&seqState); + size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd); + if (ZSTD_isError(oneSeqSize)) return oneSeqSize; + op += oneSeqSize; @@ -6077,7 +5951,7 @@ index 0000000..98508b1 + } + + /* last literal segment */ -+ { size_t const lastLLSize = litEnd - litPtr; ++ { size_t const lastLLSize = litEnd - litPtr; + if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall); + memcpy(op, litPtr, lastLLSize); + op += lastLLSize; @@ -6118,7 +5992,7 @@ index 0000000..98508b1 + 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD }; + + /* sequence */ -+ { size_t offset; ++ { size_t offset; + if (!ofCode) + offset = 0; + else { @@ -6159,7 +6033,7 @@ index 0000000..98508b1 + if (MEM_32bits() || + (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BIT_reloadDStream(&seqState->DStream); + -+ { size_t const pos = seqState->pos + seq.litLength; ++ { size_t const pos = seqState->pos + seq.litLength; + seq.match = seqState->base + pos - seq.offset; /* single memory segment */ + if (seq.offset > pos) seq.match += seqState->gotoDict; /* separate memory segment */ + seqState->pos = pos + seq.matchLength; @@ -6196,11 +6070,9 @@ index 0000000..98508b1 + const BYTE* match = sequence.match; + + /* check */ -+#if 1 + if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ + if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ + if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd); -+#endif + + /* copy Literals */ + ZSTD_copy8(op, *litPtr); @@ -6210,7 +6082,6 @@ index 0000000..98508b1 + *litPtr = iLitEnd; /* update for next sequence */ + + /* copy Match */ -+#if 1 + if (sequence.offset > (size_t)(oLitEnd - base)) { + /* offset beyond prefix */ + if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected); @@ -6218,8 +6089,8 @@ index 0000000..98508b1 + memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; + } -+ /* span extDict & currentPrefixSegment */ -+ { size_t const length1 = dictEnd - match; ++ /* span extDict & currPrefixSegment */ ++ { size_t const length1 = dictEnd - match; + memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; @@ -6231,7 +6102,6 @@ index 0000000..98508b1 + } + } } + /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */ -+#endif + + /* match within prefix */ + if (sequence.offset < 8) { @@ -6283,7 +6153,7 @@ index 0000000..98508b1 + int nbSeq; + + /* Build Decoding Tables */ -+ { size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize); ++ { size_t const seqHSize = 
ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize); + if (ZSTD_isError(seqHSize)) return seqHSize; + ip += seqHSize; + } @@ -6337,7 +6207,7 @@ index 0000000..98508b1 + } + + /* last literal segment */ -+ { size_t const lastLLSize = litEnd - litPtr; ++ { size_t const lastLLSize = litEnd - litPtr; + if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall); + memcpy(op, litPtr, lastLLSize); + op += lastLLSize; @@ -6356,7 +6226,7 @@ index 0000000..98508b1 + if (srcSize >= ZSTD_BLOCKSIZE_ABSOLUTEMAX) return ERROR(srcSize_wrong); + + /* Decode literals section */ -+ { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize); ++ { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize); + if (ZSTD_isError(litCSize)) return litCSize; + ip += litCSize; + srcSize -= litCSize; @@ -6430,7 +6300,7 @@ index 0000000..98508b1 + if (ZSTD_isError(headerSize)) return headerSize; + + /* Frame Header */ -+ { size_t const ret = ZSTD_getFrameParams(&fParams, ip, remainingSize); ++ { size_t const ret = ZSTD_getFrameParams(&fParams, ip, remainingSize); + if (ZSTD_isError(ret)) return ret; + if (ret > 0) return ERROR(srcSize_wrong); + } @@ -6478,7 +6348,7 @@ index 0000000..98508b1 + if (remainingSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); + + /* Frame Header */ -+ { size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix); ++ { size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix); + if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize; + if (remainingSize < frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); + CHECK_F(ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize)); @@ -6590,7 +6460,7 @@ index 0000000..98508b1 + } + ZSTD_checkContinuity(dctx, dst); + -+ { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, ++ { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, + &src, &srcSize); + if (ZSTD_isError(res)) return res; + /* don't need to bounds check this, ZSTD_decompressFrame will have @@ -6683,7 +6553,7 @@ index 0000000..98508b1 + return 0; + + case ZSTDds_decodeBlockHeader: -+ { blockProperties_t bp; ++ { blockProperties_t bp; + size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); + if (ZSTD_isError(cBlockSize)) return cBlockSize; + dctx->expected = cBlockSize; @@ -6710,7 +6580,7 @@ index 0000000..98508b1 + } + case ZSTDds_decompressLastBlock: + case ZSTDds_decompressBlock: -+ { size_t rSize; ++ { size_t rSize; + switch(dctx->bType) + { + case bt_compressed: @@ -6745,7 +6615,7 @@ index 0000000..98508b1 + return rSize; + } + case ZSTDds_checkChecksum: -+ { U32 const h32 = (U32)xxh64_digest(&dctx->xxhState); ++ { U32 const h32 = (U32)xxh64_digest(&dctx->xxhState); + U32 const check32 = MEM_readLE32(src); /* srcSize == 4, guaranteed by dctx->expected */ + if (check32 != h32) return ERROR(checksum_wrong); + dctx->expected = 0; @@ -6753,13 +6623,13 @@ index 0000000..98508b1 + return 0; + } + case ZSTDds_decodeSkippableHeader: -+ { memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected); ++ { memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected); + dctx->expected = MEM_readLE32(dctx->headerBuffer + 4); + dctx->stage = ZSTDds_skipFrame; + return 0; + } + case ZSTDds_skipFrame: -+ { dctx->expected = 0; ++ { dctx->expected = 0; + dctx->stage = ZSTDds_getFrameHeaderSize; + return 0; + } @@ -6790,12 +6660,12 @@ index 0000000..98508b1 + dictPtr += 8; /* skip header = magic + 
dictID */ + + -+ { size_t const hSize = HUF_readDTableX4(entropy->hufTable, dictPtr, dictEnd-dictPtr); ++ { size_t const hSize = HUF_readDTableX4(entropy->hufTable, dictPtr, dictEnd-dictPtr); + if (HUF_isError(hSize)) return ERROR(dictionary_corrupted); + dictPtr += hSize; + } + -+ { short offcodeNCount[MaxOff+1]; ++ { short offcodeNCount[MaxOff+1]; + U32 offcodeMaxValue = MaxOff, offcodeLog; + size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); + if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted); @@ -6804,7 +6674,7 @@ index 0000000..98508b1 + dictPtr += offcodeHeaderSize; + } + -+ { short matchlengthNCount[MaxML+1]; ++ { short matchlengthNCount[MaxML+1]; + unsigned matchlengthMaxValue = MaxML, matchlengthLog; + size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); + if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted); @@ -6813,7 +6683,7 @@ index 0000000..98508b1 + dictPtr += matchlengthHeaderSize; + } + -+ { short litlengthNCount[MaxLL+1]; ++ { short litlengthNCount[MaxLL+1]; + unsigned litlengthMaxValue = MaxLL, litlengthLog; + size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); + if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted); @@ -6823,7 +6693,7 @@ index 0000000..98508b1 + } + + if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted); -+ { int i; ++ { int i; + size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12)); + for (i=0; i<3; i++) { + U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4; @@ -6837,14 +6707,14 @@ index 0000000..98508b1 +static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) +{ + if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize); -+ { U32 const magic = MEM_readLE32(dict); ++ { U32 const magic = MEM_readLE32(dict); + if (magic != ZSTD_DICT_MAGIC) { + return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */ + } } + dctx->dictID = MEM_readLE32((const char*)dict + 4); + + /* load entropy tables */ -+ { size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize); ++ { size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize); + if (ZSTD_isError(eSize)) return ERROR(dictionary_corrupted); + dict = (const char*)dict + eSize; + dictSize -= eSize; @@ -6921,7 +6791,7 @@ index 0000000..98508b1 + ddict->dictID = 0; + ddict->entropyPresent = 0; + if (ddict->dictSize < 8) return 0; -+ { U32 const magic = MEM_readLE32(ddict->dictContent); ++ { U32 const magic = MEM_readLE32(ddict->dictContent); + if (magic != ZSTD_DICT_MAGIC) return 0; /* pure content mode */ + } + ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + 4); @@ -6937,7 +6807,7 @@ index 0000000..98508b1 +{ + if (!customMem.customAlloc || !customMem.customFree) return NULL; + -+ { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem); ++ { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem); + if (!ddict) return NULL; + ddict->cMem = customMem; + @@ -6954,7 +6824,7 @@ index 0000000..98508b1 + ddict->dictSize = dictSize; + ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ + /* parse dictionary content */ -+ { size_t const errorCode = ZSTD_loadEntropy_inDDict(ddict); ++ { size_t const errorCode = 
ZSTD_loadEntropy_inDDict(ddict); + if (ZSTD_isError(errorCode)) { + ZSTD_freeDDict(ddict); + return NULL; @@ -6978,7 +6848,7 @@ index 0000000..98508b1 +size_t ZSTD_freeDDict(ZSTD_DDict* ddict) +{ + if (ddict==NULL) return 0; /* support free on NULL */ -+ { ZSTD_customMem const cMem = ddict->cMem; ++ { ZSTD_customMem const cMem = ddict->cMem; + ZSTD_free(ddict->dictBuffer, cMem); + ZSTD_free(ddict, cMem); + return 0; @@ -7125,7 +6995,7 @@ index 0000000..98508b1 +size_t ZSTD_freeDStream(ZSTD_DStream* zds) +{ + if (zds==NULL) return 0; /* support free on null */ -+ { ZSTD_customMem const cMem = zds->customMem; ++ { ZSTD_customMem const cMem = zds->customMem; + ZSTD_freeDCtx(zds->dctx); + zds->dctx = NULL; + ZSTD_freeDDict(zds->ddictLocal); @@ -7182,7 +7052,7 @@ index 0000000..98508b1 + /* fall-through */ + + case zdss_loadHeader : -+ { size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize); ++ { size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize); + if (ZSTD_isError(hSize)) + return hSize; + if (hSize != 0) { /* need more input */ @@ -7214,9 +7084,9 @@ index 0000000..98508b1 + + /* Consume header */ + ZSTD_refDDict(zds->dctx, zds->ddict); -+ { size_t const h1Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); /* == ZSTD_frameHeaderSize_prefix */ ++ { size_t const h1Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); /* == ZSTD_frameHeaderSize_prefix */ + CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer, h1Size)); -+ { size_t const h2Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); ++ { size_t const h2Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); + CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer+h1Size, h2Size)); + } } + @@ -7224,7 +7094,7 @@ index 0000000..98508b1 + if (zds->fParams.windowSize > zds->maxWindowSize) return ERROR(frameParameter_windowTooLarge); + + /* Adapt buffer sizes to frame header instructions */ -+ { size_t const blockSize = MIN(zds->fParams.windowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX); ++ { size_t const blockSize = MIN(zds->fParams.windowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX); + size_t const neededOutSize = zds->fParams.windowSize + blockSize + WILDCOPY_OVERLENGTH * 2; + zds->blockSize = blockSize; + if (zds->inBuffSize < blockSize) { @@ -7243,7 +7113,7 @@ index 0000000..98508b1 + /* pass-through */ + + case zdss_read: -+ { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); ++ { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); + if (neededInSize==0) { /* end of frame */ + zds->stage = zdss_init; + someMoreWork = 0; @@ -7267,7 +7137,7 @@ index 0000000..98508b1 + } + + case zdss_load: -+ { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); ++ { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); + size_t const toLoad = neededInSize - zds->inPos; /* should always be <= remaining space within inBuff */ + size_t loadedSize; + if (toLoad > zds->inBuffSize - zds->inPos) return ERROR(corruption_detected); /* should never happen */ @@ -7290,7 +7160,7 @@ index 0000000..98508b1 + } } + + case zdss_flush: -+ { size_t const toFlushSize = zds->outEnd - zds->outStart; ++ { size_t const toFlushSize = zds->outEnd - zds->outStart; + size_t const flushedSize = ZSTD_limitCopy(op, oend-op, zds->outBuff + zds->outStart, toFlushSize); + op += flushedSize; + zds->outStart += flushedSize; @@ -7310,7 +7180,7 @@ index 0000000..98508b1 + /* result */ + input->pos += (size_t)(ip-istart); + output->pos += (size_t)(op-ostart); -+ { 
size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds->dctx); ++ { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds->dctx); + if (!nextSrcSizeHint) { /* frame fully decoded */ + if (zds->outEnd == zds->outStart) { /* output fully flushed */ + if (zds->hostageByte) { @@ -7373,7 +7243,7 @@ index 0000000..98508b1 +MODULE_DESCRIPTION("Zstd Decompressor"); diff --git a/lib/zstd/entropy_common.c b/lib/zstd/entropy_common.c new file mode 100644 -index 0000000..68d88082 +index 0000000..36ad266 --- /dev/null +++ b/lib/zstd/entropy_common.c @@ -0,0 +1,217 @@ @@ -7486,7 +7356,7 @@ index 0000000..68d88082 + } else { + bitStream >>= 2; + } } -+ { int const max = (2*threshold-1) - remaining; ++ { int const max = (2*threshold-1) - remaining; + int count; + + if ((bitStream & (threshold-1)) < (U32)max) { @@ -7551,7 +7421,7 @@ index 0000000..68d88082 + if (iSize+1 > srcSize) return ERROR(srcSize_wrong); + if (oSize >= hwSize) return ERROR(corruption_detected); + ip += 1; -+ { U32 n; ++ { U32 n; + for (n=0; n> 4; + huffWeight[n+1] = ip[n/2] & 15; @@ -7566,7 +7436,7 @@ index 0000000..68d88082 + /* collect weight stats */ + memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32)); + weightTotal = 0; -+ { U32 n; for (n=0; n= HUF_TABLELOG_MAX) return ERROR(corruption_detected); + rankStats[huffWeight[n]]++; + weightTotal += (1 << huffWeight[n]) >> 1; @@ -7574,11 +7444,11 @@ index 0000000..68d88082 + if (weightTotal == 0) return ERROR(corruption_detected); + + /* get last non-null symbol weight (implied, total must be 2^n) */ -+ { U32 const tableLog = BIT_highbit32(weightTotal) + 1; ++ { U32 const tableLog = BIT_highbit32(weightTotal) + 1; + if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected); + *tableLogPtr = tableLog; + /* determine last weight */ -+ { U32 const total = 1 << tableLog; ++ { U32 const total = 1 << tableLog; + U32 const rest = total - weightTotal; + U32 const verif = 1 << BIT_highbit32(rest); + U32 const lastWeight = BIT_highbit32(rest) + 1; @@ -7646,7 +7516,7 @@ index 0000000..8cf148b +#endif /* ERROR_H_MODULE */ diff --git a/lib/zstd/fse.h b/lib/zstd/fse.h new file mode 100644 -index 0000000..14fa439 +index 0000000..6a78957 --- /dev/null +++ b/lib/zstd/fse.h @@ -0,0 +1,606 @@ @@ -8111,7 +7981,7 @@ index 0000000..14fa439 +MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol) +{ + FSE_initCState(statePtr, ct); -+ { const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; ++ { const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; + const U16* stateTable = (const U16*)(statePtr->stateTable); + U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16); + statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits; @@ -8258,10 +8128,10 @@ index 0000000..14fa439 +#endif /* FSE_H */ diff --git a/lib/zstd/fse_compress.c b/lib/zstd/fse_compress.c new file mode 100644 -index 0000000..b6a6d46 +index 0000000..d0b5673 --- /dev/null +++ b/lib/zstd/fse_compress.c -@@ -0,0 +1,788 @@ +@@ -0,0 +1,774 @@ +/* ****************************************************************** + FSE : Finite State Entropy encoder + Copyright (C) 2013-2015, Yann Collet. 
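The ZSTD_execSequence() hunks above split every match copy into three cases: matches spanning the extDict/prefix boundary (memmove in two pieces), offsets shorter than 8 bytes (expanded via the dec32table/dec64 corrections before wildcopy takes over), and the common case where straight 8-byte copies are safe. A minimal sketch of the underlying LZ77 copy semantics these paths implement, using only standard C:

/* Sketch only: the portable form of an LZ77 match copy. For offset < 8
 * the source and destination overlap closely, so the copy must run
 * strictly low-to-high: later iterations re-read bytes written by
 * earlier ones, replicating the short pattern. */
static void lz_match_copy(unsigned char *op, size_t offset, size_t matchLength)
{
	const unsigned char *match = op - offset;
	while (matchLength--)
		*op++ = *match++;
}

With offset == 1 this degenerates to run-length expansion of a single byte, which is exactly why ZSTD_execSequence() cannot use memcpy() for close overlaps and needs the byte-wise corrections before switching to 8-byte wildcopy.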
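One level up, ZSTD_decompressContinue() consumes exactly the byte count announced by ZSTD_nextSrcSizeToDecompress(), advancing the ZSTDds_* state machine one header, block, or checksum at a time. A minimal single-shot driver for that contract, assuming the caller has already obtained and reset a ZSTD_DCtx through the allocation helpers defined elsewhere in this patch:

/* Sketch only: walk one complete frame through the staged decoder.
 * neededInSize == 0 signals frame completion; header and checksum
 * steps produce 0 output bytes. */
static size_t decompress_staged(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
				const void *src, size_t srcSize)
{
	const BYTE *ip = (const BYTE *)src;
	const BYTE *const iend = ip + srcSize;
	BYTE *op = (BYTE *)dst;
	BYTE *const oend = op + dstCapacity;

	for (;;) {
		size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(dctx);
		size_t decodedSize;
		if (neededInSize == 0)
			break;				/* frame fully decoded */
		if (neededInSize > (size_t)(iend - ip))
			return ERROR(srcSize_wrong);	/* truncated source */
		decodedSize = ZSTD_decompressContinue(dctx, op, (size_t)(oend - op),
						      ip, neededInSize);
		if (ZSTD_isError(decodedSize))
			return decodedSize;
		ip += neededInSize;
		op += decodedSize;
	}
	return (size_t)(op - (BYTE *)dst);
}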
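At the streaming layer, the zdss_* stages make ZSTD_decompressStream() resumable over caller-owned buffers, returning the next-source-size hint (0 once a frame is decoded and fully flushed). A hedged caller sketch follows; the ZSTD_inBuffer/ZSTD_outBuffer member names (src, dst, size, pos) follow the upstream zstd streaming API and are an assumption here, since these hunks only show the input->pos/output->pos accesses:

/* Sketch only: feed one chunk of compressed input, draining as we go.
 * Both structs carry a pos cursor that ZSTD_decompressStream()
 * advances, so the caller simply resumes with the same objects. */
static size_t stream_one_chunk(ZSTD_DStream *zds, ZSTD_outBuffer *out,
			       ZSTD_inBuffer *in)
{
	while (in->pos < in->size) {
		size_t const hint = ZSTD_decompressStream(zds, out, in);
		if (ZSTD_isError(hint))
			return hint;
		if (hint == 0)
			break;	/* frame done, output fully flushed */
		if (out->pos == out->size)
			break;	/* consume out->dst, reset out->pos, call again */
	}
	return 0;
}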
@@ -8370,7 +8240,7 @@ index 0000000..b6a6d46 + * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */ + + /* symbol start positions */ -+ { U32 u; ++ { U32 u; + cumul[0] = 0; + for (u=1; u<=maxSymbolValue+1; u++) { + if (normalizedCounter[u-1]==-1) { /* Low proba symbol */ @@ -8383,7 +8253,7 @@ index 0000000..b6a6d46 + } + + /* Spread symbols */ -+ { U32 position = 0; ++ { U32 position = 0; + U32 symbol; + for (symbol=0; symbol<=maxSymbolValue; symbol++) { + int nbOccurences; @@ -8397,13 +8267,13 @@ index 0000000..b6a6d46 + } + + /* Build table */ -+ { U32 u; for (u=0; u>= 16; + bitCount -= 16; + } } -+ { int count = normalizedCounter[charnum++]; ++ { int count = normalizedCounter[charnum++]; + int const max = (2*threshold-1)-remaining; + remaining -= count < 0 ? -count : count; + count++; /* +1 for extra accuracy */ @@ -8601,7 +8471,7 @@ index 0000000..b6a6d46 + if (!maxSymbolValue) maxSymbolValue = 255; /* 0 == default */ + + /* by stripes of 16 bytes */ -+ { U32 cached = MEM_read32(ip); ip += 4; ++ { U32 cached = MEM_read32(ip); ip += 4; + while (ip < iend-15) { + U32 c = cached; cached = MEM_read32(ip); ip += 4; + Counting1[(BYTE) c ]++; @@ -8636,7 +8506,7 @@ index 0000000..b6a6d46 + if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall); + } } + -+ { U32 s; for (s=0; s<=maxSymbolValue; s++) { ++ { U32 s; for (s=0; s<=maxSymbolValue; s++) { + count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s]; + if (count[s] > max) max = count[s]; + } } @@ -8781,7 +8651,7 @@ index 0000000..b6a6d46 + return 0; + } + -+ { U64 const vStepLog = 62 - tableLog; ++ { U64 const vStepLog = 62 - tableLog; + U64 const mid = (1ULL << (vStepLog-1)) - 1; + U64 const rStep = ((((U64)1< FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported size */ + if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */ + -+ { U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 }; ++ { U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 }; + U64 const scale = 62 - tableLog; + U64 const step = ((U64)1<<62) / total; /* <== here, one division ! 
*/ + U64 const vStep = 1ULL<<(scale-20); @@ -8845,20 +8715,6 @@ index 0000000..b6a6d46 + else normalizedCounter[largest] += (short)stillToDistribute; + } + -+#if 0 -+ { /* Print Table (debug) */ -+ U32 s; -+ U32 nTotal = 0; -+ for (s=0; s<=maxSymbolValue; s++) -+ printf("%3i: %4i \n", s, normalizedCounter[s]); -+ for (s=0; s<=maxSymbolValue; s++) -+ nTotal += abs(normalizedCounter[s]); -+ if (nTotal != (1U< not compressible */ + if (maxCount < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */ @@ -9031,13 +8887,13 @@ index 0000000..b6a6d46 + CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) ); + + /* Write table description header */ -+ { CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); ++ { CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); + op += nc_err; + } + + /* Compress */ + CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) ); -+ { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) ); ++ { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) ); + if (cSize == 0) return 0; /* not enough space for compressed data */ + op += cSize; + } @@ -9052,7 +8908,7 @@ index 0000000..b6a6d46 +#endif /* FSE_COMMONDEFS_ONLY */ diff --git a/lib/zstd/fse_decompress.c b/lib/zstd/fse_decompress.c new file mode 100644 -index 0000000..2a35f17 +index 0000000..6de5411 --- /dev/null +++ b/lib/zstd/fse_decompress.c @@ -0,0 +1,292 @@ @@ -9156,10 +9012,10 @@ index 0000000..2a35f17 + if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); + + /* Init, lay down lowprob symbols */ -+ { FSE_DTableHeader DTableH; ++ { FSE_DTableHeader DTableH; + DTableH.tableLog = (U16)tableLog; + DTableH.fastMode = 1; -+ { S16 const largeLimit= (S16)(1 << (tableLog-1)); ++ { S16 const largeLimit= (S16)(1 << (tableLog-1)); + U32 s; + for (s=0; s not compressible */ + } @@ -9661,13 +9517,13 @@ index 0000000..a1a1d45 + CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) ); + + /* Write table description header */ -+ { CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); ++ { CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); + op += hSize; + } + + /* Compress */ + CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) ); -+ { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) ); ++ { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) ); + if (cSize == 0) return 0; /* not enough space for compressed data */ + op += cSize; + } @@ -9703,7 +9559,7 @@ index 0000000..a1a1d45 + huffWeight[n] = bitsToWeight[CTable[n].nbBits]; + + /* attempt weights compression by FSE */ -+ { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) ); ++ { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) ); + if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */ + op[0] = (BYTE)hSize; + return hSize+1; @@ -9735,26 +9591,26 @@ index 0000000..a1a1d45 + if (nbSymbols > maxSymbolValue+1) return ERROR(maxSymbolValue_tooSmall); + + /* Prepare base value per rank */ -+ { U32 n, nextRankStart = 0; ++ { U32 n, nextRankStart = 0; + for (n=1; n<=tableLog; n++) { -+ U32 current = nextRankStart; ++ U32 curr = nextRankStart; + nextRankStart += (rankVal[n] << (n-1)); -+ 
rankVal[n] = current; ++ rankVal[n] = curr; + } } + + /* fill nbBits */ -+ { U32 n; for (n=0; nn=tableLog+1 */ ++ { U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */ + U16 valPerRank[HUF_TABLELOG_MAX+2] = {0}; + { U32 n; for (n=0; n0; n--) { /* start at n=tablelog <-> w=1 */ + valPerRank[n] = min; /* get starting value within each rank */ + min += nbPerRank[n]; @@ -9781,7 +9637,7 @@ index 0000000..a1a1d45 + if (largestBits <= maxNbBits) return largestBits; /* early exit : no elt > maxNbBits */ + + /* there are several too large elements (at least >= 2) */ -+ { int totalCost = 0; ++ { int totalCost = 0; + const U32 baseCost = 1 << (largestBits - maxNbBits); + U32 n = lastNonNull; + @@ -9796,17 +9652,17 @@ index 0000000..a1a1d45 + totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */ + + /* repay normalized cost */ -+ { U32 const noSymbol = 0xF0F0F0F0; ++ { U32 const noSymbol = 0xF0F0F0F0; + U32 rankLast[HUF_TABLELOG_MAX+2]; + int pos; + + /* Get pos of last (smallest) symbol per rank */ + memset(rankLast, 0xF0, sizeof(rankLast)); -+ { U32 currentNbBits = maxNbBits; ++ { U32 currNbBits = maxNbBits; + for (pos=n ; pos >= 0; pos--) { -+ if (huffNode[pos].nbBits >= currentNbBits) continue; -+ currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */ -+ rankLast[maxNbBits-currentNbBits] = pos; ++ if (huffNode[pos].nbBits >= currNbBits) continue; ++ currNbBits = huffNode[pos].nbBits; /* < maxNbBits */ ++ rankLast[maxNbBits-currNbBits] = pos; + } } + + while (totalCost > 0) { @@ -9816,7 +9672,7 @@ index 0000000..a1a1d45 + U32 lowPos = rankLast[nBitsToDecrease-1]; + if (highPos == noSymbol) continue; + if (lowPos == noSymbol) break; -+ { U32 const highTotal = huffNode[highPos].count; ++ { U32 const highTotal = huffNode[highPos].count; + U32 const lowTotal = 2 * huffNode[lowPos].count; + if (highTotal <= lowTotal) break; + } } @@ -9854,7 +9710,7 @@ index 0000000..a1a1d45 + +typedef struct { + U32 base; -+ U32 current; ++ U32 curr; +} rankPos; + +static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue) @@ -9868,11 +9724,11 @@ index 0000000..a1a1d45 + rank[r].base ++; + } + for (n=30; n>0; n--) rank[n-1].base += rank[n].base; -+ for (n=0; n<32; n++) rank[n].current = rank[n].base; ++ for (n=0; n<32; n++) rank[n].curr = rank[n].base; + for (n=0; n<=maxSymbolValue; n++) { + U32 const c = count[n]; + U32 const r = BIT_highbit32(c+1) + 1; -+ U32 pos = rank[r].current++; ++ U32 pos = rank[r].curr++; + while ((pos > rank[r].base) && (c > huffNode[pos-1].count)) huffNode[pos]=huffNode[pos-1], pos--; + huffNode[pos].count = c; + huffNode[pos].byte = (BYTE)n; @@ -9934,13 +9790,13 @@ index 0000000..a1a1d45 + maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits); + + /* fill result into tree (val, nbBits) */ -+ { U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0}; ++ { U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0}; + U16 valPerRank[HUF_TABLELOG_MAX+1] = {0}; + if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */ + for (n=0; n<=nonNullRank; n++) + nbPerRank[huffNode[n].nbBits]++; + /* determine stating value per rank */ -+ { U16 min = 0; ++ { U16 min = 0; + for (n=maxNbBits; n>0; n--) { + valPerRank[n] = min; /* get starting value within each rank */ + min += nbPerRank[n]; @@ -10045,28 +9901,28 @@ index 0000000..a1a1d45 + if (srcSize < 12) return 0; /* no saving possible : too small input */ + op += 6; /* jumpTable */ + -+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, 
CTable) ); ++ { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); + if (cSize==0) return 0; + MEM_writeLE16(ostart, (U16)cSize); + op += cSize; + } + + ip += segmentSize; -+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); ++ { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); + if (cSize==0) return 0; + MEM_writeLE16(ostart+2, (U16)cSize); + op += cSize; + } + + ip += segmentSize; -+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); ++ { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); + if (cSize==0) return 0; + MEM_writeLE16(ostart+4, (U16)cSize); + op += cSize; + } + + ip += segmentSize; -+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, iend-ip, CTable) ); ++ { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, iend-ip, CTable) ); + if (cSize==0) return 0; + op += cSize; + } @@ -10114,7 +9970,7 @@ index 0000000..a1a1d45 + if (wkspSize < sizeof(huffNodeTable) + countSize + CTableSize) return ERROR(GENERIC); + if (!srcSize) return 0; /* Uncompressed (note : 1 means rle, so first byte must be correct) */ + if (!dstSize) return 0; /* cannot fit within dst budget */ -+ if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */ ++ if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* curr block size limit */ + if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); + if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX; + if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT; @@ -10132,7 +9988,7 @@ index 0000000..a1a1d45 + } + + /* Scan input and build symbol stats */ -+ { CHECK_V_F(largest, FSE_count_wksp (count, &maxSymbolValue, (const BYTE*)src, srcSize, (U32*)workSpace) ); ++ { CHECK_V_F(largest, FSE_count_wksp (count, &maxSymbolValue, (const BYTE*)src, srcSize, (U32*)workSpace) ); + if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */ + if (largest <= (srcSize >> 7)+1) return 0; /* Fast heuristic : not compressible enough */ + } @@ -10148,14 +10004,14 @@ index 0000000..a1a1d45 + + /* Build Huffman Tree */ + huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); -+ { CHECK_V_F(maxBits, HUF_buildCTable_wksp (CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize) ); ++ { CHECK_V_F(maxBits, HUF_buildCTable_wksp (CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize) ); + huffLog = (U32)maxBits; + /* Zero the unused symbols so we can check it for validity */ + memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt)); + } + + /* Write table description header */ -+ { CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, CTable, maxSymbolValue, huffLog) ); ++ { CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, CTable, maxSymbolValue, huffLog) ); + /* Check if using the previous table will be beneficial */ + if (repeat && *repeat != HUF_repeat_none) { + size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue); @@ -10209,7 +10065,7 @@ index 0000000..a1a1d45 +} diff --git a/lib/zstd/huf_decompress.c b/lib/zstd/huf_decompress.c new file mode 100644 -index 0000000..f73223c +index 0000000..2d9b33b --- /dev/null +++ b/lib/zstd/huf_decompress.c @@ -0,0 +1,835 @@ @@ -10306,7 +10162,7 @@ index 0000000..f73223c + if (HUF_isError(iSize)) return iSize; + + /* Table header */ -+ { DTableDesc dtd = HUF_getDTableDesc(DTable); ++ { 
DTableDesc dtd = HUF_getDTableDesc(DTable); + if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */ + dtd.tableType = 0; + dtd.tableLog = (BYTE)tableLog; @@ -10314,15 +10170,15 @@ index 0000000..f73223c + } + + /* Calculate starting value for each rank */ -+ { U32 n, nextRankStart = 0; ++ { U32 n, nextRankStart = 0; + for (n=1; n> 1; @@ -10435,7 +10291,7 @@ index 0000000..f73223c + /* Check */ + if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ + -+ { const BYTE* const istart = (const BYTE*) cSrc; ++ { const BYTE* const istart = (const BYTE*) cSrc; + BYTE* const ostart = (BYTE*) dst; + BYTE* const oend = ostart + dstSize; + const void* const dtPtr = DTable + 1; @@ -10574,7 +10430,7 @@ index 0000000..f73223c + } + + /* fill DTable */ -+ { U32 s; for (s=0; s= dstSize) || (cSrcSize <= 1)) return ERROR(corruption_detected); /* invalid */ + -+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); ++ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); + return algoNb ? HUF_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : + HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; + } @@ -11043,7 +10899,7 @@ index 0000000..f73223c + if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ + if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ + -+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); ++ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); + return algoNb ? HUF_decompress1X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : + HUF_decompress1X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; + } @@ -11340,10 +11196,10 @@ index 0000000..106f540 +} diff --git a/lib/zstd/zstd_internal.h b/lib/zstd/zstd_internal.h new file mode 100644 -index 0000000..a61bd27 +index 0000000..5ed5419 --- /dev/null +++ b/lib/zstd/zstd_internal.h -@@ -0,0 +1,274 @@ +@@ -0,0 +1,261 @@ +/** + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. @@ -11584,20 +11440,7 @@ index 0000000..a61bd27 + +MEM_STATIC U32 ZSTD_highbit32(U32 val) +{ -+# if defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */ + return 31 - __builtin_clz(val); -+# else /* Software version */ -+ static const int DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; -+ U32 v = val; -+ int r; -+ v |= v >> 1; -+ v |= v >> 2; -+ v |= v >> 4; -+ v |= v >> 8; -+ v |= v >> 16; -+ r = DeBruijnClz[(U32)(v * 0x07C4ACDDU) >> 27]; -+ return r; -+# endif +} + + @@ -11620,7 +11463,7 @@ index 0000000..a61bd27 +#endif /* ZSTD_CCOMMON_H_MODULE */ diff --git a/lib/zstd/zstd_opt.h b/lib/zstd/zstd_opt.h new file mode 100644 -index 0000000..297a715 +index 0000000..9bd5303 --- /dev/null +++ b/lib/zstd/zstd_opt.h @@ -0,0 +1,921 @@ @@ -11753,7 +11596,7 @@ index 0000000..297a715 + } + + /* literal Length */ -+ { const BYTE LL_deltaCode = 19; ++ { const BYTE LL_deltaCode = 19; + const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; + price += LL_bits[llCode] + ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[llCode]+1); + } @@ -11775,7 +11618,7 @@ index 0000000..297a715 + if (!ultra && offCode >= 20) price += (offCode-19)*2; + + /* match Length */ -+ { const BYTE ML_deltaCode = 36; ++ { const BYTE ML_deltaCode = 36; + const BYTE mlCode = (matchLength>127) ? 
(BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength]; + price += ML_bits[mlCode] + seqStorePtr->log2matchLengthSum - ZSTD_highbit32(seqStorePtr->matchLengthFreq[mlCode]+1); + } @@ -11794,20 +11637,20 @@ index 0000000..297a715 + seqStorePtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD; + + /* literal Length */ -+ { const BYTE LL_deltaCode = 19; ++ { const BYTE LL_deltaCode = 19; + const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; + seqStorePtr->litLengthFreq[llCode]++; + seqStorePtr->litLengthSum++; + } + + /* match offset */ -+ { BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1); ++ { BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1); + seqStorePtr->offCodeSum++; + seqStorePtr->offCodeFreq[offCode]++; + } + + /* match Length */ -+ { const BYTE ML_deltaCode = 36; ++ { const BYTE ML_deltaCode = 36; + const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength]; + seqStorePtr->matchLengthFreq[mlCode]++; + seqStorePtr->matchLengthSum++; @@ -11818,7 +11661,7 @@ index 0000000..297a715 + + +#define SET_PRICE(pos, mlen_, offset_, litlen_, price_) \ -+ { \ ++ { \ + while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } \ + opt[pos].mlen = mlen_; \ + opt[pos].off = offset_; \ @@ -11859,7 +11702,7 @@ index 0000000..297a715 + U32 extDict, ZSTD_match_t* matches, const U32 minMatchLen) +{ + const BYTE* const base = zc->base; -+ const U32 current = (U32)(ip-base); ++ const U32 curr = (U32)(ip-base); + const U32 hashLog = zc->params.cParams.hashLog; + const size_t h = ZSTD_hashPtr(ip, hashLog, mls); + U32* const hashTable = zc->hashTable; @@ -11872,11 +11715,11 @@ index 0000000..297a715 + const U32 dictLimit = zc->dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const prefixStart = base + dictLimit; -+ const U32 btLow = btMask >= current ? 0 : current - btMask; ++ const U32 btLow = btMask >= curr ? 
0 : curr - btMask; + const U32 windowLow = zc->lowLimit; -+ U32* smallerPtr = bt + 2*(current&btMask); -+ U32* largerPtr = bt + 2*(current&btMask) + 1; -+ U32 matchEndIdx = current+8; ++ U32* smallerPtr = bt + 2*(curr&btMask); ++ U32* largerPtr = bt + 2*(curr&btMask) + 1; ++ U32 matchEndIdx = curr+8; + U32 dummy32; /* to be nullified at the end */ + U32 mnum = 0; + @@ -11885,31 +11728,31 @@ index 0000000..297a715 + + if (minMatch == 3) { /* HC3 match finder */ + U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3 (zc, ip); -+ if (matchIndex3>windowLow && (current - matchIndex3 < (1<<18))) { ++ if (matchIndex3>windowLow && (curr - matchIndex3 < (1<<18))) { + const BYTE* match; -+ size_t currentMl=0; ++ size_t currMl=0; + if ((!extDict) || matchIndex3 >= dictLimit) { + match = base + matchIndex3; -+ if (match[bestLength] == ip[bestLength]) currentMl = ZSTD_count(ip, match, iLimit); ++ if (match[bestLength] == ip[bestLength]) currMl = ZSTD_count(ip, match, iLimit); + } else { + match = dictBase + matchIndex3; + if (MEM_readMINMATCH(match, MINMATCH) == MEM_readMINMATCH(ip, MINMATCH)) /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */ -+ currentMl = ZSTD_count_2segments(ip+MINMATCH, match+MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH; ++ currMl = ZSTD_count_2segments(ip+MINMATCH, match+MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH; + } + + /* save best solution */ -+ if (currentMl > bestLength) { -+ bestLength = currentMl; -+ matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex3; -+ matches[mnum].len = (U32)currentMl; ++ if (currMl > bestLength) { ++ bestLength = currMl; ++ matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex3; ++ matches[mnum].len = (U32)currMl; + mnum++; -+ if (currentMl > ZSTD_OPT_NUM) goto update; -+ if (ip+currentMl == iLimit) goto update; /* best possible, and avoid read overflow*/ ++ if (currMl > ZSTD_OPT_NUM) goto update; ++ if (ip+currMl == iLimit) goto update; /* best possible, and avoid read overflow*/ + } + } + } + -+ hashTable[h] = current; /* Update Hash Table */ ++ hashTable[h] = curr; /* Update Hash Table */ + + while (nbCompares-- && (matchIndex > windowLow)) { + U32* nextPtr = bt + 2*(matchIndex & btMask); @@ -11931,7 +11774,7 @@ index 0000000..297a715 + if (matchLength > bestLength) { + if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; + bestLength = matchLength; -+ matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex; ++ matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex; + matches[mnum].len = (U32)matchLength; + mnum++; + if (matchLength > ZSTD_OPT_NUM) break; @@ -11940,14 +11783,14 @@ index 0000000..297a715 + } + + if (match[matchLength] < ip[matchLength]) { -+ /* match is smaller than current */ ++ /* match is smaller than curr */ + *smallerPtr = matchIndex; /* update smaller idx */ + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ -+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ ++ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to curr) */ + } else { -+ /* match is larger than current */ ++ /* match is larger than curr */ + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the 
search */ @@ -11958,7 +11801,7 @@ index 0000000..297a715 + *smallerPtr = *largerPtr = 0; + +update: -+ zc->nextToUpdate = (matchEndIdx > current + 8) ? matchEndIdx - 8 : current+1; ++ zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr+1; + return mnum; +} + @@ -12061,7 +11904,7 @@ index 0000000..297a715 + litlen = (U32)(ip - anchor); + + /* check repCode */ -+ { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor); ++ { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor); + for (i=(ip == anchor); i 0) && (repCur < (S32)(ip-prefixStart)) @@ -12146,7 +11989,7 @@ index 0000000..297a715 + } + + best_mlen = minMatch; -+ { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1); ++ { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1); + for (i=(opt[cur].mlen != 1); i 0) && (repCur < (S32)(inr-prefixStart)) @@ -12265,7 +12108,7 @@ index 0000000..297a715 + { int i; for (i=0; irepToConfirm[i] = rep[i]; } + + /* Last Literals */ -+ { size_t const lastLLSize = iend - anchor; ++ { size_t const lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } @@ -12310,19 +12153,19 @@ index 0000000..297a715 + while (ip < ilimit) { + U32 cur, match_num, last_pos, litlen, price; + U32 u, mlen, best_mlen, best_off, litLength; -+ U32 current = (U32)(ip-base); ++ U32 curr = (U32)(ip-base); + memset(opt, 0, sizeof(ZSTD_optimal_t)); + last_pos = 0; + opt[0].litlen = (U32)(ip - anchor); + + /* check repCode */ -+ { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor); ++ { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor); + for (i = (ip==anchor); i 0 && repCur <= (S32)current) ++ if ( (repCur > 0 && repCur <= (S32)curr) + && (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */ + && (MEM_readMINMATCH(ip, minMatch) == MEM_readMINMATCH(repMatch, minMatch)) ) { + /* repcode detected we should take it */ @@ -12412,13 +12255,13 @@ index 0000000..297a715 + } + + best_mlen = minMatch; -+ { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1); ++ { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1); + for (i = (mlen != 1); i 0 && repCur <= (S32)(current+cur)) ++ if ( (repCur > 0 && repCur <= (S32)(curr+cur)) + && (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */ + && (MEM_readMINMATCH(inr, minMatch) == MEM_readMINMATCH(repMatch, minMatch)) ) { + /* repcode detected */ @@ -12538,7 +12381,7 @@ index 0000000..297a715 + { int i; for (i=0; irepToConfirm[i] = rep[i]; } + + /* Last Literals */ -+ { size_t lastLLSize = iend - anchor; ++ { size_t lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + }
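Back on the compression side, FSE_buildCTable_wksp() above fills its table in two passes: cumulative start positions per symbol, then the "Spread symbols" scatter. A sketch of that scatter step, assuming the upstream step constant (tableSize/2 + tableSize/8 + 3, odd and hence co-prime with the power-of-two table size; the macro body is not visible in these hunks) and ignoring the low-probability -1 counts that the real code parks at the top of the table:

/* Sketch only: scatter each symbol's occurrences across the table.
 * A step co-prime with tableSize visits every slot exactly once, so
 * the states of any one symbol end up roughly evenly spaced. */
static void fse_spread_symbols(BYTE *tableSymbol, unsigned tableLog,
			       const short *normalizedCounter,
			       unsigned maxSymbolValue)
{
	unsigned const tableSize = 1u << tableLog;
	unsigned const tableMask = tableSize - 1;
	unsigned const step = (tableSize >> 1) + (tableSize >> 3) + 3;
	unsigned position = 0;
	unsigned symbol;

	for (symbol = 0; symbol <= maxSymbolValue; symbol++) {
		int n;
		for (n = 0; n < normalizedCounter[symbol]; n++) {
			tableSymbol[position] = (BYTE)symbol;
			position = (position + step) & tableMask;
		}
	}
	/* counts sum to tableSize, so position has wrapped back to 0 */
}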
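For Huffman blocks, HUF_compress4X_usingCTable() above reserves 6 bytes for a jump table, emits four single-stream segments, and records the first three compressed sizes with MEM_writeLE16() at offsets 0, 2 and 4; the fourth size is implied by the block length. The decoder recovers the stream boundaries the same way, sketched here assuming the MEM_readLE16() counterpart from the patch's mem helpers:

/* Sketch only: locate the four Huffman bitstreams of a 4X block.
 * The cSrcSize < 10 test mirrors the "jump table + 1 byte per stream"
 * strict minimum checked by the HUF_decompress4X paths above. */
static int huf4x_locate_streams(const BYTE *istart, size_t cSrcSize,
				const BYTE *stream[4], size_t size[4])
{
	if (cSrcSize < 10)
		return -1;
	size[0] = MEM_readLE16(istart + 0);
	size[1] = MEM_readLE16(istart + 2);
	size[2] = MEM_readLE16(istart + 4);
	if (6 + size[0] + size[1] + size[2] >= cSrcSize)
		return -1;	/* corrupt: stream 4 would be empty */
	stream[0] = istart + 6;
	stream[1] = stream[0] + size[0];
	stream[2] = stream[1] + size[1];
	stream[3] = stream[2] + size[2];
	size[3] = cSrcSize - 6 - size[0] - size[1] - size[2];
	return 0;
}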
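Finally, the zstd_opt.h pricing hunks above charge each code extraBits + log2Sum - ZSTD_highbit32(freq+1), an integer-log2 approximation of log2(total/frequency): a llCode seen 64 times out of roughly 1024 observations (log2 sum of 10) prices at about 10 - 6 = 4 bits plus its extra bits, and the +1 keeps never-seen codes at a large but finite price. Reduced to a helper, under the assumption (maintained by the frequency updates above) that freq stays below the cached total:

/* Sketch only: the integer-log2 bit price used by the optimal parser.
 * Caching log2Total means each query costs one clz, no division. */
static U32 approx_code_price(U32 extraBits, U32 log2Total, U32 freq)
{
	return extraBits + log2Total - ZSTD_highbit32(freq + 1);
}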