Merge remote-tracking branch 'refs/remotes/Cyan4973/dev' into dev
commit da1685e98d

Makefile
@@ -64,6 +64,7 @@ clean:
 	@$(MAKE) -C $(ZSTDDIR) $@ > $(VOID)
 	@$(MAKE) -C $(PRGDIR) $@ > $(VOID)
+	@$(MAKE) -C $(ZWRAPDIR) $@ > $(VOID)
 	@rm -f zstd
 	@echo Cleaning completed

NEWS
@@ -1,3 +1,9 @@
+v0.7.1
+fixed : ZBUFF_compressEnd() called multiple times with too small `dst` buffer, reported by Christophe Chevalier
+fixed : dictBuilder fails if first sample is too small, reported by Руслан Ковалёв
+fixed : corruption issue, reported by cj
+modified : checksum enabled by default in command line mode
+
 v0.7.0
 New : Support for directory compression, using `-r`, thanks to Przemyslaw Skibinski
 New : Command `--rm`, to remove source file after successful de/compression

huf.h
@@ -136,7 +136,7 @@ typedef U32 HUF_DTable;
 size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
 size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
 
-size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< decodes RLE and uncompressed */
 size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< considers RLE and uncompressed as errors */
 size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
 size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */

zstd.h
@@ -61,7 +61,7 @@ extern "C" {
 ***************************************/
 #define ZSTD_VERSION_MAJOR    0
 #define ZSTD_VERSION_MINOR    7
-#define ZSTD_VERSION_RELEASE  0
+#define ZSTD_VERSION_RELEASE  1
 
 #define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
 #define ZSTD_QUOTE(str) #str
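Illustration (not from the patch) : the release bump above reaches user code through two-level macro expansion. A minimal standalone sketch, assuming ZSTD_EXPAND_AND_QUOTE accompanies ZSTD_QUOTE in the header as in other revisions of this file :

#include <stdio.h>

#define ZSTD_VERSION_MAJOR    0
#define ZSTD_VERSION_MINOR    7
#define ZSTD_VERSION_RELEASE  1

#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
#define ZSTD_QUOTE(str) #str
#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str)   /* expands macros before stringifying */

int main(void)
{
    printf("zstd %s\n", ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION));   /* prints "zstd 0.7.1" */
    return 0;
}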
@@ -132,7 +132,7 @@ ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* ctx, void* dst, size_t dstCapa
 /*! ZSTD_compress_usingDict() :
 *   Compression using a pre-defined Dictionary content (see dictBuilder).
 *   Note 1 : This function load the dictionary, resulting in a significant startup time.
-*   Note 2 : `dict` must remain valid and unmodified during compression operation.
+*   Note 2 : `dict` must remain accessible and unmodified during compression operation.
 *   Note 3 : `dict` can be `NULL`, in which case, it's equivalent to ZSTD_compressCCtx() */
 ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
                                            void* dst, size_t dstCapacity,
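Illustration (not from the patch) : a hedged one-shot usage sketch for the notes above; compress_with_dict is a hypothetical helper and error handling is trimmed.

#include "zstd.h"

size_t compress_with_dict(void* dst, size_t dstCapacity,
                          const void* src, size_t srcSize,
                          const void* dict, size_t dictSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    /* per Note 2 : `dict` must stay accessible and unmodified for the whole call */
    size_t const cSize = ZSTD_compress_usingDict(cctx, dst, dstCapacity,
                                                 src, srcSize,
                                                 dict, dictSize,
                                                 1 /* compression level */);
    ZSTD_freeCCtx(cctx);
    return cSize;   /* check with ZSTD_isError() */
}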
@@ -144,7 +144,7 @@ ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
 *   Decompression using a pre-defined Dictionary content (see dictBuilder).
 *   Dictionary must be identical to the one used during compression.
 *   Note 1 : This function load the dictionary, resulting in a significant startup time
-*   Note 2 : `dict` must remain valid and unmodified during compression operation.
+*   Note 2 : `dict` must remain accessible and unmodified during compression operation.
 *   Note 3 : `dict` can be `NULL`, in which case, it's equivalent to ZSTD_decompressDCtx() */
 ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
                                              void* dst, size_t dstCapacity,
@@ -192,7 +192,7 @@ ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
 
 /* ====================================================================================
  * The definitions in this section are considered experimental.
- * They should never be used in association with a dynamic library, as they may change in the future.
+ * They should never be used with a dynamic library, as they may change in the future.
  * They are provided for advanced usages.
  * Use them only in association with static linking.
  * ==================================================================================== */
@@ -322,15 +322,22 @@ ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapaci
   It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx()
 
   Then, consume your input using ZSTD_compressContinue().
-  ZSTD_compressContinue() presumes prior data is still accessible and unmodified (up to maximum distance size, see WindowLog).
-  The interface is synchronous, so input will be entirely consumed and produce associated compressed output.
-  You must ensure there is enough space in destination buffer to store compressed data under worst case scenario.
-  Worst case evaluation is provided by ZSTD_compressBound().
+  There are some important considerations to keep in mind when using this advanced function :
+  - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffer only.
+  - Interface is synchronous : input will be entirely consumed and produce 1+ compressed blocks.
+  - Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.
+    Worst case evaluation is provided by ZSTD_compressBound().
+    ZSTD_compressContinue() doesn't guarantee recover after a failed compression.
+  - ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).
+    It remembers all previous contiguous blocks, plus one separated memory segment (which can itself consists of multiple contiguous blocks)
+  - ZSTD_compressContinue() detects that prior input has been overwritten when `src` buffer overlaps.
+    In which case, it will "discard" the relevant memory section from its history.
+
 
   Finish a frame with ZSTD_compressEnd(), which will write the epilogue.
   Without epilogue, frames will be considered unfinished (broken) by decoders.
 
-  You can then reuse ZSTD_CCtx to compress some new frame.
+  You can then reuse `ZSTD_CCtx` (ZSTD_compressBegin()) to compress some new frame.
 */
 
 typedef struct {
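Illustration (not from the patch) : a hedged sketch of the begin/continue/end sequence described above. stream_compress is a hypothetical helper; a real caller must also size `dst` using ZSTD_compressBound() for each input chunk, as the considerations above require.

#include "zstd.h"

size_t stream_compress(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity,
                       const void* const chunks[], const size_t chunkSizes[],
                       size_t nbChunks, int compressionLevel)
{
    char* op = (char*)dst;
    size_t n;
    {   size_t const initError = ZSTD_compressBegin(cctx, compressionLevel);
        if (ZSTD_isError(initError)) return initError; }
    for (n = 0; n < nbChunks; n++) {
        /* chunks[n] must stay accessible and unmodified (window history) */
        size_t const cSize = ZSTD_compressContinue(cctx, op, dstCapacity,
                                                   chunks[n], chunkSizes[n]);
        if (ZSTD_isError(cSize)) return cSize;
        op += cSize; dstCapacity -= cSize;
    }
    {   size_t const endSize = ZSTD_compressEnd(cctx, op, dstCapacity);   /* writes the epilogue */
        if (ZSTD_isError(endSize)) return endSize;
        op += endSize; }
    return (size_t)(op - (char*)dst);   /* total compressed size */
}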
@@ -357,7 +364,7 @@ ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t ds
   A ZSTD_DCtx object can be re-used multiple times.
 
   First optional operation is to retrieve frame parameters, using ZSTD_getFrameParams(), which doesn't consume the input.
-  It can provide the minimum size of rolling buffer required to properly decompress data,
+  It can provide the minimum size of rolling buffer required to properly decompress data (`windowSize`),
   and optionally the final size of uncompressed content.
   (Note : content size is an optional info that may not be present. 0 means : content size unknown)
   Frame parameters are extracted from the beginning of compressed frame.
@@ -373,7 +380,7 @@ ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t ds
   Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively.
   ZSTD_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTD_decompressContinue().
   ZSTD_decompressContinue() requires this exact amount of bytes, or it will fail.
-  ZSTD_decompressContinue() needs previous data blocks during decompression, up to (1 << windowlog).
+  ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize`.
   They should preferably be located contiguously, prior to current block. Alternatively, a round buffer is also possible.
 
   @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
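Illustration (not from the patch) : a hedged sketch of the decompression loop just described, assuming the usual ZSTD_decompressBegin() initialization from this same section. read_exact is a hypothetical callback delivering exactly the requested byte count; `dst` is assumed contiguous and large enough, which satisfies the `windowSize` requirement.

#include "zstd.h"

typedef size_t (*read_exact_f)(void* buf, size_t size, void* opaque);

size_t stream_decompress(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity,
                         read_exact_f read_exact, void* opaque)
{
    char inBuff[1 << 17];   /* assumption : large enough for any block of this frame */
    char* op = (char*)dst;
    size_t toRead;
    ZSTD_decompressBegin(dctx);
    while ((toRead = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
        if (toRead > sizeof(inBuff)) return (size_t)-1;   /* sketch-level guard */
        if (read_exact(inBuff, toRead, opaque) != toRead) return (size_t)-1;
        {   size_t const regen = ZSTD_decompressContinue(dctx, op, dstCapacity, inBuff, toRead);
            if (ZSTD_isError(regen)) return regen;
            op += regen; dstCapacity -= regen;   /* regen can be 0 (headers) */
        }
    }
    return (size_t)(op - (char*)dst);   /* total regenerated size */
}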
zbuff_compress.c
@@ -79,7 +79,7 @@ static size_t const ZBUFF_endFrameSize = ZSTD_BLOCKHEADERSIZE;
 * output : ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + ZBUFF_endFrameSize : ensures it's always possible to write/flush/end a full block at best speed.
 * **************************************************/
 
-typedef enum { ZBUFFcs_init, ZBUFFcs_load, ZBUFFcs_flush } ZBUFF_cStage;
+typedef enum { ZBUFFcs_init, ZBUFFcs_load, ZBUFFcs_flush, ZBUFFcs_final } ZBUFF_cStage;
 
 /* *** Resources *** */
 struct ZBUFF_CCtx_s {
@@ -162,7 +162,7 @@ size_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc,
     zbc->inToCompress = 0;
     zbc->inBuffPos = 0;
     zbc->inBuffTarget = zbc->blockSize;
-    zbc->outBuffFlushedSize = 0;
+    zbc->outBuffContentSize = zbc->outBuffFlushedSize = 0;
     zbc->stage = ZBUFFcs_load;
     return 0;   /* ready to go */
 }
@@ -196,7 +196,7 @@ MEM_STATIC size_t ZBUFF_limitCopy(void* dst, size_t dstCapacity, const void* src
 static size_t ZBUFF_compressContinue_generic(ZBUFF_CCtx* zbc,
                               void* dst, size_t* dstCapacityPtr,
                               const void* src, size_t* srcSizePtr,
-                              int flush)   /* aggregate : wait for full block before compressing */
+                              int flush)
 {
     U32 notDone = 1;
     const char* const istart = (const char*)src;
@@ -243,17 +243,20 @@ static size_t ZBUFF_compressContinue_generic(ZBUFF_CCtx* zbc,
             }
 
         case ZBUFFcs_flush:
             /* flush into dst */
             {   size_t const toFlush = zbc->outBuffContentSize - zbc->outBuffFlushedSize;
                 size_t const flushed = ZBUFF_limitCopy(op, oend-op, zbc->outBuff + zbc->outBuffFlushedSize, toFlush);
                 op += flushed;
                 zbc->outBuffFlushedSize += flushed;
-                if (toFlush!=flushed) { notDone = 0; break; }   /* not enough space within dst to store compressed block : stop there */
-                zbc->outBuffContentSize = 0;
-                zbc->outBuffFlushedSize = 0;
+                if (toFlush!=flushed) { notDone = 0; break; }   /* dst too small to store flushed data : stop there */
+                zbc->outBuffContentSize = zbc->outBuffFlushedSize = 0;
                 zbc->stage = ZBUFFcs_load;
                 break;
             }
 
+        case ZBUFFcs_final:
+            notDone = 0;   /* do nothing */
+            break;
+
         default:
             return ERROR(GENERIC);   /* impossible */
         }
@@ -291,19 +294,30 @@ size_t ZBUFF_compressEnd(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr)
     BYTE* const ostart = (BYTE*)dst;
     BYTE* const oend = ostart + *dstCapacityPtr;
     BYTE* op = ostart;
-    size_t outSize = *dstCapacityPtr;
-    size_t epilogueSize, remaining;
-    ZBUFF_compressFlush(zbc, dst, &outSize);   /* flush any remaining inBuff */
-    op += outSize;
-    epilogueSize = ZSTD_compressEnd(zbc->zc, zbc->outBuff + zbc->outBuffContentSize, zbc->outBuffSize - zbc->outBuffContentSize);   /* epilogue into outBuff */
-    zbc->outBuffContentSize += epilogueSize;
-    outSize = oend-op;
-    zbc->stage = ZBUFFcs_flush;
-    remaining = ZBUFF_compressFlush(zbc, op, &outSize);   /* attempt to flush epilogue into dst */
-    op += outSize;
-    if (!remaining) zbc->stage = ZBUFFcs_init;   /* close only if nothing left to flush */
-    *dstCapacityPtr = op-ostart;   /* tells how many bytes were written */
-    return remaining;
+
+    if (zbc->stage != ZBUFFcs_final) {
+        /* flush whatever remains */
+        size_t outSize = *dstCapacityPtr;
+        size_t const remainingToFlush = ZBUFF_compressFlush(zbc, dst, &outSize);
+        op += outSize;
+        if (remainingToFlush) {
+            *dstCapacityPtr = op-ostart;
+            return remainingToFlush + ZBUFF_endFrameSize;
+        }
+        /* create epilogue */
+        zbc->stage = ZBUFFcs_final;
+        zbc->outBuffContentSize = ZSTD_compressEnd(zbc->zc, zbc->outBuff, zbc->outBuffSize);   /* epilogue into outBuff */
+    }
+
+    /* flush epilogue */
+    {   size_t const toFlush = zbc->outBuffContentSize - zbc->outBuffFlushedSize;
+        size_t const flushed = ZBUFF_limitCopy(op, oend-op, zbc->outBuff + zbc->outBuffFlushedSize, toFlush);
+        op += flushed;
+        zbc->outBuffFlushedSize += flushed;
+        *dstCapacityPtr = op-ostart;
+        if (toFlush==flushed) zbc->stage = ZBUFFcs_init;   /* end reached */
+        return toFlush - flushed;
+    }
 }
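Illustration (not from the patch) : with the rewrite above, the following hedged caller pattern becomes safe, matching the NEWS entry about ZBUFF_compressEnd() being called multiple times with a too-small `dst` buffer; keep calling until the returned value (bytes still buffered inside the context) reaches 0. end_frame is a hypothetical helper.

#include <stdio.h>
#include "zbuff.h"

size_t end_frame(ZBUFF_CCtx* zbc, FILE* out)
{
    char dstBuff[64];   /* deliberately small output buffer */
    size_t remaining = (size_t)(-1);
    while (remaining) {
        size_t dstSize = sizeof(dstBuff);
        remaining = ZBUFF_compressEnd(zbc, dstBuff, &dstSize);   /* bytes still inside the context */
        if (ZBUFF_isError(remaining)) return remaining;
        fwrite(dstBuff, 1, dstSize, out);
    }
    return 0;   /* frame fully closed */
}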
zstd_compress.c
@@ -246,7 +246,7 @@ ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, U
 
 size_t ZSTD_sizeofCCtx(ZSTD_compressionParameters cParams)   /* hidden interface, for paramagrill */
 {
-    ZSTD_CCtx* zc = ZSTD_createCCtx();
+    ZSTD_CCtx* const zc = ZSTD_createCCtx();
     ZSTD_parameters params;
     memset(&params, 0, sizeof(params));
     params.cParams = cParams;
@@ -295,7 +295,7 @@ static size_t ZSTD_resetCCtx_advanced (ZSTD_CCtx* zc,
         zc->seqStore.buffer = zc->hashTable3 + h3Size;
         zc->hufTable = (HUF_CElt*)zc->seqStore.buffer;
         zc->flagStaticTables = 0;
-        zc->seqStore.buffer = ((U32*)(zc->seqStore.buffer)) + 256;
+        zc->seqStore.buffer = ((U32*)(zc->seqStore.buffer)) + 256;   /* note : HUF_CElt* is incomplete type, size is simulated using U32 */
 
         zc->nextToUpdate = 1;
         zc->nextSrc = NULL;
@@ -313,14 +313,17 @@ static size_t ZSTD_resetCCtx_advanced (ZSTD_CCtx* zc,
         zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1<<Litbits);
         zc->seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL+1);
         zc->seqStore.offCodeFreq = zc->seqStore.matchLengthFreq + (MaxML+1);
-        zc->seqStore.matchTable = (ZSTD_match_t*)((void*)(zc->seqStore.offCodeFreq + (MaxOff+1)));
-        zc->seqStore.priceTable = (ZSTD_optimal_t*)((void*)(zc->seqStore.matchTable + ZSTD_OPT_NUM+1));
+        zc->seqStore.buffer = zc->seqStore.offCodeFreq + (MaxOff+1);
+        zc->seqStore.matchTable = (ZSTD_match_t*)zc->seqStore.buffer;
+        zc->seqStore.buffer = zc->seqStore.matchTable + ZSTD_OPT_NUM+1;
+        zc->seqStore.priceTable = (ZSTD_optimal_t*)zc->seqStore.buffer;
+        zc->seqStore.buffer = zc->seqStore.priceTable + ZSTD_OPT_NUM+1;
         zc->seqStore.litLengthSum = 0;
     }
-    zc->seqStore.offsetStart = (U32*) (zc->seqStore.buffer);
-    zc->seqStore.litLengthStart = (U16*) (void*)(zc->seqStore.offsetStart + maxNbSeq);
-    zc->seqStore.matchLengthStart = (U16*) (void*)(zc->seqStore.litLengthStart + maxNbSeq);
+    zc->seqStore.offsetStart = (U32*)(zc->seqStore.buffer);
+    zc->seqStore.buffer = zc->seqStore.offsetStart + maxNbSeq;
+    zc->seqStore.litLengthStart = (U16*)zc->seqStore.buffer;
+    zc->seqStore.matchLengthStart = zc->seqStore.litLengthStart + maxNbSeq;
     zc->seqStore.llCodeStart = (BYTE*) (zc->seqStore.matchLengthStart + maxNbSeq);
     zc->seqStore.mlCodeStart = zc->seqStore.llCodeStart + maxNbSeq;
     zc->seqStore.offCodeStart = zc->seqStore.mlCodeStart + maxNbSeq;
@@ -1024,11 +1027,12 @@ static unsigned ZSTD_NbCommonBytes (register size_t val)
 }
 
 
-static size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
+static size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
 {
     const BYTE* const pStart = pIn;
+    const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);
 
-    while ((pIn<pInLimit-(sizeof(size_t)-1))) {
+    while (pIn < pInLoopLimit) {
         size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
         if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
         pIn += ZSTD_NbCommonBytes(diff);
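Illustration (not from the patch) : a hedged sketch of the word-at-a-time idea behind ZSTD_count(), which the pInLoopLimit hoisting above operates on. XOR the two words; the lowest non-zero byte of the difference marks the first mismatch (little-endian shown). match_length and first_diff_byte are hypothetical stand-ins for the library's MEM_readST()/ZSTD_NbCommonBytes() machinery.

#include <stddef.h>
#include <string.h>

static size_t first_diff_byte(size_t diff)   /* precondition : diff != 0 ; little-endian */
{
    size_t n = 0;
    while ((diff & 0xFF) == 0) { diff >>= 8; n++; }
    return n;
}

static size_t match_length(const unsigned char* pIn, const unsigned char* pMatch,
                           const unsigned char* const pInLimit)
{
    const unsigned char* const pStart = pIn;
    const unsigned char* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);

    while (pIn < pInLoopLimit) {
        size_t wIn, wMatch;
        memcpy(&wIn, pIn, sizeof wIn);           /* unaligned-safe reads */
        memcpy(&wMatch, pMatch, sizeof wMatch);
        {   size_t const diff = wIn ^ wMatch;
            if (!diff) { pIn += sizeof(size_t); pMatch += sizeof(size_t); continue; }
            return (size_t)(pIn - pStart) + first_diff_byte(diff);
        }
    }
    while ((pIn < pInLimit) && (*pMatch == *pIn)) { pIn++; pMatch++; }   /* byte-by-byte tail */
    return (size_t)(pIn - pStart);
}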
@@ -1136,35 +1140,34 @@ void ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
 
     /* Main Search Loop */
     while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
-        size_t mlCode;
-        size_t offset;
+        size_t mLength;
         size_t const h = ZSTD_hashPtr(ip, hBits, mls);
         U32 const current = (U32)(ip-base);
         U32 const matchIndex = hashTable[h];
         const BYTE* match = base + matchIndex;
         hashTable[h] = current;   /* update hash table */
 
         if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {   /* note : by construction, offset_1 <= current */
-            mlCode = ZSTD_count(ip+1+EQUAL_READ32, ip+1+EQUAL_READ32-offset_1, iend) + EQUAL_READ32;
+            mLength = ZSTD_count(ip+1+EQUAL_READ32, ip+1+EQUAL_READ32-offset_1, iend) + EQUAL_READ32;
             ip++;
-            ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mlCode-MINMATCH);
+            ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
         } else {
-            if ( (matchIndex <= lowestIndex) ||
-                 (MEM_read32(match) != MEM_read32(ip)) ) {
+            size_t offset;
+            if ( (matchIndex <= lowestIndex) || (MEM_read32(match) != MEM_read32(ip)) ) {
                 ip += ((ip-anchor) >> g_searchStrength) + 1;
                 continue;
            }
-            mlCode = ZSTD_count(ip+EQUAL_READ32, match+EQUAL_READ32, iend) + EQUAL_READ32;
+            mLength = ZSTD_count(ip+EQUAL_READ32, match+EQUAL_READ32, iend) + EQUAL_READ32;
             offset = ip-match;
-            while ((ip>anchor) && (match>lowest) && (ip[-1] == match[-1])) { ip--; match--; mlCode++; }   /* catch up */
+            while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
             offset_2 = offset_1;
             offset_1 = offset;
 
-            ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mlCode-MINMATCH);
+            ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
         }
 
         /* match found */
-        ip += mlCode;
+        ip += mLength;
         anchor = ip;
 
         if (ip <= ilimit) {
@@ -1176,18 +1179,18 @@ void ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
                  && ( (offset_2>0)
                     & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
                 /* store sequence */
-                size_t const rlCode = ZSTD_count(ip+EQUAL_READ32, ip+EQUAL_READ32-offset_2, iend) + EQUAL_READ32;
+                size_t const rLength = ZSTD_count(ip+EQUAL_READ32, ip+EQUAL_READ32-offset_2, iend) + EQUAL_READ32;
                 { size_t const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; }   /* swap offset_2 <=> offset_1 */
                 hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip-base);
-                ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rlCode-MINMATCH);
-                ip += rlCode;
+                ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH);
+                ip += rLength;
                 anchor = ip;
                 continue;   /* faster when present ... (?) */
     }   }   }
 
     /* save reps for next block */
-    cctx->savedRep[0] = offset_1 ? (U32)offset_1 : (U32)(iend-base);
-    cctx->savedRep[1] = offset_2 ? (U32)offset_2 : (U32)(iend-base);
+    cctx->savedRep[0] = offset_1 ? (U32)offset_1 : (U32)(iend - base) + 1;
+    cctx->savedRep[1] = offset_2 ? (U32)offset_2 : (U32)(iend - base) + 1;
 
     /* Last Literals */
     {   size_t const lastLLSize = iend - anchor;
@@ -1237,31 +1240,25 @@ static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
     const BYTE* const ilimit = iend - 8;
     U32 offset_1=ctx->rep[0], offset_2=ctx->rep[1];
 
-    /* init */
+    /* skip first position to avoid read overflow during repcode match check */
     hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip-base);
     ip++;
 
-    /* Main Search Loop */
+    /* Search Loop */
     while (ip < ilimit) {   /* < instead of <=, because (ip+1) */
         const size_t h = ZSTD_hashPtr(ip, hBits, mls);
         const U32 matchIndex = hashTable[h];
         const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
         const BYTE* match = matchBase + matchIndex;
         const U32 current = (U32)(ip-base);
-        const U32 repIndex = current + 1 - offset_1;
+        const U32 repIndex = current + 1 - offset_1;   /* offset_1 expected <= current +1 */
         const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
         const BYTE* repMatch = repBase + repIndex;
-        size_t mlCode;
-        U32 offset;
+        size_t mLength;
         hashTable[h] = current;   /* update hash table */
 
-        if ( (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))   /* intentional overflow */
+        if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
            && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
             const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
-            mlCode = ZSTD_count_2segments(ip+1+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repMatchEnd, lowPrefixPtr) + EQUAL_READ32;
+            mLength = ZSTD_count_2segments(ip+1+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repMatchEnd, lowPrefixPtr) + EQUAL_READ32;
             ip++;
-            ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mlCode-MINMATCH);
+            ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
         } else {
             if ( (matchIndex < lowestIndex) ||
                  (MEM_read32(match) != MEM_read32(ip)) ) {
@@ -1270,16 +1267,17 @@ static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
             }
             {   const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
                 const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
-                mlCode = ZSTD_count_2segments(ip+EQUAL_READ32, match+EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32;
-                while ((ip>anchor) && (match>lowMatchPtr) && (ip[-1] == match[-1])) { ip--; match--; mlCode++; }   /* catch up */
+                U32 offset;
+                mLength = ZSTD_count_2segments(ip+EQUAL_READ32, match+EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32;
+                while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
                 offset = current - matchIndex;
                 offset_2 = offset_1;
                 offset_1 = offset;
-                ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mlCode-MINMATCH);
+                ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
         }   }
 
         /* found a match : store it */
-        ip += mlCode;
+        ip += mLength;
         anchor = ip;
 
         if (ip <= ilimit) {
@@ -1335,8 +1333,6 @@ static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx* ctx,
 }
 
 
-
-
 /*-*************************************
 *  Binary Tree search
 ***************************************/
@@ -1435,7 +1431,7 @@ static U32 ZSTD_insertBt1(ZSTD_CCtx* zc, const BYTE* const ip, const U32 mls, co
     }   }
 
     *smallerPtr = *largerPtr = 0;
-    if (bestLength > 384) return MIN(192, (U32)(bestLength - 384));
+    if (bestLength > 384) return MIN(192, (U32)(bestLength - 384));   /* speed optimization */
     if (matchEndIdx > current + 8) return matchEndIdx - current - 8;
     return 1;
 }
@@ -1569,7 +1565,6 @@ static void ZSTD_updateTree_extDict(ZSTD_CCtx* zc, const BYTE* const ip, const B
 }
 
 
-
 /** Tree updater, providing best match */
 static size_t ZSTD_BtFindBestMatch_extDict (
                         ZSTD_CCtx* zc,
@@ -1620,7 +1615,7 @@ U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls)
     const U32 target = (U32)(ip - base);
     U32 idx = zc->nextToUpdate;
 
-    while(idx < target) {
+    while(idx < target) {   /* catch up */
         size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
         NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
         hashTable[h] = idx;
@@ -1657,7 +1652,7 @@ size_t ZSTD_HcFindBestMatch_generic (
     /* HC4 match finder */
     U32 matchIndex = ZSTD_insertAndFindFirstIndex (zc, ip, mls);
 
-    for ( ; (matchIndex>lowLimit) && (nbAttempts) ; nbAttempts--) {
+    for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {
         const BYTE* match;
         size_t currentMl=0;
         if ((!extDict) || matchIndex >= dictLimit) {
@@ -1671,7 +1666,7 @@ size_t ZSTD_HcFindBestMatch_generic (
         }
 
         /* save best solution */
-        if (currentMl > ml) { ml = currentMl; *offsetPtr = ZSTD_REP_MOVE + current - matchIndex; if (ip+currentMl == iLimit) break; /* best possible, and avoid read overflow*/ }
+        if (currentMl > ml) { ml = currentMl; *offsetPtr = current - matchIndex + ZSTD_REP_MOVE; if (ip+currentMl == iLimit) break; /* best possible, and avoid read overflow*/ }
 
         if (matchIndex <= minChain) break;
         matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
@@ -1844,7 +1839,7 @@ _storeSequence:
     /* Save reps for next block */
     {   int i;
         for (i=0; i<ZSTD_REP_NUM; i++) {
-            if (!rep[i]) rep[i] = (U32)(iend - ctx->base);   /* in case some zero are left */
+            if (!rep[i]) rep[i] = (U32)(iend - ctx->base) + 1;   /* in case some zero are left */
             ctx->savedRep[i] = rep[i];
     }   }
 
@@ -2238,7 +2233,7 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* zc,
 
     /* preemptive overflow correction */
     if (zc->lowLimit > (1<<30)) {
-        U32 const btplus = (zc->params.cParams.strategy == ZSTD_btlazy2) || (zc->params.cParams.strategy == ZSTD_btopt);
+        U32 const btplus = (zc->params.cParams.strategy == ZSTD_btlazy2) | (zc->params.cParams.strategy == ZSTD_btopt);
         U32 const chainMask = (1 << (zc->params.cParams.chainLog - btplus)) - 1;
         U32 const newLowLimit = zc->lowLimit & chainMask;   /* preserve position % chainSize */
         U32 const correction = zc->lowLimit - newLowLimit;
zstd_decompress.c
@@ -749,7 +749,7 @@ static seq_t ZSTD_decodeSequence(seqState_t* seqState)
     if (MEM_32bits() && (mlBits+llBits>24)) BIT_reloadDStream(&(seqState->DStream));
 
     seq.litLength = LL_base[llCode] + ((llCode>15) ? BIT_readBits(&(seqState->DStream), llBits) : 0);   /* <= 16 bits */
-    if (MEM_32bits() ||
+    if (MEM_32bits() |
       (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BIT_reloadDStream(&(seqState->DStream));
 
     /* ANS state update */
@@ -765,23 +765,22 @@ static seq_t ZSTD_decodeSequence(seqState_t* seqState)
 FORCE_INLINE
 size_t ZSTD_execSequence(BYTE* op,
                          BYTE* const oend, seq_t sequence,
-                         const BYTE** litPtr, const BYTE* const litLimit_8,
+                         const BYTE** litPtr, const BYTE* const litLimit_w,
                          const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
 {
     BYTE* const oLitEnd = op + sequence.litLength;
     size_t const sequenceLength = sequence.litLength + sequence.matchLength;
     BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
-    BYTE* const oend_8 = oend-8;
+    BYTE* const oend_w = oend-WILDCOPY_OVERLENGTH;
     const BYTE* const iLitEnd = *litPtr + sequence.litLength;
     const BYTE* match = oLitEnd - sequence.offset;
 
     /* check */
-    if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of 8 from oend */
-    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */
-    if (iLitEnd > litLimit_8) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
+    if ((oLitEnd>oend_w) | (oMatchEnd>oend)) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
+    if (iLitEnd > litLimit_w) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
 
     /* copy Literals */
-    ZSTD_wildcopy(op, *litPtr, sequence.litLength);   /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
+    ZSTD_wildcopy(op, *litPtr, sequence.litLength);   /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
     op = oLitEnd;
     *litPtr = iLitEnd;   /* update for next sequence */
 
@@ -821,10 +820,10 @@ size_t ZSTD_execSequence(BYTE* op,
     op += 8; match += 8;
 
     if (oMatchEnd > oend-(16-MINMATCH)) {
-        if (op < oend_8) {
-            ZSTD_wildcopy(op, match, oend_8 - op);
-            match += oend_8 - op;
-            op = oend_8;
+        if (op < oend_w) {
+            ZSTD_wildcopy(op, match, oend_w - op);
+            match += oend_w - op;
+            op = oend_w;
         }
         while (op < oMatchEnd) *op++ = *match++;
     } else {
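Illustration (not from the patch) : the oend_w margin exists because of the wildcopy idiom sketched below. Copies advance in fixed 8-byte steps and may write up to WILDCOPY_OVERLENGTH-1 bytes past the requested length, so destinations must reserve that much slack. The value 8 used here for WILDCOPY_OVERLENGTH is an assumption; the actual definition lives in the library's internal header.

#include <stddef.h>
#include <string.h>

#define WILDCOPY_OVERLENGTH 8   /* assumption : mirrors the internal definition */

/* may write up to WILDCOPY_OVERLENGTH-1 bytes beyond dst+length */
static void wildcopy(void* dst, const void* src, ptrdiff_t length)
{
    char* op = (char*)dst;
    const char* ip = (const char*)src;
    char* const oend = op + length;
    do { memcpy(op, ip, 8); op += 8; ip += 8; } while (op < oend);
}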
@@ -845,7 +844,7 @@ static size_t ZSTD_decompressSequences(
     BYTE* const oend = ostart + maxDstSize;
     BYTE* op = ostart;
     const BYTE* litPtr = dctx->litPtr;
-    const BYTE* const litLimit_8 = litPtr + dctx->litBufSize - 8;
+    const BYTE* const litLimit_w = litPtr + dctx->litBufSize - WILDCOPY_OVERLENGTH;
     const BYTE* const litEnd = litPtr + dctx->litSize;
     FSE_DTable* DTableLL = dctx->LLTable;
     FSE_DTable* DTableML = dctx->MLTable;
@@ -875,7 +874,7 @@ static size_t ZSTD_decompressSequences(
     for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) {
         nbSeq--;
         {   seq_t const sequence = ZSTD_decodeSequence(&seqState);
-            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litLimit_8, base, vBase, dictEnd);
+            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litLimit_w, base, vBase, dictEnd);
             if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
             op += oneSeqSize;
     }   }
|
|||
|
||||
/* last literal segment */
|
||||
{ size_t const lastLLSize = litEnd - litPtr;
|
||||
if (litPtr > litEnd) return ERROR(corruption_detected); /* too many literals already used */
|
||||
if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
|
||||
//if (litPtr > litEnd) return ERROR(corruption_detected); /* too many literals already used */
|
||||
if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
|
||||
memcpy(op, litPtr, lastLLSize);
|
||||
op += lastLLSize;
|
||||
}
|
||||
|
@@ -1180,12 +1179,13 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
 }
 
 
-static void ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
 {
     dctx->dictEnd = dctx->previousDstEnd;
     dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
     dctx->base = dict;
     dctx->previousDstEnd = (const char*)dict + dictSize;
+    return 0;
 }
 
 static size_t ZSTD_loadEntropy(ZSTD_DCtx* dctx, const void* const dict, size_t const dictSize)
@@ -1237,29 +1237,24 @@ static size_t ZSTD_loadEntropy(ZSTD_DCtx* dctx, const void* const dict, size_t c
 
 static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
 {
-    if (dictSize < 8) return ERROR(dictionary_corrupted);
+    if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize);
     {   U32 const magic = MEM_readLE32(dict);
         if (magic != ZSTD_DICT_MAGIC) {
-            /* pure content mode */
-            ZSTD_refDictContent(dctx, dict, dictSize);
-            return 0;
-        }
-    }
+            return ZSTD_refDictContent(dctx, dict, dictSize);   /* pure content mode */
+    }   }
     dctx->dictID = MEM_readLE32((const char*)dict + 4);
 
     /* load entropy tables */
     dict = (const char*)dict + 8;
     dictSize -= 8;
     {   size_t const eSize = ZSTD_loadEntropy(dctx, dict, dictSize);
         if (ZSTD_isError(eSize)) return ERROR(dictionary_corrupted);
         dict = (const char*)dict + eSize;
         dictSize -= eSize;
     }
 
     /* reference dictionary content */
-    ZSTD_refDictContent(dctx, dict, dictSize);
-
-    return 0;
+    return ZSTD_refDictContent(dctx, dict, dictSize);
 }
@@ -1319,7 +1314,7 @@ ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, ZSTD_cu
 }
 
 /*! ZSTD_createDDict() :
-*   Create a digested dictionary, ready to start decompression operation without startup delay.
+*   Create a digested dictionary, ready to start decompression without startup delay.
 *   `dict` can be released after `ZSTD_DDict` creation */
 ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
 {
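Illustration (not from the patch) : a hedged usage sketch matching the doc comment above; decompress_with_ddict is a hypothetical helper. Digest the dictionary once, then reuse the DDict across frames; the raw `dict` buffer may be released right after creation.

#include "zstd.h"

size_t decompress_with_ddict(ZSTD_DCtx* dctx,
                             void* dst, size_t dstCapacity,
                             const void* src, size_t srcSize,
                             const void* dict, size_t dictSize)
{
    ZSTD_DDict* const ddict = ZSTD_createDDict(dict, dictSize);
    size_t const dSize = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity,
                                                    src, srcSize, ddict);
    ZSTD_freeDDict(ddict);   /* in real use, keep the DDict alive for many frames */
    return dSize;   /* check with ZSTD_isError() */
}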
zdict.c
@@ -584,48 +584,51 @@ static void ZDICT_countEStats(EStats_ress_t esr,
                               U32* countLit, U32* offsetcodeCount, U32* matchlengthCount, U32* litlengthCount, U32* repOffsets,
                               const void* src, size_t srcSize)
 {
-    const seqStore_t* seqStorePtr;
+    size_t cSize;
 
     if (srcSize > ZSTD_BLOCKSIZE_MAX) srcSize = ZSTD_BLOCKSIZE_MAX;   /* protection vs large samples */
     ZSTD_copyCCtx(esr.zc, esr.ref);
-    ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize);
-    seqStorePtr = ZSTD_getSeqStore(esr.zc);
+    cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize);
+    if (ZSTD_isError(cSize)) { DISPLAYLEVEL(1, "warning : could not compress sample size %u \n", (U32)srcSize); return; }
 
-    /* literals stats */
-    {   const BYTE* bytePtr;
-        for(bytePtr = seqStorePtr->litStart; bytePtr < seqStorePtr->lit; bytePtr++)
-            countLit[*bytePtr]++;
-    }
+    if (cSize) {   /* if == 0; block is not compressible */
+        const seqStore_t* seqStorePtr = ZSTD_getSeqStore(esr.zc);
 
-    /* seqStats */
-    {   size_t const nbSeq = (size_t)(seqStorePtr->offset - seqStorePtr->offsetStart);
-        ZSTD_seqToCodes(seqStorePtr, nbSeq);
+        /* literals stats */
+        {   const BYTE* bytePtr;
+            for(bytePtr = seqStorePtr->litStart; bytePtr < seqStorePtr->lit; bytePtr++)
+                countLit[*bytePtr]++;
+        }
 
-        {   const BYTE* codePtr = seqStorePtr->offCodeStart;
-            size_t u;
-            for (u=0; u<nbSeq; u++) offsetcodeCount[codePtr[u]]++;
-        }
+        /* seqStats */
+        {   size_t const nbSeq = (size_t)(seqStorePtr->offset - seqStorePtr->offsetStart);
+            ZSTD_seqToCodes(seqStorePtr, nbSeq);
 
-        {   const BYTE* codePtr = seqStorePtr->mlCodeStart;
-            size_t u;
-            for (u=0; u<nbSeq; u++) matchlengthCount[codePtr[u]]++;
-        }
+            {   const BYTE* codePtr = seqStorePtr->offCodeStart;
+                size_t u;
+                for (u=0; u<nbSeq; u++) offsetcodeCount[codePtr[u]]++;
+            }
 
-        {   const BYTE* codePtr = seqStorePtr->llCodeStart;
-            size_t u;
-            for (u=0; u<nbSeq; u++) litlengthCount[codePtr[u]]++;
-    }   }
+            {   const BYTE* codePtr = seqStorePtr->mlCodeStart;
+                size_t u;
+                for (u=0; u<nbSeq; u++) matchlengthCount[codePtr[u]]++;
+            }
 
-    /* rep offsets */
-    {   const U32* const offsetPtr = seqStorePtr->offsetStart;
-        U32 offset1 = offsetPtr[0] - 3;
-        U32 offset2 = offsetPtr[1] - 3;
-        if (offset1 >= MAXREPOFFSET) offset1 = 0;
-        if (offset2 >= MAXREPOFFSET) offset2 = 0;
-        repOffsets[offset1] += 3;
-        repOffsets[offset2] += 1;
-    }
+            {   const BYTE* codePtr = seqStorePtr->llCodeStart;
+                size_t u;
+                for (u=0; u<nbSeq; u++) litlengthCount[codePtr[u]]++;
+        }   }
+
+        /* rep offsets */
+        {   const U32* const offsetPtr = seqStorePtr->offsetStart;
+            U32 offset1 = offsetPtr[0] - 3;
+            U32 offset2 = offsetPtr[1] - 3;
+            if (offset1 >= MAXREPOFFSET) offset1 = 0;
+            if (offset2 >= MAXREPOFFSET) offset2 = 0;
+            repOffsets[offset1] += 3;
+            repOffsets[offset2] += 1;
+    }   }
 }
 
 /*
fileio.c
@@ -137,7 +137,7 @@ static U32 g_sparseFileSupport = 1;   /* 0 : no sparse allowed; 1: auto (file ye
 void FIO_setSparseWrite(unsigned sparse) { g_sparseFileSupport=sparse; }
 static U32 g_dictIDFlag = 1;
 void FIO_setDictIDFlag(unsigned dictIDFlag) { g_dictIDFlag = dictIDFlag; }
-static U32 g_checksumFlag = 0;
+static U32 g_checksumFlag = 1;
 void FIO_setChecksumFlag(unsigned checksumFlag) { g_checksumFlag = checksumFlag; }
 static U32 g_removeSrcFile = 0;
 void FIO_setRemoveSrcFile(unsigned flag) { g_removeSrcFile = (flag>0); }
@@ -413,7 +413,7 @@ static int FIO_compressFilename_dstFile(cRess_t ress,
     int result;
 
     ress.dstFile = FIO_openDstFile(dstFileName);
-    if (ress.dstFile==0) { fclose(ress.srcFile); return 1; }
+    if (ress.dstFile==0) return 1;
 
     result = FIO_compressFilename_srcFile(ress, dstFileName, srcFileName, cLevel);
 
playTests.sh
@@ -22,10 +22,12 @@ roundTripTest() {
 
 isWindows=false
 ECHO="echo"
+INTOVOID="/dev/null"
 case "$OS" in
   Windows*)
     isWindows=true
     ECHO="echo -e"
+    INTOVOID="nul"
     ;;
 esac
 
@@ -53,19 +55,21 @@ $ECHO "test : null-length file roundtrip"
 $ECHO -n '' | $ZSTD - --stdout | $ZSTD -d --stdout
 $ECHO "test : decompress file with wrong suffix (must fail)"
 $ZSTD -d tmpCompressed && die "wrong suffix error not detected!"
+$ZSTD -df tmp && die "should have refused : wrong extension"
 $ECHO "test : decompress into stdout"
-$ZSTD -d tmpCompressed -c > tmpResult    # decompression using stdout
+$ZSTD --decompress tmpCompressed -c > tmpResult
+$ZSTD --decompress tmpCompressed --stdout > tmpResult
-if [ "$isWindows" = false ] ; then
-  $ZSTD -d < tmp.zst > /dev/null         # combine decompression, stdin & stdout
-  $ZSTD -d - < tmp.zst > /dev/null
-fi
-$ZSTD -dc < tmp.zst > /dev/null
-$ZSTD -dc - < tmp.zst > /dev/null
+$ECHO "test : decompress from stdin into stdout"
+$ZSTD -dc < tmp.zst > $INTOVOID          # combine decompression, stdin & stdout
+$ZSTD -dc - < tmp.zst > $INTOVOID
+$ZSTD -d < tmp.zst > $INTOVOID           # implicit stdout when stdin is used
+$ZSTD -d - < tmp.zst > $INTOVOID
 $ECHO "test : overwrite protection"
 $ZSTD -q tmp && die "overwrite check failed!"
 $ECHO "test : force overwrite"
 $ZSTD -q -f tmp
 $ZSTD -q --force tmp
-$ZSTD -df tmp && die "should have refused : wrong extension"
 $ECHO "test : file removal"
 $ZSTD -f --rm tmp
 ls tmp && die "tmp should no longer be present"
@@ -135,9 +139,9 @@ rm tmpSparse*
 
 $ECHO "\n**** multiple files tests **** "
 
-./datagen -s1        > tmp1 2> /dev/null
-./datagen -s2 -g100K > tmp2 2> /dev/null
-./datagen -s3 -g1M   > tmp3 2> /dev/null
+./datagen -s1        > tmp1 2> $INTOVOID
+./datagen -s2 -g100K > tmp2 2> $INTOVOID
+./datagen -s3 -g1M   > tmp3 2> $INTOVOID
 $ZSTD -f tmp*
 $ECHO "compress tmp* : "
 ls -ls tmp*
roundTripCrash.c
@@ -36,8 +36,14 @@
 #include <stdio.h>        /* fprintf */
 #include <sys/types.h>    /* stat */
 #include <sys/stat.h>     /* stat */
+#include "xxhash.h"
 #include "zstd.h"
 
+/*===========================================
+*   Macros
+*==========================================*/
+#define MIN(a,b)   ( (a) < (b) ? (a) : (b) )
+
 /** roundTripTest() :
 *   Compresses `srcBuff` into `compressedBuff`,
 *   then decompresses `compressedBuff` into `resultBuff`.
@@ -51,7 +57,9 @@ static size_t roundTripTest(void* resultBuff, size_t resultBuffCapacity,
                             const void* srcBuff, size_t srcBuffSize)
 {
     static const int maxClevel = 19;
-    int const cLevel = (!srcBuffSize) ? 1 : (*(const unsigned char*)srcBuff) % maxClevel;
+    size_t const hashLength = MIN(128, srcBuffSize);
+    unsigned const h32 = XXH32(srcBuff, hashLength, 0);
+    int const cLevel = h32 % maxClevel;
     size_t const cSize = ZSTD_compress(compressedBuff, compressedBuffCapacity, srcBuff, srcBuffSize, cLevel);
     if (ZSTD_isError(cSize)) {
         fprintf(stderr, "Compression error : %s \n", ZSTD_getErrorName(cSize));
zbufftest.c
@@ -421,11 +421,17 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, double compres
         }   }
 
         /* final frame epilogue */
-        {   size_t dstBuffSize = cBufferSize - cSize;
-            size_t const flushError = ZBUFF_compressEnd(zc, cBuffer+cSize, &dstBuffSize);
-            CHECK (ZBUFF_isError(flushError), "flush error : %s", ZBUFF_getErrorName(flushError));
-            cSize += dstBuffSize;
-        }
+        {   size_t remainingToFlush = (size_t)(-1);
+            while (remainingToFlush) {
+                size_t const randomDstSize = FUZ_randomLength(&lseed, maxSampleLog);
+                size_t dstBuffSize = MIN(cBufferSize - cSize, randomDstSize);
+                U32 const enoughDstSize = dstBuffSize >= remainingToFlush;
+                remainingToFlush = ZBUFF_compressEnd(zc, cBuffer+cSize, &dstBuffSize);
+                CHECK (ZBUFF_isError(remainingToFlush), "flush error : %s", ZBUFF_getErrorName(remainingToFlush));
+                //DISPLAY("flush %u bytes : still within context : %i \n", (U32)dstBuffSize, (int)remainingToFlush);
+                CHECK (enoughDstSize && remainingToFlush, "ZBUFF_compressEnd() not fully flushed, but enough space available");
+                cSize += dstBuffSize;
+        }   }
         crcOrig = XXH64_digest(&xxhState);
 
         /* multi - fragments decompression test */
zstdcli.c
@@ -136,7 +136,7 @@ static int usage_advanced(const char* programName)
 #ifndef ZSTD_NOCOMPRESS
     DISPLAY( "--ultra : enable ultra modes (requires more memory to decompress)\n");
     DISPLAY( "--no-dictID : don't write dictID into header (dictionary compression)\n");
-    DISPLAY( "--check : enable integrity check\n");
+    DISPLAY( "--[no-]check : integrity check (default:enabled)\n");
 #endif
 #ifndef ZSTD_NODECOMPRESS
     DISPLAY( "--test  : test compressed file integrity \n");
@@ -257,6 +257,7 @@ int main(int argCount, const char** argv)
     if (!strcmp(argument, "--stdout")) { forceStdout=1; outFileName=stdoutmark; displayLevel=1; continue; }
     if (!strcmp(argument, "--ultra")) { FIO_setMaxWLog(0); continue; }
     if (!strcmp(argument, "--check")) { FIO_setChecksumFlag(2); continue; }
+    if (!strcmp(argument, "--no-check")) { FIO_setChecksumFlag(0); continue; }
     if (!strcmp(argument, "--no-dictID")) { FIO_setDictIDFlag(0); continue; }
     if (!strcmp(argument, "--sparse")) { FIO_setSparseWrite(2); continue; }
     if (!strcmp(argument, "--no-sparse")) { FIO_setSparseWrite(0); continue; }