Merge branch 'dev' of github.com:facebook/zstd into dev
commit e45d82ab0b
@@ -192,6 +192,8 @@ typedef struct {
     BYTE* llCode;
     BYTE* mlCode;
     BYTE* ofCode;
+    size_t maxNbSeq;
+    size_t maxNbLit;
     U32 longLengthID;   /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
     U32 longLengthPos;
 } seqStore_t;
@@ -805,7 +805,7 @@ size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
     size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
     U32 const divider = (cParams.searchLength==3) ? 3 : 4;
     size_t const maxNbSeq = blockSize / divider;
-    size_t const tokenSpace = blockSize + 11*maxNbSeq;
+    size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq;
     size_t const entropySpace = HUF_WORKSPACE_SIZE;
     size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t);
     size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 1);
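For orientation (this is an editorial aside, not part of the diff), the estimate for a full-sized block works out as below; the WILDCOPY_OVERLENGTH value of 8 bytes is an assumption based on zstd_internal.h of this period:

```c
#include <stddef.h>

/* Illustrative arithmetic only. */
size_t const blockSize  = 128 * 1024;      /* ZSTD_BLOCKSIZE_MAX */
size_t const maxNbSeq   = blockSize / 4;   /* divider = 4 when searchLength != 3 -> 32768 */
size_t const tokenSpace = 8                /* WILDCOPY_OVERLENGTH (assumed value) */
                        + blockSize        /* literals buffer */
                        + 11 * maxNbSeq;   /* 8-byte seqDef + 3 code bytes per sequence */
/* tokenSpace = 8 + 131072 + 360448 = 491528 bytes reserved for the seqStore */
```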
@@ -931,33 +931,51 @@ typedef enum { ZSTDb_not_buffered, ZSTDb_buffered } ZSTD_buffered_policy_e;
 /* ZSTD_sufficientBuff() :
  * check internal buffers exist for streaming if buffPol == ZSTDb_buffered .
  * Note : they are assumed to be correctly sized if ZSTD_equivalentCParams()==1 */
-static U32 ZSTD_sufficientBuff(size_t bufferSize1, size_t blockSize1,
+static U32 ZSTD_sufficientBuff(size_t bufferSize1, size_t maxNbSeq1,
+                            size_t maxNbLit1,
                             ZSTD_buffered_policy_e buffPol2,
                             ZSTD_compressionParameters cParams2,
                             U64 pledgedSrcSize)
 {
     size_t const windowSize2 = MAX(1, (size_t)MIN(((U64)1 << cParams2.windowLog), pledgedSrcSize));
     size_t const blockSize2 = MIN(ZSTD_BLOCKSIZE_MAX, windowSize2);
+    size_t const maxNbSeq2 = blockSize2 / ((cParams2.searchLength == 3) ? 3 : 4);
+    size_t const maxNbLit2 = blockSize2;
     size_t const neededBufferSize2 = (buffPol2==ZSTDb_buffered) ? windowSize2 + blockSize2 : 0;
-    DEBUGLOG(4, "ZSTD_sufficientBuff: is windowSize2=%u <= wlog1=%u",
-                (U32)windowSize2, cParams2.windowLog);
-    DEBUGLOG(4, "ZSTD_sufficientBuff: is blockSize2=%u <= blockSize1=%u",
-                (U32)blockSize2, (U32)blockSize1);
-    return (blockSize2 <= blockSize1) /* seqStore space depends on blockSize */
+    DEBUGLOG(4, "ZSTD_sufficientBuff: is neededBufferSize2=%u <= bufferSize1=%u",
+                (U32)neededBufferSize2, (U32)bufferSize1);
+    DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbSeq2=%u <= maxNbSeq1=%u",
+                (U32)maxNbSeq2, (U32)maxNbSeq1);
+    DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbLit2=%u <= maxNbLit1=%u",
+                (U32)maxNbLit2, (U32)maxNbLit1);
+    return (maxNbLit2 <= maxNbLit1)
+         & (maxNbSeq2 <= maxNbSeq1)
          & (neededBufferSize2 <= bufferSize1);
 }

 /** Equivalence for resetCCtx purposes */
 static U32 ZSTD_equivalentParams(ZSTD_CCtx_params params1,
                                  ZSTD_CCtx_params params2,
-                                 size_t buffSize1, size_t blockSize1,
+                                 size_t buffSize1,
+                                 size_t maxNbSeq1, size_t maxNbLit1,
                                  ZSTD_buffered_policy_e buffPol2,
                                  U64 pledgedSrcSize)
 {
     DEBUGLOG(4, "ZSTD_equivalentParams: pledgedSrcSize=%u", (U32)pledgedSrcSize);
-    return ZSTD_equivalentCParams(params1.cParams, params2.cParams) &&
-           ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams) &&
-           ZSTD_sufficientBuff(buffSize1, blockSize1, buffPol2, params2.cParams, pledgedSrcSize);
+    if (!ZSTD_equivalentCParams(params1.cParams, params2.cParams)) {
+        DEBUGLOG(4, "ZSTD_equivalentCParams() == 0");
+        return 0;
+    }
+    if (!ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams)) {
+        DEBUGLOG(4, "ZSTD_equivalentLdmParams() == 0");
+        return 0;
+    }
+    if (!ZSTD_sufficientBuff(buffSize1, maxNbSeq1, maxNbLit1, buffPol2,
+                             params2.cParams, pledgedSrcSize)) {
+        DEBUGLOG(4, "ZSTD_sufficientBuff() == 0");
+        return 0;
+    }
+    return 1;
 }

 static void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
@@ -1085,7 +1103,8 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,

     if (crp == ZSTDcrp_continue) {
         if (ZSTD_equivalentParams(zc->appliedParams, params,
-                                  zc->inBuffSize, zc->blockSize,
+                                  zc->inBuffSize,
+                                  zc->seqStore.maxNbSeq, zc->seqStore.maxNbLit,
                                   zbuff, pledgedSrcSize)) {
             DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> continue mode (wLog1=%u, blockSize1=%zu)",
                         zc->appliedParams.cParams.windowLog, zc->blockSize);
@@ -1107,7 +1126,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
         size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
         U32 const divider = (params.cParams.searchLength==3) ? 3 : 4;
         size_t const maxNbSeq = blockSize / divider;
-        size_t const tokenSpace = blockSize + 11*maxNbSeq;
+        size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq;
         size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0;
         size_t const buffInSize = (zbuff==ZSTDb_buffered) ? windowSize + blockSize : 0;
         size_t const matchStateSize = ZSTD_sizeof_matchState(&params.cParams, /* forCCtx */ 1);
@@ -1197,13 +1216,18 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
         ptr = ZSTD_reset_matchState(&zc->blockState.matchState, ptr, &params.cParams, crp, /* forCCtx */ 1);

         /* sequences storage */
+        zc->seqStore.maxNbSeq = maxNbSeq;
         zc->seqStore.sequencesStart = (seqDef*)ptr;
         ptr = zc->seqStore.sequencesStart + maxNbSeq;
         zc->seqStore.llCode = (BYTE*) ptr;
         zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
         zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
         zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
-        ptr = zc->seqStore.litStart + blockSize;
+        /* ZSTD_wildcopy() is used to copy into the literals buffer,
+         * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
+         */
+        zc->seqStore.maxNbLit = blockSize;
+        ptr = zc->seqStore.litStart + blockSize + WILDCOPY_OVERLENGTH;

         /* ldm bucketOffsets table */
         if (params.ldmParams.enableLdm) {
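Why the over-allocation: the sketch below is a simplified model of how a wildcopy behaves, not the actual ZSTD_wildcopy implementation. It copies in fixed 8-byte chunks, so the last iteration may write past the requested length, and the destination therefore needs WILDCOPY_OVERLENGTH bytes of slack.

```c
#include <string.h>

/* Simplified model of ZSTD_wildcopy(): copies 8 bytes at a time and may run
 * up to WILDCOPY_OVERLENGTH bytes past `length`, which is why the literals
 * buffer above is over-allocated by that amount. */
static void wildcopy_sketch(void* dst, const void* src, size_t length)
{
    unsigned char* op = (unsigned char*)dst;
    const unsigned char* ip = (const unsigned char*)src;
    unsigned char* const oend = op + length;
    do {
        memcpy(op, ip, 8);   /* last iteration may write past oend */
        op += 8;
        ip += 8;
    } while (op < oend);
}
```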
@@ -1322,8 +1346,7 @@ static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
     }

     /* copy dictionary offsets */
-    {
-        ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
+    {   ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
         ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
         dstMatchState->window = srcMatchState->window;
         dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
@@ -1647,6 +1670,7 @@ void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
     BYTE* const mlCodeTable = seqStorePtr->mlCode;
     U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
     U32 u;
+    assert(nbSeq <= seqStorePtr->maxNbSeq);
     for (u=0; u<nbSeq; u++) {
         U32 const llv = sequences[u].litLength;
         U32 const mlv = sequences[u].matchLength;
@@ -2235,13 +2259,6 @@ MEM_STATIC size_t ZSTD_compressSequences(seqStore_t* seqStorePtr,
         if (cSize >= maxCSize) return 0;  /* block not compressed */
     }

-    /* We check that dictionaries have offset codes available for the first
-     * block. After the first block, the offcode table might not have large
-     * enough codes to represent the offsets in the data.
-     */
-    if (nextEntropy->fse.offcode_repeatMode == FSE_repeat_valid)
-        nextEntropy->fse.offcode_repeatMode = FSE_repeat_check;
-
     return cSize;
 }
@@ -2380,12 +2397,20 @@ static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
                             &zc->appliedParams,
                             dst, dstCapacity,
                             srcSize, zc->entropyWorkspace, zc->bmi2);
-    if (ZSTD_isError(cSize) || cSize == 0) return cSize;
+    if (!ZSTD_isError(cSize) && cSize != 0) {
         /* confirm repcodes and entropy tables */
-    {   ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
+        ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
         zc->blockState.prevCBlock = zc->blockState.nextCBlock;
         zc->blockState.nextCBlock = tmp;
     }

+        /* We check that dictionaries have offset codes available for the first
+         * block. After the first block, the offcode table might not have large
+         * enough codes to represent the offsets in the data.
+         */
+        if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+            zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;

     return cSize;
 }
 }
@@ -314,8 +314,10 @@ MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const v
                 pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offsetCode);
     }
 #endif
+    assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
     /* copy Literals */
-    assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + 128 KB);
+    assert(seqStorePtr->maxNbLit <= 128 KB);
+    assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
     ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
     seqStorePtr->lit += litLength;
@@ -987,8 +987,10 @@ size_t ZDICT_trainFromBuffer_unsafe_legacy(
             U32 const pos = dictList[u].pos;
             U32 const length = dictList[u].length;
             U32 const printedLength = MIN(40, length);
-            if ((pos > samplesBuffSize) || ((pos + length) > samplesBuffSize))
+            if ((pos > samplesBuffSize) || ((pos + length) > samplesBuffSize)) {
+                free(dictList);
                 return ERROR(GENERIC);   /* should never happen */
+            }
             DISPLAYLEVEL(3, "%3u:%3u bytes at pos %8u, savings %7u bytes |",
                          u, length, pos, dictList[u].savings);
             ZDICT_printHex((const char*)samplesBuffer+pos, printedLength);
@@ -211,7 +211,8 @@ typedef struct ZSTD_CDict_s ZSTD_CDict;
  *  When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
  *  ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
  *  ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
- * `dictBuffer` can be released after ZSTD_CDict creation, since its content is copied within CDict */
+ * `dictBuffer` can be released after ZSTD_CDict creation, since its content is copied within CDict
+ *  Note : A ZSTD_CDict can be created with an empty dictionary, but it is inefficient for small data. */
 ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
                                          int compressionLevel);

@@ -223,7 +224,9 @@ ZSTDLIB_API size_t ZSTD_freeCDict(ZSTD_CDict* CDict);
  *  Compression using a digested Dictionary.
  *  Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
  *  Note that compression level is decided during dictionary creation.
- *  Frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */
+ *  Frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no)
+ *  Note : ZSTD_compress_usingCDict() can be used with a ZSTD_CDict created from an empty dictionary.
+ *         But it is inefficient for small data, and it is recommended to use ZSTD_compressCCtx(). */
 ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
                                             void* dst, size_t dstCapacity,
                                             const void* src, size_t srcSize,
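A minimal usage sketch of the behaviour documented above (error handling trimmed; buffer names are placeholders):

```c
#include "zstd.h"

size_t compress_with_cdict(void* dst, size_t dstCapacity,
                           const void* src, size_t srcSize,
                           const void* dictBuffer, size_t dictSize)
{
    /* dictBuffer may be freed right after creation: its content is copied into the CDict.
     * dictSize == 0 is allowed, but inefficient for small inputs (see notes above). */
    ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuffer, dictSize, 3 /* compression level */);
    ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
    size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, src, srcSize, cdict);
    ZSTD_freeCCtx(cctx);
    ZSTD_freeCDict(cdict);
    return cSize;   /* check with ZSTD_isError() */
}
```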
programs/bench.c (824 changed lines): file diff suppressed because it is too large

programs/bench.h (308 changed lines):
@@ -15,59 +15,82 @@ extern "C" {
 #ifndef BENCH_H_121279284357
 #define BENCH_H_121279284357

 /* ===  Dependencies  === */
 #include <stddef.h>   /* size_t */
 #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_compressionParameters */
 #include "zstd.h"     /* ZSTD_compressionParameters */

-/* Creates a struct of type typeName with an int type .error field
- * and a .result field of some baseType. Functions with return
- * typeName pass a successful result with .error = 0 and .result
- * with the intended result, while returning an error will result
- * in .error != 0.
+
+/* ===  Constants  === */
+
+#define MB_UNIT 1000000
+
+
+/* ===  Benchmark functions  === */
+
+/* Creates a variant `typeName`, able to express "error or valid result".
+ * Functions with return type `typeName`
+ * must first check if result is valid, using BMK_isSuccessful_*(),
+ * and only then can extract `baseType`.
  */
-#define ERROR_STRUCT(baseType, typeName) typedef struct { \
-    baseType result; \
-    int error; \
-} typeName
+#define VARIANT_ERROR_RESULT(baseType, variantName)  \
+                                                     \
+typedef struct {                                     \
+    baseType internal_never_use_directly;            \
+    int tag;                                         \
+} variantName

 typedef struct {
     size_t cSize;
-    U64 cSpeed;   /* bytes / sec */
-    U64 dSpeed;
-    size_t cMem;
-} BMK_result_t;
+    unsigned long long cSpeed;   /* bytes / sec */
+    unsigned long long dSpeed;
+    size_t cMem;                 /* ? what is reported ? */
+} BMK_benchResult_t;

-ERROR_STRUCT(BMK_result_t, BMK_return_t);
+VARIANT_ERROR_RESULT(BMK_benchResult_t, BMK_benchOutcome_t);
-/* called in cli */
-/* Loads files in fileNamesTable into memory, as well as a dictionary
- * from dictFileName, and then uses benchMem */
-/* fileNamesTable - name of files to benchmark
- * nbFiles - number of files (size of fileNamesTable), must be > 0
- * dictFileName - name of dictionary file to load
- * cLevel - compression level to benchmark, errors if invalid
- * compressionParams - basic compression Parameters
- * displayLevel - what gets printed
+/* check first if the return structure represents an error or a valid result */
+int BMK_isSuccessful_benchOutcome(BMK_benchOutcome_t outcome);
+
+/* extract result from variant type.
+ * note : this function will abort() program execution if result is not valid
+ *        check result validity first, by using BMK_isSuccessful_benchOutcome()
+ */
+BMK_benchResult_t BMK_extract_benchResult(BMK_benchOutcome_t outcome);
+
+
+/*! BMK_benchFiles() -- called by zstdcli */
+/*  Loads files from fileNamesTable into memory,
+ *  and an optional dictionary from dictFileName (can be NULL),
+ *  then uses benchMem().
+ *  fileNamesTable - name of files to benchmark.
+ *  nbFiles - number of files (size of fileNamesTable), must be > 0.
+ *  dictFileName - name of dictionary file to load.
+ *  cLevel - compression level to benchmark, errors if invalid.
+ *  compressionParams - advanced compression Parameters.
+ *  displayLevel - what gets printed:
 *    0 : no display;
 *    1 : errors;
 *    2 : + result + interaction + warnings;
-*    3 : + progression;
-*    4 : + information
- * return
- *      .error will give a nonzero error value if an error has occured
- *      .result - if .error = 0, .result will return the time taken to compression speed
- *          (.cSpeed), decompression speed (.dSpeed), and compressed size (.cSize) of the original
- *          file
+*    3 : + information;
+*    4 : + debug
+ * @return:
+ *      a variant, which expresses either an error, or a valid result.
+ *      Use BMK_isSuccessful_benchOutcome() to check if function was successful.
+ *      If yes, extract the valid result with BMK_extract_benchResult(),
+ *      it will contain :
+ *          .cSpeed: compression speed in bytes per second,
+ *          .dSpeed: decompression speed in bytes per second,
+ *          .cSize : compressed size, in bytes
+ *          .cMem  : memory budget required for the compression context
 */
-BMK_return_t BMK_benchFiles(const char* const * const fileNamesTable, unsigned const nbFiles,
-                            const char* const dictFileName,
-                            int const cLevel, const ZSTD_compressionParameters* const compressionParams,
+BMK_benchOutcome_t BMK_benchFiles(
+                   const char* const * fileNamesTable, unsigned nbFiles,
+                   const char* dictFileName,
+                   int cLevel, const ZSTD_compressionParameters* compressionParams,
                    int displayLevel);
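For orientation (this caller is not part of the header), code written against the new contract would look roughly like this; the printf formatting and the display level are arbitrary choices:

```c
#include <stdio.h>
#include "bench.h"

static int run_file_bench(const char** files, unsigned nbFiles, int cLevel)
{
    ZSTD_compressionParameters const cParams = ZSTD_getCParams(cLevel, 0, 0);
    BMK_benchOutcome_t const outcome =
        BMK_benchFiles(files, nbFiles, NULL /* no dictionary */,
                       cLevel, &cParams, 2 /* errors + results */);
    if (!BMK_isSuccessful_benchOutcome(outcome)) return 1;
    {   BMK_benchResult_t const res = BMK_extract_benchResult(outcome);
        printf("%u bytes, %llu B/s compression, %llu B/s decompression \n",
               (unsigned)res.cSize, res.cSpeed, res.dSpeed);
    }
    return 0;
}
```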
 typedef enum {
     BMK_timeMode = 0,
     BMK_iterMode = 1
 } BMK_loopMode_t;

 typedef enum {
     BMK_both = 0,
@@ -77,14 +100,13 @@ typedef enum {

 typedef struct {
     BMK_mode_t mode;            /* 0: all, 1: compress only 2: decode only */
     BMK_loopMode_t loopMode;    /* if loopmode, then nbSeconds = nbLoops */
     unsigned nbSeconds;         /* default timing is in nbSeconds */
-    size_t blockSize;           /* Maximum allowable size of a block*/
+    size_t blockSize;           /* Maximum size of each block*/
     unsigned nbWorkers;         /* multithreading */
     unsigned realTime;          /* real time priority */
     int additionalParam;        /* used by python speed benchmark */
     unsigned ldmFlag;           /* enables long distance matching */
-    unsigned ldmMinMatch;       /* below: parameters for long distance matching, see zstd.1.md for meaning */
+    unsigned ldmMinMatch;       /* below: parameters for long distance matching, see zstd.1.md */
     unsigned ldmHashLog;
     unsigned ldmBucketSizeLog;
     unsigned ldmHashEveryLog;
@@ -93,132 +115,186 @@ typedef struct {
 /* returns default parameters used by nonAdvanced functions */
 BMK_advancedParams_t BMK_initAdvancedParams(void);

-/* See benchFiles for normal parameter uses and return, see advancedParams_t for adv */
-BMK_return_t BMK_benchFilesAdvanced(const char* const * const fileNamesTable, unsigned const nbFiles,
-                                    const char* const dictFileName,
-                                    int const cLevel, const ZSTD_compressionParameters* const compressionParams,
-                                    int displayLevel, const BMK_advancedParams_t* const adv);
+/*! BMK_benchFilesAdvanced():
+ *  Same as BMK_benchFiles(),
+ *  with more controls, provided through advancedParams_t structure */
+BMK_benchOutcome_t BMK_benchFilesAdvanced(
+                   const char* const * fileNamesTable, unsigned nbFiles,
+                   const char* dictFileName,
+                   int cLevel, const ZSTD_compressionParameters* compressionParams,
+                   int displayLevel, const BMK_advancedParams_t* adv);

-/* called in cli */
-/* Generates a sample with datagen with the compressibility argument*/
+/*! BMK_syntheticTest() -- called from zstdcli */
+/*  Generates a sample with datagen, using compressibility argument */
 /* cLevel - compression level to benchmark, errors if invalid
  * compressibility - determines compressibility of sample
  * compressionParams - basic compression Parameters
  * displayLevel - see benchFiles
  * adv - see advanced_Params_t
- * return
- *      .error will give a nonzero error value if an error has occured
- *      .result - if .error = 0, .result will return the time taken to compression speed
- *          (.cSpeed), decompression speed (.dSpeed), and compressed size (.cSize) of the original
- *          file
+ * @return:
+ *      a variant, which expresses either an error, or a valid result.
+ *      Use BMK_isSuccessful_benchOutcome() to check if function was successful.
+ *      If yes, extract the valid result with BMK_extract_benchResult(),
+ *      it will contain :
+ *          .cSpeed: compression speed in bytes per second,
+ *          .dSpeed: decompression speed in bytes per second,
+ *          .cSize : compressed size, in bytes
+ *          .cMem  : memory budget required for the compression context
 */
-BMK_return_t BMK_syntheticTest(int cLevel, double compressibility,
+BMK_benchOutcome_t BMK_syntheticTest(
+                   int cLevel, double compressibility,
                    const ZSTD_compressionParameters* compressionParams,
-                   int displayLevel, const BMK_advancedParams_t * const adv);
+                   int displayLevel, const BMK_advancedParams_t* adv);
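Similarly, a synthetic run with customized advanced parameters might be assembled like this (an editorial sketch; the level, compressibility and duration are arbitrary):

```c
#include "bench.h"

static int run_synthetic_bench(void)
{
    BMK_advancedParams_t adv = BMK_initAdvancedParams();
    ZSTD_compressionParameters const cParams = ZSTD_getCParams(3, 0, 0);
    adv.nbSeconds = 5;   /* measure for about 5 seconds */
    {   BMK_benchOutcome_t const o =
            BMK_syntheticTest(3 /* cLevel */, 0.50 /* compressibility */,
                              &cParams, 2 /* displayLevel */, &adv);
        if (!BMK_isSuccessful_benchOutcome(o)) return 1;
        {   BMK_benchResult_t const r = BMK_extract_benchResult(o);
            (void)r;   /* r.cSize, r.cSpeed, r.dSpeed, r.cMem */
    }   }
    return 0;
}
```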
-/* basic benchmarking function, called in paramgrill

+
+/* ===  Benchmark Zstandard in a memory-to-memory scenario  === */
+
+/** BMK_benchMem() -- core benchmarking function, called in paramgrill
  * applies ZSTD_compress_generic() and ZSTD_decompress_generic() on data in srcBuffer
- * with specific compression parameters specified by other arguments using benchFunction
+ * with specific compression parameters provided by other arguments using benchFunction
  * (cLevel, comprParams + adv in advanced Mode) */
 /* srcBuffer - data source, expected to be valid compressed data if in Decode Only Mode
  * srcSize - size of data in srcBuffer
  * fileSizes - srcBuffer is considered cut into 1+ segments, to compress separately.
  *             note : sum(fileSizes) must be == srcSize.  (<== ensure it's properly checked)
  * nbFiles - nb of segments
  * cLevel - compression level
  * comprParams - basic compression parameters
  * dictBuffer - a dictionary if used, null otherwise
  * dictBufferSize - size of dictBuffer, 0 otherwise
  * diplayLevel - see BMK_benchFiles
  * displayName - name used by display
- * return
- *      .error will give a nonzero value if an error has occured
- *      .result - if .error = 0, will give the same results as benchFiles
- *          but for the data stored in srcBuffer
+ * @return:
+ *      a variant, which expresses either an error, or a valid result.
+ *      Use BMK_isSuccessful_benchOutcome() to check if function was successful.
+ *      If yes, extract the valid result with BMK_extract_benchResult(),
+ *      it will contain :
+ *          .cSpeed: compression speed in bytes per second,
+ *          .dSpeed: decompression speed in bytes per second,
+ *          .cSize : compressed size, in bytes
+ *          .cMem  : memory budget required for the compression context
 */
-BMK_return_t BMK_benchMem(const void* srcBuffer, size_t srcSize,
+BMK_benchOutcome_t BMK_benchMem(const void* srcBuffer, size_t srcSize,
                    const size_t* fileSizes, unsigned nbFiles,
-                   const int cLevel, const ZSTD_compressionParameters* comprParams,
+                   int cLevel, const ZSTD_compressionParameters* comprParams,
                    const void* dictBuffer, size_t dictBufferSize,
                    int displayLevel, const char* displayName);

-/* See benchMem for normal parameter uses and return, see advancedParams_t for adv
+/* BMK_benchMemAdvanced() : same as BMK_benchMem()
+ * with following additional options :
 * dstBuffer - destination buffer to write compressed output in, NULL if none provided.
 * dstCapacity - capacity of destination buffer, give 0 if dstBuffer = NULL
 * adv = see advancedParams_t
 */
-BMK_return_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize,
+BMK_benchOutcome_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize,
                    void* dstBuffer, size_t dstCapacity,
                    const size_t* fileSizes, unsigned nbFiles,
-                   const int cLevel, const ZSTD_compressionParameters* comprParams,
+                   int cLevel, const ZSTD_compressionParameters* comprParams,
                    const void* dictBuffer, size_t dictBufferSize,
                    int displayLevel, const char* displayName,
                    const BMK_advancedParams_t* adv);


+/* ====  Benchmarking any function, iterated on a set of blocks  ==== */

 typedef struct {
-    unsigned long long nanoSecPerRun;  /* time per iteration */
     size_t sumOfReturn;                /* sum of return values */
-} BMK_customResult_t;
+    U64 nanoSecPerRun;                 /* time per iteration */
+} BMK_runTime_t;

-ERROR_STRUCT(BMK_customResult_t, BMK_customReturn_t);
+VARIANT_ERROR_RESULT(BMK_runTime_t, BMK_runOutcome_t);

-typedef size_t (*BMK_benchFn_t)(const void*, size_t, void*, size_t, void*);
-typedef size_t (*BMK_initFn_t)(void*);
+/* check first if the return structure represents an error or a valid result */
+int BMK_isSuccessful_runOutcome(BMK_runOutcome_t outcome);

-/* This function times the execution of 2 argument functions, benchFn and initFn */
+/* extract result from variant type.
+ * note : this function will abort() program execution if result is not valid
+ *        check result validity first, by using BMK_isSuccessful_runOutcome()
+ */
+BMK_runTime_t BMK_extract_runTime(BMK_runOutcome_t outcome);
+
+
+typedef size_t (*BMK_benchFn_t)(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* customPayload);
+typedef size_t (*BMK_initFn_t)(void* initPayload);
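For reference, a callback matching the new BMK_benchFn_t signature could look like the wrapper below; this mirrors what fullbench.c does, but the function name and the fixed level are illustrative only:

```c
#include "zstd.h"

/* Matches: size_t (*BMK_benchFn_t)(const void* src, size_t srcSize,
 *                                  void* dst, size_t dstCapacity, void* customPayload) */
static size_t bench_zstd_compress(const void* src, size_t srcSize,
                                  void* dst, size_t dstCapacity,
                                  void* payload)
{
    (void)payload;   /* unused for this simple wrapper */
    return ZSTD_compress(dst, dstCapacity, src, srcSize, 1 /* level */);
}
```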
+/* BMK_benchFunction() :
+ * This function times the execution of 2 argument functions, benchFn and initFn */

 /* benchFn - (*benchFn)(srcBuffers[i], srcSizes[i], dstBuffers[i], dstCapacities[i], benchPayload)
  *           is run nbLoops times
- * initFn - (*initFn)(initPayload) is run once per benchmark at the beginning. This argument can
- *          be NULL, in which case nothing is run.
- * blockCount - number of blocks (size of srcBuffers, srcSizes, dstBuffers, dstCapacities)
+ * initFn - (*initFn)(initPayload) is run once per benchmark, at the beginning.
+ *          This argument can be NULL, in which case nothing is run.
+ * blockCount - number of blocks. Size of all array parameters : srcBuffers, srcSizes, dstBuffers, dstCapacities, blockResults
  * srcBuffers - an array of buffers to be operated on by benchFn
  * srcSizes - an array of the sizes of above buffers
  * dstBuffers - an array of buffers to be written into by benchFn
  * dstCapacities - an array of the capacities of above buffers
- * blockResults - the return value of benchFn called on each block.
+ * blockResults - store the return value of benchFn for each block. Optional. Use NULL if this result is not requested.
  * nbLoops - defines number of times benchFn is run.
- *           assumed array of size blockCount, will have compressed size of each block written to it.
- * return
- *      .error will give a nonzero value if ZSTD_isError() is nonzero for any of the return
- *          of the calls to initFn and benchFn, or if benchFunction errors internally
- *      .result - if .error = 0, then .result will contain the sum of all return values of
- *          benchFn on the first iteration through all of the blocks (.sumOfReturn) and also
- *          the time per run of benchFn (.nanoSecPerRun). For the former, this
- *          is generally intended to be used on functions which return the # of bytes written
- *          into dstBuffer, hence this value will be the total amount of bytes written to
- *          dstBuffer.
+ * @return: a variant, which express either an error, or can generate a valid BMK_runTime_t result.
+ *      Use BMK_isSuccessful_runOutcome() to check if function was successful.
+ *      If yes, extract the result with BMK_extract_runTime(),
+ *      it will contain :
+ *          .sumOfReturn : the sum of all return values of benchFn through all of blocks
+ *          .nanoSecPerRun : time per run of benchFn + (time for initFn / nbLoops)
+ *      .sumOfReturn is generally intended for functions which return a # of bytes written into dstBuffer,
+ *          in which case, this value will be the total amount of bytes written into dstBuffer.
 */
-BMK_customReturn_t BMK_benchFunction(BMK_benchFn_t benchFn, void* benchPayload,
-                        BMK_initFn_t initFn, void* initPayload,
-                        size_t blockCount,
-                        const void* const * const srcBuffers, const size_t* srcSizes,
-                        void * const * const dstBuffers, const size_t* dstCapacities, size_t* blockResults,
-                        unsigned nbLoops);
-
-
-/* state information needed to advance computation for benchFunctionTimed */
-typedef struct BMK_timeState_t BMK_timedFnState_t;
-/* initializes timeState object with desired number of seconds */
-BMK_timedFnState_t* BMK_createTimeState(unsigned nbSeconds);
-/* resets existing timeState object */
-void BMK_resetTimeState(BMK_timedFnState_t*, unsigned nbSeconds);
-/* deletes timeState object */
-void BMK_freeTimeState(BMK_timedFnState_t* state);
-
-typedef struct {
-    BMK_customReturn_t result;
-    int completed;
-} BMK_customTimedReturn_t;
-
-/*
- * Benchmarks custom functions like BMK_benchFunction(), but runs for nbSeconds seconds rather than a fixed number of loops
- * arguments mostly the same other than BMK_benchFunction()
- * Usage - benchFunctionTimed will return in approximately one second. Keep calling BMK_benchFunctionTimed() until the return's completed field = 1.
- *         to continue updating intermediate result. Intermediate return values are returned by the function.
- */
-BMK_customTimedReturn_t BMK_benchFunctionTimed(BMK_timedFnState_t* cont,
-                        const void* const * const srcBlockBuffers, const size_t* srcBlockSizes,
-                        void* const * const dstBlockBuffers, const size_t* dstBlockCapacities, size_t* blockResults);
+BMK_runOutcome_t BMK_benchFunction(
+                        BMK_benchFn_t benchFn, void* benchPayload,
+                        BMK_initFn_t initFn, void* initPayload,
+                        size_t blockCount,
+                        const void *const * srcBuffers, const size_t* srcSizes,
+                        void *const * dstBuffers, const size_t* dstCapacities,
+                        size_t* blockResults,
+                        unsigned nbLoops);
+
+
+
+/* ====  Benchmark any function, providing intermediate results  ==== */
+
+/* state information tracking benchmark session */
+typedef struct BMK_timedFnState_s BMK_timedFnState_t;
+
+/* BMK_createTimedFnState() and BMK_resetTimedFnState() :
+ * Create/Set BMK_timedFnState_t for next benchmark session,
+ * which shall last a minimum of total_ms milliseconds,
+ * producing intermediate results, paced at interval of (approximately) run_ms.
+ */
+BMK_timedFnState_t* BMK_createTimedFnState(unsigned total_ms, unsigned run_ms);
+void BMK_resetTimedFnState(BMK_timedFnState_t* timedFnState, unsigned total_ms, unsigned run_ms);
+void BMK_freeTimedFnState(BMK_timedFnState_t* state);
+
+
+/* Tells if duration of all benchmark runs has exceeded total_ms
+ */
+int BMK_isCompleted_TimedFn(const BMK_timedFnState_t* timedFnState);
+
+
+/* BMK_benchTimedFn() :
+ * Similar to BMK_benchFunction(), most arguments being identical.
+ * Automatically determines `nbLoops` so that each result is regularly produced at interval of about run_ms.
+ * Note : minimum `nbLoops` is 1, therefore a run may last more than run_ms, and possibly even more than total_ms.
+ * Usage - initialize timedFnState, select benchmark duration (total_ms) and each measurement duration (run_ms)
+ *         call BMK_benchTimedFn() repetitively, each measurement is supposed to last about run_ms
+ *         Check if total time budget is spent or exceeded, using BMK_isCompleted_TimedFn()
+ */
+BMK_runOutcome_t BMK_benchTimedFn(
+                        BMK_timedFnState_t* timedFnState,
+                        BMK_benchFn_t benchFn, void* benchPayload,
+                        BMK_initFn_t initFn, void* initPayload,
+                        size_t blockCount,
+                        const void *const * srcBlockBuffers, const size_t* srcBlockSizes,
+                        void *const * dstBlockBuffers, const size_t* dstBlockCapacities,
+                        size_t* blockResults);


 #endif /* BENCH_H_121279284357 */
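The intended calling pattern, pieced together from the comments above and from the fullbench.c changes later in this diff (buffer variables are placeholders, and bench_zstd_compress is the hypothetical callback sketched earlier):

```c
/* Assumed available from the surrounding program (placeholders):
 *   const void* srcPtr;  size_t srcSize;
 *   void* dstPtr;        size_t dstCapacity;
 *   size_t bench_zstd_compress(const void*, size_t, void*, size_t, void*);
 */
BMK_timedFnState_t* const tfs = BMK_createTimedFnState(2000 /* total_ms */, 500 /* run_ms */);
for (;;) {
    BMK_runOutcome_t const outcome = BMK_benchTimedFn(tfs,
                bench_zstd_compress, NULL,    /* benchFn, benchPayload */
                NULL, NULL,                   /* initFn, initPayload */
                1,                            /* blockCount */
                &srcPtr, &srcSize,
                &dstPtr, &dstCapacity,
                NULL                          /* per-block results not collected */);
    if (!BMK_isSuccessful_runOutcome(outcome)) break;      /* benchmark error */
    {   BMK_runTime_t const rt = BMK_extract_runTime(outcome);
        (void)rt;   /* intermediate result: rt.nanoSecPerRun, rt.sumOfReturn */
    }
    if (BMK_isCompleted_TimedFn(tfs)) break;               /* total_ms budget spent */
}
BMK_freeTimedFnState(tfs);
```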
@@ -901,13 +901,13 @@ int main(int argCount, const char* argv[])
     if (cLevelLast > ZSTD_maxCLevel()) cLevelLast = ZSTD_maxCLevel();
     if (cLevelLast < cLevel) cLevelLast = cLevel;
     if (cLevelLast > cLevel)
-        DISPLAYLEVEL(2, "Benchmarking levels from %d to %d\n", cLevel, cLevelLast);
+        DISPLAYLEVEL(3, "Benchmarking levels from %d to %d\n", cLevel, cLevelLast);
     if(filenameIdx) {
         if(separateFiles) {
             unsigned i;
             for(i = 0; i < filenameIdx; i++) {
                 int c;
-                DISPLAYLEVEL(2, "Benchmarking %s \n", filenameTable[i]);
+                DISPLAYLEVEL(3, "Benchmarking %s \n", filenameTable[i]);
                 for(c = cLevel; c <= cLevelLast; c++) {
                     BMK_benchFilesAdvanced(&filenameTable[i], 1, dictFileName, c, &compressionParams, g_displayLevel, &benchParams);
                 }
@@ -200,7 +200,7 @@ zstreamtest-dll : $(ZSTDDIR)/common/xxhash.c  # xxh symbols not exposed from dll
 zstreamtest-dll : $(ZSTREAM_LOCAL_FILES)
 	$(CC) $(CPPFLAGS) $(CFLAGS) $(filter %.c,$^) $(LDFLAGS) -o $@$(EXT)

-paramgrill : DEBUGFLAGS = -DNDEBUG   # turn off assert() for speed measurements
+paramgrill : DEBUGFLAGS =   # turn off assert() by default for speed measurements
 paramgrill : $(ZSTD_FILES) $(PRGDIR)/bench.c $(PRGDIR)/datagen.c paramgrill.c
 	$(CC) $(FLAGS) $^ -lm -o $@$(EXT)
@@ -620,6 +620,8 @@ static size_t writeLiteralsBlock(U32* seed, frame_t* frame, size_t contentSize)
 }

 static inline void initSeqStore(seqStore_t *seqStore) {
+    seqStore->maxNbSeq = MAX_NB_SEQ;
+    seqStore->maxNbLit = ZSTD_BLOCKSIZE_MAX;
     seqStore->sequencesStart = SEQUENCE_BUFFER;
     seqStore->litStart = SEQUENCE_LITERAL_BUFFER;
     seqStore->llCode = SEQUENCE_LLCODE;
@@ -51,6 +51,8 @@
 #define COMPRESSIBILITY_DEFAULT 0.50
 static const size_t g_sampleSize = 10000000;

+#define TIMELOOP_NANOSEC      (1*1000000000ULL) /* 1 second */
+

 /*_************************************
 *  Macros
@ -92,52 +94,17 @@ static size_t BMK_findMaxMem(U64 requiredMem)
|
||||
return (size_t) requiredMem;
|
||||
}
|
||||
|
||||
/*_*******************************************************
|
||||
* Argument Parsing
|
||||
*********************************************************/
|
||||
|
||||
#define ERROR_OUT(msg) { DISPLAY("%s \n", msg); exit(1); }
|
||||
|
||||
static unsigned readU32FromChar(const char** stringPtr)
|
||||
{
|
||||
const char errorMsg[] = "error: numeric value too large";
|
||||
unsigned result = 0;
|
||||
while ((**stringPtr >='0') && (**stringPtr <='9')) {
|
||||
unsigned const max = (((unsigned)(-1)) / 10) - 1;
|
||||
if (result > max) ERROR_OUT(errorMsg);
|
||||
result *= 10, result += **stringPtr - '0', (*stringPtr)++ ;
|
||||
}
|
||||
if ((**stringPtr=='K') || (**stringPtr=='M')) {
|
||||
unsigned const maxK = ((unsigned)(-1)) >> 10;
|
||||
if (result > maxK) ERROR_OUT(errorMsg);
|
||||
result <<= 10;
|
||||
if (**stringPtr=='M') {
|
||||
if (result > maxK) ERROR_OUT(errorMsg);
|
||||
result <<= 10;
|
||||
}
|
||||
(*stringPtr)++; /* skip `K` or `M` */
|
||||
if (**stringPtr=='i') (*stringPtr)++;
|
||||
if (**stringPtr=='B') (*stringPtr)++;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
static unsigned longCommandWArg(const char** stringPtr, const char* longCommand)
|
||||
{
|
||||
size_t const comSize = strlen(longCommand);
|
||||
int const result = !strncmp(*stringPtr, longCommand, comSize);
|
||||
if (result) *stringPtr += comSize;
|
||||
return result;
|
||||
}
|
||||
|
||||
/*_*******************************************************
|
||||
* Benchmark wrappers
|
||||
*********************************************************/
|
||||
|
||||
|
||||
static ZSTD_CCtx* g_zcc = NULL;
|
||||
|
||||
size_t local_ZSTD_compress(const void* src, size_t srcSize, void* dst, size_t dstSize, void* buff2)
|
||||
static size_t
|
||||
local_ZSTD_compress(const void* src, size_t srcSize,
|
||||
void* dst, size_t dstSize,
|
||||
void* buff2)
|
||||
{
|
||||
ZSTD_parameters p;
|
||||
ZSTD_frameParameters f = { 1 /* contentSizeHeader*/, 0, 0 };
|
||||
@ -148,7 +115,9 @@ size_t local_ZSTD_compress(const void* src, size_t srcSize, void* dst, size_t ds
|
||||
}
|
||||
|
||||
static size_t g_cSize = 0;
|
||||
size_t local_ZSTD_decompress(const void* src, size_t srcSize, void* dst, size_t dstSize, void* buff2)
|
||||
static size_t local_ZSTD_decompress(const void* src, size_t srcSize,
|
||||
void* dst, size_t dstSize,
|
||||
void* buff2)
|
||||
{
|
||||
(void)src; (void)srcSize;
|
||||
return ZSTD_decompress(dst, dstSize, buff2, g_cSize);
|
||||
@ -174,7 +143,10 @@ size_t local_ZSTD_decodeSeqHeaders(const void* src, size_t srcSize, void* dst, s
|
||||
#endif
|
||||
|
||||
static ZSTD_CStream* g_cstream= NULL;
|
||||
size_t local_ZSTD_compressStream(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
|
||||
static size_t
|
||||
local_ZSTD_compressStream(const void* src, size_t srcSize,
|
||||
void* dst, size_t dstCapacity,
|
||||
void* buff2)
|
||||
{
|
||||
ZSTD_outBuffer buffOut;
|
||||
ZSTD_inBuffer buffIn;
|
||||
@ -194,7 +166,10 @@ size_t local_ZSTD_compressStream(const void* src, size_t srcSize, void* dst, siz
|
||||
return buffOut.pos;
|
||||
}
|
||||
|
||||
static size_t local_ZSTD_compress_generic_end(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
|
||||
static size_t
|
||||
local_ZSTD_compress_generic_end(const void* src, size_t srcSize,
|
||||
void* dst, size_t dstCapacity,
|
||||
void* buff2)
|
||||
{
|
||||
ZSTD_outBuffer buffOut;
|
||||
ZSTD_inBuffer buffIn;
|
||||
@ -209,7 +184,10 @@ static size_t local_ZSTD_compress_generic_end(const void* src, size_t srcSize, v
|
||||
return buffOut.pos;
|
||||
}
|
||||
|
||||
static size_t local_ZSTD_compress_generic_continue(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
|
||||
static size_t
|
||||
local_ZSTD_compress_generic_continue(const void* src, size_t srcSize,
|
||||
void* dst, size_t dstCapacity,
|
||||
void* buff2)
|
||||
{
|
||||
ZSTD_outBuffer buffOut;
|
||||
ZSTD_inBuffer buffIn;
|
||||
@ -225,7 +203,10 @@ static size_t local_ZSTD_compress_generic_continue(const void* src, size_t srcSi
|
||||
return buffOut.pos;
|
||||
}
|
||||
|
||||
static size_t local_ZSTD_compress_generic_T2_end(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
|
||||
static size_t
|
||||
local_ZSTD_compress_generic_T2_end(const void* src, size_t srcSize,
|
||||
void* dst, size_t dstCapacity,
|
||||
void* buff2)
|
||||
{
|
||||
ZSTD_outBuffer buffOut;
|
||||
ZSTD_inBuffer buffIn;
|
||||
@ -241,7 +222,10 @@ static size_t local_ZSTD_compress_generic_T2_end(const void* src, size_t srcSize
|
||||
return buffOut.pos;
|
||||
}
|
||||
|
||||
static size_t local_ZSTD_compress_generic_T2_continue(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
|
||||
static size_t
|
||||
local_ZSTD_compress_generic_T2_continue(const void* src, size_t srcSize,
|
||||
void* dst, size_t dstCapacity,
|
||||
void* buff2)
|
||||
{
|
||||
ZSTD_outBuffer buffOut;
|
||||
ZSTD_inBuffer buffIn;
|
||||
@ -259,7 +243,10 @@ static size_t local_ZSTD_compress_generic_T2_continue(const void* src, size_t sr
|
||||
}
|
||||
|
||||
static ZSTD_DStream* g_dstream= NULL;
|
||||
static size_t local_ZSTD_decompressStream(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
|
||||
static size_t
|
||||
local_ZSTD_decompressStream(const void* src, size_t srcSize,
|
||||
void* dst, size_t dstCapacity,
|
||||
void* buff2)
|
||||
{
|
||||
ZSTD_outBuffer buffOut;
|
||||
ZSTD_inBuffer buffIn;
|
||||
@ -276,7 +263,9 @@ static size_t local_ZSTD_decompressStream(const void* src, size_t srcSize, void*
|
||||
}
|
||||
|
||||
#ifndef ZSTD_DLL_IMPORT
|
||||
size_t local_ZSTD_compressContinue(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
|
||||
size_t local_ZSTD_compressContinue(const void* src, size_t srcSize,
|
||||
void* dst, size_t dstCapacity,
|
||||
void* buff2)
|
||||
{
|
||||
ZSTD_parameters p;
|
||||
ZSTD_frameParameters f = { 1 /* contentSizeHeader*/, 0, 0 };
|
||||
@ -287,7 +276,9 @@ size_t local_ZSTD_compressContinue(const void* src, size_t srcSize, void* dst, s
|
||||
}
|
||||
|
||||
#define FIRST_BLOCK_SIZE 8
|
||||
size_t local_ZSTD_compressContinue_extDict(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
|
||||
size_t local_ZSTD_compressContinue_extDict(const void* src, size_t srcSize,
|
||||
void* dst, size_t dstCapacity,
|
||||
void* buff2)
|
||||
{
|
||||
BYTE firstBlockBuf[FIRST_BLOCK_SIZE];
|
||||
|
||||
@ -298,15 +289,25 @@ size_t local_ZSTD_compressContinue_extDict(const void* src, size_t srcSize, void
|
||||
ZSTD_compressBegin_advanced(g_zcc, NULL, 0, p, srcSize);
|
||||
memcpy(firstBlockBuf, src, FIRST_BLOCK_SIZE);
|
||||
|
||||
{ size_t const compressResult = ZSTD_compressContinue(g_zcc, dst, dstCapacity, firstBlockBuf, FIRST_BLOCK_SIZE);
|
||||
if (ZSTD_isError(compressResult)) { DISPLAY("local_ZSTD_compressContinue_extDict error : %s\n", ZSTD_getErrorName(compressResult)); return compressResult; }
|
||||
{ size_t const compressResult = ZSTD_compressContinue(g_zcc,
|
||||
dst, dstCapacity,
|
||||
firstBlockBuf, FIRST_BLOCK_SIZE);
|
||||
if (ZSTD_isError(compressResult)) {
|
||||
DISPLAY("local_ZSTD_compressContinue_extDict error : %s\n",
|
||||
ZSTD_getErrorName(compressResult));
|
||||
return compressResult;
|
||||
}
|
||||
dst = (BYTE*)dst + compressResult;
|
||||
dstCapacity -= compressResult;
|
||||
}
|
||||
return ZSTD_compressEnd(g_zcc, dst, dstCapacity, (const BYTE*)src + FIRST_BLOCK_SIZE, srcSize - FIRST_BLOCK_SIZE);
|
||||
return ZSTD_compressEnd(g_zcc, dst, dstCapacity,
|
||||
(const BYTE*)src + FIRST_BLOCK_SIZE,
|
||||
srcSize - FIRST_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
size_t local_ZSTD_decompressContinue(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
|
||||
size_t local_ZSTD_decompressContinue(const void* src, size_t srcSize,
|
||||
void* dst, size_t dstCapacity,
|
||||
void* buff2)
|
||||
{
|
||||
size_t regeneratedSize = 0;
|
||||
const BYTE* ip = (const BYTE*)buff2;
|
||||
@ -314,7 +315,7 @@ size_t local_ZSTD_decompressContinue(const void* src, size_t srcSize, void* dst,
|
||||
BYTE* op = (BYTE*)dst;
|
||||
size_t remainingCapacity = dstCapacity;
|
||||
|
||||
(void)src; (void)srcSize;
|
||||
(void)src; (void)srcSize; /* unused */
|
||||
ZSTD_decompressBegin(g_zdc);
|
||||
while (ip < iend) {
|
||||
size_t const iSize = ZSTD_nextSrcSizeToDecompress(g_zdc);
|
||||
@ -333,14 +334,16 @@ size_t local_ZSTD_decompressContinue(const void* src, size_t srcSize, void* dst,
|
||||
/*_*******************************************************
|
||||
* Bench functions
|
||||
*********************************************************/
|
||||
static size_t benchMem(const void* src, size_t srcSize, U32 benchNb, int cLevel, ZSTD_compressionParameters* cparams)
|
||||
static size_t benchMem(U32 benchNb,
|
||||
const void* src, size_t srcSize,
|
||||
int cLevel, ZSTD_compressionParameters cparams)
|
||||
{
|
||||
BYTE* dstBuff;
|
||||
size_t dstBuffSize = ZSTD_compressBound(srcSize);
|
||||
void* buff2, *buff1;
|
||||
BYTE* dstBuff;
|
||||
void* dstBuff2;
|
||||
void* buff2;
|
||||
const char* benchName;
|
||||
BMK_benchFn_t benchFunction;
|
||||
BMK_customReturn_t r;
|
||||
int errorcode = 0;
|
||||
|
||||
/* Selection */
|
||||
@ -393,56 +396,56 @@ static size_t benchMem(const void* src, size_t srcSize, U32 benchNb, int cLevel,
|
||||
|
||||
/* Allocation */
|
||||
dstBuff = (BYTE*)malloc(dstBuffSize);
|
||||
buff2 = malloc(dstBuffSize);
|
||||
if ((!dstBuff) || (!buff2)) {
|
||||
dstBuff2 = malloc(dstBuffSize);
|
||||
if ((!dstBuff) || (!dstBuff2)) {
|
||||
DISPLAY("\nError: not enough memory!\n");
|
||||
free(dstBuff); free(buff2);
|
||||
free(dstBuff); free(dstBuff2);
|
||||
return 12;
|
||||
}
|
||||
buff1 = buff2;
|
||||
buff2 = dstBuff2;
|
||||
if (g_zcc==NULL) g_zcc = ZSTD_createCCtx();
|
||||
if (g_zdc==NULL) g_zdc = ZSTD_createDCtx();
|
||||
if (g_cstream==NULL) g_cstream = ZSTD_createCStream();
|
||||
if (g_dstream==NULL) g_dstream = ZSTD_createDStream();
|
||||
|
||||
/* DISPLAY("params: cLevel %d, wlog %d hlog %d clog %d slog %d slen %d tlen %d strat %d \n"
|
||||
, cLevel, cparams->windowLog, cparams->hashLog, cparams->chainLog, cparams->searchLog,
|
||||
/* DISPLAY("params: cLevel %d, wlog %d hlog %d clog %d slog %d slen %d tlen %d strat %d \n",
|
||||
cLevel, cparams->windowLog, cparams->hashLog, cparams->chainLog, cparams->searchLog,
|
||||
cparams->searchLength, cparams->targetLength, cparams->strategy); */
|
||||
|
||||
ZSTD_CCtx_setParameter(g_zcc, ZSTD_p_compressionLevel, cLevel);
|
||||
ZSTD_CCtx_setParameter(g_zcc, ZSTD_p_windowLog, cparams->windowLog);
|
||||
ZSTD_CCtx_setParameter(g_zcc, ZSTD_p_hashLog, cparams->hashLog);
|
||||
ZSTD_CCtx_setParameter(g_zcc, ZSTD_p_chainLog, cparams->chainLog);
|
||||
ZSTD_CCtx_setParameter(g_zcc, ZSTD_p_searchLog, cparams->searchLog);
|
||||
ZSTD_CCtx_setParameter(g_zcc, ZSTD_p_minMatch, cparams->searchLength);
|
||||
ZSTD_CCtx_setParameter(g_zcc, ZSTD_p_targetLength, cparams->targetLength);
|
||||
ZSTD_CCtx_setParameter(g_zcc, ZSTD_p_compressionStrategy, cparams->strategy);
|
||||
ZSTD_CCtx_setParameter(g_zcc, ZSTD_p_windowLog, cparams.windowLog);
|
||||
ZSTD_CCtx_setParameter(g_zcc, ZSTD_p_hashLog, cparams.hashLog);
|
||||
ZSTD_CCtx_setParameter(g_zcc, ZSTD_p_chainLog, cparams.chainLog);
|
||||
ZSTD_CCtx_setParameter(g_zcc, ZSTD_p_searchLog, cparams.searchLog);
|
||||
ZSTD_CCtx_setParameter(g_zcc, ZSTD_p_minMatch, cparams.searchLength);
|
||||
ZSTD_CCtx_setParameter(g_zcc, ZSTD_p_targetLength, cparams.targetLength);
|
||||
ZSTD_CCtx_setParameter(g_zcc, ZSTD_p_compressionStrategy, cparams.strategy);
|
||||
|
||||
|
||||
ZSTD_CCtx_setParameter(g_cstream, ZSTD_p_compressionLevel, cLevel);
|
||||
ZSTD_CCtx_setParameter(g_cstream, ZSTD_p_windowLog, cparams->windowLog);
|
||||
ZSTD_CCtx_setParameter(g_cstream, ZSTD_p_hashLog, cparams->hashLog);
|
||||
ZSTD_CCtx_setParameter(g_cstream, ZSTD_p_chainLog, cparams->chainLog);
|
||||
ZSTD_CCtx_setParameter(g_cstream, ZSTD_p_searchLog, cparams->searchLog);
|
||||
ZSTD_CCtx_setParameter(g_cstream, ZSTD_p_minMatch, cparams->searchLength);
|
||||
ZSTD_CCtx_setParameter(g_cstream, ZSTD_p_targetLength, cparams->targetLength);
|
||||
ZSTD_CCtx_setParameter(g_cstream, ZSTD_p_compressionStrategy, cparams->strategy);
|
||||
ZSTD_CCtx_setParameter(g_cstream, ZSTD_p_windowLog, cparams.windowLog);
|
||||
ZSTD_CCtx_setParameter(g_cstream, ZSTD_p_hashLog, cparams.hashLog);
|
||||
ZSTD_CCtx_setParameter(g_cstream, ZSTD_p_chainLog, cparams.chainLog);
|
||||
ZSTD_CCtx_setParameter(g_cstream, ZSTD_p_searchLog, cparams.searchLog);
|
||||
ZSTD_CCtx_setParameter(g_cstream, ZSTD_p_minMatch, cparams.searchLength);
|
||||
ZSTD_CCtx_setParameter(g_cstream, ZSTD_p_targetLength, cparams.targetLength);
|
||||
ZSTD_CCtx_setParameter(g_cstream, ZSTD_p_compressionStrategy, cparams.strategy);
|
||||
|
||||
/* Preparation */
|
||||
switch(benchNb)
|
||||
{
|
||||
case 1:
|
||||
buff2 = (void*)cparams;
|
||||
buff2 = &cparams;
|
||||
break;
|
||||
case 2:
|
||||
g_cSize = ZSTD_compress(buff2, dstBuffSize, src, srcSize, cLevel);
|
||||
break;
|
||||
#ifndef ZSTD_DLL_IMPORT
|
||||
case 11:
|
||||
buff2 = (void*)cparams;
|
||||
buff2 = &cparams;
|
||||
break;
|
||||
case 12:
|
||||
buff2 = (void*)cparams;
|
||||
buff2 = &cparams;
|
||||
break;
|
||||
case 13 :
|
||||
g_cSize = ZSTD_compress(buff2, dstBuffSize, src, srcSize, cLevel);
|
||||
@ -495,7 +498,7 @@ static size_t benchMem(const void* src, size_t srcSize, U32 benchNb, int cLevel,
|
||||
goto _cleanOut;
|
||||
#endif
|
||||
case 41 :
|
||||
buff2 = (void*)cparams;
|
||||
buff2 = &cparams;
|
||||
break;
|
||||
case 42 :
|
||||
g_cSize = ZSTD_compress(buff2, dstBuffSize, src, srcSize, cLevel);
|
||||
@ -507,29 +510,50 @@ static size_t benchMem(const void* src, size_t srcSize, U32 benchNb, int cLevel,
|
||||
default : ;
|
||||
}
|
||||
|
||||
|
||||
/* warming up memory */
|
||||
/* warming up dstBuff */
|
||||
{ size_t i; for (i=0; i<dstBuffSize; i++) dstBuff[i]=(BYTE)i; }
|
||||
|
||||
|
||||
/* benchmark loop */
|
||||
{
|
||||
void* dstBuffv = (void*)dstBuff;
|
||||
r = BMK_benchFunction(benchFunction, buff2,
|
||||
NULL, NULL, 1, &src, &srcSize,
|
||||
&dstBuffv, &dstBuffSize, NULL, g_nbIterations);
|
||||
if(r.error) {
|
||||
DISPLAY("ERROR %d ! ! \n", r.error);
|
||||
errorcode = r.error;
|
||||
{ BMK_timedFnState_t* const tfs = BMK_createTimedFnState(g_nbIterations * 1000, 1000);
|
||||
BMK_runTime_t bestResult;
|
||||
bestResult.sumOfReturn = 0;
|
||||
bestResult.nanoSecPerRun = (unsigned long long)(-1LL);
|
||||
assert(tfs != NULL);
|
||||
for (;;) {
|
||||
void* const dstBuffv = dstBuff;
|
||||
BMK_runOutcome_t const bOutcome =
|
||||
BMK_benchTimedFn( tfs,
|
||||
benchFunction, buff2,
|
||||
NULL, NULL, /* initFn */
|
||||
1, /* blockCount */
|
||||
&src, &srcSize,
|
||||
&dstBuffv, &dstBuffSize,
|
||||
NULL);
|
||||
|
||||
if (!BMK_isSuccessful_runOutcome(bOutcome)) {
|
||||
DISPLAY("ERROR benchmarking function ! ! \n");
|
||||
errorcode = 1;
|
||||
goto _cleanOut;
|
||||
}
|
||||
|
||||
DISPLAY("%2u#Speed: %f MB/s - Size: %f MB - %s\n", benchNb, (double)srcSize / r.result.nanoSecPerRun * 1000, (double)r.result.sumOfReturn / 1000000, benchName);
|
||||
{ BMK_runTime_t const newResult = BMK_extract_runTime(bOutcome);
|
||||
if (newResult.nanoSecPerRun < bestResult.nanoSecPerRun )
|
||||
bestResult.nanoSecPerRun = newResult.nanoSecPerRun;
|
||||
DISPLAY("\r%2u#%-29.29s:%8.1f MB/s (%8u) ",
|
||||
benchNb, benchName,
|
||||
(double)srcSize * TIMELOOP_NANOSEC / bestResult.nanoSecPerRun / MB_UNIT,
|
||||
(unsigned)newResult.sumOfReturn );
|
||||
}
|
||||
|
||||
if ( BMK_isCompleted_TimedFn(tfs) ) break;
|
||||
}
|
||||
BMK_freeTimedFnState(tfs);
|
||||
}
|
||||
DISPLAY("\n");
|
||||
|
||||
_cleanOut:
|
||||
free(buff1);
|
||||
free(dstBuff);
|
||||
free(dstBuff2);
|
||||
ZSTD_freeCCtx(g_zcc); g_zcc=NULL;
|
||||
ZSTD_freeDCtx(g_zdc); g_zdc=NULL;
|
||||
ZSTD_freeCStream(g_cstream); g_cstream=NULL;
|
||||
@ -538,65 +562,70 @@ _cleanOut:
|
||||
}
|
||||
|
||||
|
||||
static int benchSample(U32 benchNb, int cLevel, ZSTD_compressionParameters* cparams)
|
||||
static int benchSample(U32 benchNb,
|
||||
int cLevel, ZSTD_compressionParameters cparams)
|
||||
{
|
||||
size_t const benchedSize = g_sampleSize;
|
||||
const char* name = "Sample 10MiB";
|
||||
const char* const name = "Sample 10MiB";
|
||||
|
||||
/* Allocation */
|
||||
void* origBuff = malloc(benchedSize);
|
||||
void* const origBuff = malloc(benchedSize);
|
||||
if (!origBuff) { DISPLAY("\nError: not enough memory!\n"); return 12; }
|
||||
|
||||
/* Fill buffer */
|
||||
RDG_genBuffer(origBuff, benchedSize, g_compressibility, 0.0, 0);
|
||||
|
||||
/* bench */
|
||||
DISPLAY("\r%79s\r", "");
|
||||
DISPLAY("\r%70s\r", "");
|
||||
DISPLAY(" %s : \n", name);
|
||||
if (benchNb)
|
||||
benchMem(origBuff, benchedSize, benchNb, cLevel, cparams);
|
||||
else
|
||||
for (benchNb=0; benchNb<100; benchNb++) benchMem(origBuff, benchedSize, benchNb, cLevel, cparams);
|
||||
if (benchNb) {
|
||||
benchMem(benchNb, origBuff, benchedSize, cLevel, cparams);
|
||||
} else { /* 0 == run all tests */
|
||||
for (benchNb=0; benchNb<100; benchNb++) {
|
||||
benchMem(benchNb, origBuff, benchedSize, cLevel, cparams);
|
||||
} }
|
||||
|
||||
free(origBuff);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int benchFiles(const char** fileNamesTable, const int nbFiles, U32 benchNb, int cLevel, ZSTD_compressionParameters* cparams)
|
||||
static int benchFiles(U32 benchNb,
|
||||
const char** fileNamesTable, const int nbFiles,
|
||||
int cLevel, ZSTD_compressionParameters cparams)
|
||||
{
|
||||
/* Loop for each file */
|
||||
int fileIdx;
|
||||
for (fileIdx=0; fileIdx<nbFiles; fileIdx++) {
|
||||
const char* const inFileName = fileNamesTable[fileIdx];
|
||||
FILE* const inFile = fopen( inFileName, "rb" );
|
||||
U64 inFileSize;
|
||||
size_t benchedSize;
|
||||
void* origBuff;
|
||||
|
||||
/* Check file existence */
|
||||
if (inFile==NULL) { DISPLAY( "Pb opening %s\n", inFileName); return 11; }
|
||||
|
||||
/* Memory allocation & restrictions */
|
||||
inFileSize = UTIL_getFileSize(inFileName);
|
||||
{ U64 const inFileSize = UTIL_getFileSize(inFileName);
|
||||
if (inFileSize == UTIL_FILESIZE_UNKNOWN) {
|
||||
DISPLAY( "Cannot measure size of %s\n", inFileName);
|
||||
fclose(inFile);
|
||||
return 11;
|
||||
}
|
||||
benchedSize = BMK_findMaxMem(inFileSize*3) / 3;
|
||||
if ((U64)benchedSize > inFileSize) benchedSize = (size_t)inFileSize;
|
||||
if (benchedSize < inFileSize)
|
||||
DISPLAY("Not enough memory for '%s' full size; testing %u MB only...\n", inFileName, (U32)(benchedSize>>20));
|
||||
if ((U64)benchedSize > inFileSize)
|
||||
benchedSize = (size_t)inFileSize;
|
||||
if ((U64)benchedSize < inFileSize) {
|
||||
DISPLAY("Not enough memory for '%s' full size; testing %u MB only... \n",
|
||||
inFileName, (U32)(benchedSize>>20));
|
||||
} }
|
||||
|
||||
/* Alloc */
|
||||
origBuff = malloc(benchedSize);
|
||||
{ void* const origBuff = malloc(benchedSize);
|
||||
if (!origBuff) { DISPLAY("\nError: not enough memory!\n"); fclose(inFile); return 12; }
|
||||
|
||||
/* Fill input buffer */
|
||||
DISPLAY("Loading %s... \r", inFileName);
|
||||
{
|
||||
size_t readSize = fread(origBuff, 1, benchedSize, inFile);
|
||||
{ size_t const readSize = fread(origBuff, 1, benchedSize, inFile);
|
||||
fclose(inFile);
|
||||
if (readSize != benchedSize) {
|
||||
DISPLAY("\nError: problem reading file '%s' !! \n", inFileName);
|
||||
@ -605,20 +634,66 @@ static int benchFiles(const char** fileNamesTable, const int nbFiles, U32 benchN
|
||||
} }
|
||||
|
||||
/* bench */
|
||||
DISPLAY("\r%79s\r", "");
|
||||
DISPLAY("\r%70s\r", ""); /* blank line */
|
||||
DISPLAY(" %s : \n", inFileName);
|
||||
if (benchNb)
|
||||
benchMem(origBuff, benchedSize, benchNb, cLevel, cparams);
|
||||
else
|
||||
for (benchNb=0; benchNb<100; benchNb++) benchMem(origBuff, benchedSize, benchNb, cLevel, cparams);
|
||||
if (benchNb) {
|
||||
benchMem(benchNb, origBuff, benchedSize, cLevel, cparams);
|
||||
} else {
|
||||
for (benchNb=0; benchNb<100; benchNb++) {
|
||||
benchMem(benchNb, origBuff, benchedSize, cLevel, cparams);
|
||||
} }
|
||||
|
||||
free(origBuff);
|
||||
}
|
||||
} }
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/*_*******************************************************
|
||||
* Argument Parsing
|
||||
*********************************************************/
|
||||
|
||||
#define ERROR_OUT(msg) { DISPLAY("%s \n", msg); exit(1); }
|
||||
|
||||
static unsigned readU32FromChar(const char** stringPtr)
|
||||
{
|
||||
const char errorMsg[] = "error: numeric value too large";
|
||||
unsigned result = 0;
|
||||
while ((**stringPtr >='0') && (**stringPtr <='9')) {
|
||||
unsigned const max = (((unsigned)(-1)) / 10) - 1;
|
||||
if (result > max) ERROR_OUT(errorMsg);
|
||||
result *= 10, result += **stringPtr - '0', (*stringPtr)++ ;
|
||||
}
|
||||
if ((**stringPtr=='K') || (**stringPtr=='M')) {
|
||||
unsigned const maxK = ((unsigned)(-1)) >> 10;
|
||||
if (result > maxK) ERROR_OUT(errorMsg);
|
||||
result <<= 10;
|
||||
if (**stringPtr=='M') {
|
||||
if (result > maxK) ERROR_OUT(errorMsg);
|
||||
result <<= 10;
|
||||
}
|
||||
(*stringPtr)++; /* skip `K` or `M` */
|
||||
if (**stringPtr=='i') (*stringPtr)++;
|
||||
if (**stringPtr=='B') (*stringPtr)++;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
static unsigned longCommandWArg(const char** stringPtr, const char* longCommand)
|
||||
{
|
||||
size_t const comSize = strlen(longCommand);
|
||||
int const result = !strncmp(*stringPtr, longCommand, comSize);
|
||||
if (result) *stringPtr += comSize;
|
||||
return result;
|
||||
}

/*_*******************************************************
* Command line
*********************************************************/

static int usage(const char* exename)
{
DISPLAY( "Usage :\n");
@ -649,8 +724,8 @@ static int badusage(const char* exename)

int main(int argc, const char** argv)
{
int i, filenamesStart=0, result;
const char* exename = argv[0];
int argNb, filenamesStart=0, result;
const char* const exename = argv[0];
const char* input_filename = NULL;
U32 benchNb = 0, main_pause = 0;
int cLevel = DEFAULT_CLEVEL;
@ -659,8 +734,8 @@ int main(int argc, const char** argv)
DISPLAY(WELCOME_MESSAGE);
if (argc<1) return badusage(exename);

for(i=1; i<argc; i++) {
const char* argument = argv[i];
for (argNb=1; argNb<argc; argNb++) {
const char* argument = argv[argNb];
assert(argument != NULL);

if (longCommandWArg(&argument, "--zstd=")) {
@ -677,12 +752,14 @@ int main(int argc, const char** argv)
return 1;
}

/* check end of string */
if (argument[0] != 0) {
DISPLAY("invalid --zstd= format \n");
return 1; // check the end of string
return 1;
} else {
continue;
}

} else if (argument[0]=='-') { /* Commands (note : aggregated commands are allowed) */
argument++;
while (argument[0]!=0) {
@ -698,35 +775,27 @@ int main(int argc, const char** argv)

/* Select specific algorithm to bench */
case 'b':
{
argument++;
benchNb = readU32FromChar(&argument);
break;
}

/* Modify Nb Iterations */
case 'i':
{
argument++;
BMK_SetNbIterations((int)readU32FromChar(&argument));
}
break;

/* Select compressibility of synthetic sample */
case 'P':
{ argument++;
argument++;
g_compressibility = (double)readU32FromChar(&argument) / 100.;
}
break;
case 'l':
{ argument++;
argument++;
cLevel = readU32FromChar(&argument);
cparams = ZSTD_getCParams(cLevel, 0, 0);
}
break;

/* Unknown command */
default : return badusage(exename);
}
@ -735,15 +804,15 @@ int main(int argc, const char** argv)
}

/* first provided filename is input */
if (!input_filename) { input_filename=argument; filenamesStart=i; continue; }
if (!input_filename) { input_filename=argument; filenamesStart=argNb; continue; }
}

if (filenamesStart==0) /* no input file */
result = benchSample(benchNb, cLevel, &cparams);
result = benchSample(benchNb, cLevel, cparams);
else
result = benchFiles(argv+filenamesStart, argc-filenamesStart, benchNb, cLevel, &cparams);
result = benchFiles(benchNb, argv+filenamesStart, argc-filenamesStart, cLevel, cparams);

if (main_pause) { int unused; printf("press enter...\n"); unused = getchar(); (void)unused; }

@ -1375,6 +1375,24 @@ static int basicUnitTests(U32 seed, double compressibility)
((BYTE*)CNBuffer)[i+1] = _3BytesSeqs[id][1];
((BYTE*)CNBuffer)[i+2] = _3BytesSeqs[id][2];
} } }
DISPLAYLEVEL(3, "test%3i : growing nbSeq : ", testNb++);
{ ZSTD_CCtx* const cctx = ZSTD_createCCtx();
size_t const maxNbSeq = _3BYTESTESTLENGTH / 3;
size_t const bound = ZSTD_compressBound(_3BYTESTESTLENGTH);
size_t nbSeq = 1;
while (nbSeq <= maxNbSeq) {
CHECK(ZSTD_compressCCtx(cctx, compressedBuffer, bound, CNBuffer, nbSeq * 3, 19));
/* Check every sequence for the first 100, then skip more rapidly. */
if (nbSeq < 100) {
++nbSeq;
} else {
nbSeq += (nbSeq >> 2);
}
}
ZSTD_freeCCtx(cctx);
}
DISPLAYLEVEL(3, "OK \n");

DISPLAYLEVEL(3, "test%3i : compress lots 3-bytes sequences : ", testNb++);
{ CHECK_V(r, ZSTD_compress(compressedBuffer, ZSTD_compressBound(_3BYTESTESTLENGTH),
CNBuffer, _3BYTESTESTLENGTH, 19) );
@ -1386,8 +1404,26 @@ static int basicUnitTests(U32 seed, double compressibility)
if (r != _3BYTESTESTLENGTH) goto _output_error; }
DISPLAYLEVEL(3, "OK \n");

DISPLAYLEVEL(3, "test%3i : incompressible data and ill suited dictionary : ", testNb++);

DISPLAYLEVEL(3, "test%3i : growing literals buffer : ", testNb++);
RDG_genBuffer(CNBuffer, CNBuffSize, 0.0, 0.1, seed);
{ ZSTD_CCtx* const cctx = ZSTD_createCCtx();
size_t const bound = ZSTD_compressBound(CNBuffSize);
size_t size = 1;
while (size <= CNBuffSize) {
CHECK(ZSTD_compressCCtx(cctx, compressedBuffer, bound, CNBuffer, size, 3));
/* Check every size for the first 100, then skip more rapidly. */
if (size < 100) {
++size;
} else {
size += (size >> 2);
}
}
ZSTD_freeCCtx(cctx);
}
DISPLAYLEVEL(3, "OK \n");

DISPLAYLEVEL(3, "test%3i : incompressible data and ill suited dictionary : ", testNb++);
{ /* Train a dictionary on low characters */
size_t dictSize = 16 KB;
void* const dictBuffer = malloc(dictSize);

@ -27,7 +27,8 @@
#include "util.h"
#include "bench.h"
#include "zstd_errors.h"
#include "zstd_internal.h"
#include "zstd_internal.h" /* should not be needed */

/*-************************************
* Constants
@ -46,6 +47,7 @@ static const size_t maxMemory = (sizeof(size_t)==4) ? (2 GB - 64 MB) : (size_t
static const U64 g_maxVariationTime = 60 * SEC_TO_MICRO;
static const int g_maxNbVariations = 64;

/*-************************************
* Macros
**************************************/
@ -90,9 +92,9 @@ static const char* g_stratName[ZSTD_btultra+1] = {
"ZSTD_greedy ", "ZSTD_lazy ", "ZSTD_lazy2 ",
"ZSTD_btlazy2 ", "ZSTD_btopt ", "ZSTD_btultra "};

static const U32 tlen_table[TLEN_RANGE] = { 0, 1, 2, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 256, 512, 999 };

/*-************************************
* Setup for Adding new params
**************************************/
@ -212,6 +214,7 @@ static void displayParamVal(FILE* f, varInds_t param, U32 value, int width) {
}
}

/*-************************************
* Benchmark Parameters/Global Variables
**************************************/
@ -241,7 +244,7 @@ static U32 g_noSeed = 0;
static paramValues_t g_params; /* Initialized at the beginning of main w/ emptyParams() function */
static double g_ratioMultiplier = 5.;
static U32 g_strictness = PARAM_UNSET; /* range 1 - 100, measure of how strict */
static BMK_result_t g_lvltarget;
static BMK_benchResult_t g_lvltarget;

typedef enum {
directMap,
@ -258,7 +261,7 @@ typedef struct {
} memoTable_t;

typedef struct {
BMK_result_t result;
BMK_benchResult_t result;
paramValues_t params;
} winnerInfo_t;

@ -284,6 +287,7 @@ static winner_ll_node* g_winners; /* linked list sorted ascending by cSize & cSp
* g_clockGranularity
*/

/*-*******************************************************
* General Util Functions
*********************************************************/
@ -464,7 +468,7 @@ static void paramVariation(paramValues_t* ptr, memoTable_t* mtAll, const U32 nbC
static paramValues_t randomParams(void)
{
varInds_t v; paramValues_t p;
for(v = 0; v <= NUM_PARAMS; v++) {
for(v = 0; v < NUM_PARAMS; v++) {
p.vals[v] = rangeMap(v, FUZ_rand(&g_rand) % rangetable[v]);
}
return p;
@ -497,8 +501,11 @@ static void findClockGranularity(void) {
**************************************/

/* checks results are feasible */
static int feasible(const BMK_result_t results, const constraint_t target) {
return (results.cSpeed >= target.cSpeed) && (results.dSpeed >= target.dSpeed) && (results.cMem <= target.cMem) && (!g_optmode || results.cSize <= g_lvltarget.cSize);
static int feasible(const BMK_benchResult_t results, const constraint_t target) {
return (results.cSpeed >= target.cSpeed)
&& (results.dSpeed >= target.dSpeed)
&& (results.cMem <= target.cMem)
&& (!g_optmode || results.cSize <= g_lvltarget.cSize);
}

/* hill climbing value for part 1 */
@ -507,7 +514,7 @@ static int feasible(const BMK_result_t results, const constraint_t target) {
* bonus to exceeding the constraint value. We also give linear ratio for compression ratio.
* The constant factors are experimental.
*/
static double resultScore(const BMK_result_t res, const size_t srcSize, const constraint_t target) {
static double resultScore(const BMK_benchResult_t res, const size_t srcSize, const constraint_t target) {
double cs = 0., ds = 0., rt, cm = 0.;
const double r1 = 1, r2 = 0.1, rtr = 0.5;
double ret;
@ -523,7 +530,7 @@ static double resultScore(const BMK_result_t res, const size_t srcSize, const co
}

/* calculates normalized squared euclidean distance of result1 if it is in the first quadrant relative to lvlRes */
static double resultDistLvl(const BMK_result_t result1, const BMK_result_t lvlRes) {
static double resultDistLvl(const BMK_benchResult_t result1, const BMK_benchResult_t lvlRes) {
double normalizedCSpeedGain1 = (result1.cSpeed / lvlRes.cSpeed) - 1;
double normalizedRatioGain1 = ((double)lvlRes.cSize / result1.cSize) - 1;
if(normalizedRatioGain1 < 0 || normalizedCSpeedGain1 < 0) {
@ -533,7 +540,7 @@ static double resultDistLvl(const BMK_result_t result1, const BMK_result_t lvlRe
}

/* return true if r2 strictly better than r1 */
static int compareResultLT(const BMK_result_t result1, const BMK_result_t result2, const constraint_t target, size_t srcSize) {
static int compareResultLT(const BMK_benchResult_t result1, const BMK_benchResult_t result2, const constraint_t target, size_t srcSize) {
if(feasible(result1, target) && feasible(result2, target)) {
if(g_optmode) {
return resultDistLvl(result1, g_lvltarget) < resultDistLvl(result2, g_lvltarget);
@ -637,7 +644,8 @@ static void BMK_translateAdvancedParams(FILE* f, const paramValues_t params) {
fprintf(f, "\n");
}

static void BMK_displayOneResult(FILE* f, winnerInfo_t res, const size_t srcSize) {
static void BMK_displayOneResult(FILE* f, winnerInfo_t res, const size_t srcSize)
{
varInds_t v;
int first = 1;
res.params = cParamUnsetMin(res.params);
@ -649,13 +657,19 @@ static void BMK_displayOneResult(FILE* f, winnerInfo_t res, const size_t srcSize
first = 0;
}

{ double const ratio = res.result.cSize ?
(double)srcSize / res.result.cSize : 0;
double const cSpeedMBps = (double)res.result.cSpeed / MB_UNIT;
double const dSpeedMBps = (double)res.result.dSpeed / MB_UNIT;

fprintf(f, " }, /* R:%5.3f at %5.1f MB/s - %5.1f MB/s */\n",
(double)srcSize / res.result.cSize, (double)res.result.cSpeed / (1 MB), (double)res.result.dSpeed / (1 MB));
ratio, cSpeedMBps, dSpeedMBps);
}
}

/* Writes to f the results of a parameter benchmark */
/* when used with --optimize, will only print results better than previously discovered */
static void BMK_printWinner(FILE* f, const int cLevel, const BMK_result_t result, const paramValues_t params, const size_t srcSize)
static void BMK_printWinner(FILE* f, const int cLevel, const BMK_benchResult_t result, const paramValues_t params, const size_t srcSize)
{
char lvlstr[15] = "Custom Level";
winnerInfo_t w;
@ -687,7 +701,7 @@ static void BMK_printWinner(FILE* f, const int cLevel, const BMK_result_t result
#define SPEED_RESULT 4
#define SIZE_RESULT 5
/* maybe have epsilon-eq to limit table size? */
static int speedSizeCompare(const BMK_result_t r1, const BMK_result_t r2) {
static int speedSizeCompare(const BMK_benchResult_t r1, const BMK_benchResult_t r2) {
if(r1.cSpeed < r2.cSpeed) {
if(r1.cSize >= r2.cSize) {
return BETTER_RESULT;
@ -704,7 +718,7 @@ static int speedSizeCompare(const BMK_result_t r1, const BMK_result_t r2) {
/* 0 for insertion, 1 for no insert */
/* maintain invariant speedSizeCompare(n, n->next) = SPEED_RESULT */
static int insertWinner(const winnerInfo_t w, const constraint_t targetConstraints) {
BMK_result_t r = w.result;
BMK_benchResult_t r = w.result;
winner_ll_node* cur_node = g_winners;
/* first node to insert */
if(!feasible(r, targetConstraints)) {
@ -797,7 +811,7 @@ static int insertWinner(const winnerInfo_t w, const constraint_t targetConstrain
}
}

static void BMK_printWinnerOpt(FILE* f, const U32 cLevel, const BMK_result_t result, const paramValues_t params, const constraint_t targetConstraints, const size_t srcSize)
static void BMK_printWinnerOpt(FILE* f, const U32 cLevel, const BMK_benchResult_t result, const paramValues_t params, const constraint_t targetConstraints, const size_t srcSize)
{
/* global winner used for constraints */
/* cSize, cSpeed, dSpeed, cMem */
@ -833,7 +847,7 @@ static void BMK_printWinnerOpt(FILE* f, const U32 cLevel, const BMK_result_t res
}
fprintf(f, "================================\n");
fprintf(f, "Level Bounds: R: > %.3f AND C: < %.1f MB/s \n\n",
(double)srcSize / g_lvltarget.cSize, (double)g_lvltarget.cSpeed / (1 MB));
(double)srcSize / g_lvltarget.cSize, (double)g_lvltarget.cSpeed / MB_UNIT);

fprintf(f, "Overall Winner: \n");
@ -871,7 +885,7 @@ static void BMK_printWinners(FILE* f, const winnerInfo_t* winners, const size_t
*********************************************************/

typedef struct {
ZSTD_CCtx* ctx;
ZSTD_CCtx* cctx;
const void* dictBuffer;
size_t dictBufferSize;
int cLevel;
@ -881,15 +895,15 @@ typedef struct {
static size_t local_initCCtx(void* payload) {
const BMK_initCCtxArgs* ag = (const BMK_initCCtxArgs*)payload;
varInds_t i;
ZSTD_CCtx_reset(ag->ctx);
ZSTD_CCtx_resetParameters(ag->ctx);
ZSTD_CCtx_setParameter(ag->ctx, ZSTD_p_compressionLevel, ag->cLevel);
ZSTD_CCtx_reset(ag->cctx);
ZSTD_CCtx_resetParameters(ag->cctx);
ZSTD_CCtx_setParameter(ag->cctx, ZSTD_p_compressionLevel, ag->cLevel);

for(i = 0; i < NUM_PARAMS; i++) {
if(ag->comprParams->vals[i] != PARAM_UNSET)
ZSTD_CCtx_setParameter(ag->ctx, cctxSetParamTable[i], ag->comprParams->vals[i]);
ZSTD_CCtx_setParameter(ag->cctx, cctxSetParamTable[i], ag->comprParams->vals[i]);
}
ZSTD_CCtx_loadDictionary(ag->ctx, ag->dictBuffer, ag->dictBufferSize);
ZSTD_CCtx_loadDictionary(ag->cctx, ag->dictBuffer, ag->dictBufferSize);

return 0;
}
@ -1378,16 +1392,17 @@ static void randomConstrainedParams(paramValues_t* pc, const memoTable_t* memoTa
/* Replicate functionality of benchMemAdvanced, but with pre-split src / dst buffers */
/* The purpose is so that sufficient information is returned so that a decompression call to benchMemInvertible is possible */
/* BMK_benchMemAdvanced(srcBuffer,srcSize, dstBuffer, dstSize, fileSizes, nbFiles, 0, &cParams, dictBuffer, dictSize, ctx, dctx, 0, "File", &adv); */
/* nbSeconds used in same way as in BMK_advancedParams_t, as nbIters when in iterMode */

/* nbSeconds used in same way as in BMK_advancedParams_t */
/* if in decodeOnly, then srcPtr's will be compressed blocks, and uncompressedBlocks will be written to dstPtrs */
/* dictionary nullable, nothing else though. */
static BMK_return_t BMK_benchMemInvertible(const buffers_t buf, const contexts_t ctx,
const int cLevel, const paramValues_t* comprParams,
const BMK_mode_t mode, const BMK_loopMode_t loopMode, const unsigned nbSeconds) {

/* note : it would be better if this function was in bench.c, sharing code with benchMemAdvanced(), since it's technically a part of it */
static BMK_benchOutcome_t
BMK_benchMemInvertible( buffers_t buf, contexts_t ctx,
int cLevel, const paramValues_t* comprParams,
BMK_mode_t mode, unsigned nbSeconds)
{
U32 i;
BMK_return_t results = { { 0, 0., 0., 0 }, 0 } ;
BMK_benchResult_t bResult;
const void *const *const srcPtrs = (const void *const *const)buf.srcPtrs;
size_t const *const srcSizes = buf.srcSizes;
void** const dstPtrs = buf.dstPtrs;
@ -1402,6 +1417,9 @@ static BMK_return_t BMK_benchMemInvertible(const buffers_t buf, const contexts_t
ZSTD_CCtx* cctx = ctx.cctx;
ZSTD_DCtx* dctx = ctx.dctx;

/* init */
memset(&bResult, 0, sizeof(bResult));

/* warmimg up memory */
for (i = 0; i < buf.nbBlocks; i++) {
if (mode != BMK_decodeOnly) {
@ -1414,9 +1432,13 @@ static BMK_return_t BMK_benchMemInvertible(const buffers_t buf, const contexts_t
/* Bench */
{
/* init args */
int compressionCompleted = (mode == BMK_decodeOnly);
int decompressionCompleted = (mode == BMK_compressOnly);
BMK_timedFnState_t* timeStateCompress = BMK_createTimedFnState(nbSeconds * 1000, 1000);
BMK_timedFnState_t* timeStateDecompress = BMK_createTimedFnState(nbSeconds * 1000, 1000);
BMK_initCCtxArgs cctxprep;
BMK_initDCtxArgs dctxprep;
cctxprep.ctx = cctx;
cctxprep.cctx = cctx;
cctxprep.dictBuffer = dictBuffer;
cctxprep.dictBufferSize = dictBufferSize;
cctxprep.cLevel = cLevel;
@ -1425,130 +1447,115 @@ static BMK_return_t BMK_benchMemInvertible(const buffers_t buf, const contexts_t
dctxprep.dictBuffer = dictBuffer;
dctxprep.dictBufferSize = dictBufferSize;

if(loopMode == BMK_timeMode) {
BMK_customTimedReturn_t intermediateResultCompress;
BMK_customTimedReturn_t intermediateResultDecompress;
BMK_timedFnState_t* timeStateCompress = BMK_createTimeState(nbSeconds);
BMK_timedFnState_t* timeStateDecompress = BMK_createTimeState(nbSeconds);
if(mode == BMK_compressOnly) {
intermediateResultCompress.completed = 0;
intermediateResultDecompress.completed = 1;
} else if (mode == BMK_decodeOnly) {
intermediateResultCompress.completed = 1;
intermediateResultDecompress.completed = 0;
} else { /* both */
intermediateResultCompress.completed = 0;
intermediateResultDecompress.completed = 0;
assert(timeStateCompress != NULL);
assert(timeStateDecompress != NULL);
while(!compressionCompleted) {
BMK_runOutcome_t const cOutcome = BMK_benchTimedFn(timeStateCompress,
&local_defaultCompress, cctx,
&local_initCCtx, &cctxprep,
nbBlocks,
srcPtrs, srcSizes,
dstPtrs, dstCapacities,
dstSizes);

if (!BMK_isSuccessful_runOutcome(cOutcome)) {
BMK_benchOutcome_t bOut;
memset(&bOut, 0, sizeof(bOut));
bOut.tag = 1; /* should rather be a function or a constant */
BMK_freeTimedFnState(timeStateCompress);
BMK_freeTimedFnState(timeStateDecompress);
return bOut;
}
{ BMK_runTime_t const rResult = BMK_extract_runTime(cOutcome);
bResult.cSpeed = (srcSize * TIMELOOP_NANOSEC) / rResult.nanoSecPerRun;
bResult.cSize = rResult.sumOfReturn;
}
compressionCompleted = BMK_isCompleted_TimedFn(timeStateCompress);
}

while(!intermediateResultCompress.completed) {
intermediateResultCompress = BMK_benchFunctionTimed(timeStateCompress, &local_defaultCompress, (void*)cctx, &local_initCCtx, (void*)&cctxprep,
nbBlocks, srcPtrs, srcSizes, dstPtrs, dstCapacities, dstSizes);
while (!decompressionCompleted) {
BMK_runOutcome_t const dOutcome = BMK_benchTimedFn(timeStateDecompress,
&local_defaultDecompress, dctx,
&local_initDCtx, &dctxprep,
nbBlocks,
(const void* const*)dstPtrs, dstSizes,
resPtrs, resSizes,
NULL);

if(intermediateResultCompress.result.error) {
results.error = intermediateResultCompress.result.error;
BMK_freeTimeState(timeStateCompress);
BMK_freeTimeState(timeStateDecompress);
return results;
if (!BMK_isSuccessful_runOutcome(dOutcome)) {
BMK_benchOutcome_t bOut;
memset(&bOut, 0, sizeof(bOut));
bOut.tag = 1; /* should rather be a function or a constant */
BMK_freeTimedFnState(timeStateCompress);
BMK_freeTimedFnState(timeStateDecompress);
return bOut;
}
results.result.cSpeed = (srcSize * TIMELOOP_NANOSEC) / intermediateResultCompress.result.result.nanoSecPerRun;
results.result.cSize = intermediateResultCompress.result.result.sumOfReturn;
{ BMK_runTime_t const rResult = BMK_extract_runTime(dOutcome);
bResult.dSpeed = (srcSize * TIMELOOP_NANOSEC) / rResult.nanoSecPerRun;
}
decompressionCompleted = BMK_isCompleted_TimedFn(timeStateDecompress);
}

while(!intermediateResultDecompress.completed) {
intermediateResultDecompress = BMK_benchFunctionTimed(timeStateDecompress, &local_defaultDecompress, (void*)(dctx), &local_initDCtx, (void*)&dctxprep,
nbBlocks, (const void* const*)dstPtrs, dstSizes, resPtrs, resSizes, NULL);

if(intermediateResultDecompress.result.error) {
results.error = intermediateResultDecompress.result.error;
BMK_freeTimeState(timeStateCompress);
BMK_freeTimeState(timeStateDecompress);
return results;
}
results.result.dSpeed = (srcSize * TIMELOOP_NANOSEC) / intermediateResultDecompress.result.result.nanoSecPerRun;
BMK_freeTimedFnState(timeStateCompress);
BMK_freeTimedFnState(timeStateDecompress);
}

BMK_freeTimeState(timeStateCompress);
BMK_freeTimeState(timeStateDecompress);

} else { /* iterMode; */
if(mode != BMK_decodeOnly) {

BMK_customReturn_t compressionResults = BMK_benchFunction(&local_defaultCompress, (void*)cctx, &local_initCCtx, (void*)&cctxprep,
nbBlocks, srcPtrs, srcSizes, dstPtrs, dstCapacities, dstSizes, nbSeconds);
if(compressionResults.error) {
results.error = compressionResults.error;
return results;
}
if(compressionResults.result.nanoSecPerRun == 0) {
results.result.cSpeed = 0;
} else {
results.result.cSpeed = srcSize * TIMELOOP_NANOSEC / compressionResults.result.nanoSecPerRun;
}
results.result.cSize = compressionResults.result.sumOfReturn;
}

if(mode != BMK_compressOnly) {
BMK_customReturn_t decompressionResults;
decompressionResults = BMK_benchFunction(
&local_defaultDecompress, (void*)(dctx),
&local_initDCtx, (void*)&dctxprep, nbBlocks,
(const void* const*)dstPtrs, dstSizes, resPtrs, resSizes, NULL,
nbSeconds);

if(decompressionResults.error) {
results.error = decompressionResults.error;
return results;
}

if(decompressionResults.result.nanoSecPerRun == 0) {
results.result.dSpeed = 0;
} else {
results.result.dSpeed = srcSize * TIMELOOP_NANOSEC / decompressionResults.result.nanoSecPerRun;
}
}
}
}
}
/* Bench */
results.result.cMem = (1 << (comprParams->vals[wlog_ind])) + ZSTD_sizeof_CCtx(cctx);
return results;
bResult.cMem = (1 << (comprParams->vals[wlog_ind])) + ZSTD_sizeof_CCtx(cctx);

{ BMK_benchOutcome_t bOut;
bOut.tag = 0;
bOut.internal_never_use_directly = bResult; /* should be a function */
return bOut;
}
}

static int BMK_benchParam(BMK_result_t* resultPtr,
const buffers_t buf, const contexts_t ctx,
const paramValues_t cParams) {
BMK_return_t res = BMK_benchMemInvertible(buf, ctx, BASE_CLEVEL, &cParams, BMK_both, BMK_timeMode, 3);
*resultPtr = res.result;
return res.error;
static int BMK_benchParam ( BMK_benchResult_t* resultPtr,
buffers_t buf, contexts_t ctx,
paramValues_t cParams)
{
BMK_benchOutcome_t const outcome = BMK_benchMemInvertible(buf, ctx,
BASE_CLEVEL, &cParams,
BMK_both, 3);
int const success = BMK_isSuccessful_benchOutcome(outcome);
if (!success) return 1;
*resultPtr = BMK_extract_benchResult(outcome);
return 0;
}
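
/* Minimal usage sketch, illustrative only and not part of the patch above : with
 * the reworked API, failure is reported through the return value rather than an
 * error field. Assumes a buffers_t buf and a contexts_t ctx already initialized
 * as elsewhere in this file; compression level 3 is an arbitrary choice. */
static void exampleBenchParamUsage(const buffers_t buf, const contexts_t ctx)
{
    BMK_benchResult_t r;
    paramValues_t const pvals = cParamsToPVals(ZSTD_getCParams(3, buf.maxBlockSize, ctx.dictSize));
    if (BMK_benchParam(&r, buf, ctx, pvals)) {
        DISPLAY("benchmark failed \n");
    } else {
        DISPLAY("compressed to %u bytes at %u MB/s \n",
                (unsigned)r.cSize, (unsigned)(r.cSpeed / MB_UNIT));
    }
}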

#define CBENCHMARK(conditional, resultvar, tmpret, mode, loopmode, sec) { \
#define CBENCHMARK(conditional, resultvar, tmpret, mode, sec) { \
if(conditional) { \
BMK_return_t tmpret = BMK_benchMemInvertible(buf, ctx, BASE_CLEVEL, &cParams, mode, loopmode, sec); \
if(tmpret.error) { DEBUGOUTPUT("Benchmarking failed\n"); return ERROR_RESULT; } \
BMK_benchOutcome_t const outcome = BMK_benchMemInvertible(buf, ctx, BASE_CLEVEL, &cParams, mode, sec); \
if (!BMK_isSuccessful_benchOutcome(outcome)) { \
DEBUGOUTPUT("Benchmarking failed\n"); \
return ERROR_RESULT; \
} \
{ BMK_benchResult_t const tmpResult = BMK_extract_benchResult(outcome); \
if (mode != BMK_decodeOnly) { \
resultvar.cSpeed = tmpret.result.cSpeed; \
resultvar.cSize = tmpret.result.cSize; \
resultvar.cMem = tmpret.result.cMem; \
} \
if(mode != BMK_compressOnly) { resultvar.dSpeed = tmpret.result.dSpeed; } \
resultvar.cSpeed = tmpResult.cSpeed; \
resultvar.cSize = tmpResult.cSize; \
resultvar.cMem = tmpResult.cMem; \
} \
if (mode != BMK_compressOnly) { resultvar.dSpeed = tmpResult.dSpeed; } \
} } \
}

/* Benchmarking which stops when we are sufficiently sure the solution is infeasible / worse than the winner */
#define VARIANCE 1.2
static int allBench(BMK_result_t* resultPtr,
static int allBench(BMK_benchResult_t* resultPtr,
const buffers_t buf, const contexts_t ctx,
const paramValues_t cParams,
const constraint_t target,
BMK_result_t* winnerResult, int feas) {
BMK_result_t resultMax, benchres;
BMK_benchResult_t* winnerResult, int feas)
{
BMK_benchResult_t benchres;
U64 loopDurationC = 0, loopDurationD = 0;
double uncertaintyConstantC = 3., uncertaintyConstantD = 3.;
double winnerRS;

/* initial benchmarking, gives exact ratio and memory, warms up future runs */
CBENCHMARK(1, benchres, tmp, BMK_both, BMK_iterMode, 1);
CBENCHMARK(1, benchres, tmp, BMK_both, 2);

winnerRS = resultScore(*winnerResult, buf.srcSize, target);
DEBUGOUTPUT("WinnerScore: %f\n ", winnerRS);
@ -1557,12 +1564,12 @@ static int allBench(BMK_result_t* resultPtr,

/* calculate uncertainty in compression / decompression runs */
if(benchres.cSpeed) {
loopDurationC = ((buf.srcSize * TIMELOOP_NANOSEC) / benchres.cSpeed);
loopDurationC = (((U64)buf.srcSize * TIMELOOP_NANOSEC) / benchres.cSpeed);
uncertaintyConstantC = ((loopDurationC + (double)(2 * g_clockGranularity))/loopDurationC);
}

if(benchres.dSpeed) {
loopDurationD = ((buf.srcSize * TIMELOOP_NANOSEC) / benchres.dSpeed);
loopDurationD = (((U64)buf.srcSize * TIMELOOP_NANOSEC) / benchres.dSpeed);
uncertaintyConstantD = ((loopDurationD + (double)(2 * g_clockGranularity))/loopDurationD);
}
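
/* Worked example, illustrative only : with a 10 MB source measured at roughly
 * 500 MB/s, loopDurationC is about 20 000 000 ns (20 ms). If g_clockGranularity
 * were 1 000 000 ns, the uncertainty constant would be
 *     (20 000 000 + 2 * 1 000 000) / 20 000 000 = 1.1,
 * i.e. the measured speed is trusted to within ~10% before the VARIANCE factor
 * is applied to the optimistic resultMax below. */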

@ -1571,14 +1578,14 @@ static int allBench(BMK_result_t* resultPtr,
return WORSE_RESULT;
}

/* second run, if first run is too short, gives approximate cSpeed + dSpeed */
CBENCHMARK(loopDurationC < TIMELOOP_NANOSEC / 10, benchres, tmp, BMK_compressOnly, BMK_iterMode, 1);
CBENCHMARK(loopDurationD < TIMELOOP_NANOSEC / 10, benchres, tmp, BMK_decodeOnly, BMK_iterMode, 1);
/* ensure all measurements last a minimum time, to reduce measurement errors */
assert(loopDurationC >= TIMELOOP_NANOSEC / 10);
assert(loopDurationD >= TIMELOOP_NANOSEC / 10);

*resultPtr = benchres;

/* optimistic assumption of benchres */
resultMax = benchres;
{ BMK_benchResult_t resultMax = benchres;
resultMax.cSpeed *= uncertaintyConstantC * VARIANCE;
resultMax.dSpeed *= uncertaintyConstantD * VARIANCE;

@ -1588,9 +1595,7 @@ static int allBench(BMK_result_t* resultPtr,
(!feas && (winnerRS > resultScore(resultMax, buf.srcSize, target)))) {
return WORSE_RESULT;
}

CBENCHMARK(loopDurationC < TIMELOOP_NANOSEC, benchres, tmp, BMK_compressOnly, BMK_timeMode, 1);
CBENCHMARK(loopDurationD < TIMELOOP_NANOSEC, benchres, tmp, BMK_decodeOnly, BMK_timeMode, 1);
}

*resultPtr = benchres;

@ -1604,13 +1609,14 @@ static int allBench(BMK_result_t* resultPtr,
}
}

#define INFEASIBLE_THRESHOLD 200
/* Memoized benchmarking, won't benchmark anything which has already been benchmarked before. */
static int benchMemo(BMK_result_t* resultPtr,
static int benchMemo(BMK_benchResult_t* resultPtr,
const buffers_t buf, const contexts_t ctx,
const paramValues_t cParams,
const constraint_t target,
BMK_result_t* winnerResult, memoTable_t* const memoTableArray,
BMK_benchResult_t* winnerResult, memoTable_t* const memoTableArray,
const int feas) {
static int bmcount = 0;
int res;
@ -1631,6 +1637,7 @@ static int benchMemo(BMK_result_t* resultPtr,
return res;
}

typedef struct {
U64 cSpeed_min;
U64 dSpeed_min;
@ -1662,7 +1669,7 @@ static void BMK_init_level_constraints(int bytePerSec_level1)
static int BMK_seed(winnerInfo_t* winners, const paramValues_t params,
const buffers_t buf, const contexts_t ctx)
{
BMK_result_t testResult;
BMK_benchResult_t testResult;
int better = 0;
int cLevel;

@ -1729,16 +1736,16 @@ static int BMK_seed(winnerInfo_t* winners, const paramValues_t params,
/* too large compression speed difference for the compression benefit */
if (W_ratio > O_ratio)
DISPLAY ("Compression Speed : %5.3f @ %4.1f MB/s vs %5.3f @ %4.1f MB/s : not enough for level %i\n",
W_ratio, (double)testResult.cSpeed / (1 MB),
O_ratio, (double)winners[cLevel].result.cSpeed / (1 MB), cLevel);
W_ratio, (double)testResult.cSpeed / MB_UNIT,
O_ratio, (double)winners[cLevel].result.cSpeed / MB_UNIT, cLevel);
continue;
}
if (W_DSpeed_note < O_DSpeed_note ) {
/* too large decompression speed difference for the compression benefit */
if (W_ratio > O_ratio)
DISPLAY ("Decompression Speed : %5.3f @ %4.1f MB/s vs %5.3f @ %4.1f MB/s : not enough for level %i\n",
W_ratio, (double)testResult.dSpeed / (1 MB),
O_ratio, (double)winners[cLevel].result.dSpeed / (1 MB), cLevel);
W_ratio, (double)testResult.dSpeed / MB_UNIT,
O_ratio, (double)winners[cLevel].result.dSpeed / MB_UNIT, cLevel);
continue;
}

@ -1828,11 +1835,11 @@ static void BMK_benchFullTable(const buffers_t buf, const contexts_t ctx)
if (f==NULL) { DISPLAY("error opening %s \n", rfName); exit(1); }

if (g_target) {
BMK_init_level_constraints(g_target * (1 MB));
BMK_init_level_constraints(g_target * MB_UNIT);
} else {
/* baseline config for level 1 */
paramValues_t const l1params = cParamsToPVals(ZSTD_getCParams(1, buf.maxBlockSize, ctx.dictSize));
BMK_result_t testResult;
BMK_benchResult_t testResult;
BMK_benchParam(&testResult, buf, ctx, l1params);
BMK_init_level_constraints((int)((testResult.cSpeed * 31) / 32));
}
@ -1861,12 +1868,13 @@ static void BMK_benchFullTable(const buffers_t buf, const contexts_t ctx)
fclose(f);
}

/*-************************************
* Single Benchmark Functions
**************************************/

static int benchOnce(const buffers_t buf, const contexts_t ctx, const int cLevel) {
BMK_result_t testResult;
BMK_benchResult_t testResult;
g_params = adjustParams(overwriteParams(cParamsToPVals(ZSTD_getCParams(cLevel, buf.maxBlockSize, ctx.dictSize)), g_params), buf.maxBlockSize, ctx.dictSize);

if (BMK_benchParam(&testResult, buf, ctx, g_params)) {
@ -1883,7 +1891,7 @@ static int benchSample(double compressibility, int cLevel)
{
const char* const name = "Sample 10MB";
size_t const benchedSize = 10 MB;
void* srcBuffer = malloc(benchedSize);
void* const srcBuffer = malloc(benchedSize);
int ret = 0;

buffers_t buf;
@ -1927,7 +1935,8 @@ static int benchSample(double compressibility, int cLevel)
/* benchFiles() :
* note: while this function takes a table of filenames,
* in practice, only the first filename will be used */
int benchFiles(const char** fileNamesTable, int nbFiles, const char* dictFileName, const int cLevel)
int benchFiles(const char** fileNamesTable, int nbFiles,
const char* dictFileName, int cLevel)
{
buffers_t buf;
contexts_t ctx;
@ -1986,7 +1995,8 @@ int benchFiles(const char** fileNamesTable, int nbFiles, const char* dictFileNam
static winnerInfo_t climbOnce(const constraint_t target,
memoTable_t* mtAll,
const buffers_t buf, const contexts_t ctx,
const paramValues_t init) {
const paramValues_t init)
{
/*
* cparam - currently considered 'center'
* candidate - params to benchmark/results
@ -2000,11 +2010,9 @@ static winnerInfo_t climbOnce(const constraint_t target,
winnerInfo = initWinnerInfo(init);
candidateInfo = winnerInfo;

{
winnerInfo_t bestFeasible1 = initWinnerInfo(cparam);
{ winnerInfo_t bestFeasible1 = initWinnerInfo(cparam);
DEBUGOUTPUT("Climb Part 1\n");
while(better) {

int offset;
size_t i, dist;
const size_t varLen = mtAll[cparam.vals[strt_ind]].varLen;
@ -2033,7 +2041,7 @@ static winnerInfo_t climbOnce(const constraint_t target,
}
}
}
}
} /* for (i = 0; i < varLen; i++) */

if(better) {
continue;
@ -2047,8 +2055,10 @@ static winnerInfo_t climbOnce(const constraint_t target,
/* param error checking already done here */
paramVariation(&candidateInfo.params, mtAll, (U32)dist);

res = benchMemo(&candidateInfo.result, buf, ctx,
sanitizeParams(candidateInfo.params), target, &winnerInfo.result, mtAll, feas);
res = benchMemo(&candidateInfo.result,
buf, ctx,
sanitizeParams(candidateInfo.params), target,
&winnerInfo.result, mtAll, feas);
DEBUGOUTPUT("Res: %d\n", res);
if (res == BETTER_RESULT) { /* synonymous with better in this case*/
winnerInfo = candidateInfo;
@ -2058,16 +2068,15 @@ static winnerInfo_t climbOnce(const constraint_t target,
}
break;
}

}

if (better) {
break;
}
}
} /* for(dist = 2; dist < varLen + 2; dist++) */

if (!better) { /* infeas -> feas -> stop */
if(feas) { return winnerInfo; }

if (feas) return winnerInfo;
feas = 1;
better = 1;
winnerInfo = bestFeasible1; /* note with change, bestFeasible may not necessarily be feasible, but if one has been benchmarked, it will be. */
@ -2245,7 +2254,7 @@ static int optimizeForSize(const char* const * const fileNamesTable, const size_

/* Don't want it to return anything worse than the best known result */
if (g_singleRun) {
BMK_result_t res;
BMK_benchResult_t res;
g_params = adjustParams(overwriteParams(cParamsToPVals(ZSTD_getCParams(cLevelRun, buf.maxBlockSize, ctx.dictSize)), g_params), buf.maxBlockSize, ctx.dictSize);
if (BMK_benchParam(&res, buf, ctx, g_params)) {
ret = 45;
@ -2272,8 +2281,7 @@ static int optimizeForSize(const char* const * const fileNamesTable, const size_
DISPLAYLEVEL(2, "\n");
findClockGranularity();

{
paramValues_t CParams;
{ paramValues_t CParams;

/* find best solution from default params */
{
@ -2281,7 +2289,7 @@ static int optimizeForSize(const char* const * const fileNamesTable, const size_
const int maxSeeds = g_noSeed ? 1 : ZSTD_maxCLevel();
DEBUGOUTPUT("Strategy Selection\n");
if(paramTarget.vals[strt_ind] == PARAM_UNSET) {
BMK_result_t candidate;
BMK_benchResult_t candidate;
int i;
for (i=1; i<=maxSeeds; i++) {
int ec;

@ -1020,6 +1020,59 @@ static int basicUnitTests(U32 seed, double compressibility)
}
DISPLAYLEVEL(3, "OK \n");

DISPLAYLEVEL(3, "test%3i : dictionary + uncompressible block + reusing tables checks offset table validity: ", testNb++);
{ ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(
dictionary.start, dictionary.filled,
ZSTD_dlm_byRef, ZSTD_dct_fullDict,
ZSTD_getCParams(3, 0, dictionary.filled),
ZSTD_defaultCMem);
const size_t inbufsize = 2 * 128 * 1024; /* 2 blocks */
const size_t outbufsize = ZSTD_compressBound(inbufsize);
size_t inbufpos = 0;
size_t cursegmentlen;
BYTE *inbuf = (BYTE *)malloc(inbufsize);
BYTE *outbuf = (BYTE *)malloc(outbufsize);
BYTE *checkbuf = (BYTE *)malloc(inbufsize);
size_t ret;

CHECK(cdict == NULL, "failed to alloc cdict");
CHECK(inbuf == NULL, "failed to alloc input buffer");

/* first block is uncompressible */
cursegmentlen = 128 * 1024;
RDG_genBuffer(inbuf + inbufpos, cursegmentlen, 0., 0., seed);
inbufpos += cursegmentlen;

/* second block is compressible */
cursegmentlen = 128 * 1024 - 256;
RDG_genBuffer(inbuf + inbufpos, cursegmentlen, 0.05, 0., seed);
inbufpos += cursegmentlen;

/* and includes a very long backref */
cursegmentlen = 128;
memcpy(inbuf + inbufpos, dictionary.start + 256, cursegmentlen);
inbufpos += cursegmentlen;

/* and includes a very long backref */
cursegmentlen = 128;
memcpy(inbuf + inbufpos, dictionary.start + 128, cursegmentlen);
inbufpos += cursegmentlen;

ret = ZSTD_compress_usingCDict(zc, outbuf, outbufsize, inbuf, inbufpos, cdict);
CHECK_Z(ret);

ret = ZSTD_decompress_usingDict(zd, checkbuf, inbufsize, outbuf, ret, dictionary.start, dictionary.filled);
CHECK_Z(ret);

CHECK(memcmp(inbuf, checkbuf, inbufpos), "start and finish buffers don't match");

ZSTD_freeCDict(cdict);
free(inbuf);
free(outbuf);
free(checkbuf);
}
DISPLAYLEVEL(3, "OK \n");

_end:
FUZ_freeDictionary(dictionary);
ZSTD_freeCStream(zc);