bench: changed creation/reset function to timedFnState

for consistency
dev
Yann Collet 2018-08-21 18:19:27 -07:00
parent 1af27a7ed7
commit 77e805e3db
3 changed files with 287 additions and 277 deletions
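
For reference, the renamed helpers keep the usual create/reset/free lifecycle. A minimal caller-side sketch (illustrative only, based on the bench.h declarations touched by this commit):

#include "bench.h"   /* BMK_createTimedFnState, BMK_resetTimedFnState, BMK_freeTimedFnState */

void example_timedFnState_lifecycle(void)
{
    /* one state object per timed benchmark, budgeted for ~3 seconds of measurement */
    BMK_timedFnState_t* const ts = BMK_createTimedFnState(3);
    if (ts == NULL) return;               /* allocation failure */

    /* ... repeated BMK_benchFunctionTimed(ts, ...) calls go here ... */

    BMK_resetTimedFnState(ts, 5);         /* reuse the same state with a fresh 5-second budget */
    /* ... more timed runs ... */

    BMK_freeTimedFnState(ts);             /* wraps free(), so a NULL state is also accepted */
}
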


@ -124,8 +124,8 @@ static UTIL_time_t g_displayClock = UTIL_TIME_INITIALIZER;
* Benchmark Parameters
***************************************/
BMK_advancedParams_t BMK_initAdvancedParams(void) {
BMK_advancedParams_t res = {
BMK_advancedParams_t BMK_initAdvancedParams(void) {
BMK_advancedParams_t res = {
BMK_both, /* mode */
BMK_timeMode, /* loopMode */
BMK_TIMETEST_DEFAULT_S, /* nbSeconds */
@ -133,7 +133,7 @@ BMK_advancedParams_t BMK_initAdvancedParams(void) {
0, /* nbWorkers */
0, /* realTime */
0, /* additionalParam */
0, /* ldmFlag */
0, /* ldmFlag */
0, /* ldmMinMatch */
0, /* ldmHashLog */
0, /* ldmBuckSizeLog */
@ -168,8 +168,8 @@ struct BMK_timeState_t{
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))
static void BMK_initCCtx(ZSTD_CCtx* ctx,
const void* dictBuffer, size_t dictBufferSize, int cLevel,
static void BMK_initCCtx(ZSTD_CCtx* ctx,
const void* dictBuffer, size_t dictBufferSize, int cLevel,
const ZSTD_compressionParameters* comprParams, const BMK_advancedParams_t* adv) {
ZSTD_CCtx_reset(ctx);
ZSTD_CCtx_resetParameters(ctx);
@ -195,7 +195,7 @@ static void BMK_initCCtx(ZSTD_CCtx* ctx,
}
static void BMK_initDCtx(ZSTD_DCtx* dctx,
static void BMK_initDCtx(ZSTD_DCtx* dctx,
const void* dictBuffer, size_t dictBufferSize) {
ZSTD_DCtx_reset(dctx);
ZSTD_DCtx_loadDictionary(dctx, dictBuffer, dictBufferSize);
@ -230,8 +230,8 @@ static size_t local_initDCtx(void* payload) {
/* additional argument is just the context */
static size_t local_defaultCompress(
const void* srcBuffer, size_t srcSize,
void* dstBuffer, size_t dstSize,
const void* srcBuffer, size_t srcSize,
void* dstBuffer, size_t dstSize,
void* addArgs) {
size_t moreToFlush = 1;
ZSTD_CCtx* ctx = (ZSTD_CCtx*)addArgs;
@ -257,8 +257,8 @@ static size_t local_defaultCompress(
/* additional argument is just the context */
static size_t local_defaultDecompress(
const void* srcBuffer, size_t srcSize,
void* dstBuffer, size_t dstSize,
const void* srcBuffer, size_t srcSize,
void* dstBuffer, size_t dstSize,
void* addArgs) {
size_t moreToFlush = 1;
ZSTD_DCtx* dctx = (ZSTD_DCtx*)addArgs;
@ -294,7 +294,7 @@ BMK_customReturn_t BMK_benchFunction(
BMK_initFn_t initFn, void* initPayload,
size_t blockCount,
const void* const * const srcBlockBuffers, const size_t* srcBlockSizes,
void* const * const dstBlockBuffers, const size_t* dstBlockCapacities, size_t* blockResult,
void* const * const dstBlockBuffers, const size_t* dstBlockCapacities, size_t* blockResult,
unsigned nbLoops) {
size_t dstSize = 0;
U64 totalTime;
@ -312,15 +312,15 @@ BMK_customReturn_t BMK_benchFunction(
memset(dstBlockBuffers[i], 0xE5, dstBlockCapacities[i]); /* warm up and erase result buffer */
}
#if 0
/* based on testing these seem to lower accuracy of multiple calls of 1 nbLoops vs 1 call of multiple nbLoops
* (Makes former slower)
/* based on testing these seem to lower accuracy of multiple calls of 1 nbLoops vs 1 call of multiple nbLoops
* (Makes former slower)
*/
UTIL_sleepMilli(5); /* give processor time to other processes */
UTIL_waitForNextTick();
#endif
}
{
{
unsigned i, j;
clockStart = UTIL_getTime();
if(initFn != NULL) { initFn(initPayload); }
@ -335,7 +335,7 @@ BMK_customReturn_t BMK_benchFunction(
if(blockResult != NULL) {
blockResult[j] = res;
}
}
}
}
}
totalTime = UTIL_clockSpanNano(clockStart);
@ -345,27 +345,27 @@ BMK_customReturn_t BMK_benchFunction(
retval.result.nanoSecPerRun = totalTime / nbLoops;
retval.result.sumOfReturn = dstSize;
return retval;
}
}
#define MINUSABLETIME 500000000ULL /* 0.5 seconds in ns */
void BMK_resetTimeState(BMK_timedFnState_t* r, unsigned nbSeconds) {
void BMK_resetTimedFnState(BMK_timedFnState_t* r, unsigned nbSeconds) {
r->nbLoops = 1;
r->timeRemaining = (U64)nbSeconds * TIMELOOP_NANOSEC;
r->coolTime = UTIL_getTime();
r->fastestTime = (U64)(-1LL);
}
BMK_timedFnState_t* BMK_createTimeState(unsigned nbSeconds) {
BMK_timedFnState_t* BMK_createTimedFnState(unsigned nbSeconds) {
BMK_timedFnState_t* r = (BMK_timedFnState_t*)malloc(sizeof(struct BMK_timeState_t));
if(r == NULL) {
return r;
}
BMK_resetTimeState(r, nbSeconds);
BMK_resetTimedFnState(r, nbSeconds);
return r;
}
void BMK_freeTimeState(BMK_timedFnState_t* state) {
void BMK_freeTimedFnState(BMK_timedFnState_t* state) {
free(state);
}
@ -376,7 +376,7 @@ BMK_customTimedReturn_t BMK_benchFunctionTimed(
BMK_initFn_t initFn, void* initPayload,
size_t blockCount,
const void* const* const srcBlockBuffers, const size_t* srcBlockSizes,
void * const * const dstBlockBuffers, const size_t * dstBlockCapacities, size_t* blockResults)
void * const * const dstBlockBuffers, const size_t * dstBlockCapacities, size_t* blockResults)
{
U64 fastest = cont->fastestTime;
int completed = 0;
@ -402,7 +402,7 @@ BMK_customTimedReturn_t BMK_benchFunctionTimed(
{ U64 const loopDuration = r.result.result.nanoSecPerRun * cont->nbLoops;
r.completed = (cont->timeRemaining <= loopDuration);
cont->timeRemaining -= loopDuration;
if (loopDuration > (TIMELOOP_NANOSEC / 100)) {
if (loopDuration > (TIMELOOP_NANOSEC / 100)) {
fastest = MIN(fastest, r.result.result.nanoSecPerRun);
if(loopDuration >= MINUSABLETIME) {
r.result.result.nanoSecPerRun = fastest;
@ -427,7 +427,7 @@ BMK_customTimedReturn_t BMK_benchFunctionTimed(
/* benchMem with no allocation */
static BMK_return_t BMK_benchMemAdvancedNoAlloc(
const void ** const srcPtrs, size_t* const srcSizes,
void** const cPtrs, size_t* const cCapacities, size_t* const cSizes,
void** const cPtrs, size_t* const cCapacities, size_t* const cSizes,
void** const resPtrs, size_t* const resSizes,
void** resultBufferPtr, void* compressedBuffer,
const size_t maxCompressedSize,
@ -438,7 +438,7 @@ static BMK_return_t BMK_benchMemAdvancedNoAlloc(
const int cLevel, const ZSTD_compressionParameters* comprParams,
const void* dictBuffer, size_t dictBufferSize,
ZSTD_CCtx* ctx, ZSTD_DCtx* dctx,
int displayLevel, const char* displayName, const BMK_advancedParams_t* adv)
int displayLevel, const char* displayName, const BMK_advancedParams_t* adv)
{
size_t const blockSize = ((adv->blockSize>=32 && (adv->mode != BMK_decodeOnly)) ? adv->blockSize : srcSize) + (!srcSize); /* avoid div by 0 */
BMK_return_t results = { { 0, 0, 0, 0 }, 0 } ;
@ -447,7 +447,7 @@ static BMK_return_t BMK_benchMemAdvancedNoAlloc(
double ratio = 0.;
U32 nbBlocks;
if(!ctx || !dctx)
if(!ctx || !dctx)
EXM_THROW(31, BMK_return_t, "error: passed in null context");
/* init */
@ -466,16 +466,16 @@ static BMK_return_t BMK_benchMemAdvancedNoAlloc(
free(*resultBufferPtr);
*resultBufferPtr = malloc(decodedSize);
if (!(*resultBufferPtr)) {
EXM_THROW(33, BMK_return_t, "not enough memory");
EXM_THROW(33, BMK_return_t, "not enough memory");
}
if (totalDSize64 > decodedSize) {
free(*resultBufferPtr);
free(*resultBufferPtr);
EXM_THROW(32, BMK_return_t, "original size is too large"); /* size_t overflow */
}
cSize = srcSize;
srcSize = decodedSize;
ratio = (double)srcSize / (double)cSize;
}
}
}
/* Init data blocks */
@ -499,8 +499,8 @@ static BMK_return_t BMK_benchMemAdvancedNoAlloc(
cPtr += cCapacities[nbBlocks];
resPtr += thisBlockSize;
remaining -= thisBlockSize;
}
}
}
}
}
/* warming up memory */
@ -511,7 +511,7 @@ static BMK_return_t BMK_benchMemAdvancedNoAlloc(
}
/* Bench */
{
{
U64 const crcOrig = (adv->mode == BMK_decodeOnly) ? 0 : XXH64(srcBuffer, srcSize, 0);
# define NB_MARKS 4
const char* const marks[NB_MARKS] = { " |", " /", " =", "\\" };
@ -545,7 +545,7 @@ static BMK_return_t BMK_benchMemAdvancedNoAlloc(
intermediateResultDecompress.completed = 0;
}
while(!(intermediateResultCompress.completed && intermediateResultDecompress.completed)) {
if(!intermediateResultCompress.completed) {
if(!intermediateResultCompress.completed) {
intermediateResultCompress = BMK_benchFunctionTimed(timeStateCompress, &local_defaultCompress, (void*)ctx, &local_initCCtx, (void*)&cctxprep,
nbBlocks, srcPtrs, srcSizes, cPtrs, cCapacities, cSizes);
if(intermediateResultCompress.result.error) {
@ -553,7 +553,7 @@ static BMK_return_t BMK_benchMemAdvancedNoAlloc(
return results;
}
ratio = (double)(srcSize / intermediateResultCompress.result.result.sumOfReturn);
{
{
int const ratioAccuracy = (ratio < 10.) ? 3 : 2;
results.result.cSpeed = (srcSize * TIMELOOP_NANOSEC / intermediateResultCompress.result.result.nanoSecPerRun);
cSize = intermediateResultCompress.result.result.sumOfReturn;
@ -574,8 +574,8 @@ static BMK_return_t BMK_benchMemAdvancedNoAlloc(
results.error = intermediateResultDecompress.result.error;
return results;
}
{
{
int const ratioAccuracy = (ratio < 10.) ? 3 : 2;
results.result.dSpeed = (srcSize * TIMELOOP_NANOSEC/ intermediateResultDecompress.result.result.nanoSecPerRun);
markNb = (markNb+1) % NB_MARKS;
@ -592,7 +592,7 @@ static BMK_return_t BMK_benchMemAdvancedNoAlloc(
if(adv->mode != BMK_decodeOnly) {
BMK_customReturn_t compressionResults = BMK_benchFunction(&local_defaultCompress, (void*)ctx, &local_initCCtx, (void*)&cctxprep,
nbBlocks, srcPtrs, srcSizes, cPtrs, cCapacities, cSizes, adv->nbSeconds);
nbBlocks, srcPtrs, srcSizes, cPtrs, cCapacities, cSizes, adv->nbSeconds);
if(compressionResults.error) {
results.error = compressionResults.error;
return results;
@ -605,7 +605,7 @@ static BMK_return_t BMK_benchMemAdvancedNoAlloc(
}
results.result.cSize = compressionResults.result.sumOfReturn;
{
{
int const ratioAccuracy = (ratio < 10.) ? 3 : 2;
cSize = compressionResults.result.sumOfReturn;
results.result.cSize = cSize;
@ -621,7 +621,7 @@ static BMK_return_t BMK_benchMemAdvancedNoAlloc(
BMK_customReturn_t decompressionResults = BMK_benchFunction(
&local_defaultDecompress, (void*)(dctx),
&local_initDCtx, (void*)&dctxprep, nbBlocks,
(const void* const*)cPtrs, cSizes, resPtrs, resSizes, NULL,
(const void* const*)cPtrs, cSizes, resPtrs, resSizes, NULL,
adv->nbSeconds);
if(decompressionResults.error) {
results.error = decompressionResults.error;
@ -634,7 +634,7 @@ static BMK_return_t BMK_benchMemAdvancedNoAlloc(
results.result.dSpeed = srcSize * TIMELOOP_NANOSEC / decompressionResults.result.nanoSecPerRun;
}
{
{
int const ratioAccuracy = (ratio < 10.) ? 3 : 2;
markNb = (markNb+1) % NB_MARKS;
DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.*f),%6.*f MB/s ,%6.1f MB/s \r",
@ -683,9 +683,9 @@ static BMK_return_t BMK_benchMemAdvancedNoAlloc(
}
if (u==srcSize-1) { /* should never happen */
DISPLAY("no difference detected\n");
}
}
}
}
}
} /* CRC Checking */
if (displayLevel == 1) { /* hidden display mode -q, used by python speed benchmark */
@ -705,7 +705,7 @@ static BMK_return_t BMK_benchMemAdvancedNoAlloc(
}
BMK_return_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize,
void* dstBuffer, size_t dstCapacity,
void* dstBuffer, size_t dstCapacity,
const size_t* fileSizes, unsigned nbFiles,
const int cLevel, const ZSTD_compressionParameters* comprParams,
const void* dictBuffer, size_t dictBufferSize,
@ -727,8 +727,8 @@ BMK_return_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize,
void ** const resPtrs = (void**)malloc(maxNbBlocks * sizeof(void*));
size_t* const resSizes = (size_t*)malloc(maxNbBlocks * sizeof(size_t));
BMK_timedFnState_t* timeStateCompress = BMK_createTimeState(adv->nbSeconds);
BMK_timedFnState_t* timeStateDecompress = BMK_createTimeState(adv->nbSeconds);
BMK_timedFnState_t* timeStateCompress = BMK_createTimedFnState(adv->nbSeconds);
BMK_timedFnState_t* timeStateDecompress = BMK_createTimedFnState(adv->nbSeconds);
ZSTD_CCtx* ctx = ZSTD_createCCtx();
ZSTD_DCtx* dctx = ZSTD_createDCtx();
@ -744,8 +744,8 @@ BMK_return_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize,
void* resultBuffer = srcSize ? malloc(srcSize) : NULL;
int allocationincomplete = !srcPtrs || !srcSizes || !cPtrs ||
!cSizes || !cCapacities || !resPtrs || !resSizes ||
int allocationincomplete = !srcPtrs || !srcSizes || !cPtrs ||
!cSizes || !cCapacities || !resPtrs || !resSizes ||
!timeStateCompress || !timeStateDecompress || !compressedBuffer || !resultBuffer;
@ -756,20 +756,20 @@ BMK_return_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize,
srcBuffer, srcSize, fileSizes, nbFiles, cLevel, comprParams,
dictBuffer, dictBufferSize, ctx, dctx, displayLevel, displayName, adv);
}
/* clean up */
BMK_freeTimeState(timeStateCompress);
BMK_freeTimeState(timeStateDecompress);
BMK_freeTimedFnState(timeStateCompress);
BMK_freeTimedFnState(timeStateDecompress);
ZSTD_freeCCtx(ctx);
ZSTD_freeDCtx(dctx);
free(internalDstBuffer);
free(resultBuffer);
free((void*)srcPtrs);
free(srcSizes);
free(cPtrs);
free((void*)srcPtrs);
free(srcSizes);
free(cPtrs);
free(cSizes);
free(cCapacities);
free(resPtrs);
@ -778,9 +778,9 @@ BMK_return_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize,
if(allocationincomplete) {
EXM_THROW(31, BMK_return_t, "allocation error : not enough memory");
}
if(parametersConflict) {
EXM_THROW(32, BMK_return_t, "Conflicting input results");
EXM_THROW(32, BMK_return_t, "Conflicting input results");
}
return results;
}
@ -840,11 +840,11 @@ static BMK_return_t BMK_benchCLevel(const void* srcBuffer, size_t benchedSize,
if (displayLevel == 1 && !adv->additionalParam)
DISPLAY("bench %s %s: input %u bytes, %u seconds, %u KB blocks\n", ZSTD_VERSION_STRING, ZSTD_GIT_COMMIT_STRING, (U32)benchedSize, adv->nbSeconds, (U32)(adv->blockSize>>10));
res = BMK_benchMemAdvanced(srcBuffer, benchedSize,
NULL, 0,
fileSizes, nbFiles,
cLevel, comprParams,
dictBuffer, dictBufferSize,
res = BMK_benchMemAdvanced(srcBuffer, benchedSize,
NULL, 0,
fileSizes, nbFiles,
cLevel, comprParams,
dictBuffer, dictBufferSize,
displayLevel, displayName, adv);
return res;
@ -855,7 +855,7 @@ static BMK_return_t BMK_benchCLevel(const void* srcBuffer, size_t benchedSize,
* Loads `buffer` with content of files listed within `fileNamesTable`.
* At most, fills `buffer` entirely. */
static int BMK_loadFiles(void* buffer, size_t bufferSize,
size_t* fileSizes, const char* const * const fileNamesTable,
size_t* fileSizes, const char* const * const fileNamesTable,
unsigned nbFiles, int displayLevel)
{
size_t pos = 0, totalSize = 0;
@ -890,8 +890,8 @@ static int BMK_loadFiles(void* buffer, size_t bufferSize,
}
BMK_return_t BMK_benchFilesAdvanced(const char* const * const fileNamesTable, unsigned const nbFiles,
const char* const dictFileName, int const cLevel,
const ZSTD_compressionParameters* const compressionParams,
const char* const dictFileName, int const cLevel,
const ZSTD_compressionParameters* const compressionParams,
int displayLevel, const BMK_advancedParams_t * const adv)
{
void* srcBuffer = NULL;
@ -960,13 +960,13 @@ BMK_return_t BMK_benchFilesAdvanced(const char* const * const fileNamesTable, un
{
char mfName[20] = {0};
snprintf (mfName, sizeof(mfName), " %u files", nbFiles);
{
{
const char* const displayName = (nbFiles > 1) ? mfName : fileNamesTable[0];
res = BMK_benchCLevel(srcBuffer, benchedSize,
fileSizes, nbFiles,
res = BMK_benchCLevel(srcBuffer, benchedSize,
fileSizes, nbFiles,
cLevel, compressionParams,
dictBuffer, dictBufferSize,
displayLevel, displayName,
dictBuffer, dictBufferSize,
displayLevel, displayName,
adv);
} }
@ -1000,10 +1000,10 @@ BMK_return_t BMK_syntheticTest(int cLevel, double compressibility,
/* Bench */
snprintf (name, sizeof(name), "Synthetic %2u%%", (unsigned)(compressibility*100));
res = BMK_benchCLevel(srcBuffer, benchedSize,
&benchedSize, 1,
cLevel, compressionParams,
NULL, 0,
res = BMK_benchCLevel(srcBuffer, benchedSize,
&benchedSize, 1,
cLevel, compressionParams,
NULL, 0,
displayLevel, name, adv);
/* clean up */
@ -1013,8 +1013,8 @@ BMK_return_t BMK_syntheticTest(int cLevel, double compressibility,
}
BMK_return_t BMK_benchFiles(const char* const * const fileNamesTable, unsigned const nbFiles,
const char* const dictFileName,
int const cLevel, const ZSTD_compressionParameters* const compressionParams,
const char* const dictFileName,
int const cLevel, const ZSTD_compressionParameters* const compressionParams,
int displayLevel) {
const BMK_advancedParams_t adv = BMK_initAdvancedParams();
return BMK_benchFilesAdvanced(fileNamesTable, nbFiles, dictFileName, cLevel, compressionParams, displayLevel, &adv);


@ -19,11 +19,12 @@ extern "C" {
#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressionParameters */
#include "zstd.h" /* ZSTD_compressionParameters */
/* Creates a struct of type typeName with an int type .error field
* and a .result field of some baseType. Functions with return
* typeName pass a successful result with .error = 0 and .result
* with the intended result, while returning an error will result
* in .error != 0.
/* Creates a struct type typeName, featuring:
* - an .error field of type int
* - a .result field of some baseType.
* Functions with return type typeName
* will either be successful, with .error = 0, providing a valid .result,
* or return an error, with .error != 0, in which case .result is invalid.
*/
#define ERROR_STRUCT(baseType, typeName) typedef struct { \
baseType result; \
@ -32,36 +33,37 @@ extern "C" {
typedef struct {
size_t cSize;
U64 cSpeed; /* bytes / sec */
U64 dSpeed;
size_t cMem;
unsigned long long cSpeed; /* bytes / sec */
unsigned long long dSpeed;
size_t cMem; /* ? what does it report ? */
} BMK_result_t;
ERROR_STRUCT(BMK_result_t, BMK_return_t);
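
As an illustration of the description above (not the literal expansion, whose remaining lines fall outside this hunk), ERROR_STRUCT(BMK_result_t, BMK_return_t) yields a struct along these lines:

/* illustration only : the real definition comes from the ERROR_STRUCT macro above */
typedef struct {
    BMK_result_t result;    /* meaningful only when error == 0 */
    int error;              /* 0 on success, nonzero otherwise */
} BMK_return_t;
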
/* called in cli */
/* Loads files in fileNamesTable into memory, as well as a dictionary
* from dictFileName, and then uses benchMem */
/* fileNamesTable - name of files to benchmark
* nbFiles - number of files (size of fileNamesTable), must be > 0
* dictFileName - name of dictionary file to load
* cLevel - compression level to benchmark, errors if invalid
* compressionParams - basic compression Parameters
* displayLevel - what gets printed
* 0 : no display;
* 1 : errors;
* 2 : + result + interaction + warnings;
* 3 : + progression;
/*! BMK_benchFiles() -- called by zstdcli */
/* Loads files from fileNamesTable into memory,
* loads dictionary from dictFileName,
* then uses benchMem().
* fileNamesTable - name of files to benchmark
* nbFiles - number of files (size of fileNamesTable), must be > 0 (what happens if not ?)
* dictFileName - name of dictionary file to load
* cLevel - compression level to benchmark, errors if invalid
* compressionParams - advanced compression Parameters
* displayLevel - what gets printed
* 0 : no display;
* 1 : errors;
* 2 : + result + interaction + warnings;
* 3 : + progression;
* 4 : + information
* return
* @return
* .error will give a nonzero error value if an error has occurred
* .result - if .error = 0, .result will return the time taken to compression speed
* (.cSpeed), decompression speed (.dSpeed), and compressed size (.cSize) of the original
* file
* .result - only valid if .error = 0,
* .result will return compression speed (.cSpeed),
* decompression speed (.dSpeed), and compressed size (.cSize).
*/
BMK_return_t BMK_benchFiles(const char* const * const fileNamesTable, unsigned const nbFiles,
const char* const dictFileName,
int const cLevel, const ZSTD_compressionParameters* const compressionParams,
BMK_return_t BMK_benchFiles(const char* const * fileNamesTable, unsigned nbFiles,
const char* dictFileName,
int cLevel, const ZSTD_compressionParameters* compressionParams,
int displayLevel);
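
A hedged usage sketch of the prototype above; the file names and level are placeholders, and ZSTD_getCParams() is used only to obtain valid compression parameters:

#include <stdio.h>
#include "bench.h"

int bench_two_files(void)
{
    const char* files[] = { "fileA", "fileB" };               /* hypothetical inputs */
    ZSTD_compressionParameters const cParams = ZSTD_getCParams(3, 0, 0);
    BMK_return_t const r = BMK_benchFiles(files, 2, NULL /* no dictionary */,
                                          3 /* cLevel */, &cParams, 2 /* displayLevel */);
    if (r.error) { fprintf(stderr, "benchmark failed (%d)\n", r.error); return 1; }
    printf("%zu compressed bytes, %llu B/s compression, %llu B/s decompression\n",
           r.result.cSize, r.result.cSpeed, r.result.dSpeed);
    return 0;
}
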
typedef enum {
@ -85,7 +87,7 @@ typedef struct {
int additionalParam; /* used by python speed benchmark */
unsigned ldmFlag; /* enables long distance matching */
unsigned ldmMinMatch; /* below: parameters for long distance matching, see zstd.1.md for meaning */
unsigned ldmHashLog;
unsigned ldmHashLog;
unsigned ldmBucketSizeLog;
unsigned ldmHashEveryLog;
} BMK_advancedParams_t;
@ -93,67 +95,75 @@ typedef struct {
/* returns default parameters used by nonAdvanced functions */
BMK_advancedParams_t BMK_initAdvancedParams(void);
/* See benchFiles for normal parameter uses and return, see advancedParams_t for adv */
BMK_return_t BMK_benchFilesAdvanced(const char* const * const fileNamesTable, unsigned const nbFiles,
const char* const dictFileName,
int const cLevel, const ZSTD_compressionParameters* const compressionParams,
int displayLevel, const BMK_advancedParams_t* const adv);
/*! BMK_benchFilesAdvanced():
* Same as BMK_benchFiles(),
* with more controls, provided through advancedParams_t structure */
BMK_return_t BMK_benchFilesAdvanced(const char* const * fileNamesTable, unsigned nbFiles,
const char* dictFileName,
int cLevel, const ZSTD_compressionParameters* compressionParams,
int displayLevel, const BMK_advancedParams_t* adv);
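
The advanced entry point is meant to be fed a structure obtained from BMK_initAdvancedParams(); a sketch (the nbSeconds field name is taken from the bench.c defaults in this commit):

#include "bench.h"

BMK_return_t bench_with_longer_budget(const char* const * files, unsigned nbFiles,
                                      const ZSTD_compressionParameters* cParams)
{
    BMK_advancedParams_t adv = BMK_initAdvancedParams();   /* start from the defaults */
    adv.nbSeconds = 10;                                     /* ~10s of measurement instead of the default */
    return BMK_benchFilesAdvanced(files, nbFiles, NULL /* no dictionary */,
                                  3 /* cLevel */, cParams, 2 /* displayLevel */, &adv);
}
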
/* called in cli */
/* Generates a sample with datagen with the compressibility argument*/
/* cLevel - compression level to benchmark, errors if invalid
* compressibility - determines compressibility of sample
* compressionParams - basic compression Parameters
* displayLevel - see benchFiles
* adv - see advanced_Params_t
* return
/*! BMK_syntheticTest() -- called from zstdcli */
/* Generates a sample with datagen, using compressibility argument */
/* cLevel - compression level to benchmark, errors if invalid
* compressibility - determines compressibility of sample
* compressionParams - basic compression Parameters
* displayLevel - see benchFiles
* adv - see advanced_Params_t
* @return:
* .error will give a nonzero error value if an error has occurred
* .result - if .error = 0, .result will return the time taken to compression speed
* (.cSpeed), decompression speed (.dSpeed), and compressed size (.cSize) of the original
* file
* .result - only valid if .error = 0,
* .result will return the compression speed (.cSpeed),
* decompression speed (.dSpeed), and compressed size (.cSize).
*/
BMK_return_t BMK_syntheticTest(int cLevel, double compressibility,
const ZSTD_compressionParameters* compressionParams,
int displayLevel, const BMK_advancedParams_t * const adv);
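
A corresponding sketch for the synthetic test; the 50% compressibility and level 1 are arbitrary choices:

#include "bench.h"

/* benchmark level 1 on generated data that is roughly 50% compressible */
BMK_return_t bench_synthetic_level1(void)
{
    BMK_advancedParams_t const adv = BMK_initAdvancedParams();
    ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, 0, 0);
    return BMK_syntheticTest(1 /* cLevel */, 0.50 /* compressibility */,
                             &cParams, 2 /* displayLevel */, &adv);
}
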
/* basic benchmarking function, called in paramgrill
* applies ZSTD_compress_generic() and ZSTD_decompress_generic() on data in srcBuffer
* with specific compression parameters specified by other arguments using benchFunction
* (cLevel, comprParams + adv in advanced Mode) */
/* srcBuffer - data source, expected to be valid compressed data if in Decode Only Mode
* srcSize - size of data in srcBuffer
* cLevel - compression level
* comprParams - basic compression parameters
* dictBuffer - a dictionary if used, null otherwise
* dictBufferSize - size of dictBuffer, 0 otherwise
* diplayLevel - see BMK_benchFiles
* displayName - name used by display
* return
/** BMK_benchMem() -- core benchmarking function, called in paramgrill
* applies ZSTD_compress_generic() and ZSTD_decompress_generic() on data in srcBuffer
* with specific compression parameters provided by other arguments using benchFunction
* (cLevel, comprParams + adv in advanced Mode) */
/* srcBuffer - data source, expected to be valid compressed data if in Decode Only Mode
* srcSize - size of data in srcBuffer
* fileSizes - srcBuffer is considered cut into 1+ segments, to compress separately.
* note : sum(fileSizes) must be == srcSize. (<== ensure it's properly checked)
* nbFiles - nb of segments
* cLevel - compression level
* comprParams - basic compression parameters
* dictBuffer - a dictionary if used, null otherwise
* dictBufferSize - size of dictBuffer, 0 otherwise
* displayLevel - see BMK_benchFiles
* displayName - name used by display
* @return
* .error will give a nonzero value if an error has occurred
* .result - if .error = 0, will give the same results as benchFiles
* .result - only valid if .error = 0,
* provide the same results as benchFiles()
* but for the data stored in srcBuffer
*/
BMK_return_t BMK_benchMem(const void* srcBuffer, size_t srcSize,
const size_t* fileSizes, unsigned nbFiles,
const int cLevel, const ZSTD_compressionParameters* comprParams,
int cLevel, const ZSTD_compressionParameters* comprParams,
const void* dictBuffer, size_t dictBufferSize,
int displayLevel, const char* displayName);
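
To make the fileSizes note above concrete, a sketch that benchmarks one in-memory buffer as a single segment, so sum(fileSizes) == srcSize holds trivially; level and display settings are placeholders:

#include "bench.h"

BMK_return_t bench_one_buffer(const void* src, size_t srcSize)
{
    size_t const fileSizes[1] = { srcSize };     /* one segment covering the whole buffer */
    ZSTD_compressionParameters const cParams = ZSTD_getCParams(3, srcSize, 0);
    return BMK_benchMem(src, srcSize,
                        fileSizes, 1,            /* nbFiles == 1 segment */
                        3, &cParams,
                        NULL, 0,                 /* no dictionary */
                        2, "in-memory sample");
}
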
/* See benchMem for normal parameter uses and return, see advancedParams_t for adv
/* BMK_benchMemAdvanced() : same as BMK_benchMem()
* with following additional options :
* dstBuffer - destination buffer to write compressed output in, NULL if none provided.
* dstCapacity - capacity of destination buffer, give 0 if dstBuffer = NULL
* adv = see advancedParams_t
*/
BMK_return_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize,
void* dstBuffer, size_t dstCapacity,
void* dstBuffer, size_t dstCapacity,
const size_t* fileSizes, unsigned nbFiles,
const int cLevel, const ZSTD_compressionParameters* comprParams,
int cLevel, const ZSTD_compressionParameters* comprParams,
const void* dictBuffer, size_t dictBufferSize,
int displayLevel, const char* displayName,
const BMK_advancedParams_t* adv);
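
Per the comment above, a caller may pass dstBuffer = NULL with dstCapacity = 0 and let the function allocate the compressed-output buffer itself (bench.c frees an internalDstBuffer for that case); a sketch building on bench_one_buffer() above:

BMK_return_t bench_one_buffer_advanced(const void* src, size_t srcSize,
                                       const BMK_advancedParams_t* adv)
{
    size_t const fileSizes[1] = { srcSize };
    ZSTD_compressionParameters const cParams = ZSTD_getCParams(3, srcSize, 0);
    return BMK_benchMemAdvanced(src, srcSize,
                                NULL, 0,             /* no caller-provided dstBuffer */
                                fileSizes, 1,
                                3, &cParams,
                                NULL, 0,             /* no dictionary */
                                2, "in-memory sample", adv);
}
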
typedef struct {
size_t sumOfReturn; /* sum of return values */
U64 nanoSecPerRun; /* time per iteration */
unsigned long long nanoSecPerRun; /* time per iteration */
} BMK_customResult_t;
ERROR_STRUCT(BMK_customResult_t, BMK_customReturn_t);
@ -165,60 +175,60 @@ typedef size_t (*BMK_initFn_t)(void*);
/* benchFn - (*benchFn)(srcBuffers[i], srcSizes[i], dstBuffers[i], dstCapacities[i], benchPayload)
* is run nbLoops times
* initFn - (*initFn)(initPayload) is run once per benchmark at the beginning. This argument can
* initFn - (*initFn)(initPayload) is run once per benchmark at the beginning. This argument can
* be NULL, in which case nothing is run.
* blockCount - number of blocks (size of srcBuffers, srcSizes, dstBuffers, dstCapacities)
* blockCount - number of blocks. Size of all array parameters : srcBuffers, srcSizes, dstBuffers, dstCapacities, blockResults
* srcBuffers - an array of buffers to be operated on by benchFn
* srcSizes - an array of the sizes of above buffers
* dstBuffers - an array of buffers to be written into by benchFn
* dstCapacities - an array of the capacities of above buffers
* blockResults - the return value of benchFn called on each block;
*                assumed array of size blockCount, will have the compressed size of each block written to it
* nbLoops - defines number of times benchFn is run.
* return
* @return:
* .error will give a nonzero value if ZSTD_isError() is nonzero for any of the return
* values of the calls to initFn and benchFn, or if benchFunction errors internally
* .result - if .error = 0, then .result will contain the sum of all return values of
* benchFn on the first iteration through all of the blocks (.sumOfReturn) and also
* the time per run of benchFn (.nanoSecPerRun). For the former, this
* is generally intended to be used on functions which return the # of bytes written
* into dstBuffer, hence this value will be the total amount of bytes written to
* dstBuffer.
* .result - if .error = 0, then .result will contain
* the sum of all return values of benchFn on the first iteration through all of the blocks (.sumOfReturn)
* and also the time per run of benchFn (.nanoSecPerRun).
* For the former, this is generally intended to be used on functions which return the # of bytes written into dstBuffer,
* hence this value will be the total amount of bytes written into dstBuffer.
*/
BMK_customReturn_t BMK_benchFunction(BMK_benchFn_t benchFn, void* benchPayload,
BMK_initFn_t initFn, void* initPayload,
size_t blockCount,
const void* const * const srcBuffers, const size_t* srcSizes,
void * const * const dstBuffers, const size_t* dstCapacities, size_t* blockResults,
const void *const * srcBuffers, const size_t* srcSizes,
void *const * dstBuffers, const size_t* dstCapacities, size_t* blockResults,
unsigned nbLoops);
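
A sketch of plugging a custom callback into BMK_benchFunction(); the callback signature is inferred from local_defaultCompress() in the bench.c hunk above, and the memcpy "codec" is purely illustrative:

#include <string.h>
#include "bench.h"

/* trivial benchFn : copy the block and report how many bytes were written */
static size_t copyBlock(const void* src, size_t srcSize,
                        void* dst, size_t dstCapacity, void* payload)
{
    size_t const n = (srcSize < dstCapacity) ? srcSize : dstCapacity;
    (void)payload;                       /* no per-call context needed */
    memcpy(dst, src, n);
    return n;                            /* summed into .sumOfReturn */
}

/* run copyBlock 100 times over a single block and report nanoseconds per run */
BMK_customReturn_t bench_copy(const void* src, size_t srcSize,
                              void* dst, size_t dstCapacity)
{
    const void* srcs[1] = { src };    size_t srcSizes[1] = { srcSize };
    void* dsts[1]       = { dst };    size_t dstCaps[1]  = { dstCapacity };
    return BMK_benchFunction(copyBlock, NULL,   /* benchFn and its payload */
                             NULL, NULL,        /* no initFn needed */
                             1, srcs, srcSizes, dsts, dstCaps,
                             NULL /* per-block results not collected */,
                             100 /* nbLoops */);
}
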
/* state information needed to advance computation for benchFunctionTimed */
typedef struct BMK_timeState_t BMK_timedFnState_t;
/* initializes timeState object with desired number of seconds */
BMK_timedFnState_t* BMK_createTimeState(unsigned nbSeconds);
BMK_timedFnState_t* BMK_createTimedFnState(unsigned nbSeconds);
/* resets existing timeState object */
void BMK_resetTimeState(BMK_timedFnState_t*, unsigned nbSeconds);
void BMK_resetTimedFnState(BMK_timedFnState_t*, unsigned nbSeconds);
/* deletes timeState object */
void BMK_freeTimeState(BMK_timedFnState_t* state);
void BMK_freeTimedFnState(BMK_timedFnState_t* state);
typedef struct {
BMK_customReturn_t result;
int completed;
} BMK_customTimedReturn_t;
/*
* Benchmarks custom functions like BMK_benchFunction(), but runs for nbSeconds seconds rather than a fixed number of loops
* arguments mostly the same other than BMK_benchFunction()
* Usage - benchFunctionTimed will return in approximately one second. Keep calling BMK_benchFunctionTimed() until the return's completed field = 1.
* to continue updating intermediate result. Intermediate return values are returned by the function.
/* BMK_benchFunctionTimed() :
* Same as BMK_benchFunction(), but runs for nbSeconds seconds rather than a fixed number of loops.
* Arguments are mostly the same as those of BMK_benchFunction()
* Usage - benchFunctionTimed will return in approximately one second.
* Keep calling BMK_benchFunctionTimed() until @return.completed == 1,
* to continue updating intermediate result.
* Intermediate return values are returned by the function.
*/
BMK_customTimedReturn_t BMK_benchFunctionTimed(BMK_timedFnState_t* cont,
BMK_benchFn_t benchFn, void* benchPayload,
BMK_initFn_t initFn, void* initPayload,
size_t blockCount,
const void* const * const srcBlockBuffers, const size_t* srcBlockSizes,
void* const * const dstBlockBuffers, const size_t* dstBlockCapacities, size_t* blockResults);
const void *const * srcBlockBuffers, const size_t* srcBlockSizes,
void *const * dstBlockBuffers, const size_t* dstBlockCapacities, size_t* blockResults);
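
Putting the timed variant together with the renamed state functions, the polling pattern described above looks roughly like this (copyBlock is the illustrative callback from the previous sketch):

#include <string.h>   /* memset */
#include "bench.h"

BMK_customReturn_t bench_copy_timed(const void* src, size_t srcSize,
                                    void* dst, size_t dstCapacity)
{
    const void* srcs[1] = { src };    size_t srcSizes[1] = { srcSize };
    void* dsts[1]       = { dst };    size_t dstCaps[1]  = { dstCapacity };
    BMK_customTimedReturn_t r;
    BMK_timedFnState_t* const ts = BMK_createTimedFnState(3 /* seconds of measurement */);
    memset(&r, 0, sizeof(r));
    r.result.error = 1;                       /* reported if the state allocation fails */
    if (ts == NULL) return r.result;
    do {
        r = BMK_benchFunctionTimed(ts, copyBlock, NULL, NULL, NULL,
                                   1, srcs, srcSizes, dsts, dstCaps, NULL);
        /* intermediate r.result values could be displayed here */
    } while (!r.completed && !r.result.error);
    BMK_freeTimedFnState(ts);
    return r.result;    /* once completed, .result.nanoSecPerRun holds the retained per-run time */
}
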
#endif /* BENCH_H_121279284357 */


@ -68,7 +68,7 @@ static const int g_maxNbVariations = 64;
#define FADT_MIN 0
#define FADT_MAX ((U32)-1)
#define ZSTD_TARGETLENGTH_MIN 0
#define ZSTD_TARGETLENGTH_MIN 0
#define ZSTD_TARGETLENGTH_MAX 999
#define WLOG_RANGE (ZSTD_WINDOWLOG_MAX - ZSTD_WINDOWLOG_MIN + 1)
@ -115,27 +115,27 @@ typedef struct {
} paramValues_t;
/* minimum value of parameters */
static const U32 mintable[NUM_PARAMS] =
static const U32 mintable[NUM_PARAMS] =
{ ZSTD_WINDOWLOG_MIN, ZSTD_CHAINLOG_MIN, ZSTD_HASHLOG_MIN, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLENGTH_MIN, ZSTD_TARGETLENGTH_MIN, ZSTD_fast, FADT_MIN };
/* maximum value of parameters */
static const U32 maxtable[NUM_PARAMS] =
static const U32 maxtable[NUM_PARAMS] =
{ ZSTD_WINDOWLOG_MAX, ZSTD_CHAINLOG_MAX, ZSTD_HASHLOG_MAX, ZSTD_SEARCHLOG_MAX, ZSTD_SEARCHLENGTH_MAX, ZSTD_TARGETLENGTH_MAX, ZSTD_btultra, FADT_MAX };
/* # of values parameters can take on */
static const U32 rangetable[NUM_PARAMS] =
static const U32 rangetable[NUM_PARAMS] =
{ WLOG_RANGE, CLOG_RANGE, HLOG_RANGE, SLOG_RANGE, SLEN_RANGE, TLEN_RANGE, STRT_RANGE, FADT_RANGE };
/* ZSTD_CCtx_setParameter() index to set */
static const ZSTD_cParameter cctxSetParamTable[NUM_PARAMS] =
static const ZSTD_cParameter cctxSetParamTable[NUM_PARAMS] =
{ ZSTD_p_windowLog, ZSTD_p_chainLog, ZSTD_p_hashLog, ZSTD_p_searchLog, ZSTD_p_minMatch, ZSTD_p_targetLength, ZSTD_p_compressionStrategy, ZSTD_p_forceAttachDict };
/* names of parameters */
static const char* g_paramNames[NUM_PARAMS] =
static const char* g_paramNames[NUM_PARAMS] =
{ "windowLog", "chainLog", "hashLog","searchLog", "searchLength", "targetLength", "strategy", "forceAttachDict" };
/* shortened names of parameters */
static const char* g_shortParamNames[NUM_PARAMS] =
static const char* g_shortParamNames[NUM_PARAMS] =
{ "wlog", "clog", "hlog","slog", "slen", "tlen", "strt", "fadt" };
/* maps value from { 0 to rangetable[param] - 1 } to valid paramvalues */
@ -178,7 +178,7 @@ static int invRangeMap(varInds_t param, U32 value) {
hi = mid;
}
}
return lo;
return lo;
}
case fadt_ind:
return (int)value + 1;
@ -201,11 +201,11 @@ static void displayParamVal(FILE* f, varInds_t param, U32 value, int width) {
switch(param) {
case fadt_ind: if(width) { fprintf(f, "%*d", width, (int)value); } else { fprintf(f, "%d", (int)value); } break;
case strt_ind: if(width) { fprintf(f, "%*s", width, g_stratName[value]); } else { fprintf(f, "%s", g_stratName[value]); } break;
case wlog_ind:
case clog_ind:
case hlog_ind:
case slog_ind:
case slen_ind:
case wlog_ind:
case clog_ind:
case hlog_ind:
case slog_ind:
case slen_ind:
case tlen_ind: if(width) { fprintf(f, "%*u", width, value); } else { fprintf(f, "%u", value); } break;
case NUM_PARAMS:
DISPLAY("Error, not a valid param\n "); break;
@ -265,7 +265,7 @@ typedef struct {
typedef struct {
U32 cSpeed; /* bytes / sec */
U32 dSpeed;
U32 cMem; /* bytes */
U32 cMem; /* bytes */
} constraint_t;
typedef struct winner_ll_node winner_ll_node;
@ -285,7 +285,7 @@ static winner_ll_node* g_winners; /* linked list sorted ascending by cSize & cSp
*/
/*-*******************************************************
* General Util Functions
* General Util Functions
*********************************************************/
/* nullified useless params, to ensure count stats */
@ -502,10 +502,10 @@ static int feasible(const BMK_result_t results, const constraint_t target) {
}
/* hill climbing value for part 1 */
/* Scoring here is a linear reward for all set constraints normalized between 0 to 1
* (with 0 at 0 and 1 being fully fulfilling the constraint), summed with a logarithmic
/* Scoring here is a linear reward for all set constraints normalized between 0 to 1
* (with 0 at 0 and 1 being fully fulfilling the constraint), summed with a logarithmic
* bonus to exceeding the constraint value. We also give a linear reward for the compression ratio.
* The constant factors are experimental.
* The constant factors are experimental.
*/
static double resultScore(const BMK_result_t res, const size_t srcSize, const constraint_t target) {
double cs = 0., ds = 0., rt, cm = 0.;
@ -516,7 +516,7 @@ static double resultScore(const BMK_result_t res, const size_t srcSize, const co
if(target.cMem != (U32)-1) { cm = (double)target.cMem / res.cMem; }
rt = ((double)srcSize / res.cSize);
ret = (MIN(1, cs) + MIN(1, ds) + MIN(1, cm))*r1 + rt * rtr +
ret = (MIN(1, cs) + MIN(1, ds) + MIN(1, cm))*r1 + rt * rtr +
(MAX(0, log(cs))+ MAX(0, log(ds))+ MAX(0, log(cm))) * r2;
return ret;
@ -532,7 +532,7 @@ static double resultDistLvl(const BMK_result_t result1, const BMK_result_t lvlRe
return normalizedRatioGain1 * g_ratioMultiplier + normalizedCSpeedGain1;
}
/* return true if r2 strictly better than r1 */
/* return true if r2 strictly better than r1 */
static int compareResultLT(const BMK_result_t result1, const BMK_result_t result2, const constraint_t target, size_t srcSize) {
if(feasible(result1, target) && feasible(result2, target)) {
if(g_optmode) {
@ -547,7 +547,7 @@ static int compareResultLT(const BMK_result_t result1, const BMK_result_t result
static constraint_t relaxTarget(constraint_t target) {
target.cMem = (U32)-1;
target.cSpeed *= ((double)g_strictness) / 100;
target.cSpeed *= ((double)g_strictness) / 100;
target.dSpeed *= ((double)g_strictness) / 100;
return target;
}
@ -598,7 +598,7 @@ static void optimizerAdjustInput(paramValues_t* pc, const size_t maxBlockSize) {
DISPLAY("Warning: hashlog too much larger than windowLog size, adjusted to %u\n", pc->vals[hlog_ind]);
}
}
if(pc->vals[slog_ind] != PARAM_UNSET && pc->vals[clog_ind] != PARAM_UNSET) {
if(pc->vals[slog_ind] > pc->vals[clog_ind]) {
pc->vals[clog_ind] = pc->vals[slog_ind];
@ -608,17 +608,17 @@ static void optimizerAdjustInput(paramValues_t* pc, const size_t maxBlockSize) {
}
static int redundantParams(const paramValues_t paramValues, const constraint_t target, const size_t maxBlockSize) {
return
return
(ZSTD_estimateCStreamSize_usingCParams(pvalsToCParams(paramValues)) > (size_t)target.cMem) /* Uses too much memory */
|| ((1ULL << (paramValues.vals[wlog_ind] - 1)) >= maxBlockSize && paramValues.vals[wlog_ind] != mintable[wlog_ind]) /* wlog too much bigger than src size */
|| (paramValues.vals[clog_ind] > (paramValues.vals[wlog_ind] + (paramValues.vals[strt_ind] > ZSTD_btlazy2))) /* chainLog larger than windowLog*/
|| (paramValues.vals[slog_ind] > paramValues.vals[clog_ind]) /* searchLog larger than chainLog */
|| (paramValues.vals[hlog_ind] > paramValues.vals[wlog_ind] + 1); /* hashLog larger than windowLog + 1 */
}
/*-************************************
* Display Functions
* Display Functions
**************************************/
static void BMK_translateAdvancedParams(FILE* f, const paramValues_t params) {
@ -629,7 +629,7 @@ static void BMK_translateAdvancedParams(FILE* f, const paramValues_t params) {
if(g_silenceParams[v]) { continue; }
if(!first) { fprintf(f, ","); }
fprintf(f,"%s=", g_paramNames[v]);
if(v == strt_ind) { fprintf(f,"%u", params.vals[v]); }
else { displayParamVal(f, v, params.vals[v], 0); }
first = 0;
@ -668,10 +668,10 @@ static void BMK_printWinner(FILE* f, const int cLevel, const BMK_result_t result
snprintf(lvlstr, 15, " Level %2d ", cLevel);
}
if(TIMED) {
if(TIMED) {
const U64 time = UTIL_clockSpanNano(g_time);
const U64 minutes = time / (60ULL * TIMELOOP_NANOSEC);
fprintf(f, "%1lu:%2lu:%05.2f - ", (unsigned long) minutes / 60,(unsigned long) minutes % 60, (double)(time - minutes * TIMELOOP_NANOSEC * 60ULL)/TIMELOOP_NANOSEC);
fprintf(f, "%1lu:%2lu:%05.2f - ", (unsigned long) minutes / 60,(unsigned long) minutes % 60, (double)(time - minutes * TIMELOOP_NANOSEC * 60ULL)/TIMELOOP_NANOSEC);
}
fprintf(f, "/* %s */ ", lvlstr);
@ -735,7 +735,7 @@ static int insertWinner(const winnerInfo_t w, const constraint_t targetConstrain
tmp = cur_node->next;
cur_node->next = cur_node->next->next;
free(tmp);
break;
break;
}
case SIZE_RESULT:
{
@ -754,7 +754,7 @@ static int insertWinner(const winnerInfo_t w, const constraint_t targetConstrain
cur_node->next = newnode;
return 0;
}
}
}
}
@ -792,9 +792,9 @@ static int insertWinner(const winnerInfo_t w, const constraint_t targetConstrain
cur_node->next = newnode;
return 0;
}
default:
default:
return 1;
}
}
}
static void BMK_printWinnerOpt(FILE* f, const U32 cLevel, const BMK_result_t result, const paramValues_t params, const constraint_t targetConstraints, const size_t srcSize)
@ -814,7 +814,7 @@ static void BMK_printWinnerOpt(FILE* f, const U32 cLevel, const BMK_result_t res
g_winner.result = result;
g_winner.params = params;
}
}
}
if(g_optmode && g_optimizer && (DEBUG || g_displayLevel == 3)) {
winnerInfo_t w;
@ -824,8 +824,8 @@ static void BMK_printWinnerOpt(FILE* f, const U32 cLevel, const BMK_result_t res
insertWinner(w, targetConstraints);
if(!DEBUG) { fprintf(f, "\033c"); }
fprintf(f, "\n");
fprintf(f, "\n");
/* the table */
fprintf(f, "================================\n");
for(n = g_winners; n != NULL; n = n->next) {
@ -909,8 +909,8 @@ static size_t local_initDCtx(void* payload) {
/* additional argument is just the context */
static size_t local_defaultCompress(
const void* srcBuffer, size_t srcSize,
void* dstBuffer, size_t dstSize,
const void* srcBuffer, size_t srcSize,
void* dstBuffer, size_t dstSize,
void* addArgs) {
size_t moreToFlush = 1;
ZSTD_CCtx* ctx = (ZSTD_CCtx*)addArgs;
@ -937,8 +937,8 @@ static size_t local_defaultCompress(
/* additional argument is just the context */
static size_t local_defaultDecompress(
const void* srcBuffer, size_t srcSize,
void* dstBuffer, size_t dstSize,
const void* srcBuffer, size_t srcSize,
void* dstBuffer, size_t dstSize,
void* addArgs) {
size_t moreToFlush = 1;
ZSTD_DCtx* dctx = (ZSTD_DCtx*)addArgs;
@ -965,7 +965,7 @@ static size_t local_defaultDecompress(
}
/*-************************************
* Data Initialization Functions
* Data Initialization Functions
**************************************/
typedef struct {
@ -1041,7 +1041,7 @@ static int createBuffersFromMemory(buffers_t* buff, void * srcBuffer, const size
buff->dstSizes = (size_t*)malloc(maxNbBlocks * sizeof(size_t));
buff->resPtrs = (void**)calloc(maxNbBlocks, sizeof(void*));
buff->resSizes = (size_t*)malloc(maxNbBlocks * sizeof(size_t));
buff->resSizes = (size_t*)malloc(maxNbBlocks * sizeof(size_t));
if(!buff->srcPtrs || !buff->srcSizes || !buff->dstPtrs || !buff->dstCapacities || !buff->dstSizes || !buff->resPtrs || !buff->resSizes) {
DISPLAY("alloc error\n");
@ -1090,18 +1090,18 @@ static int createBuffersFromMemory(buffers_t* buff, void * srcBuffer, const size
buff->nbBlocks = blockNb;
return 0;
}
}
/* allocates buffer's arguments. returns success / failure */
static int createBuffers(buffers_t* buff, const char* const * const fileNamesTable,
static int createBuffers(buffers_t* buff, const char* const * const fileNamesTable,
size_t nbFiles) {
size_t pos = 0;
size_t n;
size_t totalSizeToLoad = UTIL_getTotalFileSize(fileNamesTable, (U32)nbFiles);
size_t benchedSize = MIN(BMK_findMaxMem(totalSizeToLoad * 3) / 3, totalSizeToLoad);
size_t* fileSizes = calloc(sizeof(size_t), nbFiles);
void* srcBuffer = NULL;
int ret = 0;
void* srcBuffer = NULL;
int ret = 0;
if(!totalSizeToLoad || !benchedSize) {
ret = 1;
@ -1139,7 +1139,7 @@ static int createBuffers(buffers_t* buff, const char* const * const fileNamesTab
if (fileSize + pos > benchedSize) fileSize = benchedSize - pos, nbFiles=n; /* buffer too small - stop after this file */
{
char* buffer = (char*)(srcBuffer);
char* buffer = (char*)(srcBuffer);
size_t const readSize = fread((buffer)+pos, 1, (size_t)fileSize, f);
fclose(f);
if (readSize != (size_t)fileSize) {
@ -1181,14 +1181,14 @@ static int createContexts(contexts_t* ctx, const char* dictFileName) {
ctx->dictBuffer = malloc(ctx->dictSize);
f = fopen(dictFileName, "rb");
if(!f) {
DISPLAY("unable to open file\n");
freeContexts(*ctx);   /* note : no fclose() here, fopen() failed so f is NULL */
return 1;
}
if(ctx->dictSize > 64 MB || !(ctx->dictBuffer)) {
DISPLAY("dictionary too large\n");
fclose(f);
@ -1207,7 +1207,7 @@ static int createContexts(contexts_t* ctx, const char* dictFileName) {
}
/*-************************************
* Optimizer Memoization Functions
* Optimizer Memoization Functions
**************************************/
/* return: new length */
@ -1218,7 +1218,7 @@ static size_t sanitizeVarArray(varInds_t* varNew, const size_t varLength, const
for(i = 0; i < varLength; i++) {
if( !((varArray[i] == clog_ind && strat == ZSTD_fast)
|| (varArray[i] == slog_ind && strat == ZSTD_fast)
|| (varArray[i] == slog_ind && strat == ZSTD_dfast)
|| (varArray[i] == slog_ind && strat == ZSTD_dfast)
|| (varArray[i] == tlen_ind && strat != ZSTD_btopt && strat != ZSTD_btultra && strat != ZSTD_fast))) {
varNew[j] = varArray[i];
j++;
@ -1305,7 +1305,7 @@ static void freeMemoTableArray(memoTable_t* const mtAll) {
static memoTable_t* createMemoTableArray(const paramValues_t p, const varInds_t* const varyParams, const size_t varyLen, const U32 memoTableLog) {
memoTable_t* mtAll = (memoTable_t*)calloc(sizeof(memoTable_t),(ZSTD_btultra + 1));
ZSTD_strategy i, stratMin = ZSTD_fast, stratMax = ZSTD_btultra;
if(mtAll == NULL) {
return NULL;
}
@ -1324,7 +1324,7 @@ static memoTable_t* createMemoTableArray(const paramValues_t p, const varInds_t*
return mtAll;
}
if(p.vals[strt_ind] != PARAM_UNSET) {
stratMin = p.vals[strt_ind];
stratMax = p.vals[strt_ind];
@ -1348,7 +1348,7 @@ static memoTable_t* createMemoTableArray(const paramValues_t p, const varInds_t*
return NULL;
}
}
return mtAll;
}
@ -1363,7 +1363,7 @@ static void randomConstrainedParams(paramValues_t* pc, const memoTable_t* memoTa
int i;
for(i = 0; i < NUM_PARAMS; i++) {
varInds_t v = mt.varArray[i];
if(v == strt_ind) continue;
if(v == strt_ind) continue;
pc->vals[v] = rangeMap(v, FUZ_rand(&g_rand) % rangetable[v]);
}
@ -1382,7 +1382,7 @@ static void randomConstrainedParams(paramValues_t* pc, const memoTable_t* memoTa
/* if in decodeOnly, then srcPtr's will be compressed blocks, and uncompressedBlocks will be written to dstPtrs */
/* dictionary nullable, nothing else though. */
static BMK_return_t BMK_benchMemInvertible(const buffers_t buf, const contexts_t ctx,
static BMK_return_t BMK_benchMemInvertible(const buffers_t buf, const contexts_t ctx,
const int cLevel, const paramValues_t* comprParams,
const BMK_mode_t mode, const BMK_loopMode_t loopMode, const unsigned nbSeconds) {
@ -1427,9 +1427,9 @@ static BMK_return_t BMK_benchMemInvertible(const buffers_t buf, const contexts_t
if(loopMode == BMK_timeMode) {
BMK_customTimedReturn_t intermediateResultCompress;
BMK_customTimedReturn_t intermediateResultDecompress;
BMK_timedFnState_t* timeStateCompress = BMK_createTimeState(nbSeconds);
BMK_timedFnState_t* timeStateDecompress = BMK_createTimeState(nbSeconds);
BMK_customTimedReturn_t intermediateResultDecompress;
BMK_timedFnState_t* timeStateCompress = BMK_createTimedFnState(nbSeconds);
BMK_timedFnState_t* timeStateDecompress = BMK_createTimedFnState(nbSeconds);
if(mode == BMK_compressOnly) {
intermediateResultCompress.completed = 0;
intermediateResultDecompress.completed = 1;
@ -1447,8 +1447,8 @@ static BMK_return_t BMK_benchMemInvertible(const buffers_t buf, const contexts_t
if(intermediateResultCompress.result.error) {
results.error = intermediateResultCompress.result.error;
BMK_freeTimeState(timeStateCompress);
BMK_freeTimeState(timeStateDecompress);
BMK_freeTimedFnState(timeStateCompress);
BMK_freeTimedFnState(timeStateDecompress);
return results;
}
results.result.cSpeed = (srcSize * TIMELOOP_NANOSEC) / intermediateResultCompress.result.result.nanoSecPerRun;
@ -1461,21 +1461,21 @@ static BMK_return_t BMK_benchMemInvertible(const buffers_t buf, const contexts_t
if(intermediateResultDecompress.result.error) {
results.error = intermediateResultDecompress.result.error;
BMK_freeTimeState(timeStateCompress);
BMK_freeTimeState(timeStateDecompress);
BMK_freeTimedFnState(timeStateCompress);
BMK_freeTimedFnState(timeStateDecompress);
return results;
}
results.result.dSpeed = (srcSize * TIMELOOP_NANOSEC) / intermediateResultDecompress.result.result.nanoSecPerRun;
}
BMK_freeTimeState(timeStateCompress);
BMK_freeTimeState(timeStateDecompress);
BMK_freeTimedFnState(timeStateCompress);
BMK_freeTimedFnState(timeStateDecompress);
} else { /* iterMode; */
if(mode != BMK_decodeOnly) {
BMK_customReturn_t compressionResults = BMK_benchFunction(&local_defaultCompress, (void*)cctx, &local_initCCtx, (void*)&cctxprep,
nbBlocks, srcPtrs, srcSizes, dstPtrs, dstCapacities, dstSizes, nbSeconds);
nbBlocks, srcPtrs, srcSizes, dstPtrs, dstCapacities, dstSizes, nbSeconds);
if(compressionResults.error) {
results.error = compressionResults.error;
return results;
@ -1537,7 +1537,7 @@ static int BMK_benchParam(BMK_result_t* resultPtr,
}
/* Benchmarking which stops when we are sufficiently sure the solution is infeasible / worse than the winner */
#define VARIANCE 1.2
#define VARIANCE 1.2
static int allBench(BMK_result_t* resultPtr,
const buffers_t buf, const contexts_t ctx,
const paramValues_t cParams,
@ -1557,13 +1557,13 @@ static int allBench(BMK_result_t* resultPtr,
/* calculate uncertainty in compression / decompression runs */
if(benchres.cSpeed) {
loopDurationC = ((buf.srcSize * TIMELOOP_NANOSEC) / benchres.cSpeed);
uncertaintyConstantC = ((loopDurationC + (double)(2 * g_clockGranularity))/loopDurationC);
loopDurationC = ((buf.srcSize * TIMELOOP_NANOSEC) / benchres.cSpeed);
uncertaintyConstantC = ((loopDurationC + (double)(2 * g_clockGranularity))/loopDurationC);
}
if(benchres.dSpeed) {
loopDurationD = ((buf.srcSize * TIMELOOP_NANOSEC) / benchres.dSpeed);
uncertaintyConstantD = ((loopDurationD + (double)(2 * g_clockGranularity))/loopDurationD);
loopDurationD = ((buf.srcSize * TIMELOOP_NANOSEC) / benchres.dSpeed);
uncertaintyConstantD = ((loopDurationD + (double)(2 * g_clockGranularity))/loopDurationD);
}
/* anything with worse ratio in feas is definitely worse, discard */
@ -1596,18 +1596,18 @@ static int allBench(BMK_result_t* resultPtr,
/* compare by resultScore when in infeas */
/* compare by compareResultLT when in feas */
if((!feas && (resultScore(benchres, buf.srcSize, target) > resultScore(*winnerResult, buf.srcSize, target))) ||
(feas && (compareResultLT(*winnerResult, benchres, target, buf.srcSize))) ) {
return BETTER_RESULT;
} else {
return WORSE_RESULT;
if((!feas && (resultScore(benchres, buf.srcSize, target) > resultScore(*winnerResult, buf.srcSize, target))) ||
(feas && (compareResultLT(*winnerResult, benchres, target, buf.srcSize))) ) {
return BETTER_RESULT;
} else {
return WORSE_RESULT;
}
}
#define INFEASIBLE_THRESHOLD 200
/* Memoized benchmarking, won't benchmark anything which has already been benchmarked before. */
static int benchMemo(BMK_result_t* resultPtr,
const buffers_t buf, const contexts_t ctx,
const buffers_t buf, const contexts_t ctx,
const paramValues_t cParams,
const constraint_t target,
BMK_result_t* winnerResult, memoTable_t* const memoTableArray,
@ -1615,7 +1615,7 @@ static int benchMemo(BMK_result_t* resultPtr,
static int bmcount = 0;
int res;
if(memoTableGet(memoTableArray, cParams) >= INFEASIBLE_THRESHOLD || redundantParams(cParams, target, buf.maxBlockSize)) { return WORSE_RESULT; }
if(memoTableGet(memoTableArray, cParams) >= INFEASIBLE_THRESHOLD || redundantParams(cParams, target, buf.maxBlockSize)) { return WORSE_RESULT; }
res = allBench(resultPtr, buf, ctx, cParams, target, winnerResult, feas);
@ -1659,7 +1659,7 @@ static void BMK_init_level_constraints(int bytePerSec_level1)
} }
}
static int BMK_seed(winnerInfo_t* winners, const paramValues_t params,
static int BMK_seed(winnerInfo_t* winners, const paramValues_t params,
const buffers_t buf, const contexts_t ctx)
{
BMK_result_t testResult;
@ -1781,7 +1781,7 @@ static void playAround(FILE* f, winnerInfo_t* winners,
if (nbVariations++ > g_maxNbVariations) break;
do { for(i = 0; i < 4; i++) { paramVaryOnce(FUZ_rand(&g_rand) % (strt_ind + 1), ((FUZ_rand(&g_rand) & 1) << 1) - 1, &p); } }
do { for(i = 0; i < 4; i++) { paramVaryOnce(FUZ_rand(&g_rand) % (strt_ind + 1), ((FUZ_rand(&g_rand) & 1) << 1) - 1, &p); } }
while(!paramValid(p));
/* exclude faster if already played params */
@ -1815,7 +1815,7 @@ static void BMK_selectRandomStart(
}
}
static void BMK_benchFullTable(const buffers_t buf, const contexts_t ctx)
static void BMK_benchFullTable(const buffers_t buf, const contexts_t ctx)
{
paramValues_t params;
winnerInfo_t winners[NB_LEVELS_TRACKED+1];
@ -1888,7 +1888,7 @@ static int benchSample(double compressibility, int cLevel)
buffers_t buf;
contexts_t ctx;
if(srcBuffer == NULL) {
DISPLAY("Out of Memory\n");
return 2;
@ -1967,12 +1967,12 @@ int benchFiles(const char** fileNamesTable, int nbFiles, const char* dictFileNam
* Local Optimization Functions
**************************************/
/* One iteration of hill climbing. Specifically, it first tries all
/* One iteration of hill climbing. Specifically, it first tries all
* valid parameter configurations w/ manhattan distance 1 and picks the best one;
* failing that, it progressively tries candidates further and further away (up to #dim + 2)
* if it finds a candidate exceeding winnerInfo, it will repeat. Otherwise, it will stop the
* current stage of hill climbing.
* Each iteration of hill climbing proceeds in 2 'phases'. Phase 1 climbs according to
* if it finds a candidate exceeding winnerInfo, it will repeat. Otherwise, it will stop the
* current stage of hill climbing.
* Each iteration of hill climbing proceeds in 2 'phases'. Phase 1 climbs according to
* the resultScore function, which is effectively a linear increase in reward until it reaches
* the constraint-satisfying value, at which point any excess results in only logarithmic reward.
* This aims to find some constraint-satisfying point.
@ -1980,14 +1980,14 @@ int benchFiles(const char** fileNamesTable, int nbFiles, const char* dictFileNam
* all feasible solutions valued over all infeasible solutions.
*/
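
The distance-1 step of that description can be illustrated with a self-contained toy: two integer parameters and a made-up score standing in for resultScore(). paramgrill additionally widens the search distance and swaps scoring between the two phases:

#include <stdio.h>

/* toy objective, maximized at (3,5) */
static int toyScore(int x, int y) { return -(x-3)*(x-3) - (y-5)*(y-5); }

/* distance-1 hill climbing over two integer parameters, mirroring the phase-1 idea above */
static void toyClimb(int x, int y)
{
    static const int dx[4] = { 1, -1, 0, 0 };
    static const int dy[4] = { 0, 0, 1, -1 };
    int improved = 1;
    while (improved) {
        int best = toyScore(x, y), bx = x, by = y, d;
        improved = 0;
        for (d = 0; d < 4; d++) {             /* every Manhattan-distance-1 neighbor */
            int const s = toyScore(x + dx[d], y + dy[d]);
            if (s > best) { best = s; bx = x + dx[d]; by = y + dy[d]; improved = 1; }
        }
        x = bx; y = by;                       /* recenter on the best neighbor found */
    }
    printf("local optimum reached at (%d,%d)\n", x, y);
}
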
/* sanitize all params here.
/* sanitize all params here.
* all generation after random should be sanitized. (maybe sanitize random)
*/
static winnerInfo_t climbOnce(const constraint_t target,
memoTable_t* mtAll,
static winnerInfo_t climbOnce(const constraint_t target,
memoTable_t* mtAll,
const buffers_t buf, const contexts_t ctx,
const paramValues_t init) {
/*
/*
* cparam - currently considered 'center'
* candidate - params to benchmark/results
* winner - best option found so far.
@ -2017,8 +2017,8 @@ static winnerInfo_t climbOnce(const constraint_t target,
for(offset = -1; offset <= 1; offset += 2) {
CHECKTIME(winnerInfo);
candidateInfo.params = cparam;
paramVaryOnce(mtAll[cparam.vals[strt_ind]].varArray[i], offset, &candidateInfo.params);
paramVaryOnce(mtAll[cparam.vals[strt_ind]].varArray[i], offset, &candidateInfo.params);
if(paramValid(candidateInfo.params)) {
int res;
res = benchMemo(&candidateInfo.result, buf, ctx,
@ -2047,7 +2047,7 @@ static winnerInfo_t climbOnce(const constraint_t target,
/* param error checking already done here */
paramVariation(&candidateInfo.params, mtAll, (U32)dist);
res = benchMemo(&candidateInfo.result, buf, ctx,
res = benchMemo(&candidateInfo.result, buf, ctx,
sanitizeParams(candidateInfo.params), target, &winnerInfo.result, mtAll, feas);
DEBUGOUTPUT("Res: %d\n", res);
if(res == BETTER_RESULT) { /* synonymous with better in this case*/
@ -2065,8 +2065,8 @@ static winnerInfo_t climbOnce(const constraint_t target,
}
}
if(!better) { /* infeas -> feas -> stop */
if(feas) { return winnerInfo; }
if(!better) { /* infeas -> feas -> stop */
if(feas) { return winnerInfo; }
feas = 1;
better = 1;
@ -2084,17 +2084,17 @@ static winnerInfo_t climbOnce(const constraint_t target,
/* flexible parameters: iterations of failed climbing (or if we do non-random, maybe this is when everything is close to visited)
weight more on visit for bad results, less on good results/more on later results / ones with more failures.
allocate memoTable here.
allocate memoTable here.
*/
static winnerInfo_t optimizeFixedStrategy(
const buffers_t buf, const contexts_t ctx,
const buffers_t buf, const contexts_t ctx,
const constraint_t target, paramValues_t paramTarget,
const ZSTD_strategy strat,
const ZSTD_strategy strat,
memoTable_t* memoTableArray, const int tries) {
int i = 0;
paramValues_t init;
winnerInfo_t winnerInfo, candidateInfo;
winnerInfo_t winnerInfo, candidateInfo;
winnerInfo = initWinnerInfo(emptyParams());
/* so climb is given the right fixed strategy */
paramTarget.vals[strt_ind] = strat;
@ -2104,7 +2104,7 @@ static winnerInfo_t optimizeFixedStrategy(
init = paramTarget;
for(i = 0; i < tries; i++) {
DEBUGOUTPUT("Restart\n");
DEBUGOUTPUT("Restart\n");
do { randomConstrainedParams(&init, memoTableArray, strat); } while(redundantParams(init, target, buf.maxBlockSize));
candidateInfo = climbOnce(target, memoTableArray, buf, ctx, init);
if(compareResultLT(winnerInfo.result, candidateInfo.result, target, buf.srcSize)) {
@ -2153,9 +2153,9 @@ static int nextStrategy(const int currentStrategy, const int bestStrategy) {
/* main fn called when using --optimize */
/* Does strategy selection by benchmarking default compression levels
* then optimizes by strategy, starting with the best one and moving
* then optimizes by strategy, starting with the best one and moving
* progressively further away by number
* args:
* args:
* fileNamesTable - list of files to benchmark
* nbFiles - length of fileNamesTable
* dictFileName - name of dictionary file if one, else NULL
@ -2167,7 +2167,7 @@ static int nextStrategy(const int currentStrategy, const int bestStrategy) {
static int g_maxTries = 5;
#define TRY_DECAY 1
static int optimizeForSize(const char* const * const fileNamesTable, const size_t nbFiles, const char* dictFileName, constraint_t target, paramValues_t paramTarget,
static int optimizeForSize(const char* const * const fileNamesTable, const size_t nbFiles, const char* dictFileName, constraint_t target, paramValues_t paramTarget,
const int cLevelOpt, const int cLevelRun, const U32 memoTableLog)
{
varInds_t varArray [NUM_PARAMS];
@ -2194,7 +2194,7 @@ static int optimizeForSize(const char* const * const fileNamesTable, const size_
if(nbFiles == 1) {
DISPLAYLEVEL(2, "Loading %s... \r", fileNamesTable[0]);
} else {
DISPLAYLEVEL(2, "Loading %lu Files... \r", (unsigned long)nbFiles);
DISPLAYLEVEL(2, "Loading %lu Files... \r", (unsigned long)nbFiles);
}
/* sanitize paramTarget */
@ -2231,14 +2231,14 @@ static int optimizeForSize(const char* const * const fileNamesTable, const size_
ret = 3;
goto _cleanUp;
}
g_lvltarget = winner.result;
g_lvltarget = winner.result;
g_lvltarget.cSpeed *= ((double)g_strictness) / 100;
g_lvltarget.dSpeed *= ((double)g_strictness) / 100;
g_lvltarget.cSize /= ((double)g_strictness) / 100;
target.cSpeed = (U32)g_lvltarget.cSpeed;
target.dSpeed = (U32)g_lvltarget.dSpeed;
target.cSpeed = (U32)g_lvltarget.cSpeed;
target.dSpeed = (U32)g_lvltarget.dSpeed;
BMK_printWinnerOpt(stdout, cLevelOpt, winner.result, winner.params, target, buf.srcSize);
}
@ -2272,7 +2272,7 @@ static int optimizeForSize(const char* const * const fileNamesTable, const size_
DISPLAYLEVEL(2, "\n");
findClockGranularity();
{
{
paramValues_t CParams;
/* find best solution from default params */
@ -2305,13 +2305,13 @@ static int optimizeForSize(const char* const * const fileNamesTable, const size_
DEBUGOUTPUT("Real Opt\n");
/* start 'real' optimization */
{
{
int bestStrategy = (int)winner.params.vals[strt_ind];
if(paramTarget.vals[strt_ind] == PARAM_UNSET) {
int st = bestStrategy;
int tries = g_maxTries;
{
{
/* one iterations of hill climbing with the level-defined parameters. */
winnerInfo_t w1 = climbOnce(target, allMT, buf, ctx, winner.params);
if(compareResultLT(winner.result, w1.result, target, buf.srcSize)) {
@ -2323,7 +2323,7 @@ static int optimizeForSize(const char* const * const fileNamesTable, const size_
while(st && tries > 0) {
winnerInfo_t wc;
DEBUGOUTPUT("StrategySwitch: %s\n", g_stratName[st]);
wc = optimizeFixedStrategy(buf, ctx, target, paramBase, st, allMT, tries);
if(compareResultLT(winner.result, wc.result, target, buf.srcSize)) {
@ -2349,7 +2349,7 @@ static int optimizeForSize(const char* const * const fileNamesTable, const size_
goto _cleanUp;
}
/* end summary */
_displayCleanUp:
_displayCleanUp:
if(g_displayLevel >= 0) { BMK_displayOneResult(stdout, winner, buf.srcSize); }
BMK_translateAdvancedParams(stdout, winner.params);
DISPLAYLEVEL(1, "grillParams size - optimizer completed \n");
@ -2427,7 +2427,7 @@ static double readDoubleFromChar(const char** stringPtr)
}
(*stringPtr)++;
while ((**stringPtr >='0') && (**stringPtr <='9')) {
result += (double)(**stringPtr - '0') / divide, divide *= 10, (*stringPtr)++ ;
result += (double)(**stringPtr - '0') / divide, divide *= 10, (*stringPtr)++ ;
}
return result;
}
@ -2503,7 +2503,7 @@ int main(int argc, const char** argv)
int seperateFiles = 0;
double compressibility = COMPRESSIBILITY_DEFAULT;
U32 memoTableLog = PARAM_UNSET;
constraint_t target = { 0, 0, (U32)-1 };
constraint_t target = { 0, 0, (U32)-1 };
paramValues_t paramTarget = emptyParams();
g_params = emptyParams();
@ -2522,7 +2522,7 @@ int main(int argc, const char** argv)
for ( ; ;) {
if(parse_params(&argument, &paramTarget)) { if(argument[0] == ',') { argument++; continue; } else break; }
PARSE_SUB_ARGS("compressionSpeed=" , "cSpeed=", target.cSpeed);
PARSE_SUB_ARGS("decompressionSpeed=", "dSpeed=", target.dSpeed);
PARSE_SUB_ARGS("decompressionSpeed=", "dSpeed=", target.dSpeed);
PARSE_SUB_ARGS("compressionMemory=" , "cMem=", target.cMem);
PARSE_SUB_ARGS("strict=", "stc=", g_strictness);
PARSE_SUB_ARGS("maxTries=", "tries=", g_maxTries);
@ -2701,7 +2701,7 @@ int main(int argc, const char** argv)
/* load dictionary file (only applicable for optimizer rn) */
case 'D':
if(i == argc - 1) { /* last argument, return error. */
if(i == argc - 1) { /* last argument, return error. */
DISPLAY("Dictionary file expected but not given : %d\n", i);
return 1;
} else {
@ -2749,7 +2749,7 @@ int main(int argc, const char** argv)
} else {
result = benchFiles(argv+filenamesStart, argc-filenamesStart, dictFileName, cLevelRun);
}
}
}
}
if (main_pause) { int unused; printf("press enter...\n"); unused = getchar(); (void)unused; }