Merge pull request #991 from facebook/progressiveMT

Non-blocking compression
Branch: dev
Author: Yann Collet, 2018-01-17 14:35:23 -08:00, committed by GitHub
commit 592ce5a042
9 changed files with 216 additions and 130 deletions

View File

@@ -287,9 +287,8 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned v
         }
         return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

+    case ZSTD_p_nonBlockingMode:
     case ZSTD_p_jobSize:
-        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
     case ZSTD_p_overlapSizeLog:
         return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

@@ -413,11 +412,18 @@ size_t ZSTD_CCtxParam_setParameter(
         return ZSTDMT_CCtxParam_setNbThreads(CCtxParams, value);
 #endif

+    case ZSTD_p_nonBlockingMode :
+#ifndef ZSTD_MULTITHREAD
+        return ERROR(parameter_unsupported);
+#else
+        CCtxParams->nonBlockingMode = (value>0);
+        return CCtxParams->nonBlockingMode;
+#endif

     case ZSTD_p_jobSize :
 #ifndef ZSTD_MULTITHREAD
         return ERROR(parameter_unsupported);
 #else
-        if (CCtxParams->nbThreads <= 1) return ERROR(parameter_unsupported);
         return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_jobSize, value);
 #endif

@@ -425,7 +431,6 @@ size_t ZSTD_CCtxParam_setParameter(
 #ifndef ZSTD_MULTITHREAD
         return ERROR(parameter_unsupported);
 #else
-        if (CCtxParams->nbThreads <= 1) return ERROR(parameter_unsupported);
         return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_overlapSectionLog, value);
 #endif

@@ -2035,7 +2040,8 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
 {
     size_t fhSize = 0;
-    DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u", cctx->stage);
+    DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
+                cctx->stage, (U32)srcSize);
     if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong);   /* missing init (ZSTD_compressBegin) */
     if (frame && (cctx->stage==ZSTDcs_init)) {

@@ -2669,7 +2675,9 @@ ZSTD_CDict* ZSTD_initStaticCDict(void* workspace, size_t workspaceSize,
     return cdict;
 }

-ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict) {
+ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
+{
+    assert(cdict != NULL);
     return cdict->cParams;
 }

@@ -3103,12 +3111,17 @@ size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
                 cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/);
 #ifdef ZSTD_MULTITHREAD
-        if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN)
+        if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) {
             params.nbThreads = 1;   /* do not invoke multi-threading when src size is too small */
-        if (params.nbThreads > 1) {
+            params.nonBlockingMode = 0;
+        }
+        if ((params.nbThreads > 1) | (params.nonBlockingMode == 1)) {
             if (cctx->mtctx == NULL || (params.nbThreads != ZSTDMT_getNbThreads(cctx->mtctx))) {
-                DEBUGLOG(4, "ZSTD_compress_generic: creating new mtctx for nbThreads=%u (previous: %u)",
-                            params.nbThreads, (unsigned)ZSTDMT_getNbThreads(cctx->mtctx));
+                DEBUGLOG(4, "ZSTD_compress_generic: creating new mtctx for nbThreads=%u",
+                            params.nbThreads);
+                if (cctx->mtctx != NULL)
+                    DEBUGLOG(4, "ZSTD_compress_generic: previous nbThreads was %u",
+                                ZSTDMT_getNbThreads(cctx->mtctx));
                 ZSTDMT_freeCCtx(cctx->mtctx);
                 cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbThreads, cctx->customMem);
                 if (cctx->mtctx == NULL) return ERROR(memory_allocation);

@@ -3120,6 +3133,7 @@ size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
                                 cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );
             cctx->streamStage = zcss_load;
             cctx->appliedParams.nbThreads = params.nbThreads;
+            cctx->appliedParams.nonBlockingMode = params.nonBlockingMode;
         } else
 #endif
         {   CHECK_F( ZSTD_resetCStream_internal(

@@ -3132,7 +3146,7 @@ size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
     /* compression stage */
 #ifdef ZSTD_MULTITHREAD
-    if (cctx->appliedParams.nbThreads > 1) {
+    if ((cctx->appliedParams.nbThreads > 1) | (cctx->appliedParams.nonBlockingMode==1)) {
         size_t const flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
         if ( ZSTD_isError(flushMin)
           || (endOp == ZSTD_e_end && flushMin == 0) ) {   /* compression completed */

View File

@@ -150,6 +150,7 @@ struct ZSTD_CCtx_params_s {
     /* Multithreading: used to pass parameters to mtctx */
     U32 nbThreads;
+    int nonBlockingMode;   /* will trigger ZSTDMT even with nbThreads==1 */
     unsigned jobSize;
     unsigned overlapSizeLog;

View File

@@ -11,6 +11,7 @@
 /* ======   Tuning parameters   ====== */
 #define ZSTDMT_NBTHREADS_MAX 200
+#define ZSTDMT_JOBSIZE_MAX  (MEM_32bits() ? (512 MB) : (2 GB))   /* note : limited by `jobSize` type, which is `unsigned` */
 #define ZSTDMT_OVERLAPLOG_DEFAULT 6

@@ -22,6 +23,7 @@
 /* ======   Dependencies   ====== */
 #include <string.h>      /* memcpy, memset */
+#include <limits.h>      /* INT_MAX */
 #include "pool.h"        /* threadpool */
 #include "threading.h"   /* mutex */
 #include "zstd_compress_internal.h"  /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */

@@ -129,7 +131,7 @@ static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
 static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool)
 {
     size_t const poolSize = sizeof(*bufPool)
                             + (bufPool->totalBuffers - 1) * sizeof(buffer_t);
     unsigned u;
     size_t totalBufferSize = 0;
     ZSTD_pthread_mutex_lock(&bufPool->poolMutex);

@@ -149,7 +151,9 @@ static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const
 }

 /** ZSTDMT_getBuffer() :
- *  assumption : bufPool must be valid */
+ *  assumption : bufPool must be valid
+ * @return : a buffer, with start pointer and size
+ *  note: allocation may fail, in this case, start==NULL and size==0 */
 static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
 {
     size_t const bSize = bufPool->bufferSize;

@@ -177,7 +181,11 @@ static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
         void* const start = ZSTD_malloc(bSize, bufPool->cMem);
         buffer.start = start;   /* note : start can be NULL if malloc fails ! */
         buffer.size = (start==NULL) ? 0 : bSize;
-        DEBUGLOG(5, "ZSTDMT_getBuffer: created buffer of size %u", (U32)bSize);
+        if (start==NULL) {
+            DEBUGLOG(5, "ZSTDMT_getBuffer: buffer allocation failure !!");
+        } else {
+            DEBUGLOG(5, "ZSTDMT_getBuffer: created buffer of size %u", (U32)bSize);
+        }
         return buffer;
     }
 }

@@ -201,20 +209,6 @@ static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
     ZSTD_free(buf.start, bufPool->cMem);
 }

-/* Sets parameters relevant to the compression job, initializing others to
- * default values. Notably, nbThreads should probably be zero. */
-static ZSTD_CCtx_params ZSTDMT_makeJobCCtxParams(ZSTD_CCtx_params const params)
-{
-    ZSTD_CCtx_params jobParams;
-    memset(&jobParams, 0, sizeof(jobParams));
-    jobParams.cParams = params.cParams;
-    jobParams.fParams = params.fParams;
-    jobParams.compressionLevel = params.compressionLevel;
-    jobParams.ldmParams = params.ldmParams;
-    return jobParams;
-}

 /* =====   CCtx Pool   ===== */
 /* a single CCtx Pool can be invoked from multiple threads in parallel */
@@ -305,13 +299,16 @@ static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)
 }

-/* =====   Thread worker   ===== */
+/* ------------------------------------------ */
+/* =====          Thread worker         ===== */
+/* ------------------------------------------ */

 typedef struct {
     buffer_t src;
     const void* srcStart;
     size_t   prefixSize;
     size_t   srcSize;
+    size_t   readSize;
     buffer_t dstBuff;
     size_t   cSize;
     size_t   dstFlushed;

@@ -328,21 +325,19 @@ typedef struct {
     unsigned long long fullFrameSize;
 } ZSTDMT_jobDescription;

-/* ZSTDMT_compressChunk() : POOL_function type */
+/* ZSTDMT_compressChunk() is a POOL_function type */
 void ZSTDMT_compressChunk(void* jobDescription)
 {
     ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
     ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool);
     const void* const src = (const char*)job->srcStart + job->prefixSize;
     buffer_t dstBuff = job->dstBuff;
+    DEBUGLOG(5, "ZSTDMT_compressChunk: job (first:%u) (last:%u) : prefixSize %u, srcSize %u ",
+                 job->firstChunk, job->lastChunk, (U32)job->prefixSize, (U32)job->srcSize);
+
+    /* ressources */
     if (cctx==NULL) {
         job->cSize = ERROR(memory_allocation);
         goto _endJob;
     }
     if (dstBuff.start == NULL) {
         dstBuff = ZSTDMT_getBuffer(job->bufPool);
         if (dstBuff.start==NULL) {

@@ -350,30 +345,26 @@ void ZSTDMT_compressChunk(void* jobDescription)
             goto _endJob;
         }
         job->dstBuff = dstBuff;
+        DEBUGLOG(5, "ZSTDMT_compressChunk: received dstBuff of size %u", (U32)dstBuff.size);
     }

+    /* init */
     if (job->cdict) {
         size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dm_auto, job->cdict, job->params, job->fullFrameSize);
+        DEBUGLOG(4, "ZSTDMT_compressChunk: init using CDict (windowLog=%u)", job->params.cParams.windowLog);
         assert(job->firstChunk);   /* only allowed for first job */
         if (ZSTD_isError(initError)) { job->cSize = initError; goto _endJob; }
     } else {   /* srcStart points at reloaded section */
-        U64 const pledgedSrcSize = job->firstChunk ? job->fullFrameSize : ZSTD_CONTENTSIZE_UNKNOWN;
+        U64 const pledgedSrcSize = job->firstChunk ? job->fullFrameSize : job->srcSize;
         ZSTD_CCtx_params jobParams = job->params;   /* do not modify job->params ! copy it, modify the copy */
-        size_t const forceWindowError = ZSTD_CCtxParam_setParameter(&jobParams, ZSTD_p_forceMaxWindow, !job->firstChunk);
+        {   size_t const forceWindowError = ZSTD_CCtxParam_setParameter(&jobParams, ZSTD_p_forceMaxWindow, !job->firstChunk);
             if (ZSTD_isError(forceWindowError)) {
-                DEBUGLOG(5, "ZSTD_CCtxParam_setParameter error : %s ", ZSTD_getErrorName(forceWindowError));
                 job->cSize = forceWindowError;
                 goto _endJob;
-            }
-        DEBUGLOG(5, "ZSTDMT_compressChunk: invoking ZSTD_compressBegin_advanced_internal with windowLog = %u ", jobParams.cParams.windowLog);
+        }   }
         {   size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
                                         job->srcStart, job->prefixSize, ZSTD_dm_rawContent, /* load dictionary in "content-only" mode (no header analysis) */
-                                        NULL,
+                                        NULL, /*cdict*/
                                         jobParams, pledgedSrcSize);
             if (ZSTD_isError(initError)) {
-                DEBUGLOG(5, "ZSTD_compressBegin_advanced_internal error : %s ", ZSTD_getErrorName(initError));
                 job->cSize = initError;
                 goto _endJob;
         }   }

@@ -381,22 +372,56 @@ void ZSTDMT_compressChunk(void* jobDescription)
     if (!job->firstChunk) {   /* flush and overwrite frame header when it's not first job */
         size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.size, src, 0);
         if (ZSTD_isError(hSize)) { job->cSize = hSize; /* save error code */ goto _endJob; }
+        DEBUGLOG(5, "ZSTDMT_compressChunk: flush and overwrite %u bytes of frame header (not first chunk)", (U32)hSize);
         ZSTD_invalidateRepCodes(cctx);
     }

-    DEBUGLOG(5, "Compressing into dstBuff of size %u", (U32)dstBuff.size);
-    DEBUG_PRINTHEX(6, job->srcStart, 12);
+    /* compress */
+#if 0
     job->cSize = (job->lastChunk) ?
                  ZSTD_compressEnd     (cctx, dstBuff.start, dstBuff.size, src, job->srcSize) :
                  ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.size, src, job->srcSize);
-    DEBUGLOG(5, "compressed %u bytes into %u bytes  (first:%u) (last:%u) ",
-                (unsigned)job->srcSize, (unsigned)job->cSize, job->firstChunk, job->lastChunk);
-    DEBUGLOG(5, "dstBuff.size : %u ; => %s ", (U32)dstBuff.size, ZSTD_getErrorName(job->cSize));
+#else
+    if (sizeof(size_t) > sizeof(int))
+        assert(job->srcSize < ((size_t)INT_MAX) * ZSTD_BLOCKSIZE_MAX);   /* check overflow */
+    {   int const nbBlocks = (int)((job->srcSize + (ZSTD_BLOCKSIZE_MAX-1)) / ZSTD_BLOCKSIZE_MAX);
+        const BYTE* ip = (const BYTE*) src;
+        BYTE* const ostart = (BYTE*)dstBuff.start;
+        BYTE* op = ostart;
+        BYTE* oend = op + dstBuff.size;
+        int blockNb;
+        DEBUGLOG(5, "ZSTDMT_compressChunk: compress %u bytes in %i blocks", (U32)job->srcSize, nbBlocks);
+        job->cSize = 0;
+        for (blockNb = 1; blockNb < nbBlocks; blockNb++) {
+            size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, ZSTD_BLOCKSIZE_MAX);
+            if (ZSTD_isError(cSize)) { job->cSize = cSize; goto _endJob; }
+            ip += ZSTD_BLOCKSIZE_MAX;
+            op += cSize; assert(op < oend);
+            /* stats */
+            job->cSize += cSize;
+            job->readSize = ZSTD_BLOCKSIZE_MAX * blockNb;
+        }
+        /* last block */
+        if ((nbBlocks > 0) | job->lastChunk /*need to output a "last block" flag*/ ) {
+            size_t const lastBlockSize1 = job->srcSize & (ZSTD_BLOCKSIZE_MAX-1);
+            size_t const lastBlockSize = ((lastBlockSize1==0) & (job->srcSize>=ZSTD_BLOCKSIZE_MAX)) ? ZSTD_BLOCKSIZE_MAX : lastBlockSize1;
+            size_t const cSize = (job->lastChunk) ?
+                 ZSTD_compressEnd     (cctx, op, oend-op, ip, lastBlockSize) :
+                 ZSTD_compressContinue(cctx, op, oend-op, ip, lastBlockSize);
+            if (ZSTD_isError(cSize)) { job->cSize = cSize; goto _endJob; }
+            /* stats */
+            job->cSize += cSize;
+        }
+        job->readSize = job->srcSize;
+    }
+#endif

 _endJob:
+    /* release */
     ZSTDMT_releaseCCtx(job->cctxPool, cctx);
     ZSTDMT_releaseBuffer(job->bufPool, job->src);
     job->src = g_nullBuffer; job->srcStart = NULL;
+    /* report */
     ZSTD_PTHREAD_MUTEX_LOCK(job->jobCompleted_mutex);
     job->jobCompleted = 1;
     job->jobScanned = 0;
@@ -424,11 +449,11 @@ struct ZSTDMT_CCtx_s {
     ZSTD_CCtx_params params;
     size_t targetSectionSize;
     size_t inBuffSize;
-    size_t dictSize;
-    size_t targetDictSize;
+    size_t prefixSize;
+    size_t targetPrefixSize;
     inBuff_t inBuff;
     XXH64_state_t xxhState;
-    unsigned singleThreaded;
+    unsigned singleBlockingThread;
     unsigned jobIDMask;
     unsigned doneJobID;
     unsigned nextJobID;

@@ -440,6 +465,21 @@ struct ZSTDMT_CCtx_s {
     const ZSTD_CDict* cdict;
 };

+/* Sets parameters relevant to the compression job, initializing others to
+ * default values. Notably, nbThreads should probably be zero. */
+static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
+{
+    ZSTD_CCtx_params jobParams;
+    memset(&jobParams, 0, sizeof(jobParams));
+    jobParams.cParams = params.cParams;
+    jobParams.fParams = params.fParams;
+    jobParams.compressionLevel = params.compressionLevel;
+    jobParams.ldmParams = params.ldmParams;
+    return jobParams;
+}
+
 static ZSTDMT_jobDescription* ZSTDMT_allocJobsTable(U32* nbJobsPtr, ZSTD_customMem cMem)
 {
     U32 const nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1;

@@ -464,7 +504,7 @@ size_t ZSTDMT_CCtxParam_setNbThreads(ZSTD_CCtx_params* params, unsigned nbThread
 /* ZSTDMT_getNbThreads():
  * @return nb threads currently active in mtctx.
  * mtctx must be valid */
-size_t ZSTDMT_getNbThreads(const ZSTDMT_CCtx* mtctx)
+unsigned ZSTDMT_getNbThreads(const ZSTDMT_CCtx* mtctx)
 {
     assert(mtctx != NULL);
     return mtctx->params.nbThreads;
@@ -616,17 +656,21 @@ size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter,
 /* =====   Multi-threaded compression   ===== */
 /* ------------------------------------------ */

-static unsigned computeNbChunks(size_t srcSize, unsigned windowLog, unsigned nbThreads) {
-    size_t const chunkSizeTarget = (size_t)1 << (windowLog + 2);
-    size_t const chunkMaxSize = chunkSizeTarget << 2;
-    size_t const passSizeMax = chunkMaxSize * nbThreads;
-    unsigned const multiplier = (unsigned)(srcSize / passSizeMax) + 1;
-    unsigned const nbChunksLarge = multiplier * nbThreads;
-    unsigned const nbChunksMax = (unsigned)(srcSize / chunkSizeTarget) + 1;
-    unsigned const nbChunksSmall = MIN(nbChunksMax, nbThreads);
-    return (multiplier>1) ? nbChunksLarge : nbChunksSmall;
-}
+static unsigned ZSTDMT_computeNbChunks(size_t srcSize, unsigned windowLog, unsigned nbThreads) {
+    assert(nbThreads>0);
+    {   size_t const chunkSizeTarget = (size_t)1 << (windowLog + 2);
+        size_t const chunkMaxSize = chunkSizeTarget << 2;
+        size_t const passSizeMax = chunkMaxSize * nbThreads;
+        unsigned const multiplier = (unsigned)(srcSize / passSizeMax) + 1;
+        unsigned const nbChunksLarge = multiplier * nbThreads;
+        unsigned const nbChunksMax = (unsigned)(srcSize / chunkSizeTarget) + 1;
+        unsigned const nbChunksSmall = MIN(nbChunksMax, nbThreads);
+        return (multiplier>1) ? nbChunksLarge : nbChunksSmall;
+}   }

+/* ZSTDMT_compress_advanced_internal() :
+ * This is a blocking function : it will only give back control to caller after finishing its compression job.
+ */
 static size_t ZSTDMT_compress_advanced_internal(
                 ZSTDMT_CCtx* mtctx,
                 void* dst, size_t dstCapacity,

@@ -634,10 +678,10 @@ static size_t ZSTDMT_compress_advanced_internal(
                 const ZSTD_CDict* cdict,
                 ZSTD_CCtx_params const params)
 {
-    ZSTD_CCtx_params const jobParams = ZSTDMT_makeJobCCtxParams(params);
+    ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(params);
     unsigned const overlapRLog = (params.overlapSizeLog>9) ? 0 : 9-params.overlapSizeLog;
     size_t const overlapSize = (overlapRLog>=9) ? 0 : (size_t)1 << (params.cParams.windowLog - overlapRLog);
-    unsigned nbChunks = computeNbChunks(srcSize, params.cParams.windowLog, params.nbThreads);
+    unsigned nbChunks = ZSTDMT_computeNbChunks(srcSize, params.cParams.windowLog, params.nbThreads);
     size_t const proposedChunkSize = (srcSize + (nbChunks-1)) / nbChunks;
     size_t const avgChunkSize = (((proposedChunkSize-1) & 0x1FFFF) < 0x7FFF) ? proposedChunkSize + 0xFFFF : proposedChunkSize;   /* avoid too small last block */
     const char* const srcStart = (const char*)src;

@@ -648,14 +692,16 @@ static size_t ZSTDMT_compress_advanced_internal(
     assert(jobParams.nbThreads == 0);
     assert(mtctx->cctxPool->totalCCtx == params.nbThreads);
     DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: nbChunks=%2u (rawSize=%u bytes; fixedSize=%u) ",
                 nbChunks, (U32)proposedChunkSize, (U32)avgChunkSize);
-    if (nbChunks==1) {   /* fallback to single-thread mode */
+    if ((nbChunks==1) | (params.nbThreads<=1)) {   /* fallback to single-thread mode : this is a blocking invocation anyway */
         ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0];
         if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, jobParams.fParams);
         return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, jobParams);
     }
-    assert(avgChunkSize >= 256 KB);   /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), which is required for compressWithinDst */
+    assert(avgChunkSize >= 256 KB);   /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), required to compress directly into Dst (no additional buffer) */
     ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgChunkSize) );
     XXH64_reset(&xxh64, 0);

@@ -665,6 +711,7 @@ static size_t ZSTDMT_compress_advanced_internal(
         mtctx->jobIDMask = 0;
         mtctx->jobs = ZSTDMT_allocJobsTable(&nbJobs, mtctx->cMem);
         if (mtctx->jobs==NULL) return ERROR(memory_allocation);
+        assert((nbJobs != 0) && ((nbJobs & (nbJobs - 1)) == 0));  /* ensure nbJobs is a power of 2 */
         mtctx->jobIDMask = nbJobs - 1;
     }
@@ -797,17 +844,24 @@ size_t ZSTDMT_initCStream_internal(
     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
     assert(zcs->cctxPool->totalCCtx == params.nbThreads);
-    zcs->singleThreaded = (params.nbThreads==1) | (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN);  /* do not trigger multi-threading when srcSize is too small */
+    zcs->singleBlockingThread = (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN);  /* do not trigger multi-threading when srcSize is too small */
+    if (params.jobSize == 0) {
+        if (params.cParams.windowLog >= 29)
+            params.jobSize = ZSTDMT_JOBSIZE_MAX;
+        else
+            params.jobSize = 1 << (params.cParams.windowLog + 2);
+    }
+    if (params.jobSize > ZSTDMT_JOBSIZE_MAX) params.jobSize = ZSTDMT_JOBSIZE_MAX;

-    if (zcs->singleThreaded) {
-        ZSTD_CCtx_params const singleThreadParams = ZSTDMT_makeJobCCtxParams(params);
-        DEBUGLOG(4, "single thread mode");
+    if (zcs->singleBlockingThread) {
+        ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(params);
+        DEBUGLOG(4, "ZSTDMT_initCStream_internal: switch to single blocking thread mode");
         assert(singleThreadParams.nbThreads == 0);
         return ZSTD_initCStream_internal(zcs->cctxPool->cctx[0],
                                          dict, dictSize, cdict,
                                          singleThreadParams, pledgedSrcSize);
     }
-    DEBUGLOG(4, "multi-threading mode (%u threads)", params.nbThreads);
+    DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u threads", params.nbThreads);

     if (zcs->allJobsCompleted == 0) {   /* previous compression not correctly finished */
         ZSTDMT_waitForAllJobsCompleted(zcs);

@@ -831,17 +885,17 @@ size_t ZSTDMT_initCStream_internal(
     }

     assert(params.overlapSizeLog <= 9);
-    zcs->targetDictSize = (params.overlapSizeLog==0) ? 0 : (size_t)1 << (params.cParams.windowLog - (9 - params.overlapSizeLog));
-    DEBUGLOG(4, "overlapLog=%u => %u KB", params.overlapSizeLog, (U32)(zcs->targetDictSize>>10));
-    zcs->targetSectionSize = params.jobSize ? params.jobSize : (size_t)1 << (params.cParams.windowLog + 2);
+    zcs->targetPrefixSize = (params.overlapSizeLog==0) ? 0 : (size_t)1 << (params.cParams.windowLog - (9 - params.overlapSizeLog));
+    DEBUGLOG(4, "overlapLog=%u => %u KB", params.overlapSizeLog, (U32)(zcs->targetPrefixSize>>10));
+    zcs->targetSectionSize = params.jobSize;
     if (zcs->targetSectionSize < ZSTDMT_JOBSIZE_MIN) zcs->targetSectionSize = ZSTDMT_JOBSIZE_MIN;
-    if (zcs->targetSectionSize < zcs->targetDictSize) zcs->targetSectionSize = zcs->targetDictSize;  /* job size must be >= overlap size */
+    if (zcs->targetSectionSize < zcs->targetPrefixSize) zcs->targetSectionSize = zcs->targetPrefixSize;  /* job size must be >= overlap size */
     DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(zcs->targetSectionSize>>10), params.jobSize);
-    zcs->inBuffSize = zcs->targetDictSize + zcs->targetSectionSize;
+    zcs->inBuffSize = zcs->targetPrefixSize + zcs->targetSectionSize;
     DEBUGLOG(4, "inBuff Size : %u KB", (U32)(zcs->inBuffSize>>10));
     ZSTDMT_setBufferSize(zcs->bufPool, MAX(zcs->inBuffSize, ZSTD_compressBound(zcs->targetSectionSize)) );
     zcs->inBuff.buffer = g_nullBuffer;
-    zcs->dictSize = 0;
+    zcs->prefixSize = 0;
     zcs->doneJobID = 0;
     zcs->nextJobID = 0;
     zcs->frameEnded = 0;

@@ -855,8 +909,8 @@ size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
                                    ZSTD_parameters params,
                                    unsigned long long pledgedSrcSize)
 {
-    ZSTD_CCtx_params cctxParams = mtctx->params;
-    DEBUGLOG(5, "ZSTDMT_initCStream_advanced (pledgedSrcSize=%u)", (U32)pledgedSrcSize);
+    ZSTD_CCtx_params cctxParams = mtctx->params;   /* retrieve sticky params */
+    DEBUGLOG(4, "ZSTDMT_initCStream_advanced (pledgedSrcSize=%u)", (U32)pledgedSrcSize);
     cctxParams.cParams = params.cParams;
     cctxParams.fParams = params.fParams;
     return ZSTDMT_initCStream_internal(mtctx, dict, dictSize, ZSTD_dm_auto, NULL,

@@ -869,9 +923,9 @@ size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
                                      unsigned long long pledgedSrcSize)
 {
     ZSTD_CCtx_params cctxParams = mtctx->params;
+    if (cdict==NULL) return ERROR(dictionary_wrong);   /* method incompatible with NULL cdict */
     cctxParams.cParams = ZSTD_getCParamsFromCDict(cdict);
     cctxParams.fParams = fParams;
-    if (cdict==NULL) return ERROR(dictionary_wrong);   /* method incompatible with NULL cdict */
     return ZSTDMT_initCStream_internal(mtctx, NULL, 0 /*dictSize*/, ZSTD_dm_auto, cdict,
                                        cctxParams, pledgedSrcSize);
 }

@@ -891,8 +945,9 @@ size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* zcs, unsigned long long pledgedSrcSize)
 }

 size_t ZSTDMT_initCStream(ZSTDMT_CCtx* zcs, int compressionLevel) {
-    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, 0);
-    ZSTD_CCtx_params cctxParams = zcs->params;
+    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0);
+    ZSTD_CCtx_params cctxParams = zcs->params;   /* retrieve sticky params */
+    DEBUGLOG(4, "ZSTDMT_initCStream (cLevel=%i)", compressionLevel);
     cctxParams.cParams = params.cParams;
     cctxParams.fParams = params.fParams;
     return ZSTDMT_initCStream_internal(zcs, NULL, 0, ZSTD_dm_auto, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN);
@@ -904,12 +959,13 @@ static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* zcs, size_t srcSize, unsi
     unsigned const jobID = zcs->nextJobID & zcs->jobIDMask;

     DEBUGLOG(5, "ZSTDMT_createCompressionJob: preparing job %u to compress %u bytes with %u preload ",
-                zcs->nextJobID, (U32)srcSize, (U32)zcs->dictSize);
+                zcs->nextJobID, (U32)srcSize, (U32)zcs->prefixSize);
     zcs->jobs[jobID].src = zcs->inBuff.buffer;
     zcs->jobs[jobID].srcStart = zcs->inBuff.buffer.start;
     zcs->jobs[jobID].srcSize = srcSize;
-    zcs->jobs[jobID].prefixSize = zcs->dictSize;
-    assert(zcs->inBuff.filled >= srcSize + zcs->dictSize);
+    zcs->jobs[jobID].readSize = 0;
+    zcs->jobs[jobID].prefixSize = zcs->prefixSize;
+    assert(zcs->inBuff.filled >= srcSize + zcs->prefixSize);
     zcs->jobs[jobID].params = zcs->params;
     /* do not calculate checksum within sections, but write it in header for first section */
     if (zcs->nextJobID) zcs->jobs[jobID].params.fParams.checksumFlag = 0;

@@ -926,11 +982,11 @@ static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* zcs, size_t srcSize, unsi
     zcs->jobs[jobID].jobCompleted_cond = &zcs->jobCompleted_cond;

     if (zcs->params.fParams.checksumFlag)
-        XXH64_update(&zcs->xxhState, (const char*)zcs->inBuff.buffer.start + zcs->dictSize, srcSize);
+        XXH64_update(&zcs->xxhState, (const char*)zcs->inBuff.buffer.start + zcs->prefixSize, srcSize);

     /* get a new buffer for next input */
     if (!endFrame) {
-        size_t const newDictSize = MIN(srcSize + zcs->dictSize, zcs->targetDictSize);
+        size_t const newPrefixSize = MIN(srcSize + zcs->prefixSize, zcs->targetPrefixSize);
         zcs->inBuff.buffer = ZSTDMT_getBuffer(zcs->bufPool);
         if (zcs->inBuff.buffer.start == NULL) {    /* not enough memory to allocate next input buffer */
             zcs->jobs[jobID].jobCompleted = 1;

@@ -939,15 +995,15 @@ static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* zcs, size_t srcSize, unsi
             ZSTDMT_releaseAllJobResources(zcs);
             return ERROR(memory_allocation);
         }
-        zcs->inBuff.filled -= srcSize + zcs->dictSize - newDictSize;
+        zcs->inBuff.filled -= srcSize + zcs->prefixSize - newPrefixSize;
         memmove(zcs->inBuff.buffer.start,
-                (const char*)zcs->jobs[jobID].srcStart + zcs->dictSize + srcSize - newDictSize,
+                (const char*)zcs->jobs[jobID].srcStart + zcs->prefixSize + srcSize - newPrefixSize,
                 zcs->inBuff.filled);
-        zcs->dictSize = newDictSize;
+        zcs->prefixSize = newPrefixSize;
     } else {   /* if (endFrame==1) */
         zcs->inBuff.buffer = g_nullBuffer;
         zcs->inBuff.filled = 0;
-        zcs->dictSize = 0;
+        zcs->prefixSize = 0;
         zcs->frameEnded = 1;
         if (zcs->nextJobID == 0) {
             /* single chunk exception : checksum is calculated directly within worker thread */

@@ -973,7 +1029,7 @@ static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* zcs, size_t srcSize, unsi
 static size_t ZSTDMT_flushNextJob(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsigned blockToFlush)
 {
     unsigned const wJobID = zcs->doneJobID & zcs->jobIDMask;
-    DEBUGLOG(5, "ZSTDMT_flushNextJob");
+    DEBUGLOG(5, "ZSTDMT_flushNextJob (blocking:%u)", blockToFlush);
     if (zcs->doneJobID == zcs->nextJobID) return 0;   /* all flushed ! */
     ZSTD_PTHREAD_MUTEX_LOCK(&zcs->jobCompleted_mutex);
     while (zcs->jobs[wJobID].jobCompleted==0) {

@@ -986,17 +1042,17 @@ static size_t ZSTDMT_flushNextJob(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsi
     {   ZSTDMT_jobDescription job = zcs->jobs[wJobID];
         if (!job.jobScanned) {
             if (ZSTD_isError(job.cSize)) {
-                DEBUGLOG(5, "job %u : compression error detected : %s",
+                DEBUGLOG(5, "ZSTDMT_flushNextJob: job %u : compression error detected : %s",
                             zcs->doneJobID, ZSTD_getErrorName(job.cSize));
                 ZSTDMT_waitForAllJobsCompleted(zcs);
                 ZSTDMT_releaseAllJobResources(zcs);
                 return job.cSize;
             }
-            DEBUGLOG(5, "zcs->params.fParams.checksumFlag : %u ", zcs->params.fParams.checksumFlag);
+            DEBUGLOG(5, "ZSTDMT_flushNextJob: zcs->params.fParams.checksumFlag : %u ", zcs->params.fParams.checksumFlag);
             if (zcs->params.fParams.checksumFlag) {
                 if (zcs->frameEnded && (zcs->doneJobID+1 == zcs->nextJobID)) {  /* write checksum at end of last section */
                     U32 const checksum = (U32)XXH64_digest(&zcs->xxhState);
-                    DEBUGLOG(5, "writing checksum : %08X \n", checksum);
+                    DEBUGLOG(5, "ZSTDMT_flushNextJob: writing checksum : %08X \n", checksum);
                     MEM_writeLE32((char*)job.dstBuff.start + job.cSize, checksum);
                     job.cSize += 4;
                     zcs->jobs[wJobID].cSize += 4;

@@ -1004,7 +1060,7 @@ static size_t ZSTDMT_flushNextJob(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsi
             zcs->jobs[wJobID].jobScanned = 1;
         }
         {   size_t const toWrite = MIN(job.cSize - job.dstFlushed, output->size - output->pos);
-            DEBUGLOG(5, "Flushing %u bytes from job %u ", (U32)toWrite, zcs->doneJobID);
+            DEBUGLOG(5, "ZSTDMT_flushNextJob: Flushing %u bytes from job %u ", (U32)toWrite, zcs->doneJobID);
             memcpy((char*)output->dst + output->pos, (const char*)job.dstBuff.start + job.dstFlushed, toWrite);
             output->pos += toWrite;
             job.dstFlushed += toWrite;
@@ -1034,13 +1090,13 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
                                      ZSTD_inBuffer* input,
                                      ZSTD_EndDirective endOp)
 {
-    size_t const newJobThreshold = mtctx->dictSize + mtctx->targetSectionSize;
+    size_t const newJobThreshold = mtctx->prefixSize + mtctx->targetSectionSize;
     unsigned forwardInputProgress = 0;
     DEBUGLOG(5, "ZSTDMT_compressStream_generic ");
     assert(output->pos <= output->size);
     assert(input->pos  <= input->size);

-    if (mtctx->singleThreaded) {  /* delegate to single-thread (synchronous) */
+    if (mtctx->singleBlockingThread) {  /* delegate to single-thread (synchronous) */
         return ZSTD_compressStream_generic(mtctx->cctxPool->cctx[0], output, input, endOp);
     }

@@ -1070,10 +1126,13 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
     /* fill input buffer */
     if (input->size > input->pos) {   /* support NULL input */
         if (mtctx->inBuff.buffer.start == NULL) {
-            mtctx->inBuff.buffer = ZSTDMT_getBuffer(mtctx->bufPool);  /* note : may fail, in which case, no forward input progress */
+            mtctx->inBuff.buffer = ZSTDMT_getBuffer(mtctx->bufPool);   /* note : allocation can fail, in which case, no forward input progress */
             mtctx->inBuff.filled = 0;
-        }
-        if (mtctx->inBuff.buffer.start) {
+            if ( (mtctx->inBuff.buffer.start == NULL)        /* allocation failure */
+              && (mtctx->doneJobID == mtctx->nextJobID) ) {  /* and nothing to flush */
+                return ERROR(memory_allocation);    /* no forward progress possible => output an error */
+        }   }
+        if (mtctx->inBuff.buffer.start != NULL) {
             size_t const toLoad = MIN(input->size - input->pos, mtctx->inBuffSize - mtctx->inBuff.filled);
             DEBUGLOG(5, "inBuff:%08X;  inBuffSize=%u;  ToCopy=%u", (U32)(size_t)mtctx->inBuff.buffer.start, (U32)mtctx->inBuffSize, (U32)toLoad);
             memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, toLoad);

@@ -1118,7 +1177,7 @@ size_t ZSTDMT_compressStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, ZSTD_inBu
 static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, unsigned endFrame)
 {
-    size_t const srcSize = mtctx->inBuff.filled - mtctx->dictSize;
+    size_t const srcSize = mtctx->inBuff.filled - mtctx->prefixSize;
     DEBUGLOG(5, "ZSTDMT_flushStream_internal");
     if ( ((srcSize > 0) || (endFrame && !mtctx->frameEnded))

@@ -1135,7 +1194,7 @@ static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* ou
 size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
 {
     DEBUGLOG(5, "ZSTDMT_flushStream");
-    if (mtctx->singleThreaded)
+    if (mtctx->singleBlockingThread)
         return ZSTD_flushStream(mtctx->cctxPool->cctx[0], output);
     return ZSTDMT_flushStream_internal(mtctx, output, 0 /* endFrame */);
 }

@@ -1143,7 +1202,7 @@ size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
 size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
 {
     DEBUGLOG(4, "ZSTDMT_endStream");
-    if (mtctx->singleThreaded)
+    if (mtctx->singleBlockingThread)
         return ZSTD_endStream(mtctx->cctxPool->cctx[0], output);
     return ZSTDMT_flushStream_internal(mtctx, output, 1 /* endFrame */);
 }
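
The block-splitting loop added to ZSTDMT_compressChunk() above is what makes a job "progressive": each job is cut into ZSTD_BLOCKSIZE_MAX (128 KB) blocks, and job->readSize advances after every block instead of only at job completion. A small self-contained check of that block arithmetic, as a sketch: the 300 KB sample size is an arbitrary assumption, and BLOCKSIZE_MAX simply mirrors ZSTD_BLOCKSIZE_MAX.

#include <assert.h>
#include <stddef.h>

#define BLOCKSIZE_MAX ((size_t)1 << 17)   /* mirrors ZSTD_BLOCKSIZE_MAX = 128 KB */

int main(void)
{
    size_t const srcSize = (size_t)300 * 1024;   /* one 300 KB job (arbitrary example) */
    /* same formulas as the worker loop above */
    int    const nbBlocks       = (int)((srcSize + (BLOCKSIZE_MAX-1)) / BLOCKSIZE_MAX);
    size_t const lastBlockSize1 = srcSize & (BLOCKSIZE_MAX-1);
    size_t const lastBlockSize  = ((lastBlockSize1==0) & (srcSize>=BLOCKSIZE_MAX))
                                ? BLOCKSIZE_MAX : lastBlockSize1;
    assert(nbBlocks == 3);                        /* two full 128 KB blocks ... */
    assert(lastBlockSize == (size_t)44 * 1024);   /* ... plus a 44 KB last block */
    return 0;
}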

View File

@@ -85,7 +85,7 @@ ZSTDLIB_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
  * List of parameters that can be set using ZSTDMT_setMTCtxParameter() */
 typedef enum {
     ZSTDMT_p_jobSize,           /* Each job is compressed in parallel. By default, this value is dynamically determined depending on compression parameters. Can be set explicitly here. */
-    ZSTDMT_p_overlapSectionLog  /* Each job may reload a part of previous job to enhance compressionr ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window */
+    ZSTDMT_p_overlapSectionLog  /* Each job may reload a part of previous job to enhance compressionr ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */
 } ZSTDMT_parameter;

 /* ZSTDMT_setMTCtxParameter() :

@@ -120,7 +120,7 @@ size_t ZSTDMT_CCtxParam_setNbThreads(ZSTD_CCtx_params* params, unsigned nbThread
 /* ZSTDMT_getNbThreads():
  * @return nb threads currently active in mtctx.
  * mtctx must be valid */
-size_t ZSTDMT_getNbThreads(const ZSTDMT_CCtx* mtctx);
+unsigned ZSTDMT_getNbThreads(const ZSTDMT_CCtx* mtctx);

 /*! ZSTDMT_initCStream_internal() :
  *  Private use only. Init streaming operation.
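
As a usage illustration (not part of this patch), a caller going through the experimental ZSTDMT_* interface might combine these sticky parameters with the streaming entry points declared in this header. A minimal sketch, assuming a library built with ZSTD_MULTITHREAD; the 4-thread count, 1 MB job size and level 3 are arbitrary choices, and compress_mt_sketch is a hypothetical helper:

#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"              /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTD_isError */
#include "zstdmt_compress.h"   /* experimental ZSTDMT_* API declared above */

/* Hypothetical helper: stream `src` into `dst` through a 4-thread ZSTDMT context.
 * Assumes dstCapacity >= ZSTD_compressBound(srcSize), so output never stalls. */
static size_t compress_mt_sketch(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(4);   /* 4 worker threads (arbitrary) */
    ZSTD_inBuffer  in  = { src, srcSize, 0 };
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };
    size_t ret = (size_t)-1;                           /* generic failure value */
    if (mtctx == NULL) return ret;

    /* sticky parameters : re-used by subsequent compression jobs on this context */
    ZSTDMT_setMTCtxParameter(mtctx, ZSTDMT_p_jobSize, 1u << 20);      /* 1 MB jobs (arbitrary) */
    ZSTDMT_setMTCtxParameter(mtctx, ZSTDMT_p_overlapSectionLog, 6);   /* default : 1/8th of window */

    if (ZSTD_isError(ZSTDMT_initCStream(mtctx, 3))) goto _end;        /* compression level 3 */
    while (in.pos < in.size) {
        ret = ZSTDMT_compressStream(mtctx, &out, &in);
        if (ZSTD_isError(ret)) goto _end;
    }
    do {
        ret = ZSTDMT_endStream(mtctx, &out);           /* returns remaining bytes to flush */
        if (ZSTD_isError(ret)) goto _end;
    } while (ret != 0);
    ret = out.pos;                                     /* compressed size */
_end:
    ZSTDMT_freeCCtx(mtctx);
    return ret;
}

In the library itself these knobs are normally reached through ZSTD_CCtx_setParameter(ZSTD_p_jobSize / ZSTD_p_overlapSizeLog), which forwards to ZSTDMT_CCtxParam_setMTCtxParameter() as shown in the zstd_compress.c hunks above.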

View File

@@ -1759,7 +1759,7 @@ static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skip
  * or an error code, which can be tested using ZSTD_isError() */
 size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
 {
-    DEBUGLOG(5, "ZSTD_decompressContinue");
+    DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (U32)srcSize);
     /* Sanity check */
     if (srcSize != dctx->expected) return ERROR(srcSize_wrong);  /* not allowed */
     if (dstCapacity) ZSTD_checkContinuity(dctx, dst);

@@ -1820,12 +1820,12 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
     case ZSTDds_decompressLastBlock:
     case ZSTDds_decompressBlock:
-        DEBUGLOG(5, "case ZSTDds_decompressBlock");
+        DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock");
         {   size_t rSize;
             switch(dctx->bType)
             {
             case bt_compressed:
-                DEBUGLOG(5, "case bt_compressed");
+                DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed");
                 rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1);
                 break;
             case bt_raw :

@@ -1839,12 +1839,12 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
                 return ERROR(corruption_detected);
             }
             if (ZSTD_isError(rSize)) return rSize;
-            DEBUGLOG(5, "decoded size from block : %u", (U32)rSize);
+            DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (U32)rSize);
             dctx->decodedSize += rSize;
             if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);
             if (dctx->stage == ZSTDds_decompressLastBlock) {   /* end of frame */
-                DEBUGLOG(4, "decoded size from frame : %u", (U32)dctx->decodedSize);
+                DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (U32)dctx->decodedSize);
                 if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
                     if (dctx->decodedSize != dctx->fParams.frameContentSize) {
                         return ERROR(corruption_detected);

@@ -1868,7 +1868,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
         assert(srcSize == 4);  /* guaranteed by dctx->expected */
         {   U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);
             U32 const check32 = MEM_readLE32(src);
-            DEBUGLOG(4, "checksum : calculated %08X :: %08X read", h32, check32);
+            DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", h32, check32);
             if (check32 != h32) return ERROR(checksum_wrong);
             dctx->expected = 0;
             dctx->stage = ZSTDds_getFrameHeaderSize;

View File

@@ -972,10 +972,20 @@ typedef enum {
     ZSTD_p_dictIDFlag,       /* When applicable, dictionary's ID is written into frame header (default:1) */

     /* multi-threading parameters */
+    /* These parameters are only useful if multi-threading is enabled (ZSTD_MULTITHREAD).
+     * They return an error otherwise. */
     ZSTD_p_nbThreads=400,    /* Select how many threads a compression job can spawn (default:1)
                               * More threads improve speed, but also increase memory usage.
                               * Can only receive a value > 1 if ZSTD_MULTITHREAD is enabled.
                               * Special: value 0 means "do not change nbThreads" */
+    ZSTD_p_nonBlockingMode,  /* Single thread mode is by default "blocking" :
                               * it finishes its job as much as possible, and only then gives back control to caller.
                               * In contrast, multi-thread is by default "non-blocking" :
                               * it takes some input, flush some output if available, and immediately gives back control to caller.
                               * Compression work is performed in parallel, within worker threads.
                               * (note : a strong exception to this rule is when first job is called with ZSTD_e_end : it becomes blocking)
                               * Setting this parameter to 1 will enforce non-blocking mode even when only 1 thread is selected.
                               * It allows the caller to do other tasks while the worker thread compresses in parallel. */
     ZSTD_p_jobSize,          /* Size of a compression job. This value is only enforced in streaming (non-blocking) mode.
                               * Each compression job is completed in parallel, so indirectly controls the nb of active threads.
                               * 0 means default, which is dynamically determined based on compression parameters.
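
To make the intended call pattern concrete, here is a minimal sketch of how an application might drive the new mode through ZSTD_compress_generic(). It assumes a library built with ZSTD_MULTITHREAD and a destination buffer of at least ZSTD_compressBound(srcSize) bytes; compress_nonblocking_sketch is a hypothetical helper, not part of the patch:

#define ZSTD_STATIC_LINKING_ONLY   /* advanced / experimental API */
#include "zstd.h"

/* Hypothetical helper: compress in non-blocking mode with a single worker thread.
 * Assumes dstCapacity >= ZSTD_compressBound(srcSize), so output never stalls. */
static size_t compress_nonblocking_sketch(void* dst, size_t dstCapacity,
                                          const void* src, size_t srcSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_inBuffer  input  = { src, srcSize, 0 };
    ZSTD_outBuffer output = { dst, dstCapacity, 0 };
    size_t result = (size_t)-1;                        /* generic failure value */
    if (cctx == NULL) return result;

    ZSTD_CCtx_setParameter(cctx, ZSTD_p_nbThreads, 1);
    ZSTD_CCtx_setParameter(cctx, ZSTD_p_nonBlockingMode, 1);   /* worker thread even with nbThreads==1 */

    /* feed input with ZSTD_e_continue : each call forwards some data to the worker
     * and returns quickly, so the caller can do other work between calls */
    while (input.pos < input.size) {
        result = ZSTD_compress_generic(cctx, &output, &input, ZSTD_e_continue);
        if (ZSTD_isError(result)) goto _end;
    }
    /* finish the frame : loop until fully flushed (return value 0) */
    do {
        result = ZSTD_compress_generic(cctx, &output, &input, ZSTD_e_end);
        if (ZSTD_isError(result)) goto _end;
    } while (result != 0);
    result = output.pos;   /* compressed size */
_end:
    ZSTD_freeCCtx(cctx);
    return result;
}

The CLI change in programs/fileio.c further down enables the same ZSTD_p_nonBlockingMode parameter via ZSTD_CCtx_setParameter().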

View File

@@ -157,6 +157,8 @@ endif
 zstd-release: DEBUGFLAGS :=
 zstd-release: zstd

+zstd32 : CPPFLAGS += $(THREAD_CPP)
+zstd32 : LDFLAGS  += $(THREAD_LD)
 zstd32 : CPPFLAGS += -DZSTD_LEGACY_SUPPORT=$(ZSTD_LEGACY_SUPPORT)
 zstd32 : $(ZSTDLIB_FILES) zstdcli.c fileio.c bench.c datagen.c dibio.c
 ifneq (,$(filter Windows%,$(OS)))

View File

@@ -457,6 +457,9 @@ static cRess_t FIO_createCResources(const char* dictFileName, int cLevel,
     /* multi-threading */
     DISPLAYLEVEL(5,"set nb threads = %u \n", g_nbThreads);
     CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_p_nbThreads, g_nbThreads) );
+#ifdef ZSTD_MULTITHREAD
+    CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_p_nonBlockingMode, 1) );
+#endif

     /* dictionary */
     CHECK( ZSTD_CCtx_setPledgedSrcSize(ress.cctx, srcSize) );   /* just for dictionary loading, for compression parameters adaptation */
     CHECK( ZSTD_CCtx_loadDictionary(ress.cctx, dictBuffer, dictBuffSize) );

@@ -806,21 +809,11 @@ static int FIO_compressFilename_internal(cRess_t ress,
                 compressedfilesize += outBuff.pos;
             }
         }
-        if (g_nbThreads > 1) {
-            if (fileSize == UTIL_FILESIZE_UNKNOWN)
-                DISPLAYUPDATE(2, "\rRead : %u MB", (U32)(readsize>>20))
-            else
-                DISPLAYUPDATE(2, "\rRead : %u / %u MB",
-                                (U32)(readsize>>20), (U32)(fileSize>>20));
+        if (fileSize == UTIL_FILESIZE_UNKNOWN) {
+            DISPLAYUPDATE(2, "\rRead : %u MB", (U32)(readsize>>20));
         } else {
-            if (fileSize == UTIL_FILESIZE_UNKNOWN)
-                DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%",
-                                (U32)(readsize>>20),
-                                (double)compressedfilesize/readsize*100)
-            else
-                DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%",
-                                (U32)(readsize>>20), (U32)(fileSize>>20),
-                                (double)compressedfilesize/readsize*100);
+            DISPLAYUPDATE(2, "\rRead : %u / %u MB",
+                            (U32)(readsize>>20), (U32)(fileSize>>20));
         }

     } while (directive != ZSTD_e_end);

@@ -828,7 +821,7 @@ finish:
     /* Status */
     DISPLAYLEVEL(2, "\r%79s\r", "");
     DISPLAYLEVEL(2,"%-20s :%6.2f%%   (%6llu => %6llu bytes, %s) \n", srcFileName,
-                (double)compressedfilesize/(readsize+(!readsize) /* avoid div by zero */ )*100,
+                (double)compressedfilesize / (readsize+(!readsize)/*avoid div by zero*/) * 100,
                 (unsigned long long)readsize, (unsigned long long) compressedfilesize,
                 dstFileName);

View File

@@ -1243,6 +1243,7 @@ static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest, double comp
             maxTestSize = FUZ_randomLength(&lseed, oldTestLog+2);
             if (maxTestSize >= srcBufferSize) maxTestSize = srcBufferSize-1;
             {   int const compressionLevel = (FUZ_rand(&lseed) % 5) + 1;
+                DISPLAYLEVEL(5, "Init with compression level = %i \n", compressionLevel);
                 CHECK_Z( ZSTDMT_initCStream(zc, compressionLevel) );
             }
         } else {

@@ -1301,9 +1302,12 @@ static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest, double comp
             if ((FUZ_rand(&lseed) & 15) == 0) {
                 size_t const randomDstSize = FUZ_randomLength(&lseed, maxSampleLog);
                 size_t const adjustedDstSize = MIN(cBufferSize - cSize, randomDstSize);
+                size_t const previousPos = outBuff.pos;
                 outBuff.size = outBuff.pos + adjustedDstSize;
                 DISPLAYLEVEL(5, "Flushing into dst buffer of size %u \n", (U32)adjustedDstSize);
                 CHECK_Z( ZSTDMT_flushStream(zc, &outBuff) );
+                assert(outBuff.pos >= previousPos);
+                DISPLAYLEVEL(6, "%u bytes flushed by ZSTDMT_flushStream \n", (U32)(outBuff.pos-previousPos));
         }   }

@@ -1311,10 +1315,13 @@ static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest, double comp
         /* final frame epilogue */
         while (remainingToFlush) {
             size_t const randomDstSize = FUZ_randomLength(&lseed, maxSampleLog);
             size_t const adjustedDstSize = MIN(cBufferSize - cSize, randomDstSize);
+            size_t const previousPos = outBuff.pos;
             outBuff.size = outBuff.pos + adjustedDstSize;
             DISPLAYLEVEL(5, "Ending into dst buffer of size %u \n", (U32)adjustedDstSize);
             remainingToFlush = ZSTDMT_endStream(zc, &outBuff);
             CHECK (ZSTD_isError(remainingToFlush), "ZSTDMT_endStream error : %s", ZSTD_getErrorName(remainingToFlush));
+            assert(outBuff.pos >= previousPos);
+            DISPLAYLEVEL(6, "%u bytes flushed by ZSTDMT_endStream \n", (U32)(outBuff.pos-previousPos));
             DISPLAYLEVEL(5, "endStream : remainingToFlush : %u \n", (U32)remainingToFlush);
         }
         crcOrig = XXH64_digest(&xxhState);