commit a940e78f16
@@ -53,8 +53,50 @@ extern "C" {
#undef MAX
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define MAX(a,b) ((a)>(b) ? (a) : (b))
#define CHECK_F(f) { size_t const errcod = f; if (ERR_isError(errcod)) return errcod; } /* check and Forward error code */
#define CHECK_E(f, e) { size_t const errcod = f; if (ERR_isError(errcod)) return ERROR(e); } /* check and send Error code */

/**
 * Return the specified error if the condition evaluates to true.
 *
 * In debug modes, prints additional information. In order to do that
 * (particularly, printing the conditional that failed), this can't just wrap
 * RETURN_ERROR().
 */
#define RETURN_ERROR_IF(cond, err, ...) \
  if (cond) { \
    RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \
    RAWLOG(3, ": " __VA_ARGS__); \
    RAWLOG(3, "\n"); \
    return ERROR(err); \
  }

/**
 * Unconditionally return the specified error.
 *
 * In debug modes, prints additional information.
 */
#define RETURN_ERROR(err, ...) \
  do { \
    RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \
    RAWLOG(3, ": " __VA_ARGS__); \
    RAWLOG(3, "\n"); \
    return ERROR(err); \
  } while(0);

/**
 * If the provided expression evaluates to an error code, returns that error code.
 *
 * In debug modes, prints additional information.
 */
#define FORWARD_IF_ERROR(err, ...) \
  do { \
    size_t const err_code = (err); \
    if (ERR_isError(err_code)) { \
      RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \
      RAWLOG(3, ": " __VA_ARGS__); \
      RAWLOG(3, "\n"); \
      return err_code; \
    } \
  } while(0);


/*-*************************************
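For illustration only (this snippet is not part of the diff; ZSTD_doSomething_example and internalStep are made-up names), here is how a typical call site changes once these macros are available:

/* before: errors are returned silently */
size_t ZSTD_doSomething_example_old(ZSTD_CCtx* cctx, size_t value)
{
    if (cctx == NULL) return ERROR(GENERIC);
    {   size_t const r = internalStep(cctx, value);   /* hypothetical helper */
        if (ERR_isError(r)) return r;
    }
    return 0;
}

/* after: identical control flow, but every failure is reported through RAWLOG
 * at debug level 3, with file, line, and the stringified condition */
size_t ZSTD_doSomething_example_new(ZSTD_CCtx* cctx, size_t value)
{
    RETURN_ERROR_IF(cctx == NULL, GENERIC, "NULL context");
    FORWARD_IF_ERROR( internalStep(cctx, value) );     /* hypothetical helper */
    return 0;
}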
@ -117,7 +117,8 @@ static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
|
|||
size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
|
||||
{
|
||||
if (cctx==NULL) return 0; /* support free on NULL */
|
||||
if (cctx->staticSize) return ERROR(memory_allocation); /* not compatible with static CCtx */
|
||||
RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
|
||||
"not compatible with static CCtx");
|
||||
ZSTD_freeCCtxContent(cctx);
|
||||
ZSTD_free(cctx, cctx->customMem);
|
||||
return 0;
|
||||
|
@ -195,7 +196,7 @@ size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
|
|||
}
|
||||
|
||||
size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
|
||||
if (!cctxParams) { return ERROR(GENERIC); }
|
||||
RETURN_ERROR_IF(!cctxParams, GENERIC);
|
||||
memset(cctxParams, 0, sizeof(*cctxParams));
|
||||
cctxParams->compressionLevel = compressionLevel;
|
||||
cctxParams->fParams.contentSizeFlag = 1;
|
||||
|
@ -204,8 +205,8 @@ size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel)
|
|||
|
||||
size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
|
||||
{
|
||||
if (!cctxParams) { return ERROR(GENERIC); }
|
||||
CHECK_F( ZSTD_checkCParams(params.cParams) );
|
||||
RETURN_ERROR_IF(!cctxParams, GENERIC);
|
||||
FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
|
||||
memset(cctxParams, 0, sizeof(*cctxParams));
|
||||
cctxParams->cParams = params.cParams;
|
||||
cctxParams->fParams = params.fParams;
|
||||
|
@@ -378,10 +379,10 @@ static int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
    return 1;
}

#define BOUNDCHECK(cParam, val) { \
    if (!ZSTD_cParam_withinBounds(cParam,val)) { \
        return ERROR(parameter_outOfBound); \
} }
#define BOUNDCHECK(cParam, val) { \
    RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
                    parameter_outOfBound); \
}


static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
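For illustration only (a hypothetical setter, not part of the diff): BOUNDCHECK keeps the same call-site shape, the difference being that an out-of-range value is now logged before parameter_outOfBound is returned.

size_t setWindowLog_example(ZSTD_CCtx_params* CCtxParams, int value)
{
    BOUNDCHECK(ZSTD_c_windowLog, value);   /* early-returns ERROR(parameter_outOfBound), now with a log line */
    CCtxParams->cParams.windowLog = (unsigned)value;
    return CCtxParams->cParams.windowLog;
}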
@ -425,7 +426,7 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
|
|||
if (ZSTD_isUpdateAuthorized(param)) {
|
||||
cctx->cParamsChanged = 1;
|
||||
} else {
|
||||
return ERROR(stage_wrong);
|
||||
RETURN_ERROR(stage_wrong);
|
||||
} }
|
||||
|
||||
switch(param)
|
||||
|
@@ -434,7 +435,8 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_c_compressionLevel:
        if (cctx->cdict) return ERROR(stage_wrong);
        RETURN_ERROR_IF(cctx->cdict, stage_wrong,
                        "compression level is configured in cdict");
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_c_windowLog:
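With a message attached, a failed check produces a diagnostic along these lines when the library is built with a debug level of at least 3 (file name and line number below are illustrative; the format follows the RETURN_ERROR_IF definition in the first hunk):

zstd_compress.c:438: ERROR!: check cctx->cdict failed, returning ERROR(stage_wrong): compression level is configured in cdict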
@ -444,7 +446,8 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
|
|||
case ZSTD_c_minMatch:
|
||||
case ZSTD_c_targetLength:
|
||||
case ZSTD_c_strategy:
|
||||
if (cctx->cdict) return ERROR(stage_wrong);
|
||||
RETURN_ERROR_IF(cctx->cdict, stage_wrong,
|
||||
"cparams are configured in cdict");
|
||||
return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
|
||||
|
||||
case ZSTD_c_contentSizeFlag:
|
||||
|
@ -461,9 +464,8 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
|
|||
return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
|
||||
|
||||
case ZSTD_c_nbWorkers:
|
||||
if ((value!=0) && cctx->staticSize) {
|
||||
return ERROR(parameter_unsupported); /* MT not compatible with static alloc */
|
||||
}
|
||||
RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
|
||||
"MT not compatible with static alloc");
|
||||
return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
|
||||
|
||||
case ZSTD_c_jobSize:
|
||||
|
@ -476,10 +478,10 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
|
|||
case ZSTD_c_ldmMinMatch:
|
||||
case ZSTD_c_ldmBucketSizeLog:
|
||||
case ZSTD_c_ldmHashRateLog:
|
||||
if (cctx->cdict) return ERROR(stage_wrong);
|
||||
RETURN_ERROR_IF(cctx->cdict, stage_wrong);
|
||||
return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
|
||||
|
||||
default: return ERROR(parameter_unsupported);
|
||||
default: RETURN_ERROR(parameter_unsupported);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -575,7 +577,7 @@ size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* CCtxParams,
|
|||
|
||||
case ZSTD_c_nbWorkers :
|
||||
#ifndef ZSTD_MULTITHREAD
|
||||
if (value!=0) return ERROR(parameter_unsupported);
|
||||
RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
|
||||
return 0;
|
||||
#else
|
||||
return ZSTDMT_CCtxParam_setNbWorkers(CCtxParams, value);
|
||||
|
@ -583,21 +585,21 @@ size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* CCtxParams,
|
|||
|
||||
case ZSTD_c_jobSize :
|
||||
#ifndef ZSTD_MULTITHREAD
|
||||
return ERROR(parameter_unsupported);
|
||||
RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
|
||||
#else
|
||||
return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_jobSize, value);
|
||||
#endif
|
||||
|
||||
case ZSTD_c_overlapLog :
|
||||
#ifndef ZSTD_MULTITHREAD
|
||||
return ERROR(parameter_unsupported);
|
||||
RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
|
||||
#else
|
||||
return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_overlapLog, value);
|
||||
#endif
|
||||
|
||||
case ZSTD_c_rsyncable :
|
||||
#ifndef ZSTD_MULTITHREAD
|
||||
return ERROR(parameter_unsupported);
|
||||
RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
|
||||
#else
|
||||
return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_rsyncable, value);
|
||||
#endif
|
||||
|
@ -625,12 +627,12 @@ size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* CCtxParams,
|
|||
return CCtxParams->ldmParams.bucketSizeLog;
|
||||
|
||||
case ZSTD_c_ldmHashRateLog :
|
||||
if (value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
|
||||
return ERROR(parameter_outOfBound);
|
||||
RETURN_ERROR_IF(value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN,
|
||||
parameter_outOfBound);
|
||||
CCtxParams->ldmParams.hashRateLog = value;
|
||||
return CCtxParams->ldmParams.hashRateLog;
|
||||
|
||||
default: return ERROR(parameter_unsupported);
|
||||
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -694,7 +696,7 @@ size_t ZSTD_CCtxParam_getParameter(
|
|||
break;
|
||||
case ZSTD_c_jobSize :
|
||||
#ifndef ZSTD_MULTITHREAD
|
||||
return ERROR(parameter_unsupported);
|
||||
RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
|
||||
#else
|
||||
assert(CCtxParams->jobSize <= INT_MAX);
|
||||
*value = (int)CCtxParams->jobSize;
|
||||
|
@ -702,14 +704,14 @@ size_t ZSTD_CCtxParam_getParameter(
|
|||
#endif
|
||||
case ZSTD_c_overlapLog :
|
||||
#ifndef ZSTD_MULTITHREAD
|
||||
return ERROR(parameter_unsupported);
|
||||
RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
|
||||
#else
|
||||
*value = CCtxParams->overlapLog;
|
||||
break;
|
||||
#endif
|
||||
case ZSTD_c_rsyncable :
|
||||
#ifndef ZSTD_MULTITHREAD
|
||||
return ERROR(parameter_unsupported);
|
||||
RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
|
||||
#else
|
||||
*value = CCtxParams->rsyncable;
|
||||
break;
|
||||
|
@ -729,7 +731,7 @@ size_t ZSTD_CCtxParam_getParameter(
|
|||
case ZSTD_c_ldmHashRateLog :
|
||||
*value = CCtxParams->ldmParams.hashRateLog;
|
||||
break;
|
||||
default: return ERROR(parameter_unsupported);
|
||||
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -745,8 +747,8 @@ size_t ZSTD_CCtx_setParametersUsingCCtxParams(
|
|||
ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
|
||||
{
|
||||
DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
|
||||
if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
|
||||
if (cctx->cdict) return ERROR(stage_wrong);
|
||||
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
|
||||
RETURN_ERROR_IF(cctx->cdict, stage_wrong);
|
||||
|
||||
cctx->requestedParams = *params;
|
||||
return 0;
|
||||
|
@ -755,7 +757,7 @@ size_t ZSTD_CCtx_setParametersUsingCCtxParams(
|
|||
ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
|
||||
{
|
||||
DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
|
||||
if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
|
||||
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
|
||||
cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
|
||||
return 0;
|
||||
}
|
||||
|
@ -764,8 +766,9 @@ size_t ZSTD_CCtx_loadDictionary_advanced(
|
|||
ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
|
||||
ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
|
||||
{
|
||||
if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
|
||||
if (cctx->staticSize) return ERROR(memory_allocation); /* no malloc for static CCtx */
|
||||
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
|
||||
RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
|
||||
"no malloc for static CCtx");
|
||||
DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
|
||||
ZSTD_freeCDict(cctx->cdictLocal); /* in case one already exists */
|
||||
if (dict==NULL || dictSize==0) { /* no dictionary mode */
|
||||
|
@ -779,8 +782,7 @@ size_t ZSTD_CCtx_loadDictionary_advanced(
|
|||
dictLoadMethod, dictContentType,
|
||||
cParams, cctx->customMem);
|
||||
cctx->cdict = cctx->cdictLocal;
|
||||
if (cctx->cdictLocal == NULL)
|
||||
return ERROR(memory_allocation);
|
||||
RETURN_ERROR_IF(cctx->cdictLocal == NULL, memory_allocation);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -801,7 +803,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, s
|
|||
|
||||
size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
|
||||
{
|
||||
if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
|
||||
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
|
||||
cctx->cdict = cdict;
|
||||
memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* exclusive */
|
||||
return 0;
|
||||
|
@ -815,7 +817,7 @@ size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSiz
|
|||
size_t ZSTD_CCtx_refPrefix_advanced(
|
||||
ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
|
||||
{
|
||||
if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
|
||||
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
|
||||
cctx->cdict = NULL; /* prefix discards any prior cdict */
|
||||
cctx->prefixDict.dict = prefix;
|
||||
cctx->prefixDict.dictSize = prefixSize;
|
||||
|
@ -834,7 +836,7 @@ size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
|
|||
}
|
||||
if ( (reset == ZSTD_reset_parameters)
|
||||
|| (reset == ZSTD_reset_session_and_parameters) ) {
|
||||
if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
|
||||
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
|
||||
cctx->cdict = NULL;
|
||||
return ZSTD_CCtxParams_reset(&cctx->requestedParams);
|
||||
}
|
||||
|
@ -974,8 +976,7 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
|
|||
|
||||
size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
|
||||
{
|
||||
/* Estimate CCtx size is supported for single-threaded compression only. */
|
||||
if (params->nbWorkers > 0) { return ERROR(GENERIC); }
|
||||
RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
|
||||
{ ZSTD_compressionParameters const cParams =
|
||||
ZSTD_getCParamsFromCCtxParams(params, 0, 0);
|
||||
size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
|
||||
|
@ -1023,7 +1024,7 @@ size_t ZSTD_estimateCCtxSize(int compressionLevel)
|
|||
|
||||
size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
|
||||
{
|
||||
if (params->nbWorkers > 0) { return ERROR(GENERIC); }
|
||||
RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
|
||||
{ size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);
|
||||
size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << params->cParams.windowLog);
|
||||
size_t const inBuffSize = ((size_t)1 << params->cParams.windowLog) + blockSize;
|
||||
|
@ -1368,13 +1369,13 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
|
|||
DEBUGLOG(4, "Need to resize workSpaceSize from %zuKB to %zuKB",
|
||||
zc->workSpaceSize >> 10,
|
||||
neededSpace >> 10);
|
||||
/* static cctx : no resize, error out */
|
||||
if (zc->staticSize) return ERROR(memory_allocation);
|
||||
|
||||
RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
|
||||
|
||||
zc->workSpaceSize = 0;
|
||||
ZSTD_free(zc->workSpace, zc->customMem);
|
||||
zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
|
||||
if (zc->workSpace == NULL) return ERROR(memory_allocation);
|
||||
RETURN_ERROR_IF(zc->workSpace == NULL, memory_allocation);
|
||||
zc->workSpaceSize = neededSpace;
|
||||
zc->workSpaceOversizedDuration = 0;
|
||||
|
||||
|
@ -1645,7 +1646,7 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
|
|||
ZSTD_buffered_policy_e zbuff)
|
||||
{
|
||||
DEBUGLOG(5, "ZSTD_copyCCtx_internal");
|
||||
if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong);
|
||||
RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong);
|
||||
|
||||
memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
|
||||
{ ZSTD_CCtx_params params = dstCCtx->requestedParams;
|
||||
|
@ -1778,7 +1779,8 @@ static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue)
|
|||
static size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
|
||||
{
|
||||
U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
|
||||
if (srcSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
|
||||
RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
|
||||
dstSize_tooSmall);
|
||||
MEM_writeLE24(dst, cBlockHeader24);
|
||||
memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
|
||||
return ZSTD_blockHeaderSize + srcSize;
|
||||
|
@ -1789,7 +1791,7 @@ static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void
|
|||
BYTE* const ostart = (BYTE* const)dst;
|
||||
U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
|
||||
|
||||
if (srcSize + flSize > dstCapacity) return ERROR(dstSize_tooSmall);
|
||||
RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall);
|
||||
|
||||
switch(flSize)
|
||||
{
|
||||
|
@ -1879,7 +1881,7 @@ static size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
|
|||
if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
|
||||
}
|
||||
|
||||
if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall); /* not enough space for compression */
|
||||
RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
|
||||
{ HUF_repeat repeat = prevHuf->repeatMode;
|
||||
int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
|
||||
if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
|
||||
|
@@ -2051,21 +2053,17 @@ static size_t ZSTD_fseBitCost(
    unsigned s;
    FSE_CState_t cstate;
    FSE_initCState(&cstate, ctable);
    if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {
        DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u",
    RETURN_ERROR_IF(ZSTD_getFSEMaxSymbolValue(ctable) < max, GENERIC,
                    "Repeat FSE_CTable has maxSymbolValue %u < %u",
                    ZSTD_getFSEMaxSymbolValue(ctable), max);
        return ERROR(GENERIC);
    }
    for (s = 0; s <= max; ++s) {
        unsigned const tableLog = cstate.stateLog;
        unsigned const badCost = (tableLog + 1) << kAccuracyLog;
        unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
        if (count[s] == 0)
            continue;
        if (bitCost >= badCost) {
            DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s);
            return ERROR(GENERIC);
        }
        RETURN_ERROR_IF(bitCost >= badCost, GENERIC,
                        "Repeat FSE_CTable has Prob[%u] == 0", s);
        cost += count[s] * bitCost;
    }
    return cost >> kAccuracyLog;
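A note on units, since the checks above depend on them: FSE_bitCost reports a per-symbol cost in fixed point with kAccuracyLog fractional bits, so badCost = (tableLog + 1) << kAccuracyLog acts as a sentinel for "this symbol has zero probability in the repeated table" (as the error message says), and the final cost >> kAccuracyLog converts the accumulated total back to whole bits. As a worked example with illustrative values, if kAccuracyLog were 8 and tableLog were 9, then badCost = (9 + 1) << 8 = 2560, i.e. any symbol whose fixed-point cost reaches 10 bits is rejected.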
@ -2081,7 +2079,7 @@ static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
|
|||
BYTE wksp[FSE_NCOUNTBOUND];
|
||||
S16 norm[MaxSeq + 1];
|
||||
const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
|
||||
CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq, max));
|
||||
FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max));
|
||||
return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
|
||||
}
|
||||
|
||||
|
@ -2187,15 +2185,15 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
|
|||
|
||||
switch (type) {
|
||||
case set_rle:
|
||||
CHECK_F(FSE_buildCTable_rle(nextCTable, (BYTE)max));
|
||||
if (dstCapacity==0) return ERROR(dstSize_tooSmall);
|
||||
FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max));
|
||||
RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall);
|
||||
*op = codeTable[0];
|
||||
return 1;
|
||||
case set_repeat:
|
||||
memcpy(nextCTable, prevCTable, prevCTableSize);
|
||||
return 0;
|
||||
case set_basic:
|
||||
CHECK_F(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize)); /* note : could be pre-calculated */
|
||||
FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize)); /* note : could be pre-calculated */
|
||||
return 0;
|
||||
case set_compressed: {
|
||||
S16 norm[MaxSeq + 1];
|
||||
|
@ -2206,14 +2204,14 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
|
|||
nbSeq_1--;
|
||||
}
|
||||
assert(nbSeq_1 > 1);
|
||||
CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
|
||||
FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
|
||||
{ size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
|
||||
if (FSE_isError(NCountSize)) return NCountSize;
|
||||
CHECK_F(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize));
|
||||
FORWARD_IF_ERROR(NCountSize);
|
||||
FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize));
|
||||
return NCountSize;
|
||||
}
|
||||
}
|
||||
default: return assert(0), ERROR(GENERIC);
|
||||
default: assert(0); RETURN_ERROR(GENERIC);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -2230,7 +2228,9 @@ ZSTD_encodeSequences_body(
    FSE_CState_t stateOffsetBits;
    FSE_CState_t stateLitLength;

    CHECK_E(BIT_initCStream(&blockStream, dst, dstCapacity), dstSize_tooSmall); /* not enough space remaining */
    RETURN_ERROR_IF(
        ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)),
        dstSize_tooSmall, "not enough space remaining");
    DEBUGLOG(6, "available space for bitstream : %i (dstCapacity=%u)",
                (int)(blockStream.endPtr - blockStream.startPtr),
                (unsigned)dstCapacity);
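For reference, per the CHECK_E definition in the first hunk, the removed line expanded to roughly the block below; the error code from BIT_initCStream must be remapped to dstSize_tooSmall rather than forwarded as-is, which is why the replacement spells out the ERR_isError test explicitly instead of using FORWARD_IF_ERROR:

{   size_t const errcod = BIT_initCStream(&blockStream, dst, dstCapacity);
    if (ERR_isError(errcod)) return ERROR(dstSize_tooSmall);
}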
@ -2304,7 +2304,7 @@ ZSTD_encodeSequences_body(
|
|||
FSE_flushCState(&blockStream, &stateLitLength);
|
||||
|
||||
{ size_t const streamSize = BIT_closeCStream(&blockStream);
|
||||
if (streamSize==0) return ERROR(dstSize_tooSmall); /* not enough space */
|
||||
RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space");
|
||||
return streamSize;
|
||||
}
|
||||
}
|
||||
|
@ -2412,14 +2412,14 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
|
|||
literals, litSize,
|
||||
workspace, wkspSize,
|
||||
bmi2);
|
||||
if (ZSTD_isError(cSize))
|
||||
return cSize;
|
||||
FORWARD_IF_ERROR(cSize);
|
||||
assert(cSize <= dstCapacity);
|
||||
op += cSize;
|
||||
}
|
||||
|
||||
/* Sequences Header */
|
||||
if ((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/) return ERROR(dstSize_tooSmall);
|
||||
RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
|
||||
dstSize_tooSmall);
|
||||
if (nbSeq < 0x7F)
|
||||
*op++ = (BYTE)nbSeq;
|
||||
else if (nbSeq < LONGNBSEQ)
|
||||
|
@ -2453,7 +2453,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
|
|||
count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
|
||||
prevEntropy->fse.litlengthCTable, sizeof(prevEntropy->fse.litlengthCTable),
|
||||
workspace, wkspSize);
|
||||
if (ZSTD_isError(countSize)) return countSize;
|
||||
FORWARD_IF_ERROR(countSize);
|
||||
if (LLtype == set_compressed)
|
||||
lastNCount = op;
|
||||
op += countSize;
|
||||
|
@ -2475,7 +2475,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
|
|||
count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
|
||||
prevEntropy->fse.offcodeCTable, sizeof(prevEntropy->fse.offcodeCTable),
|
||||
workspace, wkspSize);
|
||||
if (ZSTD_isError(countSize)) return countSize;
|
||||
FORWARD_IF_ERROR(countSize);
|
||||
if (Offtype == set_compressed)
|
||||
lastNCount = op;
|
||||
op += countSize;
|
||||
|
@ -2495,7 +2495,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
|
|||
count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
|
||||
prevEntropy->fse.matchlengthCTable, sizeof(prevEntropy->fse.matchlengthCTable),
|
||||
workspace, wkspSize);
|
||||
if (ZSTD_isError(countSize)) return countSize;
|
||||
FORWARD_IF_ERROR(countSize);
|
||||
if (MLtype == set_compressed)
|
||||
lastNCount = op;
|
||||
op += countSize;
|
||||
|
@ -2510,7 +2510,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
|
|||
CTable_LitLength, llCodeTable,
|
||||
sequences, nbSeq,
|
||||
longOffsets, bmi2);
|
||||
if (ZSTD_isError(bitstreamSize)) return bitstreamSize;
|
||||
FORWARD_IF_ERROR(bitstreamSize);
|
||||
op += bitstreamSize;
|
||||
/* zstd versions <= 1.3.4 mistakenly report corruption when
|
||||
* FSE_readNCount() receives a buffer < 4 bytes.
|
||||
|
@ -2553,7 +2553,7 @@ ZSTD_compressSequences(seqStore_t* seqStorePtr,
|
|||
*/
|
||||
if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
|
||||
return 0; /* block not compressed */
|
||||
if (ZSTD_isError(cSize)) return cSize;
|
||||
FORWARD_IF_ERROR(cSize);
|
||||
|
||||
/* Check compressibility */
|
||||
{ size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
|
||||
|
@ -2680,7 +2680,7 @@ static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
|
|||
ldmSeqStore.seq = zc->ldmSequences;
|
||||
ldmSeqStore.capacity = zc->maxNbLdmSequences;
|
||||
/* Updates ldmSeqStore.size */
|
||||
CHECK_F(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
|
||||
FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
|
||||
&zc->appliedParams.ldmParams,
|
||||
src, srcSize));
|
||||
/* Updates ldmSeqStore.pos */
|
||||
|
@ -2753,8 +2753,9 @@ static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
|
|||
ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
|
||||
U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
|
||||
|
||||
if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
|
||||
return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */
|
||||
RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
|
||||
dstSize_tooSmall,
|
||||
"not enough space to store compressed block");
|
||||
if (remaining < blockSize) blockSize = remaining;
|
||||
|
||||
if (ZSTD_window_needOverflowCorrection(ms->window, ip + blockSize)) {
|
||||
|
@ -2775,11 +2776,11 @@ static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
|
|||
{ size_t cSize = ZSTD_compressBlock_internal(cctx,
|
||||
op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
|
||||
ip, blockSize);
|
||||
if (ZSTD_isError(cSize)) return cSize;
|
||||
FORWARD_IF_ERROR(cSize);
|
||||
|
||||
if (cSize == 0) { /* block is not compressible */
|
||||
cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
|
||||
if (ZSTD_isError(cSize)) return cSize;
|
||||
FORWARD_IF_ERROR(cSize);
|
||||
} else {
|
||||
U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
|
||||
MEM_writeLE24(op, cBlockHeader24);
|
||||
|
@ -2816,7 +2817,7 @@ static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
|
|||
size_t pos=0;
|
||||
|
||||
assert(!(params.fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
|
||||
if (dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX) return ERROR(dstSize_tooSmall);
|
||||
RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall);
|
||||
DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
|
||||
!params.fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
|
||||
|
||||
|
@ -2852,7 +2853,7 @@ static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
|
|||
*/
|
||||
size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
|
||||
{
|
||||
if (dstCapacity < ZSTD_blockHeaderSize) return ERROR(dstSize_tooSmall);
|
||||
RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall);
|
||||
{ U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1); /* 0 size */
|
||||
MEM_writeLE24(dst, cBlockHeader24);
|
||||
return ZSTD_blockHeaderSize;
|
||||
|
@ -2861,10 +2862,9 @@ size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
|
|||
|
||||
size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
|
||||
{
|
||||
if (cctx->stage != ZSTDcs_init)
|
||||
return ERROR(stage_wrong);
|
||||
if (cctx->appliedParams.ldmParams.enableLdm)
|
||||
return ERROR(parameter_unsupported);
|
||||
RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong);
|
||||
RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm,
|
||||
parameter_unsupported);
|
||||
cctx->externSeqStore.seq = seq;
|
||||
cctx->externSeqStore.size = nbSeq;
|
||||
cctx->externSeqStore.capacity = nbSeq;
|
||||
|
@ -2883,12 +2883,13 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
|
|||
|
||||
DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
|
||||
cctx->stage, (unsigned)srcSize);
|
||||
if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */
|
||||
RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
|
||||
"missing init (ZSTD_compressBegin)");
|
||||
|
||||
if (frame && (cctx->stage==ZSTDcs_init)) {
|
||||
fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams,
|
||||
cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
|
||||
if (ZSTD_isError(fhSize)) return fhSize;
|
||||
FORWARD_IF_ERROR(fhSize);
|
||||
dstCapacity -= fhSize;
|
||||
dst = (char*)dst + fhSize;
|
||||
cctx->stage = ZSTDcs_ongoing;
|
||||
|
@ -2923,17 +2924,18 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
|
|||
{ size_t const cSize = frame ?
|
||||
ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
|
||||
ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
|
||||
if (ZSTD_isError(cSize)) return cSize;
|
||||
FORWARD_IF_ERROR(cSize);
|
||||
cctx->consumedSrcSize += srcSize;
|
||||
cctx->producedCSize += (cSize + fhSize);
|
||||
assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
|
||||
if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
|
||||
ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
|
||||
if (cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne) {
|
||||
DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize >= %u",
|
||||
(unsigned)cctx->pledgedSrcSizePlusOne-1, (unsigned)cctx->consumedSrcSize);
|
||||
return ERROR(srcSize_wrong);
|
||||
}
|
||||
RETURN_ERROR_IF(
|
||||
cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,
|
||||
srcSize_wrong,
|
||||
"error : pledgedSrcSize = %u, while realSrcSize >= %u",
|
||||
(unsigned)cctx->pledgedSrcSizePlusOne-1,
|
||||
(unsigned)cctx->consumedSrcSize);
|
||||
}
|
||||
return cSize + fhSize;
|
||||
}
|
||||
|
@ -2958,7 +2960,7 @@ size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
|
|||
size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
|
||||
{
|
||||
size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
|
||||
if (srcSize > blockSizeMax) return ERROR(srcSize_wrong);
|
||||
RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong);
|
||||
|
||||
return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
|
||||
}
|
||||
|
@ -3021,9 +3023,9 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
|
|||
NOTE: This behavior is not standard and could be improved in the future. */
|
||||
static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) {
|
||||
U32 s;
|
||||
if (dictMaxSymbolValue < maxSymbolValue) return ERROR(dictionary_corrupted);
|
||||
RETURN_ERROR_IF(dictMaxSymbolValue < maxSymbolValue, dictionary_corrupted);
|
||||
for (s = 0; s <= maxSymbolValue; ++s) {
|
||||
if (normalizedCounter[s] == 0) return ERROR(dictionary_corrupted);
|
||||
RETURN_ERROR_IF(normalizedCounter[s] == 0, dictionary_corrupted);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -3061,53 +3063,56 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
|
|||
|
||||
{ unsigned maxSymbolValue = 255;
|
||||
size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr);
|
||||
if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
|
||||
if (maxSymbolValue < 255) return ERROR(dictionary_corrupted);
|
||||
RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted);
|
||||
RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted);
|
||||
dictPtr += hufHeaderSize;
|
||||
}
|
||||
|
||||
{ unsigned offcodeLog;
|
||||
size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
|
||||
if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
|
||||
if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
|
||||
RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted);
|
||||
RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted);
|
||||
/* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
|
||||
/* fill all offset symbols to avoid garbage at end of table */
|
||||
CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.offcodeCTable,
|
||||
offcodeNCount, MaxOff, offcodeLog,
|
||||
workspace, HUF_WORKSPACE_SIZE),
|
||||
dictionary_corrupted);
|
||||
RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
|
||||
bs->entropy.fse.offcodeCTable,
|
||||
offcodeNCount, MaxOff, offcodeLog,
|
||||
workspace, HUF_WORKSPACE_SIZE)),
|
||||
dictionary_corrupted);
|
||||
dictPtr += offcodeHeaderSize;
|
||||
}
|
||||
|
||||
{ short matchlengthNCount[MaxML+1];
|
||||
unsigned matchlengthMaxValue = MaxML, matchlengthLog;
|
||||
size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
|
||||
if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
|
||||
if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
|
||||
RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted);
|
||||
RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted);
|
||||
/* Every match length code must have non-zero probability */
|
||||
CHECK_F( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
|
||||
CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.matchlengthCTable,
|
||||
matchlengthNCount, matchlengthMaxValue, matchlengthLog,
|
||||
workspace, HUF_WORKSPACE_SIZE),
|
||||
dictionary_corrupted);
|
||||
FORWARD_IF_ERROR( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
|
||||
RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
|
||||
bs->entropy.fse.matchlengthCTable,
|
||||
matchlengthNCount, matchlengthMaxValue, matchlengthLog,
|
||||
workspace, HUF_WORKSPACE_SIZE)),
|
||||
dictionary_corrupted);
|
||||
dictPtr += matchlengthHeaderSize;
|
||||
}
|
||||
|
||||
{ short litlengthNCount[MaxLL+1];
|
||||
unsigned litlengthMaxValue = MaxLL, litlengthLog;
|
||||
size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
|
||||
if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
|
||||
if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
|
||||
RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted);
|
||||
RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted);
|
||||
/* Every literal length code must have non-zero probability */
|
||||
CHECK_F( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
|
||||
CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.litlengthCTable,
|
||||
litlengthNCount, litlengthMaxValue, litlengthLog,
|
||||
workspace, HUF_WORKSPACE_SIZE),
|
||||
dictionary_corrupted);
|
||||
FORWARD_IF_ERROR( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
|
||||
RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
|
||||
bs->entropy.fse.litlengthCTable,
|
||||
litlengthNCount, litlengthMaxValue, litlengthLog,
|
||||
workspace, HUF_WORKSPACE_SIZE)),
|
||||
dictionary_corrupted);
|
||||
dictPtr += litlengthHeaderSize;
|
||||
}
|
||||
|
||||
if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
|
||||
RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted);
|
||||
bs->rep[0] = MEM_readLE32(dictPtr+0);
|
||||
bs->rep[1] = MEM_readLE32(dictPtr+4);
|
||||
bs->rep[2] = MEM_readLE32(dictPtr+8);
|
||||
|
@ -3120,19 +3125,19 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
|
|||
offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
|
||||
}
|
||||
/* All offset values <= dictContentSize + 128 KB must be representable */
|
||||
CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
|
||||
FORWARD_IF_ERROR(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
|
||||
/* All repCodes must be <= dictContentSize and != 0*/
|
||||
{ U32 u;
|
||||
for (u=0; u<3; u++) {
|
||||
if (bs->rep[u] == 0) return ERROR(dictionary_corrupted);
|
||||
if (bs->rep[u] > dictContentSize) return ERROR(dictionary_corrupted);
|
||||
RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted);
|
||||
RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted);
|
||||
} }
|
||||
|
||||
bs->entropy.huf.repeatMode = HUF_repeat_valid;
|
||||
bs->entropy.fse.offcode_repeatMode = FSE_repeat_valid;
|
||||
bs->entropy.fse.matchlength_repeatMode = FSE_repeat_valid;
|
||||
bs->entropy.fse.litlength_repeatMode = FSE_repeat_valid;
|
||||
CHECK_F(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize, dtlm));
|
||||
FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize, dtlm));
|
||||
return dictID;
|
||||
}
|
||||
}
|
||||
|
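The same remapping concern shapes this whole hunk: FSE_readNCount, HUF_readCTable, and FSE_buildCTable_wksp report errors from their own layer, and those must surface as dictionary_corrupted rather than be forwarded, hence RETURN_ERROR_IF(FSE_isError(...), dictionary_corrupted) instead of FORWARD_IF_ERROR. A condensed sketch of the recurring shape (identifiers shortened for illustration; the real calls are in the hunk above):

size_t const headerSize = FSE_readNCount(ncount, &maxVal, &tableLog, dictPtr, dictEnd - dictPtr);
RETURN_ERROR_IF(FSE_isError(headerSize), dictionary_corrupted);
RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(ctable, ncount, maxVal, tableLog,
                                                 workspace, HUF_WORKSPACE_SIZE)),
                dictionary_corrupted);
dictPtr += headerSize;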
@ -3162,8 +3167,7 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
|
|||
DEBUGLOG(4, "raw content dictionary detected");
|
||||
return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
|
||||
}
|
||||
if (dictContentType == ZSTD_dct_fullDict)
|
||||
return ERROR(dictionary_wrong);
|
||||
RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong);
|
||||
assert(0); /* impossible */
|
||||
}
|
||||
|
||||
|
@ -3190,13 +3194,13 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
|
|||
return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
|
||||
}
|
||||
|
||||
CHECK_F( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
|
||||
FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
|
||||
ZSTDcrp_continue, zbuff) );
|
||||
{
|
||||
size_t const dictID = ZSTD_compress_insertDictionary(
|
||||
cctx->blockState.prevCBlock, &cctx->blockState.matchState,
|
||||
¶ms, dict, dictSize, dictContentType, dtlm, cctx->entropyWorkspace);
|
||||
if (ZSTD_isError(dictID)) return dictID;
|
||||
FORWARD_IF_ERROR(dictID);
|
||||
assert(dictID <= (size_t)(U32)-1);
|
||||
cctx->dictID = (U32)dictID;
|
||||
}
|
||||
|
@ -3213,7 +3217,7 @@ size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
|
|||
{
|
||||
DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params.cParams.windowLog);
|
||||
/* compression parameters verification and optimization */
|
||||
CHECK_F( ZSTD_checkCParams(params.cParams) );
|
||||
FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
|
||||
return ZSTD_compressBegin_internal(cctx,
|
||||
dict, dictSize, dictContentType, dtlm,
|
||||
cdict,
|
||||
|
@ -3261,12 +3265,12 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
|
|||
size_t fhSize = 0;
|
||||
|
||||
DEBUGLOG(4, "ZSTD_writeEpilogue");
|
||||
if (cctx->stage == ZSTDcs_created) return ERROR(stage_wrong); /* init missing */
|
||||
RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
|
||||
|
||||
/* special case : empty frame */
|
||||
if (cctx->stage == ZSTDcs_init) {
|
||||
fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams, 0, 0);
|
||||
if (ZSTD_isError(fhSize)) return fhSize;
|
||||
FORWARD_IF_ERROR(fhSize);
|
||||
dstCapacity -= fhSize;
|
||||
op += fhSize;
|
||||
cctx->stage = ZSTDcs_ongoing;
|
||||
|
@ -3275,7 +3279,7 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
|
|||
if (cctx->stage != ZSTDcs_ending) {
|
||||
/* write one last empty block, make it the "last" block */
|
||||
U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
|
||||
if (dstCapacity<4) return ERROR(dstSize_tooSmall);
|
||||
RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall);
|
||||
MEM_writeLE32(op, cBlockHeader24);
|
||||
op += ZSTD_blockHeaderSize;
|
||||
dstCapacity -= ZSTD_blockHeaderSize;
|
||||
|
@ -3283,7 +3287,7 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
|
|||
|
||||
if (cctx->appliedParams.fParams.checksumFlag) {
|
||||
U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
|
||||
if (dstCapacity<4) return ERROR(dstSize_tooSmall);
|
||||
RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall);
|
||||
DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
|
||||
MEM_writeLE32(op, checksum);
|
||||
op += 4;
|
||||
|
@ -3301,18 +3305,20 @@ size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
|
|||
size_t const cSize = ZSTD_compressContinue_internal(cctx,
|
||||
dst, dstCapacity, src, srcSize,
|
||||
1 /* frame mode */, 1 /* last chunk */);
|
||||
if (ZSTD_isError(cSize)) return cSize;
|
||||
FORWARD_IF_ERROR(cSize);
|
||||
endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
|
||||
if (ZSTD_isError(endResult)) return endResult;
|
||||
FORWARD_IF_ERROR(endResult);
|
||||
assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
|
||||
if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
|
||||
ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
|
||||
DEBUGLOG(4, "end of frame : controlling src size");
|
||||
if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1) {
|
||||
DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize = %u",
|
||||
(unsigned)cctx->pledgedSrcSizePlusOne-1, (unsigned)cctx->consumedSrcSize);
|
||||
return ERROR(srcSize_wrong);
|
||||
} }
|
||||
RETURN_ERROR_IF(
|
||||
cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
|
||||
srcSize_wrong,
|
||||
"error : pledgedSrcSize = %u, while realSrcSize = %u",
|
||||
(unsigned)cctx->pledgedSrcSizePlusOne-1,
|
||||
(unsigned)cctx->consumedSrcSize);
|
||||
}
|
||||
return cSize + endResult;
|
||||
}
|
||||
|
||||
|
@ -3340,7 +3346,7 @@ size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
|
|||
ZSTD_parameters params)
|
||||
{
|
||||
DEBUGLOG(4, "ZSTD_compress_advanced");
|
||||
CHECK_F(ZSTD_checkCParams(params.cParams));
|
||||
FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams));
|
||||
return ZSTD_compress_internal(cctx,
|
||||
dst, dstCapacity,
|
||||
src, srcSize,
|
||||
|
@ -3357,7 +3363,7 @@ size_t ZSTD_compress_advanced_internal(
|
|||
ZSTD_CCtx_params params)
|
||||
{
|
||||
DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
|
||||
CHECK_F( ZSTD_compressBegin_internal(cctx,
|
||||
FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
|
||||
dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
|
||||
params, srcSize, ZSTDb_not_buffered) );
|
||||
return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
|
||||
|
@ -3441,7 +3447,7 @@ static size_t ZSTD_initCDict_internal(
|
|||
void* const internalBuffer = ZSTD_malloc(dictSize, cdict->customMem);
|
||||
cdict->dictBuffer = internalBuffer;
|
||||
cdict->dictContent = internalBuffer;
|
||||
if (!internalBuffer) return ERROR(memory_allocation);
|
||||
RETURN_ERROR_IF(!internalBuffer, memory_allocation);
|
||||
memcpy(internalBuffer, dictBuffer, dictSize);
|
||||
}
|
||||
cdict->dictContentSize = dictSize;
|
||||
|
@ -3467,7 +3473,7 @@ static size_t ZSTD_initCDict_internal(
|
|||
&cdict->cBlockState, &cdict->matchState, ¶ms,
|
||||
cdict->dictContent, cdict->dictContentSize,
|
||||
dictContentType, ZSTD_dtlm_full, cdict->workspace);
|
||||
if (ZSTD_isError(dictID)) return dictID;
|
||||
FORWARD_IF_ERROR(dictID);
|
||||
assert(dictID <= (size_t)(U32)-1);
|
||||
cdict->dictID = (U32)dictID;
|
||||
}
|
||||
|
@ -3597,7 +3603,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(
|
|||
ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
|
||||
{
|
||||
DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
|
||||
if (cdict==NULL) return ERROR(dictionary_wrong);
|
||||
RETURN_ERROR_IF(cdict==NULL, dictionary_wrong);
|
||||
{ ZSTD_CCtx_params params = cctx->requestedParams;
|
||||
params.cParams = ZSTD_getCParamsFromCDict(cdict);
|
||||
/* Increase window log to fit the entire dictionary and source if the
|
||||
|
@ -3633,7 +3639,7 @@ size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
|
|||
const void* src, size_t srcSize,
|
||||
const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
|
||||
{
|
||||
CHECK_F (ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize)); /* will check if cdict != NULL */
|
||||
FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize)); /* will check if cdict != NULL */
|
||||
return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
|
||||
}
|
||||
|
||||
|
@ -3701,7 +3707,7 @@ static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx,
|
|||
assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
|
||||
assert(!((dict) && (cdict))); /* either dict or cdict, not both */
|
||||
|
||||
CHECK_F( ZSTD_compressBegin_internal(cctx,
|
||||
FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
|
||||
dict, dictSize, dictContentType, ZSTD_dtlm_fast,
|
||||
cdict,
|
||||
params, pledgedSrcSize,
|
||||
|
@ -3743,16 +3749,15 @@ size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
|
|||
|
||||
if (dict && dictSize >= 8) {
|
||||
DEBUGLOG(4, "loading dictionary of size %u", (unsigned)dictSize);
|
||||
if (zcs->staticSize) { /* static CCtx : never uses malloc */
|
||||
/* incompatible with internal cdict creation */
|
||||
return ERROR(memory_allocation);
|
||||
}
|
||||
RETURN_ERROR_IF(
|
||||
zcs->staticSize, memory_allocation,
|
||||
"static CCtx: incompatible with internal cdict creation");
|
||||
ZSTD_freeCDict(zcs->cdictLocal);
|
||||
zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
|
||||
ZSTD_dlm_byCopy, ZSTD_dct_auto,
|
||||
params.cParams, zcs->customMem);
|
||||
zcs->cdict = zcs->cdictLocal;
|
||||
if (zcs->cdictLocal == NULL) return ERROR(memory_allocation);
|
||||
RETURN_ERROR_IF(zcs->cdictLocal == NULL, memory_allocation);
|
||||
} else {
|
||||
if (cdict) {
|
||||
params.cParams = ZSTD_getCParamsFromCDict(cdict); /* cParams are enforced from cdict; it includes windowLog */
|
||||
|
@ -3773,7 +3778,8 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
|
|||
unsigned long long pledgedSrcSize)
|
||||
{
|
||||
DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
|
||||
if (!cdict) return ERROR(dictionary_wrong); /* cannot handle NULL cdict (does not know what to do) */
|
||||
RETURN_ERROR_IF(!cdict, dictionary_wrong,
|
||||
"cannot handle NULL cdict (does not know what to do)");
|
||||
{ ZSTD_CCtx_params params = zcs->requestedParams;
|
||||
params.cParams = ZSTD_getCParamsFromCDict(cdict);
|
||||
params.fParams = fParams;
|
||||
|
@ -3802,7 +3808,7 @@ size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
|
|||
{
|
||||
DEBUGLOG(4, "ZSTD_initCStream_advanced: pledgedSrcSize=%u, flag=%u",
|
||||
(unsigned)pledgedSrcSize, params.fParams.contentSizeFlag);
|
||||
CHECK_F( ZSTD_checkCParams(params.cParams) );
|
||||
FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
|
||||
if ((pledgedSrcSize==0) && (params.fParams.contentSizeFlag==0)) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* for compatibility with older programs relying on this behavior. Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. This line will be removed in the future. */
|
||||
zcs->requestedParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
|
||||
return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL /*cdict*/, zcs->requestedParams, pledgedSrcSize);
|
||||
|
@ -3874,8 +3880,7 @@ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
|
|||
switch(zcs->streamStage)
|
||||
{
|
||||
case zcss_init:
|
||||
/* call ZSTD_initCStream() first ! */
|
||||
return ERROR(init_missing);
|
||||
RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");
|
||||
|
||||
case zcss_load:
|
||||
if ( (flushMode == ZSTD_e_end)
|
||||
|
@ -3885,7 +3890,7 @@ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
|
|||
size_t const cSize = ZSTD_compressEnd(zcs,
|
||||
op, oend-op, ip, iend-ip);
|
||||
DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
|
||||
if (ZSTD_isError(cSize)) return cSize;
|
||||
FORWARD_IF_ERROR(cSize);
|
||||
ip = iend;
|
||||
op += cSize;
|
||||
zcs->frameEnded = 1;
|
||||
|
@ -3926,7 +3931,7 @@ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
|
|||
zcs->inBuff + zcs->inToCompress, iSize) :
|
||||
ZSTD_compressContinue(zcs, cDst, oSize,
|
||||
zcs->inBuff + zcs->inToCompress, iSize);
|
||||
if (ZSTD_isError(cSize)) return cSize;
|
||||
FORWARD_IF_ERROR(cSize);
|
||||
zcs->frameEnded = lastBlock;
|
||||
/* prepare next block */
|
||||
zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
|
||||
|
@ -4002,7 +4007,7 @@ static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
|
|||
|
||||
size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
|
||||
{
|
||||
CHECK_F( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) );
|
||||
FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) );
|
||||
return ZSTD_nextInputSizeHint_MTorST(zcs);
|
||||
}
|
||||
|
||||
|
@ -4014,8 +4019,8 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
|
|||
{
|
||||
DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
|
||||
/* check conditions */
|
||||
if (output->pos > output->size) return ERROR(GENERIC);
|
||||
if (input->pos > input->size) return ERROR(GENERIC);
|
||||
RETURN_ERROR_IF(output->pos > output->size, GENERIC);
|
||||
RETURN_ERROR_IF(input->pos > input->size, GENERIC);
|
||||
assert(cctx!=NULL);
|
||||
|
||||
/* transparent initialization stage */
|
||||
|
@ -4040,11 +4045,11 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
|
|||
DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u",
|
||||
params.nbWorkers);
|
||||
cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbWorkers, cctx->customMem);
|
||||
if (cctx->mtctx == NULL) return ERROR(memory_allocation);
|
||||
RETURN_ERROR_IF(cctx->mtctx == NULL, memory_allocation);
|
||||
}
|
||||
/* mt compression */
|
||||
DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers);
|
||||
CHECK_F( ZSTDMT_initCStream_internal(
|
||||
FORWARD_IF_ERROR( ZSTDMT_initCStream_internal(
|
||||
cctx->mtctx,
|
||||
prefixDict.dict, prefixDict.dictSize, ZSTD_dct_rawContent,
|
||||
cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );
|
||||
|
@ -4052,7 +4057,7 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
|
|||
cctx->appliedParams.nbWorkers = params.nbWorkers;
|
||||
} else
|
||||
#endif
|
||||
{ CHECK_F( ZSTD_resetCStream_internal(cctx,
|
||||
{ FORWARD_IF_ERROR( ZSTD_resetCStream_internal(cctx,
|
||||
prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,
|
||||
cctx->cdict,
|
||||
params, cctx->pledgedSrcSizePlusOne-1) );
|
||||
|
@ -4077,7 +4082,7 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
|
|||
return flushMin;
|
||||
} }
|
||||
#endif
|
||||
CHECK_F( ZSTD_compressStream_generic(cctx, output, input, endOp) );
|
||||
FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) );
|
||||
DEBUGLOG(5, "completed ZSTD_compressStream2");
|
||||
return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
|
||||
}
|
||||
|
@ -4108,10 +4113,10 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx,
|
|||
dst, dstCapacity, &oPos,
|
||||
src, srcSize, &iPos,
|
||||
ZSTD_e_end);
|
||||
if (ZSTD_isError(result)) return result;
|
||||
FORWARD_IF_ERROR(result);
|
||||
if (result != 0) { /* compression not completed, due to lack of output space */
|
||||
assert(oPos == dstCapacity);
|
||||
return ERROR(dstSize_tooSmall);
|
||||
RETURN_ERROR(dstSize_tooSmall);
|
||||
}
|
||||
assert(iPos == srcSize); /* all input is expected consumed */
|
||||
return oPos;
|
||||
|
@ -4133,7 +4138,7 @@ size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
|
|||
{
|
||||
ZSTD_inBuffer input = { NULL, 0, 0 };
|
||||
size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
|
||||
CHECK_F( remainingToFlush );
|
||||
FORWARD_IF_ERROR( remainingToFlush );
|
||||
if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */
|
||||
/* single thread mode : attempt to calculate remaining to flush more precisely */
|
||||
{ size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
|
||||
|
|
|
@ -1056,7 +1056,7 @@ static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
|
|||
static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
|
||||
{
|
||||
if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation);
|
||||
CHECK_F( ZSTDMT_expandJobsTable(mtctx, nbWorkers) );
|
||||
FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) );
|
||||
mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers);
|
||||
if (mtctx->bufPool == NULL) return ERROR(memory_allocation);
|
||||
mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers);
|
||||
|
@ -1263,7 +1263,7 @@ static size_t ZSTDMT_compress_advanced_internal(
|
|||
if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, avgJobSize))
|
||||
return ERROR(memory_allocation);
|
||||
|
||||
CHECK_F( ZSTDMT_expandJobsTable(mtctx, nbJobs) ); /* only expands if necessary */
|
||||
FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbJobs) ); /* only expands if necessary */
|
||||
|
||||
{ unsigned u;
|
||||
for (u=0; u<nbJobs; u++) {
|
||||
|
@ -1396,7 +1396,7 @@ size_t ZSTDMT_initCStream_internal(
|
|||
|
||||
/* init */
|
||||
if (params.nbWorkers != mtctx->params.nbWorkers)
|
||||
CHECK_F( ZSTDMT_resize(mtctx, params.nbWorkers) );
|
||||
FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) );
|
||||
|
||||
if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
|
||||
if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = ZSTDMT_JOBSIZE_MAX;
|
||||
|
@ -2051,7 +2051,7 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
|
|||
|| ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) { /* must finish the frame with a zero-size block */
|
||||
size_t const jobSize = mtctx->inBuff.filled;
|
||||
assert(mtctx->inBuff.filled <= mtctx->targetSectionSize);
|
||||
CHECK_F( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) );
|
||||
FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) );
|
||||
}
|
||||
|
||||
/* check for potential compressed data ready to be flushed */
|
||||
|
@ -2065,7 +2065,7 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
|
|||
|
||||
size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
|
||||
{
|
||||
CHECK_F( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) );
|
||||
FORWARD_IF_ERROR( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) );
|
||||
|
||||
/* recommended next input size : fill current input buffer */
|
||||
return mtctx->targetSectionSize - mtctx->inBuff.filled; /* note : could be zero when input buffer is fully filled and no more availability to create new job */
|
||||
|
@ -2082,7 +2082,7 @@ static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* ou
|
|||
|| ((endFrame==ZSTD_e_end) && !mtctx->frameEnded)) { /* need a last 0-size block to end frame */
|
||||
DEBUGLOG(5, "ZSTDMT_flushStream_internal : create a new job (%u bytes, end:%u)",
|
||||
(U32)srcSize, (U32)endFrame);
|
||||
CHECK_F( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) );
|
||||
FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) );
|
||||
}
|
||||
|
||||
/* check if there is any data available to flush */
|
||||
|
|
|
@ -105,9 +105,9 @@ ZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict,
|
|||
ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);
|
||||
|
||||
/* load entropy tables */
|
||||
CHECK_E( ZSTD_loadDEntropy(&ddict->entropy,
|
||||
ddict->dictContent, ddict->dictSize),
|
||||
dictionary_corrupted );
|
||||
RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy(
|
||||
&ddict->entropy, ddict->dictContent, ddict->dictSize)),
|
||||
dictionary_corrupted);
|
||||
ddict->entropyPresent = 1;
|
||||
return 0;
|
||||
}
@ -133,7 +133,7 @@ static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */
/* parse dictionary content */
CHECK_F( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) );
FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) );
return 0;
}

@ -150,7 +150,7 @@ ZSTD_DCtx* ZSTD_createDCtx(void)
size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
{
if (dctx==NULL) return 0;   /* support free on NULL */
if (dctx->staticSize) return ERROR(memory_allocation);   /* not compatible with static DCtx */
RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx");
{   ZSTD_customMem const cMem = dctx->customMem;
ZSTD_freeDDict(dctx->ddictLocal);
dctx->ddictLocal = NULL;

@ -203,7 +203,7 @@ unsigned ZSTD_isFrame(const void* buffer, size_t size)
static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
{
size_t const minInputSize = ZSTD_startingInputLength(format);
if (srcSize < minInputSize) return ERROR(srcSize_wrong);
RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong);
{   BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
U32 const dictID= fhd & 3;

@ -238,7 +238,7 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s
memset(zfhPtr, 0, sizeof(*zfhPtr));   /* not strictly necessary, but static analyzer do not understand that zfhPtr is only going to be read only if return value is zero, since they are 2 different signals */
if (srcSize < minInputSize) return minInputSize;
if (src==NULL) return ERROR(GENERIC);   /* invalid parameter */
RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter");
if ( (format != ZSTD_f_zstd1_magicless)
&& (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {

@ -251,7 +251,7 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s
zfhPtr->frameType = ZSTD_skippableFrame;
return 0;
}
return ERROR(prefix_unknown);
RETURN_ERROR(prefix_unknown);
}
/* ensure there is enough `srcSize` to fully read/decode frame header */

@ -269,14 +269,13 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s
U64 windowSize = 0;
U32 dictID = 0;
U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
if ((fhdByte & 0x08) != 0)
return ERROR(frameParameter_unsupported); /* reserved bits, must be zero */
RETURN_ERROR_IF((fhdByte & 0x08) != 0, frameParameter_unsupported,
"reserved bits, must be zero");
if (!singleSegment) {
BYTE const wlByte = ip[pos++];
U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
if (windowLog > ZSTD_WINDOWLOG_MAX)
return ERROR(frameParameter_windowTooLarge);
RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge);
windowSize = (1ULL << windowLog);
windowSize += (windowSize >> 3) * (wlByte&7);
}
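The window-descriptor arithmetic kept by this hunk is compact: the exponent sits in the top 5 bits of the descriptor byte and the low 3 bits add eighths of the base size. A standalone sketch of the same computation, assuming ZSTD_WINDOWLOG_ABSOLUTEMIN is 10 (its value in current zstd headers) and using a made-up descriptor byte:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t const wlByte = 0x23;                      /* made-up window descriptor byte */
    unsigned const windowLog = (wlByte >> 3) + 10;    /* 4 + 10 = 14 */
    uint64_t windowSize = (1ULL << windowLog);        /* 16384-byte base */
    windowSize += (windowSize >> 3) * (wlByte & 7);   /* + 3/8 of the base => 22528 */
    printf("windowLog=%u windowSize=%llu\n", windowLog, (unsigned long long)windowSize);
    return 0;
}
```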
@ -348,12 +347,11 @@ static size_t readSkippableFrameSize(void const* src, size_t srcSize)
size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE;
U32 sizeU32;
if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
return ERROR(srcSize_wrong);
RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong);
sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);
if ((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32)
return ERROR(frameParameter_unsupported);
RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32,
frameParameter_unsupported);
return skippableHeaderSize + sizeU32;
}
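The condition this rewrite keeps, (U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32, is the usual wraparound test for unsigned overflow: the sum is smaller than one of its operands exactly when it overflowed. A small self-contained illustration (the constant 8 stands in for ZSTD_SKIPPABLEHEADERSIZE; the frame size is made up):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t const headerSize = 8;                /* stand-in for ZSTD_SKIPPABLEHEADERSIZE */
    uint32_t const sizeU32 = UINT32_MAX - 3;      /* made-up frame size near the limit */

    /* unsigned addition wraps, so the sum being smaller than an operand
     * is a reliable overflow signal */
    if ((uint32_t)(sizeU32 + headerSize) < sizeU32)
        printf("overflow: skippable frame size field is not representable\n");
    else
        printf("total size = %u\n", (unsigned)(sizeU32 + headerSize));
    return 0;
}
```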
@ -428,9 +426,9 @@ static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t he
{
size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);
if (ZSTD_isError(result)) return result;    /* invalid header */
if (result>0) return ERROR(srcSize_wrong);  /* headerSize too small */
if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID))
return ERROR(dictionary_wrong);
RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small");
RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID),
dictionary_wrong);
if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);
return 0;
}

@ -459,7 +457,7 @@ size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
/* Extract Frame Header */
{   size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
if (ZSTD_isError(ret)) return ret;
if (ret > 0) return ERROR(srcSize_wrong);
RETURN_ERROR_IF(ret > 0, srcSize_wrong);
}
ip += zfh.headerSize;

@ -471,8 +469,8 @@ size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTD_isError(cBlockSize)) return cBlockSize;
if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
return ERROR(srcSize_wrong);
RETURN_ERROR_IF(ZSTD_blockHeaderSize + cBlockSize > remainingSize,
srcSize_wrong);
ip += ZSTD_blockHeaderSize + cBlockSize;
remainingSize -= ZSTD_blockHeaderSize + cBlockSize;

@ -481,7 +479,7 @@ size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
}
if (zfh.checksumFlag) {   /* Final frame content checksum */
if (remainingSize < 4) return ERROR(srcSize_wrong);
RETURN_ERROR_IF(remainingSize < 4, srcSize_wrong);
ip += 4;
}

@ -522,9 +520,9 @@ static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
DEBUGLOG(5, "ZSTD_copyRawBlock");
if (dst == NULL) {
if (srcSize == 0) return 0;
return ERROR(dstBuffer_null);
RETURN_ERROR(dstBuffer_null);
}
if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall);
memcpy(dst, src, srcSize);
return srcSize;
}

@ -535,9 +533,9 @@ static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
{
if (dst == NULL) {
if (regenSize == 0) return 0;
return ERROR(dstBuffer_null);
RETURN_ERROR(dstBuffer_null);
}
if (regenSize > dstCapacity) return ERROR(dstSize_tooSmall);
RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall);
memset(dst, b, regenSize);
return regenSize;
}

@ -560,15 +558,16 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
DEBUGLOG(4, "ZSTD_decompressFrame (srcSize:%i)", (int)*srcSizePtr);
/* check */
if (remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN+ZSTD_blockHeaderSize)
return ERROR(srcSize_wrong);
RETURN_ERROR_IF(
remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN+ZSTD_blockHeaderSize,
srcSize_wrong);
/* Frame Header */
{   size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_FRAMEHEADERSIZE_PREFIX);
if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
if (remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize)
return ERROR(srcSize_wrong);
CHECK_F( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) );
RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize,
srcSize_wrong);
FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) );
ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize;
}

@ -581,7 +580,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
ip += ZSTD_blockHeaderSize;
remainingSrcSize -= ZSTD_blockHeaderSize;
if (cBlockSize > remainingSrcSize) return ERROR(srcSize_wrong);
RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong);
switch(blockProperties.blockType)
{

@ -596,7 +595,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
break;
case bt_reserved :
default:
return ERROR(corruption_detected);
RETURN_ERROR(corruption_detected);
}
if (ZSTD_isError(decodedSize)) return decodedSize;

@ -609,15 +608,15 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
}
if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
if ((U64)(op-ostart) != dctx->fParams.frameContentSize) {
return ERROR(corruption_detected);
}   }
RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize,
corruption_detected);
}
if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState);
U32 checkRead;
if (remainingSrcSize<4) return ERROR(checksum_wrong);
RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong);
checkRead = MEM_readLE32(ip);
if (checkRead != checkCalc) return ERROR(checksum_wrong);
RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong);
ip += 4;
remainingSrcSize -= 4;
}
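As the code above shows, the optional frame checksum is the low 32 bits of XXH64 (seed 0) over the regenerated content, stored little-endian in the last 4 bytes of the frame. A self-contained sketch of the same check using xxhash's one-shot API (zstd bundles xxhash; the byte-order handling is simplified here for illustration):

```c
#include <stddef.h>
#include <stdint.h>
#include "xxhash.h"   /* assumed available; zstd ships this header */

/* Returns 1 if the 4 stored checksum bytes (little-endian) match the
 * low 32 bits of XXH64(content, seed 0), mirroring the verification above. */
static int frameChecksumOk(const void* content, size_t contentSize,
                           const unsigned char stored[4])
{
    uint32_t const calc = (uint32_t)XXH64(content, contentSize, 0);
    uint32_t const read = (uint32_t)stored[0]
                        | ((uint32_t)stored[1] << 8)
                        | ((uint32_t)stored[2] << 16)
                        | ((uint32_t)stored[3] << 24);
    return calc == read;
}
```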
@ -652,8 +651,8 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
size_t decodedSize;
size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
if (ZSTD_isError(frameSize)) return frameSize;
/* legacy support is not compatible with static dctx */
if (dctx->staticSize) return ERROR(memory_allocation);
RETURN_ERROR_IF(dctx->staticSize, memory_allocation,
"legacy support is not compatible with static dctx");
decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize);
if (ZSTD_isError(decodedSize)) return decodedSize;

@ -676,7 +675,7 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
size_t const skippableSize = readSkippableFrameSize(src, srcSize);
if (ZSTD_isError(skippableSize))
return skippableSize;
if (srcSize < skippableSize) return ERROR(srcSize_wrong);
RETURN_ERROR_IF(srcSize < skippableSize, srcSize_wrong);
src = (const BYTE *)src + skippableSize;
srcSize -= skippableSize;

@ -685,29 +684,29 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
if (ddict) {
/* we were called from ZSTD_decompress_usingDDict */
CHECK_F(ZSTD_decompressBegin_usingDDict(dctx, ddict));
FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict));
} else {
/* this will initialize correctly with no dict if dict == NULL, so
* use this in all cases but ddict */
CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
}
ZSTD_checkContinuity(dctx, dst);
{ const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
&src, &srcSize);
if ( (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
&& (moreThan1Frame==1) ) {
/* at least one frame successfully completed,
* but following bytes are garbage :
* it's more likely to be a srcSize error,
* specifying more bytes than compressed size of frame(s).
* This error message replaces ERROR(prefix_unknown),
* which would be confusing, as the first header is actually correct.
* Note that one could be unlucky, it might be a corruption error instead,
* happening right at the place where we expect zstd magic bytes.
* But this is _much_ less likely than a srcSize field error. */
return ERROR(srcSize_wrong);
}
RETURN_ERROR_IF(
(ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
&& (moreThan1Frame==1),
srcSize_wrong,
"at least one frame successfully completed, but following "
"bytes are garbage: it's more likely to be a srcSize error, "
"specifying more bytes than compressed size of frame(s). This "
"error message replaces ERROR(prefix_unknown), which would be "
"confusing, as the first header is actually correct. Note that "
"one could be unlucky, it might be a corruption error instead, "
"happening right at the place where we expect zstd magic "
"bytes. But this is _much_ less likely than a srcSize field "
"error.");
if (ZSTD_isError(res)) return res;
assert(res <= dstCapacity);
dst = (BYTE*)dst + res;

@ -716,7 +715,7 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
moreThan1Frame = 1;
}  /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
if (srcSize) return ERROR(srcSize_wrong); /* input not entirely consumed */
RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed");
return (BYTE*)dst - (BYTE*)dststart;
}

@ -741,7 +740,7 @@ size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t sr
#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
size_t regenSize;
ZSTD_DCtx* const dctx = ZSTD_createDCtx();
if (dctx==NULL) return ERROR(memory_allocation);
RETURN_ERROR_IF(dctx==NULL, memory_allocation);
regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
ZSTD_freeDCtx(dctx);
return regenSize;

@ -791,8 +790,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
{
DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize);
/* Sanity check */
if (srcSize != dctx->expected)
return ERROR(srcSize_wrong);  /* not allowed */
RETURN_ERROR_IF(srcSize != dctx->expected, srcSize_wrong, "not allowed");
if (dstCapacity) ZSTD_checkContinuity(dctx, dst);
switch (dctx->stage)

@ -817,7 +815,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
case ZSTDds_decodeFrameHeader:
assert(src != NULL);
memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
dctx->expected = ZSTD_blockHeaderSize;
dctx->stage = ZSTDds_decodeBlockHeader;
return 0;

@ -867,7 +865,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
break;
case bt_reserved :   /* should never happen */
default:
return ERROR(corruption_detected);
RETURN_ERROR(corruption_detected);
}
if (ZSTD_isError(rSize)) return rSize;
DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize);

@ -876,10 +874,10 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
if (dctx->stage == ZSTDds_decompressLastBlock) {   /* end of frame */
DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize);
if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
if (dctx->decodedSize != dctx->fParams.frameContentSize) {
return ERROR(corruption_detected);
}   }
RETURN_ERROR_IF(
dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
&& dctx->decodedSize != dctx->fParams.frameContentSize,
corruption_detected);
if (dctx->fParams.checksumFlag) {  /* another round for frame checksum */
dctx->expected = 4;
dctx->stage = ZSTDds_checkChecksum;

@ -900,7 +898,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
{   U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);
U32 const check32 = MEM_readLE32(src);
DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
if (check32 != h32) return ERROR(checksum_wrong);
RETURN_ERROR_IF(check32 != h32, checksum_wrong);
dctx->expected = 0;
dctx->stage = ZSTDds_getFrameHeaderSize;
return 0;

@ -921,7 +919,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
default:
assert(0);   /* impossible */
return ERROR(GENERIC);   /* some compiler require default to do something */
RETURN_ERROR(GENERIC);   /* some compiler require default to do something */
}
}
@ -945,7 +943,7 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
const BYTE* dictPtr = (const BYTE*)dict;
const BYTE* const dictEnd = dictPtr + dictSize;
if (dictSize <= 8) return ERROR(dictionary_corrupted);
RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted);
assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY);   /* dict must be valid */
dictPtr += 8;   /* skip header = magic + dictID */

@ -964,16 +962,16 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
dictPtr, dictEnd - dictPtr,
workspace, workspaceSize);
#endif
if (HUF_isError(hSize)) return ERROR(dictionary_corrupted);
RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted);
dictPtr += hSize;
}
{   short offcodeNCount[MaxOff+1];
unsigned offcodeMaxValue = MaxOff, offcodeLog;
size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
if (offcodeMaxValue > MaxOff) return ERROR(dictionary_corrupted);
if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted);
RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted);
RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted);
ZSTD_buildFSETable( entropy->OFTable,
offcodeNCount, offcodeMaxValue,
OF_base, OF_bits,

@ -984,9 +982,9 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
{   short matchlengthNCount[MaxML+1];
unsigned matchlengthMaxValue = MaxML, matchlengthLog;
size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
if (matchlengthMaxValue > MaxML) return ERROR(dictionary_corrupted);
if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted);
RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted);
RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted);
ZSTD_buildFSETable( entropy->MLTable,
matchlengthNCount, matchlengthMaxValue,
ML_base, ML_bits,

@ -997,9 +995,9 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
{   short litlengthNCount[MaxLL+1];
unsigned litlengthMaxValue = MaxLL, litlengthLog;
size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
if (litlengthMaxValue > MaxLL) return ERROR(dictionary_corrupted);
if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted);
RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted);
RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted);
ZSTD_buildFSETable( entropy->LLTable,
litlengthNCount, litlengthMaxValue,
LL_base, LL_bits,

@ -1007,12 +1005,13 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
dictPtr += litlengthHeaderSize;
}
if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted);
{   int i;
size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
for (i=0; i<3; i++) {
U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
if (rep==0 || rep >= dictContentSize) return ERROR(dictionary_corrupted);
RETURN_ERROR_IF(rep==0 || rep >= dictContentSize,
dictionary_corrupted);
entropy->rep[i] = rep;
}   }

@ -1030,7 +1029,7 @@ static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict
/* load entropy tables */
{   size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize);
if (ZSTD_isError(eSize)) return ERROR(dictionary_corrupted);
RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted);
dict = (const char*)dict + eSize;
dictSize -= eSize;
}

@ -1064,9 +1063,11 @@ size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
{
CHECK_F( ZSTD_decompressBegin(dctx) );
FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) );
if (dict && dictSize)
CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted);
RETURN_ERROR_IF(
ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)),
dictionary_corrupted);
return 0;
}

@ -1085,7 +1086,7 @@ size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
DEBUGLOG(4, "DDict is %s",
dctx->ddictIsCold ? "~cold~" : "hot!");
}
CHECK_F( ZSTD_decompressBegin(dctx) );
FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) );
if (ddict) {   /* NULL ddict is equivalent to no dictionary */
ZSTD_copyDDictParameters(dctx, ddict);
}

@ -1176,11 +1177,11 @@ size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType)
{
if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
ZSTD_freeDDict(dctx->ddictLocal);
if (dict && dictSize >= 8) {
dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
if (dctx->ddictLocal == NULL) return ERROR(memory_allocation);
RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation);
} else {
dctx->ddictLocal = NULL;
}

@ -1217,7 +1218,7 @@ size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t di
DEBUGLOG(4, "ZSTD_initDStream_usingDict");
zds->streamStage = zdss_init;
zds->noForwardProgress = 0;
CHECK_F( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) );
FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) );
return ZSTD_FRAMEHEADERSIZE_PREFIX;
}

@ -1254,7 +1255,7 @@ size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
{
if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
dctx->ddict = ddict;
return 0;
}

@ -1267,9 +1268,9 @@ size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax);
size_t const min = (size_t)1 << bounds.lowerBound;
size_t const max = (size_t)1 << bounds.upperBound;
if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
if (maxWindowSize < min) return ERROR(parameter_outOfBound);
if (maxWindowSize > max) return ERROR(parameter_outOfBound);
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound);
RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound);
dctx->maxWindowSize = maxWindowSize;
return 0;
}

@ -1311,13 +1312,12 @@ static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value)
}
#define CHECK_DBOUNDS(p,v) { \
if (!ZSTD_dParam_withinBounds(p, v)) \
return ERROR(parameter_outOfBound); \
RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound); \
}
size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value)
{
if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
switch(dParam) {
case ZSTD_d_windowLogMax:
CHECK_DBOUNDS(ZSTD_d_windowLogMax, value);

@ -1329,7 +1329,7 @@ size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value
return 0;
default:;
}
return ERROR(parameter_unsupported);
RETURN_ERROR(parameter_unsupported);
}
size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)

@ -1340,8 +1340,7 @@ size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)
}
if ( (reset == ZSTD_reset_parameters)
|| (reset == ZSTD_reset_session_and_parameters) ) {
if (dctx->streamStage != zdss_init)
return ERROR(stage_wrong);
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
dctx->format = ZSTD_f_zstd1;
dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
}

@ -1360,7 +1359,8 @@ size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long
unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);
unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
size_t const minRBSize = (size_t) neededSize;
if ((unsigned long long)minRBSize != neededSize) return ERROR(frameParameter_windowTooLarge);
RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize,
frameParameter_windowTooLarge);
return minRBSize;
}
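The check kept here guards the narrowing from unsigned long long to size_t: on a 32-bit target a needed buffer size above 4 GiB would silently truncate, so the value is cast and compared back. A tiny standalone illustration of the round-trip test (the 5 GiB figure is made up):

```c
#include <stddef.h>
#include <stdio.h>

int main(void)
{
    unsigned long long const neededSize = 5ULL << 30;   /* made-up requirement: 5 GiB */
    size_t const narrowed = (size_t)neededSize;

    /* if size_t is 32-bit the cast truncates and the round-trip comparison fails */
    if ((unsigned long long)narrowed != neededSize)
        printf("window too large for this platform's size_t\n");
    else
        printf("buffer size fits: %zu bytes\n", narrowed);
    return 0;
}
```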
@ -1378,9 +1378,9 @@ size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
ZSTD_frameHeader zfh;
size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
if (ZSTD_isError(err)) return err;
if (err>0) return ERROR(srcSize_wrong);
if (zfh.windowSize > windowSizeMax)
return ERROR(frameParameter_windowTooLarge);
RETURN_ERROR_IF(err>0, srcSize_wrong);
RETURN_ERROR_IF(zfh.windowSize > windowSizeMax,
frameParameter_windowTooLarge);
return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
}

@ -1406,16 +1406,16 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
U32 someMoreWork = 1;
DEBUGLOG(5, "ZSTD_decompressStream");
if (input->pos > input->size) {  /* forbidden */
DEBUGLOG(5, "in: pos: %u   vs size: %u",
(U32)input->pos, (U32)input->size);
return ERROR(srcSize_wrong);
}
if (output->pos > output->size) {  /* forbidden */
DEBUGLOG(5, "out: pos: %u   vs size: %u",
(U32)output->pos, (U32)output->size);
return ERROR(dstSize_tooSmall);
}
RETURN_ERROR_IF(
input->pos > input->size,
srcSize_wrong,
"forbidden. in: pos: %u   vs size: %u",
(U32)input->pos, (U32)input->size);
RETURN_ERROR_IF(
output->pos > output->size,
dstSize_tooSmall,
"forbidden. out: pos: %u   vs size: %u",
(U32)output->pos, (U32)output->size);
DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
while (someMoreWork) {

@ -1430,8 +1430,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
if (zds->legacyVersion) {
/* legacy support is incompatible with static dctx */
if (zds->staticSize) return ERROR(memory_allocation);
RETURN_ERROR_IF(zds->staticSize, memory_allocation,
"legacy support is incompatible with static dctx");
{   size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input);
if (hint==0) zds->streamStage = zdss_init;
return hint;

@ -1446,9 +1446,9 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
const void* const dict = zds->ddict ? ZSTD_DDict_dictContent(zds->ddict) : NULL;
size_t const dictSize = zds->ddict ? ZSTD_DDict_dictSize(zds->ddict) : 0;
DEBUGLOG(5, "ZSTD_decompressStream: detected legacy version v0.%u", legacyVersion);
/* legacy support is incompatible with static dctx */
if (zds->staticSize) return ERROR(memory_allocation);
CHECK_F(ZSTD_initLegacyStream(&zds->legacyContext,
RETURN_ERROR_IF(zds->staticSize, memory_allocation,
"legacy support is incompatible with static dctx");
FORWARD_IF_ERROR(ZSTD_initLegacyStream(&zds->legacyContext,
zds->previousLegacyVersion, legacyVersion,
dict, dictSize));
zds->legacyVersion = zds->previousLegacyVersion = legacyVersion;

@ -1495,13 +1495,13 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
/* Consume header (see ZSTDds_decodeFrameHeader) */
DEBUGLOG(4, "Consume header");
CHECK_F(ZSTD_decompressBegin_usingDDict(zds, zds->ddict));
FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, zds->ddict));
if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {  /* skippable frame */
zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
zds->stage = ZSTDds_skipFrame;
} else {
CHECK_F(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize));
FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize));
zds->expected = ZSTD_blockHeaderSize;
zds->stage = ZSTDds_decodeBlockHeader;
}

@ -1511,7 +1511,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
(U32)(zds->fParams.windowSize >>10),
(U32)(zds->maxWindowSize >> 10) );
zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
if (zds->fParams.windowSize > zds->maxWindowSize) return ERROR(frameParameter_windowTooLarge);
RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize,
frameParameter_windowTooLarge);
/* Adapt buffer sizes to frame header instructions */
{   size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);

@ -1525,14 +1526,15 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
if (zds->staticSize) {  /* static DCtx */
DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize);
assert(zds->staticSize >= sizeof(ZSTD_DCtx));  /* controlled at init */
if (bufferSize > zds->staticSize - sizeof(ZSTD_DCtx))
return ERROR(memory_allocation);
RETURN_ERROR_IF(
bufferSize > zds->staticSize - sizeof(ZSTD_DCtx),
memory_allocation);
} else {
ZSTD_free(zds->inBuff, zds->customMem);
zds->inBuffSize = 0;
zds->outBuffSize = 0;
zds->inBuff = (char*)ZSTD_malloc(bufferSize, zds->customMem);
if (zds->inBuff == NULL) return ERROR(memory_allocation);
RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation);
}
zds->inBuffSize = neededInBuffSize;
zds->outBuff = zds->inBuff + zds->inBuffSize;

@ -1574,7 +1576,9 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
if (isSkipFrame) {
loadedSize = MIN(toLoad, (size_t)(iend-ip));
} else {
if (toLoad > zds->inBuffSize - zds->inPos) return ERROR(corruption_detected);   /* should never happen */
RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos,
corruption_detected,
"should never happen");
loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend-ip);
}
ip += loadedSize;

@ -1615,7 +1619,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
default:
assert(0);   /* impossible */
return ERROR(GENERIC);   /* some compiler require default to do something */
RETURN_ERROR(GENERIC);   /* some compiler require default to do something */
}   }
/* result */

@ -1624,8 +1628,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
if ((ip==istart) && (op==ostart)) {  /* no forward progress */
zds->noForwardProgress ++;
if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
if (op==oend) return ERROR(dstSize_tooSmall);
if (ip==iend) return ERROR(srcSize_wrong);
RETURN_ERROR_IF(op==oend, dstSize_tooSmall);
RETURN_ERROR_IF(ip==iend, srcSize_wrong);
assert(0);
}
} else {

@ -56,14 +56,15 @@ static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
blockProperties_t* bpPtr)
{
if (srcSize < ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong);
{   U32 const cBlockHeader = MEM_readLE24(src);
U32 const cSize = cBlockHeader >> 3;
bpPtr->lastBlock = cBlockHeader & 1;
bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
bpPtr->origSize = cSize;   /* only useful for RLE */
if (bpPtr->blockType == bt_rle) return 1;
if (bpPtr->blockType == bt_reserved) return ERROR(corruption_detected);
RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected);
return cSize;
}
}
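ZSTD_getcBlockSize above packs all of its information into a 3-byte little-endian header: bit 0 is the last-block flag, bits 1-2 the block type, bits 3-23 the size. A standalone sketch decoding a made-up header value the same way:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* made-up 3-byte block header, already read as a little-endian 24-bit value */
    uint32_t const cBlockHeader = (200u << 3) | (2u << 1) | 1u;

    unsigned const lastBlock = cBlockHeader & 1;          /* bit 0 */
    unsigned const blockType = (cBlockHeader >> 1) & 3;   /* bits 1-2: raw, RLE, compressed, reserved */
    unsigned const cSize     = cBlockHeader >> 3;         /* bits 3-23 */

    printf("lastBlock=%u blockType=%u size=%u\n", lastBlock, blockType, cSize);
    return 0;
}
```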
@ -78,7 +79,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
{
if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected);
{   const BYTE* const istart = (const BYTE*) src;
symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);

@ -86,11 +87,11 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
switch(litEncType)
{
case set_repeat:
if (dctx->litEntropy==0) return ERROR(dictionary_corrupted);
RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted);
/* fall-through */
case set_compressed:
if (srcSize < 5) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
{   size_t lhSize, litSize, litCSize;
U32 singleStream=0;
U32 const lhlCode = (istart[0] >> 2) & 3;

@ -118,8 +119,8 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
litCSize = (lhc >> 22) + (istart[4] << 10);
break;
}
if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected);
RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected);
/* prefetch huffman table if cold */
if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {

@ -157,7 +158,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
}
}
if (HUF_isError(hufSuccess)) return ERROR(corruption_detected);
RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;

@ -187,7 +188,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
}
if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */
if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected);
memcpy(dctx->litBuffer, istart+lhSize, litSize);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;

@ -216,17 +217,17 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
case 3:
lhSize = 3;
litSize = MEM_readLE24(istart) >> 4;
if (srcSize<4) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4");
break;
}
if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected);
memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
return lhSize+1;
}
default:
return ERROR(corruption_detected);   /* impossible */
RETURN_ERROR(corruption_detected, "impossible");
}
}
}

@ -436,8 +437,8 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
switch(type)
{
case set_rle :
if (!srcSize) return ERROR(srcSize_wrong);
if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);
RETURN_ERROR_IF(!srcSize, srcSize_wrong);
RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected);
{   U32 const symbol = *(const BYTE*)src;
U32 const baseline = baseValue[symbol];
U32 const nbBits = nbAdditionalBits[symbol];

@ -449,7 +450,7 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
*DTablePtr = defaultTable;
return 0;
case set_repeat:
if (!flagRepeatTable) return ERROR(corruption_detected);
RETURN_ERROR_IF(!flagRepeatTable, corruption_detected);
/* prefetch FSE table if used */
if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
const void* const pStart = *DTablePtr;

@ -461,15 +462,15 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
{   unsigned tableLog;
S16 norm[MaxSeq+1];
size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
if (FSE_isError(headerSize)) return ERROR(corruption_detected);
if (tableLog > maxLog) return ERROR(corruption_detected);
RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected);
RETURN_ERROR_IF(tableLog > maxLog, corruption_detected);
ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog);
*DTablePtr = DTableSpace;
return headerSize;
}
default :   /* impossible */
default :
assert(0);
return ERROR(GENERIC);
RETURN_ERROR(GENERIC, "impossible");
}
}

@ -483,28 +484,28 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
/* check */
if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong);
/* SeqHead */
nbSeq = *ip++;
if (!nbSeq) {
*nbSeqPtr=0;
if (srcSize != 1) return ERROR(srcSize_wrong);
RETURN_ERROR_IF(srcSize != 1, srcSize_wrong);
return 1;
}
if (nbSeq > 0x7F) {
if (nbSeq == 0xFF) {
if (ip+2 > iend) return ERROR(srcSize_wrong);
RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong);
nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
} else {
if (ip >= iend) return ERROR(srcSize_wrong);
RETURN_ERROR_IF(ip >= iend, srcSize_wrong);
nbSeq = ((nbSeq-0x80)<<8) + *ip++;
}
}
*nbSeqPtr = nbSeq;
/* FSE table descriptors */
if (ip+4 > iend) return ERROR(srcSize_wrong); /* minimum possible size */
RETURN_ERROR_IF(ip+4 > iend, srcSize_wrong); /* minimum possible size */
{   symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
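The sequence-count header handled above uses a variable-length scheme: one byte for values up to 0x7F, two bytes for larger counts, and a three-byte form introduced by 0xFF. A standalone sketch of the same decoding, assuming LONGNBSEQ is 0x7F00 as in zstd's internal headers:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define LONGNBSEQ 0x7F00   /* assumed value, mirrors zstd's internal definition */

/* Decodes the sequence count the same way as ZSTD_decodeSeqHeaders above.
 * Returns the number of header bytes consumed, or 0 if the input is too short. */
static size_t decodeNbSeq(const uint8_t* ip, size_t srcSize, unsigned* nbSeqPtr)
{
    unsigned nbSeq;
    if (srcSize < 1) return 0;
    nbSeq = ip[0];
    if (nbSeq <= 0x7F) { *nbSeqPtr = nbSeq; return 1; }   /* 1-byte form */
    if (nbSeq == 0xFF) {                                  /* 3-byte form */
        if (srcSize < 3) return 0;
        *nbSeqPtr = (unsigned)(ip[1] + (ip[2] << 8)) + LONGNBSEQ;
        return 3;
    }
    if (srcSize < 2) return 0;                            /* 2-byte form */
    *nbSeqPtr = ((nbSeq - 0x80) << 8) + ip[1];
    return 2;
}

int main(void)
{
    uint8_t const hdr[3] = { 0x81, 0x05, 0x00 };   /* made-up header: (0x81-0x80)<<8 + 5 = 261 */
    unsigned nbSeq;
    size_t const consumed = decodeNbSeq(hdr, sizeof(hdr), &nbSeq);
    printf("consumed=%zu nbSeq=%u\n", consumed, nbSeq);
    return 0;
}
```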
@ -517,7 +518,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
LL_base, LL_bits,
LL_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq);
if (ZSTD_isError(llhSize)) return ERROR(corruption_detected);
RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected);
ip += llhSize;
}

@ -527,7 +528,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
OF_base, OF_bits,
OF_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq);
if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected);
RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected);
ip += ofhSize;
}

@ -537,7 +538,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
ML_base, ML_bits,
ML_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq);
if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected);
RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected);
ip += mlhSize;
}
}

@ -590,8 +591,8 @@ size_t ZSTD_execSequenceLast7(BYTE* op,
const BYTE* match = oLitEnd - sequence.offset;
/* check */
if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must fit within dstBuffer */
if (iLitEnd > litLimit) return ERROR(corruption_detected); /* try to read beyond literal buffer */
RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must fit within dstBuffer");
RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "try to read beyond literal buffer");
/* copy literals */
while (op < oLitEnd) *op++ = *(*litPtr)++;

@ -599,7 +600,7 @@ size_t ZSTD_execSequenceLast7(BYTE* op,
/* copy Match */
if (sequence.offset > (size_t)(oLitEnd - base)) {
/* offset beyond prefix */
if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - vBase),corruption_detected);
match = dictEnd - (base-match);
if (match + sequence.matchLength <= dictEnd) {
memmove(oLitEnd, match, sequence.matchLength);

@ -631,8 +632,8 @@ size_t ZSTD_execSequence(BYTE* op,
const BYTE* match = oLitEnd - sequence.offset;
/* check */
if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
/* copy Literals */

@ -645,8 +646,7 @@ size_t ZSTD_execSequence(BYTE* op,
/* copy Match */
if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
/* offset beyond prefix -> go into extDict */
if (sequence.offset > (size_t)(oLitEnd - virtualStart))
return ERROR(corruption_detected);
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected);
match = dictEnd + (match - prefixStart);
if (match + sequence.matchLength <= dictEnd) {
memmove(oLitEnd, match, sequence.matchLength);

@ -712,8 +712,8 @@ size_t ZSTD_execSequenceLong(BYTE* op,
const BYTE* match = sequence.match;
/* check */
if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
RETURN_ERROR_IF(oMatchEnd > oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
if (oLitEnd > oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, dictStart, dictEnd);
/* copy Literals */

@ -726,7 +726,7 @@ size_t ZSTD_execSequenceLong(BYTE* op,
/* copy Match */
if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
/* offset beyond prefix */
if (sequence.offset > (size_t)(oLitEnd - dictStart)) return ERROR(corruption_detected);
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - dictStart), corruption_detected);
if (match + sequence.matchLength <= dictEnd) {
memmove(oLitEnd, match, sequence.matchLength);
return sequenceLength;

@ -911,7 +911,9 @@ ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
seqState_t seqState;
dctx->fseEntropy = 1;
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
RETURN_ERROR_IF(
ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
corruption_detected);
ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);

@ -927,14 +929,14 @@ ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
/* check if reached exact end */
DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
if (nbSeq) return ERROR(corruption_detected);
RETURN_ERROR_IF(nbSeq, corruption_detected);
/* save reps for next block */
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
}
/* last literal segment */
{   size_t const lastLLSize = litEnd - litPtr;
if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall);
memcpy(op, litPtr, lastLLSize);
op += lastLLSize;
}

@ -1066,7 +1068,9 @@ ZSTD_decompressSequencesLong_body(
seqState.pos = (size_t)(op-prefixStart);
seqState.dictEnd = dictEnd;
assert(iend >= ip);
CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
RETURN_ERROR_IF(
ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
corruption_detected);
ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);

@ -1076,7 +1080,7 @@ ZSTD_decompressSequencesLong_body(
sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
PREFETCH_L1(sequences[seqNb].match); PREFETCH_L1(sequences[seqNb].match + sequences[seqNb].matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
}
if (seqNb<seqAdvance) return ERROR(corruption_detected);
RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected);
/* decode and decompress */
for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {

@ -1087,7 +1091,7 @@ ZSTD_decompressSequencesLong_body(
sequences[seqNb & STORED_SEQS_MASK] = sequence;
op += oneSeqSize;
}
if (seqNb<nbSeq) return ERROR(corruption_detected);
RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected);
/* finish queue */
seqNb -= seqAdvance;

@ -1103,7 +1107,7 @@ ZSTD_decompressSequencesLong_body(
/* last literal segment */
{   size_t const lastLLSize = litEnd - litPtr;
if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall);
memcpy(op, litPtr, lastLLSize);
op += lastLLSize;
}

@ -1240,7 +1244,7 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));
DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
if (srcSize >= ZSTD_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);
RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong);
/* Decode literals section */
{   size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);

@ -938,7 +938,9 @@ static size_t writeSequences(U32* seed, frame_t* frame, seqStore_t* seqStorePtr,
FSE_CState_t stateOffsetBits;
FSE_CState_t stateLitLength;
CHECK_E(BIT_initCStream(&blockStream, op, oend-op), dstSize_tooSmall); /* not enough space remaining */
RETURN_ERROR_IF(
ERR_isError(BIT_initCStream(&blockStream, op, oend-op)),
dstSize_tooSmall, "not enough space remaining");
/* first symbols */
FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);