/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* This header contains definitions
 * that shall **only** be used by modules within lib/compress.
 */

#ifndef ZSTD_COMPRESS_H
#define ZSTD_COMPRESS_H

/*-*************************************
*  Dependencies
***************************************/
#include "zstd_internal.h"
#ifdef ZSTD_MULTITHREAD
#  include "zstdmt_compress.h"
#endif

#if defined (__cplusplus)
extern "C" {
#endif


/*-*************************************
*  Constants
***************************************/
#define kSearchStrength      8
#define HASH_READ_SIZE       8
#define ZSTD_CLEVEL_CUSTOM 999
#define ZSTD_DUBT_UNSORTED_MARK 1   /* For btlazy2 strategy, index 1 now means "unsorted".
                                       It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
                                       It's not a big deal though : candidate will just be sorted again.
                                       Additionally, candidate position 1 will be lost.
                                       But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
                                       The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy */
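
/* Illustrative sketch (not part of the original header) : inside the btlazy2
 * binary-tree search, meeting the mark means the subtree below is not sorted
 * yet, so the search stops descending and re-sorts those candidates first :
 *     if (*nextCandidate == ZSTD_DUBT_UNSORTED_MARK) { sort pending candidates before searching }
 * The exact handling lives in zstd_lazy.c ; this only sketches the idea. */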


/*-*************************************
*  Context memory management
***************************************/
typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;

typedef struct ZSTD_prefixDict_s {
    const void* dict;
    size_t dictSize;
    ZSTD_dictMode_e dictMode;
} ZSTD_prefixDict;

typedef struct {
    U32 hufCTable[HUF_CTABLE_SIZE_U32(255)];
    FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
    FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
    FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
    HUF_repeat hufCTable_repeatMode;
    FSE_repeat offcode_repeatMode;
    FSE_repeat matchlength_repeatMode;
    FSE_repeat litlength_repeatMode;
} ZSTD_entropyCTables_t;

typedef struct {
    U32 off;
    U32 len;
} ZSTD_match_t;
typedef struct {
    int price;
    U32 off;
    U32 mlen;
    U32 litlen;
    U32 rep[ZSTD_REP_NUM];
} ZSTD_optimal_t;

typedef struct {
    /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
    U32* litFreq;               /* table of literals statistics, of size 256 */
    U32* litLengthFreq;         /* table of litLength statistics, of size (MaxLL+1) */
    U32* matchLengthFreq;       /* table of matchLength statistics, of size (MaxML+1) */
    U32* offCodeFreq;           /* table of offCode statistics, of size (MaxOff+1) */
    ZSTD_match_t* matchTable;   /* list of found matches, of size ZSTD_OPT_NUM+1 */
    ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */

    U32 litSum;                 /* nb of literals */
    U32 litLengthSum;           /* nb of litLength codes */
    U32 matchLengthSum;         /* nb of matchLength codes */
    U32 offCodeSum;             /* nb of offset codes */
    /* begin : updated by ZSTD_setLog2Prices */
    U32 log2litSum;             /* pow2 to compare log2(litfreq) to */
    U32 log2litLengthSum;       /* pow2 to compare log2(llfreq) to */
    U32 log2matchLengthSum;     /* pow2 to compare log2(mlfreq) to */
    U32 log2offCodeSum;         /* pow2 to compare log2(offreq) to */
    /* end : updated by ZSTD_setLog2Prices */

    U32 staticPrices;           /* prices follow a pre-defined cost structure, statistics are irrelevant */
} optState_t;
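
/* Price sketch (illustrative, not part of the original header) : the optimal
 * parser estimates the cost of a literal, in bits, roughly as
 *     literalPrice = optPtr->log2litSum - ZSTD_highbit32(optPtr->litFreq[lit] + 1);
 * so frequent literals (freq close to litSum) cost few bits, and rare ones cost
 * more. The real formulas, covering length and offset codes too, live in zstd_opt.c. */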

typedef struct {
    ZSTD_entropyCTables_t entropy;
    U32 rep[ZSTD_REP_NUM];
} ZSTD_compressedBlockState_t;

typedef struct {
    BYTE const* nextSrc;    /* next block here to continue on current prefix */
    BYTE const* base;       /* All regular indexes relative to this position */
    BYTE const* dictBase;   /* extDict indexes relative to this position */
    U32 dictLimit;          /* below that point, need extDict */
    U32 lowLimit;           /* below that point, no more data */
    U32 nextToUpdate;       /* index from which to continue table update */
    U32 nextToUpdate3;      /* index from which to continue table update (3-bytes hash table) */
    U32 hashLog3;           /* dispatch table : larger == faster, more memory */
    U32 loadedDictEnd;      /* index of end of dictionary */
    U32* hashTable;
    U32* hashTable3;
    U32* chainTable;
    optState_t opt;         /* optimal parser state */
} ZSTD_matchState_t;
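
/* Index sketch (illustrative) : with this scheme, a stored index `idx` maps
 * back to a byte pointer as
 *     const BYTE* ptr = (idx < ms->dictLimit) ? ms->dictBase + idx : ms->base + idx;
 * and the current position is (U32)(ip - ms->base). Indexes below lowLimit are
 * no longer backed by data and must not be dereferenced. */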

typedef struct {
    ZSTD_compressedBlockState_t* prevCBlock;
    ZSTD_compressedBlockState_t* nextCBlock;
    ZSTD_matchState_t matchState;
} ZSTD_blockState_t;

typedef struct {
    U32 offset;
    U32 checksum;
} ldmEntry_t;

typedef struct {
    ldmEntry_t* hashTable;
    BYTE* bucketOffsets;    /* Next position in bucket to insert entry */
    U64 hashPower;          /* Used to compute the rolling hash.
                             * Depends on ldmParams.minMatchLength */
} ldmState_t;
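
/* Rolling-hash sketch (hedged ; the actual helpers live in zstd_ldm.c) : with
 * hashPower == prime^(minMatchLength-1), sliding the hashed window by one byte is
 *     hash = (hash - toRemove * hashPower) * prime + toAdd;
 * which keeps `hash` equal to the polynomial hash of the last minMatchLength bytes
 * without rehashing them all. */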

typedef struct {
    U32 enableLdm;          /* 1 if enable long distance matching */
    U32 hashLog;            /* Log size of hashTable */
    U32 bucketSizeLog;      /* Log bucket size for collision resolution, at most 8 */
    U32 minMatchLength;     /* Minimum match length */
    U32 hashEveryLog;       /* Log number of entries to skip */
} ldmParams_t;

struct ZSTD_CCtx_params_s {
    ZSTD_format_e format;
    ZSTD_compressionParameters cParams;
    ZSTD_frameParameters fParams;

    int compressionLevel;
    int forceWindow;           /* force back-references to respect limit of
                                * 1<<wLog, even for dictionary */

    /* Multithreading : used to pass parameters to mtctx */
    unsigned nbWorkers;
    unsigned jobSize;
    unsigned overlapSizeLog;

    /* Long distance matching parameters */
    ldmParams_t ldmParams;

    /* For use with createCCtxParams() and freeCCtxParams() only */
    ZSTD_customMem customMem;
};  /* typedef'd to ZSTD_CCtx_params within "zstd.h" */

struct ZSTD_CCtx_s {
    ZSTD_compressionStage_e stage;
    int cParamsChanged;    /* == 1 if cParams (except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
    int bmi2;              /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
    ZSTD_CCtx_params requestedParams;
    ZSTD_CCtx_params appliedParams;
    U32 dictID;
    void* workSpace;
    size_t workSpaceSize;
    size_t blockSize;
    unsigned long long pledgedSrcSizePlusOne;  /* this way, 0 (default) == unknown */
    unsigned long long consumedSrcSize;
    unsigned long long producedCSize;
    XXH64_state_t xxhState;
    ZSTD_customMem customMem;
    size_t staticSize;

    seqStore_t seqStore;      /* sequences storage ptrs */
    ldmState_t ldmState;      /* long distance matching state */
    rawSeq* ldmSequences;     /* Storage for the ldm output sequences */
    ZSTD_blockState_t blockState;
    U32* entropyWorkspace;    /* entropy workspace of HUF_WORKSPACE_SIZE bytes */

    /* streaming */
    char*  inBuff;
    size_t inBuffSize;
    size_t inToCompress;
    size_t inBuffPos;
    size_t inBuffTarget;
    char*  outBuff;
    size_t outBuffSize;
    size_t outBuffContentSize;
    size_t outBuffFlushedSize;
    ZSTD_cStreamStage streamStage;
    U32    frameEnded;

    /* Dictionary */
    ZSTD_CDict* cdictLocal;
    const ZSTD_CDict* cdict;
    ZSTD_prefixDict prefixDict;   /* single-usage dictionary */

    /* Multi-threading */
#ifdef ZSTD_MULTITHREAD
    ZSTDMT_CCtx* mtctx;
#endif
};

typedef size_t (*ZSTD_blockCompressor) (
        ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);

ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict);
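
/* Usage sketch (hedged) : a caller typically dispatches once per block :
 *     ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(cParams->strategy, extDict);
 *     size_t const lastLLSize = blockCompressor(&blockState.matchState, &seqStore, rep, cParams, src, srcSize);
 * where the return value is the size of the trailing run of literals, left unsequenced. */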

MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
{
    static const BYTE LL_Code[64] = {  0,  1,  2,  3,  4,  5,  6,  7,
                                       8,  9, 10, 11, 12, 13, 14, 15,
                                      16, 16, 17, 17, 18, 18, 19, 19,
                                      20, 20, 20, 20, 21, 21, 21, 21,
                                      22, 22, 22, 22, 22, 22, 22, 22,
                                      23, 23, 23, 23, 23, 23, 23, 23,
                                      24, 24, 24, 24, 24, 24, 24, 24,
                                      24, 24, 24, 24, 24, 24, 24, 24 };
    static const U32 LL_deltaCode = 19;
    return (litLength > 63) ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
}
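
/* Worked example (illustrative) : litLength 18 falls inside the table, so its
 * code is LL_Code[18] == 17 ; litLength 64 exceeds 63, so its code is
 * ZSTD_highbit32(64) + 19 == 6 + 19 == 25. */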

/* ZSTD_MLcode() :
 * note : mlBase = matchLength - MINMATCH ;
 * because it's the format it's stored in seqStore->sequences */
MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
{
    static const BYTE ML_Code[128] = { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
                                      16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                                      32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
                                      38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
                                      40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
                                      41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
    static const U32 ML_deltaCode = 36;
    return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
}
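
/* Worked example (illustrative) : a match of length MINMATCH+130 gives
 * mlBase == 130 > 127, hence code ZSTD_highbit32(130) + 36 == 7 + 36 == 43 ;
 * mlBase 50 stays inside the table : ML_Code[50] == 38. */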

/*! ZSTD_storeSeq() :
 *  Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
 *  `offsetCode` : distance to match + 3 (values 1-3 are repCodes).
 *  `mlBase` : matchLength - MINMATCH
*/
MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t mlBase)
{
#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG >= 6)
    static const BYTE* g_start = NULL;
    if (g_start==NULL) g_start = (const BYTE*)literals;  /* note : index only works for compression within a single segment */
    {   U32 const pos = (U32)((const BYTE*)literals - g_start);
        DEBUGLOG(6, "Cpos%7u :%3u literals, match%3u bytes at dist.code%7u",
                    pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offsetCode);
    }
#endif
    /* copy Literals */
    assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + 128 KB);
    ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
    seqStorePtr->lit += litLength;

    /* literal Length */
    if (litLength>0xFFFF) {
        assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
        seqStorePtr->longLengthID = 1;
        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    }
    seqStorePtr->sequences[0].litLength = (U16)litLength;

    /* match offset */
    seqStorePtr->sequences[0].offset = offsetCode + 1;

    /* match Length */
    if (mlBase>0xFFFF) {
        assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
        seqStorePtr->longLengthID = 2;
        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    }
    seqStorePtr->sequences[0].matchLength = (U16)mlBase;

    seqStorePtr->sequences++;
}
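
/* Usage sketch (hedged, mirroring callers in lib/compress) :
 * for a regular match found at distance `offset` :
 *     ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, matchLength - MINMATCH);
 * for a repeated offset (repCode 0) :
 *     ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, matchLength - MINMATCH);
 */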

/*-*************************************
*  Match length counter
***************************************/
static unsigned ZSTD_NbCommonBytes (size_t val)
{
    if (MEM_isLittleEndian()) {
        if (MEM_64bits()) {
#       if defined(_MSC_VER) && defined(_WIN64)
            unsigned long r = 0;
            _BitScanForward64( &r, (U64)val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 4)
            return (__builtin_ctzll((U64)val) >> 3);
#       else
            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
                                                     0, 3, 1, 3, 1, 4, 2, 7,
                                                     0, 2, 3, 6, 1, 5, 3, 5,
                                                     1, 3, 4, 4, 2, 5, 6, 7,
                                                     7, 0, 1, 2, 3, 3, 4, 6,
                                                     2, 6, 5, 5, 3, 4, 5, 6,
                                                     7, 1, 2, 4, 6, 4, 4, 5,
                                                     7, 2, 6, 5, 7, 6, 7, 7 };
            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
#       endif
        } else { /* 32 bits */
#       if defined(_MSC_VER)
            unsigned long r=0;
            _BitScanForward( &r, (U32)val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_ctz((U32)val) >> 3);
#       else
            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
                                                     3, 2, 2, 1, 3, 2, 0, 1,
                                                     3, 3, 1, 2, 2, 2, 2, 0,
                                                     3, 1, 2, 0, 1, 0, 1, 1 };
            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
#       endif
        }
    } else {  /* Big Endian CPU */
        if (MEM_64bits()) {
#       if defined(_MSC_VER) && defined(_WIN64)
            unsigned long r = 0;
            _BitScanReverse64( &r, val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 4)
            return (__builtin_clzll(val) >> 3);
#       else
            unsigned r;
            const unsigned n32 = sizeof(size_t)*4;   /* calculate this way due to compiler complaining in 32-bits mode */
            if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
            r += (!val);
            return r;
#       endif
        } else { /* 32 bits */
#       if defined(_MSC_VER)
            unsigned long r = 0;
            _BitScanReverse( &r, (unsigned long)val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_clz((U32)val) >> 3);
#       else
            unsigned r;
            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
            r += (!val);
            return r;
#       endif
    }   }
}
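
/* Worked example (illustrative) : on a little-endian 64-bit CPU, if the first
 * two bytes of two fields agree and the third differs, val = in ^ match has its
 * lowest set bit somewhere in bits [16,23], so __builtin_ctzll(val) >> 3 == 2 :
 * two common bytes. */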

MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
{
    const BYTE* const pStart = pIn;
    const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);

    if (pIn < pInLoopLimit) {
        { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
          if (diff) return ZSTD_NbCommonBytes(diff); }
        pIn+=sizeof(size_t); pMatch+=sizeof(size_t);
        while (pIn < pInLoopLimit) {
            size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
            if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
            pIn += ZSTD_NbCommonBytes(diff);
            return (size_t)(pIn - pStart);
    }   }
    if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
    if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
    return (size_t)(pIn - pStart);
}

/** ZSTD_count_2segments() :
 *  can count match length with `ip` & `match` in 2 different segments.
 *  convention : on reaching mEnd, match count continue starting from iStart
*/
MEM_STATIC size_t ZSTD_count_2segments(const BYTE* ip, const BYTE* match, const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
{
    const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
    size_t const matchLength = ZSTD_count(ip, match, vEnd);
    if (match + matchLength != mEnd) return matchLength;
    return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
}
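
/* Example (illustrative) : in extDict mode, a match may start in the dictionary
 * segment and continue into the current one. If `match` sits 10 bytes before
 * mEnd and the data keeps matching, the first ZSTD_count() reports 10 bytes up
 * to vEnd, then counting resumes at iStart (start of the current segment) :
 *     total = 10 + ZSTD_count(ip+10, iStart, iEnd);
 */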

/*-*************************************
*  Hashes
***************************************/
static const U32 prime3bytes = 506832829U;
static U32    ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes)  >> (32-h) ; }
MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */

static const U32 prime4bytes = 2654435761U;
static U32    ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }

static const U64 prime5bytes = 889523592379ULL;
static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u  << (64-40)) * prime5bytes) >> (64-h)) ; }
static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }

static const U64 prime6bytes = 227718039650203ULL;
static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u  << (64-48)) * prime6bytes) >> (64-h)) ; }
static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }

static const U64 prime7bytes = 58295818150454627ULL;
static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u  << (64-56)) * prime7bytes) >> (64-h)) ; }
static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }

static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }

MEM_STATIC size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
{
    switch(mls)
    {
    default:
    case 4: return ZSTD_hash4Ptr(p, hBits);
    case 5: return ZSTD_hash5Ptr(p, hBits);
    case 6: return ZSTD_hash6Ptr(p, hBits);
    case 7: return ZSTD_hash7Ptr(p, hBits);
    case 8: return ZSTD_hash8Ptr(p, hBits);
    }
}
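
/* Usage sketch (hedged) : a typical hash-table update inside a block compressor :
 *     size_t const h = ZSTD_hashPtr(ip, hBits, mls);   // mls == cParams->searchLength
 *     hashTable[h] = (U32)(ip - base);                 // store current index
 * Each ZSTD_hashN() keeps only the N lowest bytes (via the left shift), multiplies
 * by a large prime, and returns the top hBits bits of the product. */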

#if defined (__cplusplus)
}
#endif


/* ==============================================================
 * Private declarations
 * These prototypes shall only be called from within lib/compress
 * ============================================================== */

/*! ZSTD_initCStream_internal() :
 *  Private use only. Init streaming operation.
 *  expects params to be valid.
 *  must receive dict, or cdict, or none, but not both.
 *  @return : 0, or an error code */
size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
                     const void* dict, size_t dictSize,
                     const ZSTD_CDict* cdict,
                     ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);

/*! ZSTD_compressStream_generic() :
 *  Private use only. To be called from zstdmt_compress.c in single-thread mode. */
size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
                                   ZSTD_outBuffer* output,
                                   ZSTD_inBuffer* input,
                                   ZSTD_EndDirective const flushMode);

/*! ZSTD_getCParamsFromCDict() :
 *  as the name implies */
ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);

/* ZSTD_compressBegin_advanced_internal() :
 * Private use only. To be called from zstdmt_compress.c. */
size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
                                    const void* dict, size_t dictSize,
                                    ZSTD_dictMode_e dictMode,
                                    const ZSTD_CDict* cdict,
                                    ZSTD_CCtx_params params,
                                    unsigned long long pledgedSrcSize);

/* ZSTD_compress_advanced_internal() :
 * Private use only. To be called from zstdmt_compress.c. */
size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
                                       void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize,
                                       const void* dict, size_t dictSize,
                                       ZSTD_CCtx_params params);

/* ZSTD_writeLastEmptyBlock() :
 * output an empty Block with end-of-frame mark to complete a frame
 * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
 *           or an error code if `dstCapacity` is too small (< ZSTD_blockHeaderSize)
 */
size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
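
/* Sketch (hedged) : a last, empty, raw block reduces to a single 3-byte block
 * header : lastBlock bit set, blockType bt_raw (0), blockSize 0 :
 *     if (dstCapacity < ZSTD_blockHeaderSize) return ERROR(dstSize_tooSmall);
 *     MEM_writeLE24(dst, 1);   // 1 == lastBlock flag ; raw type and size contribute 0
 *     return ZSTD_blockHeaderSize;
 */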

#endif  /* ZSTD_COMPRESS_H */