Merge pull request #846 from facebook/splitDict

Split dict
Branch: dev
Yann Collet 2017-09-15 23:26:23 -07:00 committed by GitHub
commit 4a52a89026
6 changed files with 155 additions and 82 deletions

doc/zstd_manual.html

@@ -845,6 +845,35 @@ size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long
     </b>/* advanced parameters - may not remain available after API update */<b>
     ZSTD_p_forceMaxWindow=1100,    </b>/* Force back-reference distances to remain < windowSize,<b>
                                     * even when referencing into Dictionary content (default:0) */
+    ZSTD_p_enableLongDistanceMatching=1200,  </b>/* Enable long distance matching.<b>
+                                    * This parameter is designed to improve the compression
+                                    * ratio for large inputs with long distance matches.
+                                    * This increases the memory usage as well as window size.
+                                    * Note: setting this parameter sets all the LDM parameters
+                                    * as well as ZSTD_p_windowLog. It should be set after
+                                    * ZSTD_p_compressionLevel and before ZSTD_p_windowLog and
+                                    * other LDM parameters. Setting the compression level
+                                    * after this parameter overrides the window log, though LDM
+                                    * will remain enabled until explicitly disabled. */
+    ZSTD_p_ldmHashLog,   </b>/* Size of the table for long distance matching, as a power of 2.<b>
+                          * Larger values increase memory usage and compression ratio, but decrease
+                          * compression speed.
+                          * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX
+                          * (default: 20). */
+    ZSTD_p_ldmMinMatch,   </b>/* Minimum size of searched matches for long distance matcher.<b>
+                           * Larger/too small values usually decrease compression ratio.
+                           * Must be clamped between ZSTD_LDM_MINMATCH_MIN
+                           * and ZSTD_LDM_MINMATCH_MAX (default: 64). */
+    ZSTD_p_ldmBucketSizeLog,   </b>/* Log size of each bucket in the LDM hash table for collision resolution.<b>
+                                * Larger values usually improve collision resolution but may decrease
+                                * compression speed.
+                                * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX (default: 3). */
+    ZSTD_p_ldmHashEveryLog,   </b>/* Frequency of inserting/looking up entries in the LDM hash table.<b>
+                               * The default is MAX(0, (windowLog - ldmHashLog)) to
+                               * optimize hash table usage.
+                               * Larger values improve compression speed. Deviating far from the
+                               * default value will likely result in a decrease in compression ratio.
+                               * Must be clamped between 0 and ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN. */
 } ZSTD_cParameter;
 </b></pre><BR>
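
For context: these new compression parameters are meant to be set through the advanced API's `ZSTD_CCtx_setParameter()` entry point, which is not part of this diff. A minimal sketch, assuming that entry point and purely illustrative values; `CCtx_enableLDM` is a hypothetical helper, not part of the library:

    #define ZSTD_STATIC_LINKING_ONLY   /* expose the experimental advanced API */
    #include <zstd.h>

    /* Sketch : enable long distance matching on a compression context.
     * Ordering follows the documentation above : compression level first,
     * then ZSTD_p_enableLongDistanceMatching, then optional windowLog override. */
    static size_t CCtx_enableLDM(ZSTD_CCtx* cctx, int cLevel, unsigned windowLog)
    {
        size_t err = ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, (unsigned)cLevel);
        if (ZSTD_isError(err)) return err;
        err = ZSTD_CCtx_setParameter(cctx, ZSTD_p_enableLongDistanceMatching, 1);   /* also sets LDM defaults and windowLog */
        if (ZSTD_isError(err)) return err;
        return ZSTD_CCtx_setParameter(cctx, ZSTD_p_windowLog, windowLog);   /* optional override; illustrative */
    }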

lib/dictBuilder/zdict.c

@@ -375,7 +375,7 @@ static int isIncluded(const void* in, const void* container, size_t length)
     return u==length;
 }

-/*! ZDICT_checkMerge
+/*! ZDICT_tryMerge() :
     check if dictItem can be merged, do it if possible
     @return : id of destination elt, 0 if not merged
  */
@@ -440,8 +440,8 @@ static U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const
 static void ZDICT_removeDictItem(dictItem* table, U32 id)
 {
-    /* convention : first element is nb of elts */
-    U32 const max = table->pos;
+    /* convention : table[0].pos stores nb of elts */
+    U32 const max = table[0].pos;
     U32 u;
     if (!id) return;   /* protection, should never happen */
     for (u=id; u<max-1; u++)

programs/dibio.c

@@ -14,7 +14,7 @@
 *  Compiler Warnings
 ****************************************/
 #ifdef _MSC_VER
 #  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
 #endif
@ -44,7 +44,7 @@
#define SAMPLESIZE_MAX (128 KB) #define SAMPLESIZE_MAX (128 KB)
#define MEMMULT 11 /* rough estimation : memory cost to analyze 1 byte of sample */ #define MEMMULT 11 /* rough estimation : memory cost to analyze 1 byte of sample */
#define COVER_MEMMULT 9 /* rough estimation : memory cost to analyze 1 byte of sample */ #define COVER_MEMMULT 9 /* rough estimation : memory cost to analyze 1 byte of sample */
static const size_t maxMemory = (sizeof(size_t) == 4) ? (2 GB - 64 MB) : ((size_t)(512 MB) << sizeof(size_t)); static const size_t g_maxMemory = (sizeof(size_t) == 4) ? (2 GB - 64 MB) : ((size_t)(512 MB) << sizeof(size_t));
#define NOISELENGTH 32 #define NOISELENGTH 32
@@ -53,13 +53,12 @@ static const size_t maxMemory = (sizeof(size_t) == 4) ? (2 GB - 64 MB) : ((size_t)(512 MB) << sizeof(size_t));
 *  Console display
 ***************************************/
 #define DISPLAY(...)         fprintf(stderr, __VA_ARGS__)
-#define DISPLAYLEVEL(l, ...) if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); }
-static int g_displayLevel = 0;   /* 0 : no display;  1: errors;  2: default;  4: full information */
+#define DISPLAYLEVEL(l, ...) if (displayLevel>=l) { DISPLAY(__VA_ARGS__); }

-#define DISPLAYUPDATE(l, ...) if (g_displayLevel>=l) { \
-            if ((DIB_clockSpan(g_time) > refreshRate) || (g_displayLevel>=4)) \
+#define DISPLAYUPDATE(l, ...) if (displayLevel>=l) { \
+            if ((DIB_clockSpan(g_time) > refreshRate) || (displayLevel>=4)) \
             { g_time = clock(); DISPLAY(__VA_ARGS__); \
-            if (g_displayLevel>=4) fflush(stderr); } }
+            if (displayLevel>=4) fflush(stderr); } }
 static const clock_t refreshRate = CLOCKS_PER_SEC * 2 / 10;
 static clock_t g_time = 0;
@ -76,9 +75,9 @@ static clock_t DIB_clockSpan(clock_t nPrevious) { return clock() - nPrevious; }
#define EXM_THROW(error, ...) \ #define EXM_THROW(error, ...) \
{ \ { \
DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \ DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \
DISPLAYLEVEL(1, "Error %i : ", error); \ DISPLAY("Error %i : ", error); \
DISPLAYLEVEL(1, __VA_ARGS__); \ DISPLAY(__VA_ARGS__); \
DISPLAYLEVEL(1, "\n"); \ DISPLAY("\n"); \
exit(error); \ exit(error); \
} }
@@ -98,32 +97,55 @@ const char* DiB_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
 *  File related operations
 **********************************************************/
 /** DiB_loadFiles() :
- *  @return : nb of files effectively loaded into `buffer` */
+ *  load samples from files listed in fileNamesTable into buffer.
+ *  works even if buffer is too small to load all samples.
+ *  Also provides the size of each sample into sampleSizes table
+ *  which must be sized correctly, using DiB_fileStats().
+ * @return : nb of samples effectively loaded into `buffer`
+ *  *bufferSizePtr is modified, it provides the amount data loaded within buffer.
+ *  sampleSizes is filled with the size of each sample.
+ */
 static unsigned DiB_loadFiles(void* buffer, size_t* bufferSizePtr,
-                              size_t* fileSizes,
-                              const char** fileNamesTable, unsigned nbFiles)
+                              size_t* sampleSizes, unsigned sstSize,
+                              const char** fileNamesTable, unsigned nbFiles, size_t targetChunkSize,
+                              unsigned displayLevel)
 {
     char* const buff = (char*)buffer;
     size_t pos = 0;
-    unsigned n;
+    unsigned nbLoadedChunks = 0, fileIndex;

-    for (n=0; n<nbFiles; n++) {
-        const char* const fileName = fileNamesTable[n];
+    for (fileIndex=0; fileIndex<nbFiles; fileIndex++) {
+        const char* const fileName = fileNamesTable[fileIndex];
         unsigned long long const fs64 = UTIL_getFileSize(fileName);
-        size_t const fileSize = (size_t) MIN(fs64, SAMPLESIZE_MAX);
-        if (fileSize > *bufferSizePtr-pos) break;
-        {   FILE* const f = fopen(fileName, "rb");
-            if (f==NULL) EXM_THROW(10, "zstd: dictBuilder: %s %s ", fileName, strerror(errno));
-            DISPLAYUPDATE(2, "Loading %s...       \r", fileName);
-            { size_t const readSize = fread(buff+pos, 1, fileSize, f);
-              if (readSize != fileSize) EXM_THROW(11, "Pb reading %s", fileName);
-              pos += readSize; }
-            fileSizes[n] = fileSize;
-            fclose(f);
-    }   }
+        unsigned long long remainingToLoad = fs64;
+        U32 const nbChunks = targetChunkSize ? (U32)((fs64 + (targetChunkSize-1)) / targetChunkSize) : 1;
+        U64 const chunkSize = targetChunkSize ? MIN(targetChunkSize, fs64) : fs64;
+        size_t const maxChunkSize = (size_t)MIN(chunkSize, SAMPLESIZE_MAX);
+        U32 cnb;
+        FILE* const f = fopen(fileName, "rb");
+        if (f==NULL) EXM_THROW(10, "zstd: dictBuilder: %s %s ", fileName, strerror(errno));
+        DISPLAYUPDATE(2, "Loading %s...       \r", fileName);
+        for (cnb=0; cnb<nbChunks; cnb++) {
+            size_t const toLoad = (size_t)MIN(maxChunkSize, remainingToLoad);
+            if (toLoad > *bufferSizePtr-pos) break;
+            {   size_t const readSize = fread(buff+pos, 1, toLoad, f);
+                if (readSize != toLoad) EXM_THROW(11, "Pb reading %s", fileName);
+                pos += readSize;
+                sampleSizes[nbLoadedChunks++] = toLoad;
+                remainingToLoad -= targetChunkSize;
+                if (nbLoadedChunks == sstSize) {   /* no more space left in sampleSizes table */
+                    fileIndex = nbFiles;   /* stop there */
+                    break;
+                }
+                if (toLoad < targetChunkSize) {
+                    fseek(f, (long)(targetChunkSize - toLoad), SEEK_CUR);
+        }   }   }
+        fclose(f);
+    }
     DISPLAYLEVEL(2, "\r%79s\r", "");
     *bufferSizePtr = pos;
-    return n;
+    DISPLAYLEVEL(4, "loaded : %u KB \n", (U32)(pos >> 10))
+    return nbLoadedChunks;
 }

 #define DiB_rotl32(x,r) ((x << r) | (x >> (32 - r)))
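
Worked example of the chunking arithmetic above (illustrative numbers): with `targetChunkSize` = 64 KB, a 150 KB file yields `nbChunks` = ceil(150/64) = 3 samples of 64 KB, 64 KB and 22 KB. Each chunk is additionally capped at `SAMPLESIZE_MAX` (128 KB); when a chunk is truncated by that cap, the `fseek()` skips the unloaded remainder so the next chunk starts at its proper file offset.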
@@ -139,16 +161,19 @@ static U32 DiB_rand(U32* src)
     return rand32 >> 5;
 }

+/* DiB_shuffle() :
+ * shuffle a table of file names in a semi-random way
+ * It improves dictionary quality by reducing "locality" impact, so if sample set is very large,
+ * it will load random elements from it, instead of just the first ones. */
 static void DiB_shuffle(const char** fileNamesTable, unsigned nbFiles) {
-    /* Initialize the pseudorandom number generator */
     U32 seed = 0xFD2FB528;
     unsigned i;
     for (i = nbFiles - 1; i > 0; --i) {
         unsigned const j = DiB_rand(&seed) % (i + 1);
-        const char* tmp = fileNamesTable[j];
+        const char* const tmp = fileNamesTable[j];
         fileNamesTable[j] = fileNamesTable[i];
         fileNamesTable[i] = tmp;
     }
 }
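
Note: the loop above is a Fisher-Yates shuffle seeded with the fixed constant 0xFD2FB528 (the zstd frame magic number), so the "semi-random" order is deterministic and reproducible from run to run.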
@@ -162,7 +187,7 @@ static size_t DiB_findMaxMem(unsigned long long requiredMem)
     requiredMem = (((requiredMem >> 23) + 1) << 23);
     requiredMem += step;
-    if (requiredMem > maxMemory) requiredMem = maxMemory;
+    if (requiredMem > g_maxMemory) requiredMem = g_maxMemory;

     while (!testmem) {
         testmem = malloc((size_t)requiredMem);
@@ -202,18 +227,33 @@ static void DiB_saveDict(const char* dictFileName,
 }

-static int g_tooLargeSamples = 0;
-static U64 DiB_getTotalCappedFileSize(const char** fileNamesTable, unsigned nbFiles)
+typedef struct {
+    U64 totalSizeToLoad;
+    unsigned oneSampleTooLarge;
+    unsigned nbSamples;
+} fileStats;
+
+/*! DiB_fileStats() :
+ *  Given a list of files, and a chunkSize (0 == no chunk, whole files)
+ *  provides the amount of data to be loaded and the resulting nb of samples.
+ *  This is useful primarily for allocation purpose => sample buffer, and sample sizes table.
+ */
+static fileStats DiB_fileStats(const char** fileNamesTable, unsigned nbFiles, size_t chunkSize, unsigned displayLevel)
 {
-    U64 total = 0;
+    fileStats fs;
     unsigned n;
+    memset(&fs, 0, sizeof(fs));
     for (n=0; n<nbFiles; n++) {
         U64 const fileSize = UTIL_getFileSize(fileNamesTable[n]);
-        U64 const cappedFileSize = MIN(fileSize, SAMPLESIZE_MAX);
-        total += cappedFileSize;
-        g_tooLargeSamples |= (fileSize > 2*SAMPLESIZE_MAX);
+        U32 const nbSamples = (U32)(chunkSize ? (fileSize + (chunkSize-1)) / chunkSize : 1);
+        U64 const chunkToLoad = chunkSize ? MIN(chunkSize, fileSize) : fileSize;
+        size_t const cappedChunkSize = (size_t)MIN(chunkToLoad, SAMPLESIZE_MAX);
+        fs.totalSizeToLoad += cappedChunkSize * nbSamples;
+        fs.oneSampleTooLarge |= (chunkSize > 2*SAMPLESIZE_MAX);
+        fs.nbSamples += nbSamples;
     }
-    return total;
+    DISPLAYLEVEL(4, "Preparing to load : %u KB \n", (U32)(fs.totalSizeToLoad >> 10));
+    return fs;
 }
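
As used below in DiB_trainFromFiles(), `fs.nbSamples` sizes the `sampleSizes` table and `fs.totalSizeToLoad` (clamped by DiB_findMaxMem()) sizes the sample buffer.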
@@ -230,64 +270,66 @@ size_t ZDICT_trainFromBuffer_unsafe_legacy(void* dictBuffer, size_t dictBufferCapacity,
 int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize,
-                       const char** fileNamesTable, unsigned nbFiles,
+                       const char** fileNamesTable, unsigned nbFiles, size_t chunkSize,
                        ZDICT_legacy_params_t *params, ZDICT_cover_params_t *coverParams,
                        int optimizeCover)
 {
+    unsigned const displayLevel = params ? params->zParams.notificationLevel :
+                        coverParams ? coverParams->zParams.notificationLevel :
+                        0;   /* should never happen */
     void* const dictBuffer = malloc(maxDictSize);
-    size_t* const fileSizes = (size_t*)malloc(nbFiles * sizeof(size_t));
-    unsigned long long const totalSizeToLoad = DiB_getTotalCappedFileSize(fileNamesTable, nbFiles);
+    fileStats const fs = DiB_fileStats(fileNamesTable, nbFiles, chunkSize, displayLevel);
+    size_t* const sampleSizes = (size_t*)malloc(fs.nbSamples * sizeof(size_t));
     size_t const memMult = params ? MEMMULT : COVER_MEMMULT;
-    size_t const maxMem =  DiB_findMaxMem(totalSizeToLoad * memMult) / memMult;
-    size_t benchedSize = (size_t) MIN ((unsigned long long)maxMem, totalSizeToLoad);
-    void* const srcBuffer = malloc(benchedSize+NOISELENGTH);
+    size_t const maxMem =  DiB_findMaxMem(fs.totalSizeToLoad * memMult) / memMult;
+    size_t loadedSize = (size_t) MIN ((unsigned long long)maxMem, fs.totalSizeToLoad);
+    void* const srcBuffer = malloc(loadedSize+NOISELENGTH);
     int result = 0;

     /* Checks */
-    if (params) g_displayLevel = params->zParams.notificationLevel;
-    else if (coverParams) g_displayLevel = coverParams->zParams.notificationLevel;
-    else EXM_THROW(13, "Neither dictionary algorith selected");   /* should not happen */
-    if ((!fileSizes) || (!srcBuffer) || (!dictBuffer)) EXM_THROW(12, "not enough memory for DiB_trainFiles");   /* should not happen */
-    if (g_tooLargeSamples) {
-        DISPLAYLEVEL(2, "!  Warning : some samples are very large \n");
-        DISPLAYLEVEL(2, "!  Note that dictionary is only useful for small files or beginning of large files. \n");
-        DISPLAYLEVEL(2, "!  As a consequence, only the first %u bytes of each file are loaded \n", SAMPLESIZE_MAX);
+    if ((!sampleSizes) || (!srcBuffer) || (!dictBuffer))
+        EXM_THROW(12, "not enough memory for DiB_trainFiles");   /* should not happen */
+    if (fs.oneSampleTooLarge) {
+        DISPLAYLEVEL(2, "!  Warning : some sample(s) are very large \n");
+        DISPLAYLEVEL(2, "!  Note that dictionary is only useful for small samples. \n");
+        DISPLAYLEVEL(2, "!  As a consequence, only the first %u bytes of each sample are loaded \n", SAMPLESIZE_MAX);
     }
-    if ((nbFiles < 5) || (totalSizeToLoad < 9 * (unsigned long long)maxDictSize)) {
+    if (fs.nbSamples < 5) {
         DISPLAYLEVEL(2, "!  Warning : nb of samples too low for proper processing ! \n");
         DISPLAYLEVEL(2, "!  Please provide _one file per sample_. \n");
-        DISPLAYLEVEL(2, "!  Do not concatenate samples together into a single file, \n");
-        DISPLAYLEVEL(2, "!  as dictBuilder will be unable to find the beginning of each sample, \n");
-        DISPLAYLEVEL(2, "!  resulting in poor dictionary quality. \n");
+        DISPLAYLEVEL(2, "!  Alternatively, split files into fixed-size blocks representative of samples, with -B# \n");
+        EXM_THROW(14, "nb of samples too low");   /* we now clearly forbid this case */
+    }
+    if (fs.totalSizeToLoad < (unsigned long long)(8 * maxDictSize)) {
+        DISPLAYLEVEL(2, "!  Warning : data size of samples too small for target dictionary size \n");
+        DISPLAYLEVEL(2, "!  Samples should be about 100x larger than target dictionary size \n");
     }

     /* init */
-    if (benchedSize < totalSizeToLoad)
-        DISPLAYLEVEL(1, "Not enough memory; training on %u MB only...\n", (unsigned)(benchedSize >> 20));
+    if (loadedSize < fs.totalSizeToLoad)
+        DISPLAYLEVEL(1, "Not enough memory; training on %u MB only...\n", (unsigned)(loadedSize >> 20));

     /* Load input buffer */
     DISPLAYLEVEL(3, "Shuffling input files\n");
     DiB_shuffle(fileNamesTable, nbFiles);
-    nbFiles = DiB_loadFiles(srcBuffer, &benchedSize, fileSizes, fileNamesTable, nbFiles);
+    nbFiles = DiB_loadFiles(srcBuffer, &loadedSize, sampleSizes, fs.nbSamples, fileNamesTable, nbFiles, chunkSize, displayLevel);

-    {
-        size_t dictSize;
+    {   size_t dictSize;
         if (params) {
-            DiB_fillNoise((char*)srcBuffer + benchedSize, NOISELENGTH);   /* guard band, for end of buffer condition */
+            DiB_fillNoise((char*)srcBuffer + loadedSize, NOISELENGTH);   /* guard band, for end of buffer condition */
             dictSize = ZDICT_trainFromBuffer_unsafe_legacy(dictBuffer, maxDictSize,
-                                                           srcBuffer, fileSizes, nbFiles,
+                                                           srcBuffer, sampleSizes, fs.nbSamples,
                                                            *params);
         } else if (optimizeCover) {
             dictSize = ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, maxDictSize,
-                                                           srcBuffer, fileSizes, nbFiles,
+                                                           srcBuffer, sampleSizes, fs.nbSamples,
                                                            coverParams);
             if (!ZDICT_isError(dictSize)) {
                 DISPLAYLEVEL(2, "k=%u\nd=%u\nsteps=%u\n", coverParams->k, coverParams->d, coverParams->steps);
             }
         } else {
-            dictSize =
-                ZDICT_trainFromBuffer_cover(dictBuffer, maxDictSize, srcBuffer,
-                                            fileSizes, nbFiles, *coverParams);
+            dictSize = ZDICT_trainFromBuffer_cover(dictBuffer, maxDictSize, srcBuffer,
+                                                   sampleSizes, fs.nbSamples, *coverParams);
         }
         if (ZDICT_isError(dictSize)) {
             DISPLAYLEVEL(1, "dictionary training failed : %s \n", ZDICT_getErrorName(dictSize));   /* should not happen */
@@ -302,7 +344,7 @@ int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize,
     /* clean up */
 _cleanup:
     free(srcBuffer);
+    free(sampleSizes);
     free(dictBuffer);
-    free(fileSizes);
     return result;
 }

programs/dibio.h

@@ -32,7 +32,7 @@
    @return : 0 == ok. Any other : error.
 */
 int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize,
-                       const char** fileNamesTable, unsigned nbFiles,
+                       const char** fileNamesTable, unsigned nbFiles, size_t chunkSize,
                        ZDICT_legacy_params_t *params, ZDICT_cover_params_t *coverParams,
                        int optimizeCover);

programs/zstd.1.md

@@ -184,6 +184,8 @@ Typical gains range from 10% (at 64KB) to x5 better (at <1KB).
     Dictionary saved into `file` (default name: dictionary).
 * `--maxdict=#`:
     Limit dictionary to specified size (default: 112640).
+* `-B#`:
+    Split input files in blocks of size # (default: no split)
 * `--dictID=#`:
     A dictionary ID is a locally unique ID that a decoder can use to verify it is
     using the right dictionary.
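
Usage sketch for the new `-B#` flag (hypothetical file name): `zstd --train samples.bin -B4096 -o dictionary` trains on 4096-byte blocks cut from `samples.bin`, rather than treating the whole file as a single sample.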

programs/zstdcli.c

@@ -759,13 +759,13 @@ int main(int argCount, const char* argv[])
             int const optimize = !coverParams.k || !coverParams.d;
             coverParams.nbThreads = nbThreads;
             coverParams.zParams = zParams;
-            operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, NULL, &coverParams, optimize);
+            operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, blockSize, NULL, &coverParams, optimize);
         } else {
             ZDICT_legacy_params_t dictParams;
             memset(&dictParams, 0, sizeof(dictParams));
             dictParams.selectivityLevel = dictSelect;
             dictParams.zParams = zParams;
-            operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, &dictParams, NULL, 0);
+            operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, blockSize, &dictParams, NULL, 0);
         }
 #endif
         goto _end;