commit 4a52a89026
@@ -845,6 +845,35 @@ size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long
 </b>/* advanced parameters - may not remain available after API update */<b>
     ZSTD_p_forceMaxWindow=1100, </b>/* Force back-reference distances to remain < windowSize,<b>
                               * even when referencing into Dictionary content (default:0) */
+    ZSTD_p_enableLongDistanceMatching=1200, </b>/* Enable long distance matching.<b>
+                              * This parameter is designed to improve the compression
+                              * ratio for large inputs with long distance matches.
+                              * This increases the memory usage as well as window size.
+                              * Note: setting this parameter sets all the LDM parameters
+                              * as well as ZSTD_p_windowLog. It should be set after
+                              * ZSTD_p_compressionLevel and before ZSTD_p_windowLog and
+                              * other LDM parameters. Setting the compression level
+                              * after this parameter overrides the window log, though LDM
+                              * will remain enabled until explicitly disabled. */
+    ZSTD_p_ldmHashLog, </b>/* Size of the table for long distance matching, as a power of 2.<b>
+                              * Larger values increase memory usage and compression ratio, but decrease
+                              * compression speed.
+                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX
+                              * (default: 20). */
+    ZSTD_p_ldmMinMatch, </b>/* Minimum size of searched matches for long distance matcher.<b>
+                              * Larger/too small values usually decrease compression ratio.
+                              * Must be clamped between ZSTD_LDM_MINMATCH_MIN
+                              * and ZSTD_LDM_MINMATCH_MAX (default: 64). */
+    ZSTD_p_ldmBucketSizeLog, </b>/* Log size of each bucket in the LDM hash table for collision resolution.<b>
+                              * Larger values usually improve collision resolution but may decrease
+                              * compression speed.
+                              * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX (default: 3). */
+    ZSTD_p_ldmHashEveryLog, </b>/* Frequency of inserting/looking up entries in the LDM hash table.<b>
+                              * The default is MAX(0, (windowLog - ldmHashLog)) to
+                              * optimize hash table usage.
+                              * Larger values improve compression speed. Deviating far from the
+                              * default value will likely result in a decrease in compression ratio.
+                              * Must be clamped between 0 and ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN. */
 
 } ZSTD_cParameter;
 </b></pre><BR>
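For context on the ordering note above: with the experimental v1.3.x advanced API (exposed under ZSTD_STATIC_LINKING_ONLY), enabling LDM might look like the sketch below. The function name, buffer handling, and the explicit windowLog value are illustrative, not part of this change; error checks on ZSTD_CCtx_setParameter() are omitted for brevity.

    #define ZSTD_STATIC_LINKING_ONLY   /* expose the experimental parameter API */
    #include <zstd.h>

    /* Sketch only: compress one buffer with long distance matching enabled,
     * respecting the ordering note above (level first, then LDM, then windowLog). */
    static size_t compressWithLDM(void* dst, size_t dstCapacity,
                                  const void* src, size_t srcSize)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        ZSTD_outBuffer out = { dst, dstCapacity, 0 };
        ZSTD_inBuffer  in  = { src, srcSize, 0 };
        size_t ret;
        ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, 19);
        ZSTD_CCtx_setParameter(cctx, ZSTD_p_enableLongDistanceMatching, 1); /* also sets LDM defaults and windowLog */
        ZSTD_CCtx_setParameter(cctx, ZSTD_p_windowLog, 27);                 /* optional explicit override (illustrative) */
        ret = ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_end);           /* single-shot: 0 means frame complete */
        ZSTD_freeCCtx(cctx);
        return ZSTD_isError(ret) ? ret : out.pos;
    }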
@@ -375,7 +375,7 @@ static int isIncluded(const void* in, const void* container, size_t length)
     return u==length;
 }
 
-/*! ZDICT_checkMerge
+/*! ZDICT_tryMerge() :
     check if dictItem can be merged, do it if possible
     @return : id of destination elt, 0 if not merged
  */
@@ -440,8 +440,8 @@ static U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const
 
 static void ZDICT_removeDictItem(dictItem* table, U32 id)
 {
-    /* convention : first element is nb of elts */
-    U32 const max = table->pos;
+    /* convention : table[0].pos stores nb of elts */
+    U32 const max = table[0].pos;
     U32 u;
     if (!id) return;   /* protection, should never happen */
     for (u=id; u<max-1; u++)
programs/dibio.c
@@ -44,7 +44,7 @@
 #define SAMPLESIZE_MAX (128 KB)
 #define MEMMULT 11 /* rough estimation : memory cost to analyze 1 byte of sample */
 #define COVER_MEMMULT 9 /* rough estimation : memory cost to analyze 1 byte of sample */
-static const size_t maxMemory = (sizeof(size_t) == 4) ? (2 GB - 64 MB) : ((size_t)(512 MB) << sizeof(size_t));
+static const size_t g_maxMemory = (sizeof(size_t) == 4) ? (2 GB - 64 MB) : ((size_t)(512 MB) << sizeof(size_t));
 
 #define NOISELENGTH 32
 
@@ -53,13 +53,12 @@ static const size_t maxMemory = (sizeof(size_t) == 4) ? (2 GB - 64 MB) : ((size_
 *  Console display
 ***************************************/
 #define DISPLAY(...)         fprintf(stderr, __VA_ARGS__)
-#define DISPLAYLEVEL(l, ...) if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); }
-static int g_displayLevel = 0;   /* 0 : no display; 1: errors; 2: default; 4: full information */
+#define DISPLAYLEVEL(l, ...) if (displayLevel>=l) { DISPLAY(__VA_ARGS__); }
 
-#define DISPLAYUPDATE(l, ...) if (g_displayLevel>=l) { \
-            if ((DIB_clockSpan(g_time) > refreshRate) || (g_displayLevel>=4)) \
+#define DISPLAYUPDATE(l, ...) if (displayLevel>=l) { \
+            if ((DIB_clockSpan(g_time) > refreshRate) || (displayLevel>=4)) \
             { g_time = clock(); DISPLAY(__VA_ARGS__); \
-            if (g_displayLevel>=4) fflush(stderr); } }
+            if (displayLevel>=4) fflush(stderr); } }
 static const clock_t refreshRate = CLOCKS_PER_SEC * 2 / 10;
 static clock_t g_time = 0;
@@ -76,9 +75,9 @@ static clock_t DiB_clockSpan(clock_t nPrevious) { return clock() - nPrevious; }
 #define EXM_THROW(error, ...) \
 { \
     DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \
-    DISPLAYLEVEL(1, "Error %i : ", error); \
-    DISPLAYLEVEL(1, __VA_ARGS__); \
-    DISPLAYLEVEL(1, "\n"); \
+    DISPLAY("Error %i : ", error); \
+    DISPLAY(__VA_ARGS__); \
+    DISPLAY("\n"); \
     exit(error); \
 }
@@ -98,32 +97,55 @@ const char* DiB_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCo
 *  File related operations
 **********************************************************/
 /** DiB_loadFiles() :
- *  @return : nb of files effectively loaded into `buffer` */
+ *  load samples from files listed in fileNamesTable into buffer.
+ *  works even if buffer is too small to load all samples.
+ *  Also provides the size of each sample into sampleSizes table
+ *  which must be sized correctly, using DiB_fileStats().
+ * @return : nb of samples effectively loaded into `buffer`
+ *  *bufferSizePtr is modified, it provides the amount data loaded within buffer.
+ *  sampleSizes is filled with the size of each sample.
+ */
 static unsigned DiB_loadFiles(void* buffer, size_t* bufferSizePtr,
-                              size_t* fileSizes,
-                              const char** fileNamesTable, unsigned nbFiles)
+                              size_t* sampleSizes, unsigned sstSize,
+                              const char** fileNamesTable, unsigned nbFiles, size_t targetChunkSize,
+                              unsigned displayLevel)
 {
     char* const buff = (char*)buffer;
     size_t pos = 0;
-    unsigned n;
+    unsigned nbLoadedChunks = 0, fileIndex;
 
-    for (n=0; n<nbFiles; n++) {
-        const char* const fileName = fileNamesTable[n];
+    for (fileIndex=0; fileIndex<nbFiles; fileIndex++) {
+        const char* const fileName = fileNamesTable[fileIndex];
         unsigned long long const fs64 = UTIL_getFileSize(fileName);
-        size_t const fileSize = (size_t) MIN(fs64, SAMPLESIZE_MAX);
-        if (fileSize > *bufferSizePtr-pos) break;
-        {   FILE* const f = fopen(fileName, "rb");
+        unsigned long long remainingToLoad = fs64;
+        U32 const nbChunks = targetChunkSize ? (U32)((fs64 + (targetChunkSize-1)) / targetChunkSize) : 1;
+        U64 const chunkSize = targetChunkSize ? MIN(targetChunkSize, fs64) : fs64;
+        size_t const maxChunkSize = (size_t)MIN(chunkSize, SAMPLESIZE_MAX);
+        U32 cnb;
+        FILE* const f = fopen(fileName, "rb");
         if (f==NULL) EXM_THROW(10, "zstd: dictBuilder: %s %s ", fileName, strerror(errno));
         DISPLAYUPDATE(2, "Loading %s... \r", fileName);
-        {   size_t const readSize = fread(buff+pos, 1, fileSize, f);
-            if (readSize != fileSize) EXM_THROW(11, "Pb reading %s", fileName);
-            pos += readSize; }
-        fileSizes[n] = fileSize;
+        for (cnb=0; cnb<nbChunks; cnb++) {
+            size_t const toLoad = (size_t)MIN(maxChunkSize, remainingToLoad);
+            if (toLoad > *bufferSizePtr-pos) break;
+            {   size_t const readSize = fread(buff+pos, 1, toLoad, f);
+                if (readSize != toLoad) EXM_THROW(11, "Pb reading %s", fileName);
+                pos += readSize;
+                sampleSizes[nbLoadedChunks++] = toLoad;
+                remainingToLoad -= targetChunkSize;
+                if (nbLoadedChunks == sstSize) {   /* no more space left in sampleSizes table */
+                    fileIndex = nbFiles;   /* stop there */
+                    break;
+                }
+                if (toLoad < targetChunkSize) {
+                    fseek(f, (long)(targetChunkSize - toLoad), SEEK_CUR);
+        }   }   }
         fclose(f);
-    } }
+    }
     DISPLAYLEVEL(2, "\r%79s\r", "");
     *bufferSizePtr = pos;
-    return n;
+    DISPLAYLEVEL(4, "loaded : %u KB \n", (U32)(pos >> 10))
+    return nbLoadedChunks;
 }
 
 #define DiB_rotl32(x,r) ((x << r) | (x >> (32 - r)))
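The chunk accounting in the new loop (ceiling division into nbChunks, each loaded chunk capped at SAMPLESIZE_MAX, remainder skipped with fseek) can be illustrated by the standalone sketch below; the function name and the sample sizes are invented for the example, and the SAMPLESIZE_MAX value simply mirrors dibio.c's 128 KB cap.

    #include <stdio.h>

    #define SAMPLESIZE_MAX (128 * 1024)
    #define MIN(a,b) ((a) < (b) ? (a) : (b))

    /* Illustration only: how one file is split into samples for a given -B value. */
    static void showChunking(unsigned long long fileSize, size_t targetChunkSize)
    {
        unsigned const nbChunks = targetChunkSize ?
                (unsigned)((fileSize + (targetChunkSize-1)) / targetChunkSize) : 1;   /* ceiling division */
        unsigned long long const chunkSize = targetChunkSize ? MIN(targetChunkSize, fileSize) : fileSize;
        size_t const maxChunkSize = (size_t)MIN(chunkSize, SAMPLESIZE_MAX);           /* per-sample cap */
        printf("file=%llu B, -B %zu => %u samples of at most %zu B each\n",
               fileSize, targetChunkSize, nbChunks, maxChunkSize);
    }

    int main(void)
    {
        showChunking(1000000, 0);        /* no split: 1 sample, capped at 128 KB */
        showChunking(1000000, 300000);   /* 4 chunks; each loaded chunk capped at 128 KB, rest skipped */
        return 0;
    }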
@@ -139,13 +161,16 @@ static U32 DiB_rand(U32* src)
     return rand32 >> 5;
 }
 
 /* DiB_shuffle() :
  * shuffle a table of file names in a semi-random way
+ * It improves dictionary quality by reducing "locality" impact, so if sample set is very large,
+ * it will load random elements from it, instead of just the first ones. */
 static void DiB_shuffle(const char** fileNamesTable, unsigned nbFiles) {
     /* Initialize the pseudorandom number generator */
     U32 seed = 0xFD2FB528;
     unsigned i;
     for (i = nbFiles - 1; i > 0; --i) {
         unsigned const j = DiB_rand(&seed) % (i + 1);
-        const char* tmp = fileNamesTable[j];
+        const char* const tmp = fileNamesTable[j];
         fileNamesTable[j] = fileNamesTable[i];
         fileNamesTable[i] = tmp;
     }
@@ -162,7 +187,7 @@ static size_t DiB_findMaxMem(unsigned long long requiredMem)
 
     requiredMem = (((requiredMem >> 23) + 1) << 23);
     requiredMem += step;
-    if (requiredMem > maxMemory) requiredMem = maxMemory;
+    if (requiredMem > g_maxMemory) requiredMem = g_maxMemory;
 
     while (!testmem) {
         testmem = malloc((size_t)requiredMem);
@@ -202,18 +227,33 @@ static void DiB_saveDict(const char* dictFileName,
 }
 
 
-static int g_tooLargeSamples = 0;
-static U64 DiB_getTotalCappedFileSize(const char** fileNamesTable, unsigned nbFiles)
+typedef struct {
+    U64 totalSizeToLoad;
+    unsigned oneSampleTooLarge;
+    unsigned nbSamples;
+} fileStats;
+
+/*! DiB_fileStats() :
+ *  Given a list of files, and a chunkSize (0 == no chunk, whole files)
+ *  provides the amount of data to be loaded and the resulting nb of samples.
+ *  This is useful primarily for allocation purpose => sample buffer, and sample sizes table.
+ */
+static fileStats DiB_fileStats(const char** fileNamesTable, unsigned nbFiles, size_t chunkSize, unsigned displayLevel)
 {
-    U64 total = 0;
+    fileStats fs;
     unsigned n;
+    memset(&fs, 0, sizeof(fs));
     for (n=0; n<nbFiles; n++) {
         U64 const fileSize = UTIL_getFileSize(fileNamesTable[n]);
-        U64 const cappedFileSize = MIN(fileSize, SAMPLESIZE_MAX);
-        total += cappedFileSize;
-        g_tooLargeSamples |= (fileSize > 2*SAMPLESIZE_MAX);
+        U32 const nbSamples = (U32)(chunkSize ? (fileSize + (chunkSize-1)) / chunkSize : 1);
+        U64 const chunkToLoad = chunkSize ? MIN(chunkSize, fileSize) : fileSize;
+        size_t const cappedChunkSize = (size_t)MIN(chunkToLoad, SAMPLESIZE_MAX);
+        fs.totalSizeToLoad += cappedChunkSize * nbSamples;
+        fs.oneSampleTooLarge |= (chunkSize > 2*SAMPLESIZE_MAX);
+        fs.nbSamples += nbSamples;
     }
-    return total;
+    DISPLAYLEVEL(4, "Preparing to load : %u KB \n", (U32)(fs.totalSizeToLoad >> 10));
+    return fs;
 }
 
 
@@ -230,64 +270,66 @@ size_t ZDICT_trainFromBuffer_unsafe_legacy(void* dictBuffer, size_t dictBufferCa
 
 
 int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize,
-                       const char** fileNamesTable, unsigned nbFiles,
+                       const char** fileNamesTable, unsigned nbFiles, size_t chunkSize,
                        ZDICT_legacy_params_t *params, ZDICT_cover_params_t *coverParams,
                        int optimizeCover)
 {
+    unsigned const displayLevel = params ? params->zParams.notificationLevel :
+                                  coverParams ? coverParams->zParams.notificationLevel :
+                                  0;   /* should never happen */
     void* const dictBuffer = malloc(maxDictSize);
-    size_t* const fileSizes = (size_t*)malloc(nbFiles * sizeof(size_t));
-    unsigned long long const totalSizeToLoad = DiB_getTotalCappedFileSize(fileNamesTable, nbFiles);
+    fileStats const fs = DiB_fileStats(fileNamesTable, nbFiles, chunkSize, displayLevel);
+    size_t* const sampleSizes = (size_t*)malloc(fs.nbSamples * sizeof(size_t));
     size_t const memMult = params ? MEMMULT : COVER_MEMMULT;
-    size_t const maxMem = DiB_findMaxMem(totalSizeToLoad * memMult) / memMult;
-    size_t benchedSize = (size_t) MIN ((unsigned long long)maxMem, totalSizeToLoad);
-    void* const srcBuffer = malloc(benchedSize+NOISELENGTH);
+    size_t const maxMem = DiB_findMaxMem(fs.totalSizeToLoad * memMult) / memMult;
+    size_t loadedSize = (size_t) MIN ((unsigned long long)maxMem, fs.totalSizeToLoad);
+    void* const srcBuffer = malloc(loadedSize+NOISELENGTH);
     int result = 0;
 
     /* Checks */
-    if (params) g_displayLevel = params->zParams.notificationLevel;
-    else if (coverParams) g_displayLevel = coverParams->zParams.notificationLevel;
-    else EXM_THROW(13, "Neither dictionary algorith selected");   /* should not happen */
-    if ((!fileSizes) || (!srcBuffer) || (!dictBuffer)) EXM_THROW(12, "not enough memory for DiB_trainFiles");   /* should not happen */
-    if (g_tooLargeSamples) {
-        DISPLAYLEVEL(2, "! Warning : some samples are very large \n");
-        DISPLAYLEVEL(2, "! Note that dictionary is only useful for small files or beginning of large files. \n");
-        DISPLAYLEVEL(2, "! As a consequence, only the first %u bytes of each file are loaded \n", SAMPLESIZE_MAX);
+    if ((!sampleSizes) || (!srcBuffer) || (!dictBuffer))
+        EXM_THROW(12, "not enough memory for DiB_trainFiles");   /* should not happen */
+    if (fs.oneSampleTooLarge) {
+        DISPLAYLEVEL(2, "! Warning : some sample(s) are very large \n");
+        DISPLAYLEVEL(2, "! Note that dictionary is only useful for small samples. \n");
+        DISPLAYLEVEL(2, "! As a consequence, only the first %u bytes of each sample are loaded \n", SAMPLESIZE_MAX);
     }
-    if ((nbFiles < 5) || (totalSizeToLoad < 9 * (unsigned long long)maxDictSize)) {
+    if (fs.nbSamples < 5) {
         DISPLAYLEVEL(2, "! Warning : nb of samples too low for proper processing ! \n");
         DISPLAYLEVEL(2, "! Please provide _one file per sample_. \n");
         DISPLAYLEVEL(2, "! Do not concatenate samples together into a single file, \n");
         DISPLAYLEVEL(2, "! as dictBuilder will be unable to find the beginning of each sample, \n");
         DISPLAYLEVEL(2, "! resulting in poor dictionary quality. \n");
+        DISPLAYLEVEL(2, "! Alternatively, split files into fixed-size blocks representative of samples, with -B# \n");
+        EXM_THROW(14, "nb of samples too low");   /* we now clearly forbid this case */
+    }
+    if (fs.totalSizeToLoad < (unsigned long long)(8 * maxDictSize)) {
+        DISPLAYLEVEL(2, "! Warning : data size of samples too small for target dictionary size \n");
+        DISPLAYLEVEL(2, "! Samples should be about 100x larger than target dictionary size \n");
     }
 
     /* init */
-    if (benchedSize < totalSizeToLoad)
-        DISPLAYLEVEL(1, "Not enough memory; training on %u MB only...\n", (unsigned)(benchedSize >> 20));
+    if (loadedSize < fs.totalSizeToLoad)
+        DISPLAYLEVEL(1, "Not enough memory; training on %u MB only...\n", (unsigned)(loadedSize >> 20));
 
     /* Load input buffer */
     DISPLAYLEVEL(3, "Shuffling input files\n");
     DiB_shuffle(fileNamesTable, nbFiles);
-    nbFiles = DiB_loadFiles(srcBuffer, &benchedSize, fileSizes, fileNamesTable, nbFiles);
+    nbFiles = DiB_loadFiles(srcBuffer, &loadedSize, sampleSizes, fs.nbSamples, fileNamesTable, nbFiles, chunkSize, displayLevel);
 
-    {
-        size_t dictSize;
+    {   size_t dictSize;
         if (params) {
-            DiB_fillNoise((char*)srcBuffer + benchedSize, NOISELENGTH);   /* guard band, for end of buffer condition */
+            DiB_fillNoise((char*)srcBuffer + loadedSize, NOISELENGTH);   /* guard band, for end of buffer condition */
             dictSize = ZDICT_trainFromBuffer_unsafe_legacy(dictBuffer, maxDictSize,
-                                                           srcBuffer, fileSizes, nbFiles,
+                                                           srcBuffer, sampleSizes, fs.nbSamples,
                                                            *params);
         } else if (optimizeCover) {
             dictSize = ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, maxDictSize,
-                                                           srcBuffer, fileSizes, nbFiles,
+                                                           srcBuffer, sampleSizes, fs.nbSamples,
                                                            coverParams);
             if (!ZDICT_isError(dictSize)) {
                 DISPLAYLEVEL(2, "k=%u\nd=%u\nsteps=%u\n", coverParams->k, coverParams->d, coverParams->steps);
             }
         } else {
-            dictSize =
-                ZDICT_trainFromBuffer_cover(dictBuffer, maxDictSize, srcBuffer,
-                                            fileSizes, nbFiles, *coverParams);
+            dictSize = ZDICT_trainFromBuffer_cover(dictBuffer, maxDictSize, srcBuffer,
+                                                   sampleSizes, fs.nbSamples, *coverParams);
         }
         if (ZDICT_isError(dictSize)) {
            DISPLAYLEVEL(1, "dictionary training failed : %s \n", ZDICT_getErrorName(dictSize));   /* should not happen */
@@ -32,7 +32,7 @@
     @return : 0 == ok. Any other : error.
 */
 int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize,
-                       const char** fileNamesTable, unsigned nbFiles,
+                       const char** fileNamesTable, unsigned nbFiles, size_t chunkSize,
                        ZDICT_legacy_params_t *params, ZDICT_cover_params_t *coverParams,
                        int optimizeCover);
 
@@ -184,6 +184,8 @@ Typical gains range from 10% (at 64KB) to x5 better (at <1KB).
     Dictionary saved into `file` (default name: dictionary).
 * `--maxdict=#`:
     Limit dictionary to specified size (default: 112640).
+* `-B#`:
+    Split input files in blocks of size # (default: no split)
 * `--dictID=#`:
     A dictionary ID is a locally unique ID that a decoder can use to verify it is
     using the right dictionary.
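With this option in place, a training run that splits its inputs into fixed-size blocks might look like `zstd --train -B4096 samplesDir/* -o dictionary --maxdict=65536`; the path, block size, and dictionary size here are purely illustrative.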
@@ -759,13 +759,13 @@ int main(int argCount, const char* argv[])
             int const optimize = !coverParams.k || !coverParams.d;
             coverParams.nbThreads = nbThreads;
             coverParams.zParams = zParams;
-            operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, NULL, &coverParams, optimize);
+            operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, blockSize, NULL, &coverParams, optimize);
         } else {
             ZDICT_legacy_params_t dictParams;
             memset(&dictParams, 0, sizeof(dictParams));
             dictParams.selectivityLevel = dictSelect;
             dictParams.zParams = zParams;
-            operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, &dictParams, NULL, 0);
+            operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, blockSize, &dictParams, NULL, 0);
         }
 #endif
         goto _end;