/*
 * Copyright (c) 2018-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* largeNbDicts
 * This is a benchmark test tool
 * dedicated to the specific case of dictionary decompression
 * using a very large number of dictionaries,
 * thus suffering latency from lots of cache misses.
 * It's created in a bid to investigate performance and find optimizations. */
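
/* Example invocation (the sample directory below is hypothetical) :
 *   largeNbDicts -3 -B4096 --nbDicts=10000 samples/*
 * trains a dictionary on the samples, compresses each 4096-byte block at level 3,
 * then benches decompression while cycling across 10000 dictionary copies. */
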
/*--- Dependencies ---*/

#include <stddef.h>   /* size_t */
#include <stdlib.h>   /* malloc, free, abort */
#include <stdio.h>    /* fprintf */
#include <string.h>   /* strcmp, strncmp, strlen */
#include <assert.h>   /* assert */

#include "util.h"
#include "benchfn.h"
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"
#include "zdict.h"

/*--- Constants --- */

#define KB  *(1<<10)
#define MB  *(1<<20)

#define BLOCKSIZE_DEFAULT 0   /* no slicing into blocks */
#define DICTSIZE  (4 KB)
#define CLEVEL_DEFAULT 3

#define BENCH_TIME_DEFAULT_S   6
#define RUN_TIME_DEFAULT_MS    1000
#define BENCH_TIME_DEFAULT_MS (BENCH_TIME_DEFAULT_S * RUN_TIME_DEFAULT_MS)

#define DISPLAY_LEVEL_DEFAULT 3

#define BENCH_SIZE_MAX (1200 MB)

/*--- Macros ---*/

#define CONTROL(c)   { if (!(c)) abort(); }
#undef MIN
#define MIN(a,b)     ((a) < (b) ? (a) : (b))


/*--- Display Macros ---*/

#define DISPLAY(...)          fprintf(stdout, __VA_ARGS__)
#define DISPLAYLEVEL(l, ...)  { if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); } }
static int g_displayLevel = DISPLAY_LEVEL_DEFAULT;   /* 0 : no display,  1 : errors,  2 : + result + interaction + warnings,  3 : + progression,  4 : + information */

/*--- buffer_t ---*/

typedef struct {
    void* ptr;
    size_t size;
    size_t capacity;
} buffer_t;

static const buffer_t kBuffNull = { NULL, 0, 0 };

/* @return : kBuffNull if any error */
static buffer_t createBuffer(size_t capacity)
{
    assert(capacity > 0);
    void* const ptr = malloc(capacity);
    if (ptr==NULL) return kBuffNull;

    buffer_t buffer;
    buffer.ptr = ptr;
    buffer.capacity = capacity;
    buffer.size = 0;
    return buffer;
}

static void freeBuffer(buffer_t buff)
{
    free(buff.ptr);
}
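
/* fillBuffer_fromHandle() :
 * reads up to buff->capacity bytes from f into buff->ptr,
 * and stores the number of bytes actually read into buff->size. */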
static void fillBuffer_fromHandle(buffer_t* buff, FILE* f)
{
    size_t const readSize = fread(buff->ptr, 1, buff->capacity, f);
    buff->size = readSize;
}


/* @return : kBuffNull if any error */
static buffer_t createBuffer_fromFile(const char* fileName)
{
    U64 const fileSize = UTIL_getFileSize(fileName);
    size_t const bufferSize = (size_t) fileSize;

    if (fileSize == UTIL_FILESIZE_UNKNOWN) return kBuffNull;
    assert((U64)bufferSize == fileSize);   /* check overflow */

    {   FILE* const f = fopen(fileName, "rb");
        if (f == NULL) return kBuffNull;

        buffer_t buff = createBuffer(bufferSize);
        CONTROL(buff.ptr != NULL);

        fillBuffer_fromHandle(&buff, f);
        CONTROL(buff.size == buff.capacity);

        fclose(f);   /* do nothing specific if fclose() fails */
        return buff;
    }
}


/* @return : kBuffNull if any error */
static buffer_t
createDictionaryBuffer(const char* dictionaryName,
                       const void* srcBuffer,
                       const size_t* srcBlockSizes, unsigned nbBlocks,
                       size_t requestedDictSize)
{
    if (dictionaryName) {
        DISPLAYLEVEL(3, "loading dictionary %s \n", dictionaryName);
        return createBuffer_fromFile(dictionaryName);   /* note : result might be kBuffNull */

    } else {

        DISPLAYLEVEL(3, "creating dictionary, of target size %u bytes \n",
                        (unsigned)requestedDictSize);
        void* const dictBuffer = malloc(requestedDictSize);
        CONTROL(dictBuffer != NULL);

        size_t const dictSize = ZDICT_trainFromBuffer(dictBuffer, requestedDictSize,
                                                      srcBuffer,
                                                      srcBlockSizes, nbBlocks);
        CONTROL(!ZSTD_isError(dictSize));

        buffer_t result;
        result.ptr = dictBuffer;
        result.capacity = requestedDictSize;
        result.size = dictSize;
        return result;
    }
}


/*! loadFiles() :
 *  Loads `buffer`, with content from files listed within `fileNamesTable`.
 *  Fills `buffer` entirely.
 * @return : 0 on success, !=0 on error */
static int loadFiles(void* buffer, size_t bufferSize,
                     size_t* fileSizes,
                     const char* const * fileNamesTable, unsigned nbFiles)
{
    size_t pos = 0, totalSize = 0;

    for (unsigned n=0; n<nbFiles; n++) {
        U64 fileSize = UTIL_getFileSize(fileNamesTable[n]);
        if (UTIL_isDirectory(fileNamesTable[n])) {
            fileSizes[n] = 0;
            continue;
        }
        if (fileSize == UTIL_FILESIZE_UNKNOWN) {
            fileSizes[n] = 0;
            continue;
        }

        FILE* const f = fopen(fileNamesTable[n], "rb");
        assert(f!=NULL);

        assert(pos <= bufferSize);
        assert(fileSize <= bufferSize - pos);

        {   size_t const readSize = fread(((char*)buffer)+pos, 1, (size_t)fileSize, f);
            assert(readSize == fileSize);
            pos += readSize;
        }
        fileSizes[n] = (size_t)fileSize;
        totalSize += (size_t)fileSize;
        fclose(f);
    }

    assert(totalSize == bufferSize);
    return 0;
}


/*--- slice_collection_t ---*/

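/* a slice_collection_t is a set of (pointer, capacity) views into memory
 * owned elsewhere (typically a buffer_t) :
 * freeing the collection releases the two tables, not the data they point to. */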
typedef struct {
    void** slicePtrs;
    size_t* capacities;
    size_t nbSlices;
} slice_collection_t;

static const slice_collection_t kNullCollection = { NULL, NULL, 0 };

static void freeSliceCollection(slice_collection_t collection)
{
    free(collection.slicePtrs);
    free(collection.capacities);
}

/* shrinkSizes() :
 * downsizes the slices within collection, according to `newSizes`.
 * every `newSizes` entry must be <= its corresponding collection capacity */
void shrinkSizes(slice_collection_t collection,
                 const size_t* newSizes)  /* presumed of same length as collection */
{
    size_t const nbSlices = collection.nbSlices;
    for (size_t blockNb = 0; blockNb < nbSlices; blockNb++) {
        assert(newSizes[blockNb] <= collection.capacities[blockNb]);
        collection.capacities[blockNb] = newSizes[blockNb];
    }
}


/* splitSlices() :
 * nbSlices : if == 0, nbSlices is automatically determined from srcSlices and blockSize.
 *            otherwise, creates exactly nbSlices slices,
 *            by either truncating input (when smaller)
 *            or repeating input from beginning */
static slice_collection_t
splitSlices(slice_collection_t srcSlices, size_t blockSize, size_t nbSlices)
{
    if (blockSize==0) blockSize = (size_t)(-1);   /* means "do not cut" */
    size_t nbSrcBlocks = 0;
    for (size_t ssnb=0; ssnb < srcSlices.nbSlices; ssnb++) {
        size_t pos = 0;
        while (pos <= srcSlices.capacities[ssnb]) {
            nbSrcBlocks++;
            pos += blockSize;
        }
    }

    if (nbSlices == 0) nbSlices = nbSrcBlocks;

    void** const sliceTable = (void**)malloc(nbSlices * sizeof(*sliceTable));
    size_t* const capacities = (size_t*)malloc(nbSlices * sizeof(*capacities));
    if (sliceTable == NULL || capacities == NULL) {
        free(sliceTable);
        free(capacities);
        return kNullCollection;
    }

    size_t ssnb = 0;
    for (size_t sliceNb=0; sliceNb < nbSlices; ) {
        ssnb = (ssnb + 1) % srcSlices.nbSlices;
        size_t pos = 0;
        char* const ptr = (char*)srcSlices.slicePtrs[ssnb];
        while (pos < srcSlices.capacities[ssnb] && sliceNb < nbSlices) {
            size_t const size = MIN(blockSize, srcSlices.capacities[ssnb] - pos);
            sliceTable[sliceNb] = ptr + pos;
            capacities[sliceNb] = size;
            sliceNb++;
            pos += blockSize;
        }
    }

    slice_collection_t result;
    result.nbSlices = nbSlices;
    result.slicePtrs = sliceTable;
    result.capacities = capacities;
    return result;
}

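
/* sliceCollection_totalCapacity() :
 * @return : sum of the capacities of all slices in the collection */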
static size_t sliceCollection_totalCapacity(slice_collection_t sc)
{
    size_t totalSize = 0;
    for (size_t n=0; n<sc.nbSlices; n++)
        totalSize += sc.capacities[n];
    return totalSize;
}


/* --- buffer collection --- */

typedef struct {
    buffer_t buffer;
    slice_collection_t slices;
} buffer_collection_t;


static void freeBufferCollection(buffer_collection_t bc)
{
    freeBuffer(bc.buffer);
    freeSliceCollection(bc.slices);
}

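
/* createBufferCollection_fromSliceCollectionSizes() :
 * allocates a new buffer large enough to hold all slices of sc,
 * and carves it into a fresh slice collection with the same capacities.
 * Note : slice contents are not copied. */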
static buffer_collection_t
createBufferCollection_fromSliceCollectionSizes(slice_collection_t sc)
{
    size_t const bufferSize = sliceCollection_totalCapacity(sc);

    buffer_t buffer = createBuffer(bufferSize);
    CONTROL(buffer.ptr != NULL);

    size_t const nbSlices = sc.nbSlices;
    void** const slices = (void**)malloc(nbSlices * sizeof(*slices));
    CONTROL(slices != NULL);

    size_t* const capacities = (size_t*)malloc(nbSlices * sizeof(*capacities));
    CONTROL(capacities != NULL);

    char* const ptr = (char*)buffer.ptr;
    size_t pos = 0;
    for (size_t n=0; n < nbSlices; n++) {
        capacities[n] = sc.capacities[n];
        slices[n] = ptr + pos;
        pos += capacities[n];
    }

    buffer_collection_t result;
    result.buffer = buffer;
    result.slices.nbSlices = nbSlices;
    result.slices.capacities = capacities;
    result.slices.slicePtrs = slices;
    return result;
}


/* createBufferCollection_fromFiles() :
 * loads all listed files into a single buffer, one slice per file.
 * note : any error aborts the program (assert) */
static buffer_collection_t
createBufferCollection_fromFiles(const char* const * fileNamesTable, unsigned nbFiles)
{
    U64 const totalSizeToLoad = UTIL_getTotalFileSize(fileNamesTable, nbFiles);
    assert(totalSizeToLoad != UTIL_FILESIZE_UNKNOWN);
    assert(totalSizeToLoad <= BENCH_SIZE_MAX);
    size_t const loadedSize = (size_t)totalSizeToLoad;
    assert(loadedSize > 0);
    void* const srcBuffer = malloc(loadedSize);
    assert(srcBuffer != NULL);

    assert(nbFiles > 0);
    size_t* const fileSizes = (size_t*)calloc(nbFiles, sizeof(*fileSizes));
    assert(fileSizes != NULL);

    /* Load input buffer */
    int const errorCode = loadFiles(srcBuffer, loadedSize,
                                    fileSizes,
                                    fileNamesTable, nbFiles);
    assert(errorCode == 0);

    void** sliceTable = (void**)malloc(nbFiles * sizeof(*sliceTable));
    assert(sliceTable != NULL);

    char* const ptr = (char*)srcBuffer;
    size_t pos = 0;
    unsigned fileNb = 0;
    for ( ; (pos < loadedSize) && (fileNb < nbFiles); fileNb++) {
        sliceTable[fileNb] = ptr + pos;
        pos += fileSizes[fileNb];
    }
    assert(pos == loadedSize);
    assert(fileNb == nbFiles);


    buffer_t buffer;
    buffer.ptr = srcBuffer;
    buffer.capacity = loadedSize;
    buffer.size = loadedSize;

    slice_collection_t slices;
    slices.slicePtrs = sliceTable;
    slices.capacities = fileSizes;
    slices.nbSlices = nbFiles;

    buffer_collection_t bc;
    bc.buffer = buffer;
    bc.slices = slices;
    return bc;
}


/*--- ddict_collection_t ---*/

typedef struct {
    ZSTD_DDict** ddicts;
    size_t nbDDict;
} ddict_collection_t;

static const ddict_collection_t kNullDDictCollection = { NULL, 0 };

static void freeDDictCollection(ddict_collection_t ddictc)
{
    for (size_t dictNb=0; dictNb < ddictc.nbDDict; dictNb++) {
        ZSTD_freeDDict(ddictc.ddicts[dictNb]);
    }
    free(ddictc.ddicts);
}

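
/* createDDictCollection() :
 * instantiates nbDDict separate DDicts from the same dictionary content,
 * so that each dictionary lives at its own address,
 * which is the access pattern this benchmark wants to stress. */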
/* returns .ddicts=NULL if operation fails */
static ddict_collection_t createDDictCollection(const void* dictBuffer, size_t dictSize, size_t nbDDict)
{
    ZSTD_DDict** const ddicts = malloc(nbDDict * sizeof(ZSTD_DDict*));
    assert(ddicts != NULL);
    if (ddicts==NULL) return kNullDDictCollection;
    for (size_t dictNb=0; dictNb < nbDDict; dictNb++) {
        ddicts[dictNb] = ZSTD_createDDict(dictBuffer, dictSize);
        assert(ddicts[dictNb] != NULL);
    }
    ddict_collection_t ddictc;
    ddictc.ddicts = ddicts;
    ddictc.nbDDict = nbDDict;
    return ddictc;
}

/* shuffle dictionary addresses :
 * ensures that scanning dictionaries in order
 * is not the same as scanning addresses in order */
void shuffleDictionaries(ddict_collection_t dicts)
{
    size_t const nbDicts = dicts.nbDDict;
    for (size_t r=0; r<nbDicts; r++) {
        size_t const d = rand() % nbDicts;
        ZSTD_DDict* tmpd = dicts.ddicts[d];
        dicts.ddicts[d] = dicts.ddicts[r];
        dicts.ddicts[r] = tmpd;
    }
    for (size_t r=0; r<nbDicts; r++) {
        size_t const d1 = rand() % nbDicts;
        size_t const d2 = rand() % nbDicts;
        ZSTD_DDict* tmpd = dicts.ddicts[d1];
        dicts.ddicts[d1] = dicts.ddicts[d2];
        dicts.ddicts[d2] = tmpd;
    }
}


/* --- Compression --- */

/* compressBlocks() :
 * @return : total compressed size of all blocks,
 *           or 0 if error.
 */
static size_t compressBlocks(size_t* cSizes,   /* optional (can be NULL). If present, must contain at least nbBlocks fields */
                             slice_collection_t dstBlockBuffers,
                             slice_collection_t srcBlockBuffers,
                             ZSTD_CDict* cdict, int cLevel)
{
    size_t const nbBlocks = srcBlockBuffers.nbSlices;
    assert(dstBlockBuffers.nbSlices == srcBlockBuffers.nbSlices);

    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    assert(cctx != NULL);

    size_t totalCSize = 0;
    for (size_t blockNb=0; blockNb < nbBlocks; blockNb++) {
        size_t cBlockSize;
        if (cdict == NULL) {
            cBlockSize = ZSTD_compressCCtx(cctx,
                            dstBlockBuffers.slicePtrs[blockNb], dstBlockBuffers.capacities[blockNb],
                            srcBlockBuffers.slicePtrs[blockNb], srcBlockBuffers.capacities[blockNb],
                            cLevel);
        } else {
            cBlockSize = ZSTD_compress_usingCDict(cctx,
                            dstBlockBuffers.slicePtrs[blockNb], dstBlockBuffers.capacities[blockNb],
                            srcBlockBuffers.slicePtrs[blockNb], srcBlockBuffers.capacities[blockNb],
                            cdict);
        }
        CONTROL(!ZSTD_isError(cBlockSize));
        if (cSizes) cSizes[blockNb] = cBlockSize;
        totalCSize += cBlockSize;
    }
    ZSTD_freeCCtx(cctx);   /* release the compression context before returning */
    return totalCSize;
}


/* --- Benchmark --- */

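/* decompressInstructions :
 * payload handed to the benched function : one shared DCtx,
 * plus the dictionary collection and a rotating index
 * selecting which DDict the next call will use. */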
typedef struct {
    ZSTD_DCtx* dctx;
    size_t nbDicts;
    size_t dictNb;
    ddict_collection_t dictionaries;
} decompressInstructions;

decompressInstructions createDecompressInstructions(ddict_collection_t dictionaries)
{
    decompressInstructions di;
    di.dctx = ZSTD_createDCtx();
    assert(di.dctx != NULL);
    di.nbDicts = dictionaries.nbDDict;
    di.dictNb = 0;
    di.dictionaries = dictionaries;
    return di;
}

void freeDecompressInstructions(decompressInstructions di)
{
    ZSTD_freeDCtx(di.dctx);
}

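/* decompress() :
 * decompresses one block with the currently selected DDict,
 * then advances the selection to the next dictionary (wrapping around),
 * so that successive calls touch different dictionary addresses. */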
/* benched function */
size_t decompress(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* payload)
{
    decompressInstructions* const di = (decompressInstructions*) payload;

    size_t const result = ZSTD_decompress_usingDDict(di->dctx,
                                        dst, dstCapacity,
                                        src, srcSize,
                                        di->dictionaries.ddicts[di->dictNb]);

    di->dictNb = di->dictNb + 1;
    if (di->dictNb >= di->nbDicts) di->dictNb = 0;

    return result;
}

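
/* benchMem() :
 * repeatedly decompresses srcBlocks into dstBlocks, rotating across
 * the dictionary collection, for nbRounds rounds of RUN_TIME_DEFAULT_MS each,
 * and reports the best measured decompression speed.
 * @return : 0 on success */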
static int benchMem(slice_collection_t dstBlocks,
                    slice_collection_t srcBlocks,
                    ddict_collection_t dictionaries,
                    int nbRounds)
{
    assert(dstBlocks.nbSlices == srcBlocks.nbSlices);

    unsigned const ms_per_round = RUN_TIME_DEFAULT_MS;
    unsigned const total_time_ms = nbRounds * ms_per_round;

    double bestSpeed = 0.;

    BMK_timedFnState_t* const benchState =
            BMK_createTimedFnState(total_time_ms, ms_per_round);
    decompressInstructions di = createDecompressInstructions(dictionaries);
    BMK_benchParams_t const bp = {
        .benchFn = decompress,
        .benchPayload = &di,
        .initFn = NULL,
        .initPayload = NULL,
        .errorFn = ZSTD_isError,
        .blockCount = dstBlocks.nbSlices,
        .srcBuffers = (const void* const*) srcBlocks.slicePtrs,
        .srcSizes = srcBlocks.capacities,
        .dstBuffers = dstBlocks.slicePtrs,
        .dstCapacities = dstBlocks.capacities,
        .blockResults = NULL
    };

    for (;;) {
        BMK_runOutcome_t const outcome = BMK_benchTimedFn(benchState, bp);
        CONTROL(BMK_isSuccessful_runOutcome(outcome));

        BMK_runTime_t const result = BMK_extract_runTime(outcome);
        U64 const dTime_ns = result.nanoSecPerRun;
        double const dTime_sec = (double)dTime_ns / 1000000000;
        size_t const srcSize = result.sumOfReturn;
        double const dSpeed_MBps = (double)srcSize / dTime_sec / (1 MB);
        if (dSpeed_MBps > bestSpeed) bestSpeed = dSpeed_MBps;
        DISPLAY("Decompression Speed : %.1f MB/s  \r", bestSpeed);
        fflush(stdout);
        if (BMK_isCompleted_TimedFn(benchState)) break;
    }
    DISPLAY("\n");

    freeDecompressInstructions(di);
    BMK_freeTimedFnState(benchState);

    return 0;   /* success */
}


/*! bench() :
 *  fileNameTable : set of files to load for benchmarking purpose
 *  dictionary : optional (can be NULL), file to load as dictionary,
 *               if none provided : one is trained on the fly from the source files.
 * @return : 0 if success, 1+ otherwise */
int bench(const char** fileNameTable, unsigned nbFiles,
          const char* dictionary,
          size_t blockSize, int clevel,
          unsigned nbDictMax, unsigned nbBlocks,
          int nbRounds)
{
    int result = 0;

    DISPLAYLEVEL(3, "loading %u files... \n", nbFiles);
    buffer_collection_t const srcs = createBufferCollection_fromFiles(fileNameTable, nbFiles);
    CONTROL(srcs.buffer.ptr != NULL);
    buffer_t srcBuffer = srcs.buffer;
    size_t const srcSize = srcBuffer.size;
    DISPLAYLEVEL(3, "created src buffer of size %.1f MB \n",
                    (double)srcSize / (1 MB));

    slice_collection_t const srcSlices = splitSlices(srcs.slices, blockSize, nbBlocks);
    nbBlocks = (unsigned)(srcSlices.nbSlices);
    DISPLAYLEVEL(3, "split input into %u blocks ", nbBlocks);
    if (blockSize)
        DISPLAYLEVEL(3, "of max size %u bytes ", (unsigned)blockSize);
    DISPLAYLEVEL(3, "\n");
    size_t const totalSrcSlicesSize = sliceCollection_totalCapacity(srcSlices);


    size_t* const dstCapacities = malloc(nbBlocks * sizeof(*dstCapacities));
    CONTROL(dstCapacities != NULL);
    size_t dstBufferCapacity = 0;
    for (size_t bnb=0; bnb<nbBlocks; bnb++) {
        dstCapacities[bnb] = ZSTD_compressBound(srcSlices.capacities[bnb]);
        dstBufferCapacity += dstCapacities[bnb];
    }

    buffer_t dstBuffer = createBuffer(dstBufferCapacity);
    CONTROL(dstBuffer.ptr != NULL);

    void** const sliceTable = malloc(nbBlocks * sizeof(*sliceTable));
    CONTROL(sliceTable != NULL);

    {   char* const ptr = dstBuffer.ptr;
        size_t pos = 0;
        for (size_t snb=0; snb < nbBlocks; snb++) {
            sliceTable[snb] = ptr + pos;
            pos += dstCapacities[snb];
    }   }

    slice_collection_t dstSlices;
    dstSlices.capacities = dstCapacities;
    dstSlices.slicePtrs = sliceTable;
    dstSlices.nbSlices = nbBlocks;

    /* dictionary determination */
    buffer_t const dictBuffer = createDictionaryBuffer(dictionary,
                                srcs.buffer.ptr,
                                srcs.slices.capacities, srcs.slices.nbSlices,
                                DICTSIZE);
    CONTROL(dictBuffer.ptr != NULL);

    ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuffer.ptr, dictBuffer.size, clevel);
    CONTROL(cdict != NULL);

    size_t const cTotalSizeNoDict = compressBlocks(NULL, dstSlices, srcSlices, NULL, clevel);
    CONTROL(cTotalSizeNoDict != 0);
    DISPLAYLEVEL(3, "compressing at level %u without dictionary : Ratio=%.2f (%u bytes) \n",
                    clevel,
                    (double)totalSrcSlicesSize / cTotalSizeNoDict, (unsigned)cTotalSizeNoDict);

    size_t* const cSizes = malloc(nbBlocks * sizeof(size_t));
    CONTROL(cSizes != NULL);

    size_t const cTotalSize = compressBlocks(cSizes, dstSlices, srcSlices, cdict, clevel);
    CONTROL(cTotalSize != 0);
    DISPLAYLEVEL(3, "compressed using a %u bytes dictionary : Ratio=%.2f (%u bytes) \n",
                    (unsigned)dictBuffer.size,
                    (double)totalSrcSlicesSize / cTotalSize, (unsigned)cTotalSize);

    /* now dstSlices contain the real compressed size of each block, instead of the maximum capacity */
    shrinkSizes(dstSlices, cSizes);

    size_t const dictMem = ZSTD_estimateDDictSize(dictBuffer.size, ZSTD_dlm_byCopy);
    unsigned const nbDicts = nbDictMax ? nbDictMax : nbBlocks;
    size_t const allDictMem = dictMem * nbDicts;
    DISPLAYLEVEL(3, "generating %u dictionaries, using %.1f MB of memory \n",
                    nbDicts, (double)allDictMem / (1 MB));

    ddict_collection_t const dictionaries = createDDictCollection(dictBuffer.ptr, dictBuffer.size, nbDicts);
    CONTROL(dictionaries.ddicts != NULL);

    shuffleDictionaries(dictionaries);

    buffer_collection_t resultCollection = createBufferCollection_fromSliceCollectionSizes(srcSlices);
    CONTROL(resultCollection.buffer.ptr != NULL);

    result = benchMem(resultCollection.slices, dstSlices, dictionaries, nbRounds);

    /* free all heap objects in reverse order */
    freeBufferCollection(resultCollection);
    freeDDictCollection(dictionaries);
    free(cSizes);
    ZSTD_freeCDict(cdict);
    freeBuffer(dictBuffer);
    freeSliceCollection(dstSlices);
    freeBuffer(dstBuffer);
    freeSliceCollection(srcSlices);
    freeBufferCollection(srcs);

    return result;
}


/* --- Command Line --- */

/*! readU32FromChar() :
 * @return : unsigned integer value read from input in `char` format.
 *  allows and interprets K, KB, KiB, M, MB and MiB suffix.
 *  Will also modify `*stringPtr`, advancing it to position where it stopped reading.
 *  Note : overflow of the digit sequence triggers an assert() failure */
static unsigned readU32FromChar(const char** stringPtr)
{
    unsigned result = 0;
    while ((**stringPtr >='0') && (**stringPtr <='9')) {
        unsigned const max = (((unsigned)(-1)) / 10) - 1;
        assert(result <= max);   /* check overflow */
        result *= 10, result += **stringPtr - '0', (*stringPtr)++ ;
    }
    if ((**stringPtr=='K') || (**stringPtr=='M')) {
        unsigned const maxK = ((unsigned)(-1)) >> 10;
        assert(result <= maxK);   /* check overflow */
        result <<= 10;
        if (**stringPtr=='M') {
            assert(result <= maxK);   /* check overflow */
            result <<= 10;
        }
        (*stringPtr)++;   /* skip `K` or `M` */
        if (**stringPtr=='i') (*stringPtr)++;
        if (**stringPtr=='B') (*stringPtr)++;
    }
    return result;
}

/** longCommandWArg() :
 *  check if *stringPtr starts with longCommand.
 *  If yes, @return 1 and advances *stringPtr to the position which immediately follows longCommand.
 * @return 0 and doesn't modify *stringPtr otherwise.
 */
static unsigned longCommandWArg(const char** stringPtr, const char* longCommand)
{
    size_t const comSize = strlen(longCommand);
    int const result = !strncmp(*stringPtr, longCommand, comSize);
    if (result) *stringPtr += comSize;
    return result;
}


int usage(const char* exeName)
{
    DISPLAY (" \n");
    DISPLAY (" %s [Options] filename(s) \n", exeName);
    DISPLAY (" \n");
    DISPLAY ("Options : \n");
    DISPLAY ("-r           : recursively load all files in subdirectories (default: off) \n");
    DISPLAY ("-B#          : split input into blocks of size # (default: no split) \n");
    DISPLAY ("-#           : use compression level # (default: %u) \n", CLEVEL_DEFAULT);
    DISPLAY ("-D #         : use # as a dictionary (default: create one) \n");
    DISPLAY ("-i#          : nb benchmark rounds (default: %u) \n", BENCH_TIME_DEFAULT_S);
    DISPLAY ("--nbBlocks=# : use # blocks for bench (default: one per file) \n");
    DISPLAY ("--nbDicts=#  : create # dictionaries for bench (default: one per block) \n");
    DISPLAY ("-h           : help (this text) \n");
    return 0;
}

int bad_usage(const char* exeName)
{
    DISPLAY (" bad usage : \n");
    usage(exeName);
    return 1;
}


int main (int argc, const char** argv)
{
    int recursiveMode = 0;
    int nbRounds = BENCH_TIME_DEFAULT_S;
    const char* const exeName = argv[0];

    if (argc < 2) return bad_usage(exeName);

    const char** nameTable = (const char**)malloc(argc * sizeof(const char*));
    assert(nameTable != NULL);
    unsigned nameIdx = 0;

    const char* dictionary = NULL;
    int cLevel = CLEVEL_DEFAULT;
    size_t blockSize = BLOCKSIZE_DEFAULT;
    unsigned nbDicts = 0;   /* determine nbDicts automatically: 1 dictionary per block */
    unsigned nbBlocks = 0;  /* determine nbBlocks automatically, from source and blockSize */

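    /* command line parsing :
     * options may appear in any order;
     * -D consumes the next argument as the dictionary file name */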
    for (int argNb = 1; argNb < argc ; argNb++) {
        const char* argument = argv[argNb];
        if (!strcmp(argument, "-h")) { free(nameTable); return usage(exeName); }
        if (!strcmp(argument, "-r")) { recursiveMode = 1; continue; }
        if (!strcmp(argument, "-D")) { argNb++; assert(argNb < argc); dictionary = argv[argNb]; continue; }
        if (longCommandWArg(&argument, "-i")) { nbRounds = readU32FromChar(&argument); continue; }
        if (longCommandWArg(&argument, "--dictionary=")) { dictionary = argument; continue; }
        if (longCommandWArg(&argument, "-B")) { blockSize = readU32FromChar(&argument); continue; }
        if (longCommandWArg(&argument, "--blockSize=")) { blockSize = readU32FromChar(&argument); continue; }
        if (longCommandWArg(&argument, "--nbDicts=")) { nbDicts = readU32FromChar(&argument); continue; }
        if (longCommandWArg(&argument, "--nbBlocks=")) { nbBlocks = readU32FromChar(&argument); continue; }
        if (longCommandWArg(&argument, "--clevel=")) { cLevel = readU32FromChar(&argument); continue; }
        if (longCommandWArg(&argument, "-")) { cLevel = readU32FromChar(&argument); continue; }
        /* anything that's not a command is a filename */
        nameTable[nameIdx++] = argument;
    }

    const char** filenameTable = nameTable;
    unsigned nbFiles = nameIdx;
    char* buffer_containing_filenames = NULL;

    if (recursiveMode) {
#ifndef UTIL_HAS_CREATEFILELIST
        assert(0);   /* missing capability, do not run */
#endif
        filenameTable = UTIL_createFileList(nameTable, nameIdx, &buffer_containing_filenames, &nbFiles, 1 /* follow_links */);
    }

    int result = bench(filenameTable, nbFiles, dictionary, blockSize, cLevel, nbDicts, nbBlocks, nbRounds);

    free(buffer_containing_filenames);
    free(nameTable);

    return result;
}