diff --git a/contrib/adaptive-compression/Makefile b/contrib/adaptive-compression/Makefile new file mode 100644 index 00000000..9bc19ee1 --- /dev/null +++ b/contrib/adaptive-compression/Makefile @@ -0,0 +1,47 @@ + +ZSTDDIR = ../../lib +PRGDIR = ../../programs +ZSTDCOMMON_FILES := $(ZSTDDIR)/common/*.c +ZSTDCOMP_FILES := $(ZSTDDIR)/compress/*.c +ZSTDDECOMP_FILES := $(ZSTDDIR)/decompress/*.c +ZSTD_FILES := $(ZSTDDECOMP_FILES) $(ZSTDCOMMON_FILES) $(ZSTDCOMP_FILES) + +DEBUGFLAGS= -g -DZSTD_DEBUG=1 +CPPFLAGS += -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(ZSTDDIR)/compress \ + -I$(ZSTDDIR)/dictBuilder -I$(ZSTDDIR)/deprecated -I$(PRGDIR) +CFLAGS ?= -O3 +CFLAGS += -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \ + -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \ + -Wstrict-prototypes -Wundef -Wformat-security \ + -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \ + -Wredundant-decls +CFLAGS += $(DEBUGFLAGS) +CFLAGS += $(MOREFLAGS) +FLAGS = $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) + +all: adapt datagen + +adapt: $(ZSTD_FILES) adapt.c + $(CC) $(FLAGS) $^ -o $@ + +adapt-debug: $(ZSTD_FILES) adapt.c + $(CC) $(FLAGS) -DDEBUG_MODE=2 $^ -o adapt + +datagen : $(PRGDIR)/datagen.c datagencli.c + $(CC) $(FLAGS) $^ -o $@$(EXT) + +test-adapt-correctness: datagen adapt + @./test-correctness.sh + @echo "test correctness complete" + +test-adapt-performance: datagen adapt + @./test-performance.sh + @echo "test performance complete" + +clean: + @$(RM) -f adapt datagen + @$(RM) -rf *.dSYM + @$(RM) -f tmp* + @$(RM) -f tests/*.zst + @$(RM) -f tests/tmp* + @echo "finished cleaning" diff --git a/contrib/adaptive-compression/README.md b/contrib/adaptive-compression/README.md new file mode 100644 index 00000000..fadb071f --- /dev/null +++ b/contrib/adaptive-compression/README.md @@ -0,0 +1,25 @@ +###Summary + +`adapt` is a new compression tool targeted at optimizing performance across network connections. The tool aims at sensing network speeds and adapting compression level based on network or pipe speeds. +In situations where the compression level does not appropriately match the network/pipe speed, the compression may be bottlenecking the entire pipeline or the files may not be compressed as much as they potentially could be, therefore losing efficiency. It also becomes quite impractical to manually measure and set compression level, therefore the tool does it for you. + +###Using `adapt` + +In order to build and use the tool, you can simply run `make adapt` in the `adaptive-compression` directory under `contrib`. This will generate an executable available for use. + +###Options +`-oFILE` : write output to `FILE` + +`-i#` : provide initial compression level + +`-h` : display help/information + +`-f` : force the compression level to stay constant + +`-c` : force write to `stdout` + +`-p` : hide progress bar + +`-q` : quiet mode -- do not show progress bar or other information + +###Benchmarking / Test results diff --git a/contrib/adaptive-compression/adapt.c b/contrib/adaptive-compression/adapt.c new file mode 100644 index 00000000..5cf3b970 --- /dev/null +++ b/contrib/adaptive-compression/adapt.c @@ -0,0 +1,1068 @@ +/** + * Copyright (c) 2017-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. 
+ */ + +#include /* fprintf */ +#include /* malloc, free */ +#include /* pthread functions */ +#include /* memset */ +#include "zstd_internal.h" +#include "util.h" + +#define DISPLAY(...) fprintf(stderr, __VA_ARGS__) +#define PRINT(...) fprintf(stdout, __VA_ARGS__) +#define DEBUG(l, ...) { if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); } } +#define FILE_CHUNK_SIZE 4 << 20 +#define MAX_NUM_JOBS 2 +#define stdinmark "/*stdin*\\" +#define stdoutmark "/*stdout*\\" +#define MAX_PATH 256 +#define DEFAULT_DISPLAY_LEVEL 1 +#define DEFAULT_COMPRESSION_LEVEL 6 +#define MAX_COMPRESSION_LEVEL_CHANGE 2 +#define CONVERGENCE_LOWER_BOUND 5 +#define CLEVEL_DECREASE_COOLDOWN 5 +#define CHANGE_BY_TWO_THRESHOLD 0.1 +#define CHANGE_BY_ONE_THRESHOLD 0.65 + +#ifndef DEBUG_MODE +static int g_displayLevel = DEFAULT_DISPLAY_LEVEL; +#else +static int g_displayLevel = DEBUG_MODE; +#endif + +static unsigned g_compressionLevel = DEFAULT_COMPRESSION_LEVEL; +static UTIL_time_t g_startTime; +static size_t g_streamedSize = 0; +static unsigned g_useProgressBar = 1; +static UTIL_freq_t g_ticksPerSecond; +static unsigned g_forceCompressionLevel = 0; + +typedef struct { + void* start; + size_t size; + size_t capacity; +} buffer_t; + +typedef struct { + size_t filled; + buffer_t buffer; +} inBuff_t; + +typedef struct { + buffer_t src; + buffer_t dst; + unsigned compressionLevel; + unsigned jobID; + unsigned lastJobPlusOne; + size_t compressedSize; + size_t dictSize; +} jobDescription; + +typedef struct { + pthread_mutex_t pMutex; + int noError; +} mutex_t; + +typedef struct { + pthread_cond_t pCond; + int noError; +} cond_t; + +typedef struct { + unsigned compressionLevel; + unsigned numActiveThreads; + unsigned numJobs; + unsigned nextJobID; + unsigned threadError; + + /* + * JobIDs for the next jobs to be created, compressed, and written + */ + unsigned jobReadyID; + unsigned jobCompressedID; + unsigned jobWriteID; + unsigned allJobsCompleted; + + /* + * counter for how many jobs in a row the compression level has not changed + * if the counter becomes >= CONVERGENCE_LOWER_BOUND, the next time the + * compression level tries to change (by non-zero amount) resets the counter + * to 1 and does not apply the change + */ + unsigned convergenceCounter; + + /* + * cooldown counter in order to prevent rapid successive decreases in compression level + * whenever compression level is decreased, cooldown is set to CLEVEL_DECREASE_COOLDOWN + * whenever adaptCompressionLevel() is called and cooldown != 0, it is decremented + * as long as cooldown != 0, the compression level cannot be decreased + */ + unsigned cooldown; + + /* + * XWaitYCompletion + * Range from 0.0 to 1.0 + * if the value is not 1.0, then this implies that thread X waited on thread Y to finish + * and thread Y was XWaitYCompletion finished at the time of the wait (i.e. compressWaitWriteCompletion=0.5 + * implies that the compression thread waited on the write thread and it was only 50% finished writing a job) + */ + double createWaitCompressionCompletion; + double compressWaitCreateCompletion; + double compressWaitWriteCompletion; + double writeWaitCompressionCompletion; + + /* + * Completion values + * Range from 0.0 to 1.0 + * Jobs are divided into mini-chunks in order to measure completion + * these values are updated each time a thread finishes its operation on the + * mini-chunk (i.e. finishes writing out, compressing, etc. this mini-chunk). 
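+     * They are reset to 0 when a stage starts a new job and climb back toward 1.0
+     * as successive mini-chunks of that job are processed.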
+ */ + double compressionCompletion; + double writeCompletion; + double createCompletion; + + mutex_t jobCompressed_mutex; + cond_t jobCompressed_cond; + mutex_t jobReady_mutex; + cond_t jobReady_cond; + mutex_t allJobsCompleted_mutex; + cond_t allJobsCompleted_cond; + mutex_t jobWrite_mutex; + cond_t jobWrite_cond; + mutex_t compressionCompletion_mutex; + mutex_t createCompletion_mutex; + mutex_t writeCompletion_mutex; + size_t lastDictSize; + inBuff_t input; + jobDescription* jobs; + ZSTD_CCtx* cctx; +} adaptCCtx; + +typedef struct { + adaptCCtx* ctx; + FILE* dstFile; +} outputThreadArg; + +typedef struct { + FILE* srcFile; + adaptCCtx* ctx; + outputThreadArg* otArg; +} fcResources; + +static void freeCompressionJobs(adaptCCtx* ctx) +{ + unsigned u; + for (u=0; unumJobs; u++) { + jobDescription job = ctx->jobs[u]; + free(job.dst.start); + free(job.src.start); + } +} + +static int destroyMutex(mutex_t* mutex) +{ + if (mutex->noError) { + int const ret = pthread_mutex_destroy(&mutex->pMutex); + return ret; + } + return 0; +} + +static int destroyCond(cond_t* cond) +{ + if (cond->noError) { + int const ret = pthread_cond_destroy(&cond->pCond); + return ret; + } + return 0; +} + +static int freeCCtx(adaptCCtx* ctx) +{ + if (!ctx) return 0; + { + int error = 0; + error |= destroyMutex(&ctx->jobCompressed_mutex); + error |= destroyCond(&ctx->jobCompressed_cond); + error |= destroyMutex(&ctx->jobReady_mutex); + error |= destroyCond(&ctx->jobReady_cond); + error |= destroyMutex(&ctx->allJobsCompleted_mutex); + error |= destroyCond(&ctx->allJobsCompleted_cond); + error |= destroyMutex(&ctx->jobWrite_mutex); + error |= destroyCond(&ctx->jobWrite_cond); + error |= destroyMutex(&ctx->compressionCompletion_mutex); + error |= destroyMutex(&ctx->createCompletion_mutex); + error |= destroyMutex(&ctx->writeCompletion_mutex); + error |= ZSTD_isError(ZSTD_freeCCtx(ctx->cctx)); + free(ctx->input.buffer.start); + if (ctx->jobs){ + freeCompressionJobs(ctx); + free(ctx->jobs); + } + free(ctx); + return error; + } +} + +static int initMutex(mutex_t* mutex) +{ + int const ret = pthread_mutex_init(&mutex->pMutex, NULL); + mutex->noError = !ret; + return ret; +} + +static int initCond(cond_t* cond) +{ + int const ret = pthread_cond_init(&cond->pCond, NULL); + cond->noError = !ret; + return ret; +} + +static int initCCtx(adaptCCtx* ctx, unsigned numJobs) +{ + ctx->compressionLevel = g_compressionLevel; + { + int pthreadError = 0; + pthreadError |= initMutex(&ctx->jobCompressed_mutex); + pthreadError |= initCond(&ctx->jobCompressed_cond); + pthreadError |= initMutex(&ctx->jobReady_mutex); + pthreadError |= initCond(&ctx->jobReady_cond); + pthreadError |= initMutex(&ctx->allJobsCompleted_mutex); + pthreadError |= initCond(&ctx->allJobsCompleted_cond); + pthreadError |= initMutex(&ctx->jobWrite_mutex); + pthreadError |= initCond(&ctx->jobWrite_cond); + pthreadError |= initMutex(&ctx->compressionCompletion_mutex); + pthreadError |= initMutex(&ctx->createCompletion_mutex); + pthreadError |= initMutex(&ctx->writeCompletion_mutex); + if (pthreadError) return pthreadError; + } + ctx->numJobs = numJobs; + ctx->jobReadyID = 0; + ctx->jobCompressedID = 0; + ctx->jobWriteID = 0; + ctx->lastDictSize = 0; + + + ctx->createWaitCompressionCompletion = 1; + ctx->compressWaitCreateCompletion = 1; + ctx->compressWaitWriteCompletion = 1; + ctx->writeWaitCompressionCompletion = 1; + ctx->createCompletion = 1; + ctx->writeCompletion = 1; + ctx->compressionCompletion = 1; + ctx->convergenceCounter = 0; + ctx->cooldown = 0; + + ctx->jobs 
= calloc(1, numJobs*sizeof(jobDescription)); + + if (!ctx->jobs) { + DISPLAY("Error: could not allocate space for jobs during context creation\n"); + return 1; + } + + /* initializing jobs */ + { + unsigned jobNum; + for (jobNum=0; jobNumjobs[jobNum]; + job->src.start = malloc(2 * FILE_CHUNK_SIZE); + job->dst.start = malloc(ZSTD_compressBound(FILE_CHUNK_SIZE)); + job->lastJobPlusOne = 0; + if (!job->src.start || !job->dst.start) { + DISPLAY("Could not allocate buffers for jobs\n"); + return 1; + } + job->src.capacity = FILE_CHUNK_SIZE; + job->dst.capacity = ZSTD_compressBound(FILE_CHUNK_SIZE); + } + } + + ctx->nextJobID = 0; + ctx->threadError = 0; + ctx->allJobsCompleted = 0; + + ctx->cctx = ZSTD_createCCtx(); + if (!ctx->cctx) { + DISPLAY("Error: could not allocate ZSTD_CCtx\n"); + return 1; + } + + ctx->input.filled = 0; + ctx->input.buffer.capacity = 2 * FILE_CHUNK_SIZE; + + ctx->input.buffer.start = malloc(ctx->input.buffer.capacity); + if (!ctx->input.buffer.start) { + DISPLAY("Error: could not allocate input buffer\n"); + return 1; + } + return 0; +} + +static adaptCCtx* createCCtx(unsigned numJobs) +{ + + adaptCCtx* const ctx = calloc(1, sizeof(adaptCCtx)); + if (ctx == NULL) { + DISPLAY("Error: could not allocate space for context\n"); + return NULL; + } + { + int const error = initCCtx(ctx, numJobs); + if (error) { + freeCCtx(ctx); + return NULL; + } + return ctx; + } +} + +static void signalErrorToThreads(adaptCCtx* ctx) +{ + ctx->threadError = 1; + pthread_mutex_lock(&ctx->jobReady_mutex.pMutex); + pthread_cond_signal(&ctx->jobReady_cond.pCond); + pthread_mutex_unlock(&ctx->jobReady_mutex.pMutex); + + pthread_mutex_lock(&ctx->jobCompressed_mutex.pMutex); + pthread_cond_signal(&ctx->jobCompressed_cond.pCond); + pthread_mutex_unlock(&ctx->jobReady_mutex.pMutex); + + pthread_mutex_lock(&ctx->jobWrite_mutex.pMutex); + pthread_cond_signal(&ctx->jobWrite_cond.pCond); + pthread_mutex_unlock(&ctx->jobWrite_mutex.pMutex); + + pthread_mutex_lock(&ctx->allJobsCompleted_mutex.pMutex); + pthread_cond_signal(&ctx->allJobsCompleted_cond.pCond); + pthread_mutex_unlock(&ctx->allJobsCompleted_mutex.pMutex); +} + +static void waitUntilAllJobsCompleted(adaptCCtx* ctx) +{ + if (!ctx) return; + pthread_mutex_lock(&ctx->allJobsCompleted_mutex.pMutex); + while (ctx->allJobsCompleted == 0 && !ctx->threadError) { + pthread_cond_wait(&ctx->allJobsCompleted_cond.pCond, &ctx->allJobsCompleted_mutex.pMutex); + } + pthread_mutex_unlock(&ctx->allJobsCompleted_mutex.pMutex); +} + +/* map completion percentages to values for changing compression level */ +static unsigned convertCompletionToChange(double completion) +{ + if (completion < CHANGE_BY_TWO_THRESHOLD) { + return 2; + } + else if (completion < CHANGE_BY_ONE_THRESHOLD) { + return 1; + } + else { + return 0; + } +} + +/* + * Compression level is changed depending on which part of the compression process is lagging + * Currently, three theads exist for job creation, compression, and file writing respectively. 
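+ * The magnitude of each adjustment is derived from how complete the lagging stage was when the wait began:
+ * a completion below CHANGE_BY_TWO_THRESHOLD maps to a 2-level change, one below CHANGE_BY_ONE_THRESHOLD to a
+ * 1-level change, and anything above that to no change (see convertCompletionToChange() above).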
+ * adaptCompressionLevel() increments or decrements compression level based on which of the threads is lagging + * job creation or file writing lag => increased compression level + * compression thread lag => decreased compression level + * detecting which thread is lagging is done by keeping track of how many calls each thread makes to pthread_cond_wait + */ +static void adaptCompressionLevel(adaptCCtx* ctx) +{ + double createWaitCompressionCompletion; + double compressWaitCreateCompletion; + double compressWaitWriteCompletion; + double writeWaitCompressionCompletion; + double const threshold = 0.00001; + unsigned const prevCompressionLevel = ctx->compressionLevel; + + + if (g_forceCompressionLevel) { + ctx->compressionLevel = g_compressionLevel; + return; + } + + + DEBUG(2, "adapting compression level %u\n", ctx->compressionLevel); + + /* read and reset completion measurements */ + pthread_mutex_lock(&ctx->compressionCompletion_mutex.pMutex); + DEBUG(2, "createWaitCompressionCompletion %f\n", ctx->createWaitCompressionCompletion); + DEBUG(2, "writeWaitCompressionCompletion %f\n", ctx->writeWaitCompressionCompletion); + createWaitCompressionCompletion = ctx->createWaitCompressionCompletion; + writeWaitCompressionCompletion = ctx->writeWaitCompressionCompletion; + pthread_mutex_unlock(&ctx->compressionCompletion_mutex.pMutex); + + pthread_mutex_lock(&ctx->writeCompletion_mutex.pMutex); + DEBUG(2, "compressWaitWriteCompletion %f\n", ctx->compressWaitWriteCompletion); + compressWaitWriteCompletion = ctx->compressWaitWriteCompletion; + pthread_mutex_unlock(&ctx->writeCompletion_mutex.pMutex); + + pthread_mutex_lock(&ctx->createCompletion_mutex.pMutex); + DEBUG(2, "compressWaitCreateCompletion %f\n", ctx->compressWaitCreateCompletion); + compressWaitCreateCompletion = ctx->compressWaitCreateCompletion; + pthread_mutex_unlock(&ctx->createCompletion_mutex.pMutex); + DEBUG(2, "convergence counter: %u\n", ctx->convergenceCounter); + + /* adaptation logic */ + if (ctx->cooldown) ctx->cooldown--; + + if ((1-createWaitCompressionCompletion > threshold || 1-writeWaitCompressionCompletion > threshold) && ctx->cooldown == 0) { + /* create or write waiting on compression */ + /* use whichever one waited less because it was slower */ + double const completion = MAX(createWaitCompressionCompletion, writeWaitCompressionCompletion); + unsigned const change = convertCompletionToChange(completion); + unsigned const boundChange = MIN(change, ctx->compressionLevel - 1); + if (ctx->convergenceCounter >= CONVERGENCE_LOWER_BOUND && boundChange != 0) { + /* reset convergence counter, might have been a spike */ + ctx->convergenceCounter = 0; + DEBUG(2, "convergence counter reset, no change applied\n"); + } + else if (boundChange != 0) { + ctx->compressionLevel -= boundChange; + ctx->cooldown = CLEVEL_DECREASE_COOLDOWN; + ctx->convergenceCounter = 1; + + DEBUG(2, "create or write threads waiting on compression, tried to decrease compression level by %u\n\n", boundChange); + } + } + else if (1-compressWaitWriteCompletion > threshold || 1-compressWaitCreateCompletion > threshold) { + /* compress waiting on write */ + double const completion = MIN(compressWaitWriteCompletion, compressWaitCreateCompletion); + unsigned const change = convertCompletionToChange(completion); + unsigned const boundChange = MIN(change, ZSTD_maxCLevel() - ctx->compressionLevel); + if (ctx->convergenceCounter >= CONVERGENCE_LOWER_BOUND && boundChange != 0) { + /* reset convergence counter, might have been a spike */ + ctx->convergenceCounter = 
0; + DEBUG(2, "convergence counter reset, no change applied\n"); + } + else if (boundChange != 0) { + ctx->compressionLevel += boundChange; + ctx->cooldown = 0; + ctx->convergenceCounter = 1; + + DEBUG(2, "compress waiting on write or create, tried to increase compression level by %u\n\n", boundChange); + } + + } + + if (ctx->compressionLevel == prevCompressionLevel) { + ctx->convergenceCounter++; + } +} + +static size_t getUseableDictSize(unsigned compressionLevel) +{ + ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, 0); + unsigned const overlapLog = compressionLevel >= (unsigned)ZSTD_maxCLevel() ? 0 : 3; + size_t const overlapSize = 1 << (params.cParams.windowLog - overlapLog); + return overlapSize; +} + +static void* compressionThread(void* arg) +{ + adaptCCtx* const ctx = (adaptCCtx*)arg; + unsigned currJob = 0; + for ( ; ; ) { + unsigned const currJobIndex = currJob % ctx->numJobs; + jobDescription* const job = &ctx->jobs[currJobIndex]; + DEBUG(2, "starting compression for job %u\n", currJob); + + { + /* check if compression thread will have to wait */ + unsigned willWaitForCreate = 0; + unsigned willWaitForWrite = 0; + + pthread_mutex_lock(&ctx->jobReady_mutex.pMutex); + if (currJob + 1 > ctx->jobReadyID) willWaitForCreate = 1; + pthread_mutex_unlock(&ctx->jobReady_mutex.pMutex); + + pthread_mutex_lock(&ctx->jobWrite_mutex.pMutex); + if (currJob - ctx->jobWriteID >= ctx->numJobs) willWaitForWrite = 1; + pthread_mutex_unlock(&ctx->jobWrite_mutex.pMutex); + + + pthread_mutex_lock(&ctx->createCompletion_mutex.pMutex); + if (willWaitForCreate) { + DEBUG(2, "compression will wait for create on job %u\n", currJob); + ctx->compressWaitCreateCompletion = ctx->createCompletion; + DEBUG(2, "create completion %f\n", ctx->compressWaitCreateCompletion); + + } + else { + ctx->compressWaitCreateCompletion = 1; + } + pthread_mutex_unlock(&ctx->createCompletion_mutex.pMutex); + + pthread_mutex_lock(&ctx->writeCompletion_mutex.pMutex); + if (willWaitForWrite) { + DEBUG(2, "compression will wait for write on job %u\n", currJob); + ctx->compressWaitWriteCompletion = ctx->writeCompletion; + DEBUG(2, "write completion %f\n", ctx->compressWaitWriteCompletion); + } + else { + ctx->compressWaitWriteCompletion = 1; + } + pthread_mutex_unlock(&ctx->writeCompletion_mutex.pMutex); + + } + + /* wait until job is ready */ + pthread_mutex_lock(&ctx->jobReady_mutex.pMutex); + while (currJob + 1 > ctx->jobReadyID && !ctx->threadError) { + pthread_cond_wait(&ctx->jobReady_cond.pCond, &ctx->jobReady_mutex.pMutex); + } + pthread_mutex_unlock(&ctx->jobReady_mutex.pMutex); + + /* wait until job previously in this space is written */ + pthread_mutex_lock(&ctx->jobWrite_mutex.pMutex); + while (currJob - ctx->jobWriteID >= ctx->numJobs && !ctx->threadError) { + pthread_cond_wait(&ctx->jobWrite_cond.pCond, &ctx->jobWrite_mutex.pMutex); + } + pthread_mutex_unlock(&ctx->jobWrite_mutex.pMutex); + /* reset compression completion */ + pthread_mutex_lock(&ctx->compressionCompletion_mutex.pMutex); + ctx->compressionCompletion = 0; + pthread_mutex_unlock(&ctx->compressionCompletion_mutex.pMutex); + + /* adapt compression level */ + if (currJob) adaptCompressionLevel(ctx); + + DEBUG(2, "job %u compressed with level %u\n", currJob, ctx->compressionLevel); + /* compress the data */ + { + size_t const compressionBlockSize = ZSTD_BLOCKSIZE_MAX; /* 128 KB */ + unsigned const cLevel = ctx->compressionLevel; + unsigned blockNum = 0; + size_t remaining = job->src.size; + size_t srcPos = 0; + size_t dstPos = 0; + /* reset 
compressed size */ + job->compressedSize = 0; + DEBUG(2, "calling ZSTD_compressBegin()\n"); + /* begin compression */ + { + size_t const useDictSize = MIN(getUseableDictSize(cLevel), job->dictSize); + size_t const dictModeError = ZSTD_setCCtxParameter(ctx->cctx, ZSTD_p_forceRawDict, 1); + ZSTD_parameters params = ZSTD_getParams(cLevel, 0, useDictSize); + params.cParams.windowLog = 23; + { + size_t const initError = ZSTD_compressBegin_advanced(ctx->cctx, job->src.start + job->dictSize - useDictSize, useDictSize, params, 0); + size_t const windowSizeError = ZSTD_setCCtxParameter(ctx->cctx, ZSTD_p_forceWindow, 1); + if (ZSTD_isError(dictModeError) || ZSTD_isError(initError) || ZSTD_isError(windowSizeError)) { + DISPLAY("Error: something went wrong while starting compression\n"); + signalErrorToThreads(ctx); + return arg; + } + } + } + DEBUG(2, "finished with ZSTD_compressBegin()\n"); + + do { + size_t const actualBlockSize = MIN(remaining, compressionBlockSize); + + /* continue compression */ + if (currJob != 0 || blockNum != 0) { /* not first block of first job flush/overwrite the frame header */ + size_t const hSize = ZSTD_compressContinue(ctx->cctx, job->dst.start + dstPos, job->dst.capacity - dstPos, job->src.start + job->dictSize + srcPos, 0); + if (ZSTD_isError(hSize)) { + DISPLAY("Error: something went wrong while continuing compression\n"); + job->compressedSize = hSize; + signalErrorToThreads(ctx); + return arg; + } + ZSTD_invalidateRepCodes(ctx->cctx); + } + { + size_t const ret = (job->lastJobPlusOne == currJob + 1 && remaining == actualBlockSize) ? + ZSTD_compressEnd (ctx->cctx, job->dst.start + dstPos, job->dst.capacity - dstPos, job->src.start + job->dictSize + srcPos, actualBlockSize) : + ZSTD_compressContinue(ctx->cctx, job->dst.start + dstPos, job->dst.capacity - dstPos, job->src.start + job->dictSize + srcPos, actualBlockSize); + if (ZSTD_isError(ret)) { + DISPLAY("Error: something went wrong during compression: %s\n", ZSTD_getErrorName(ret)); + signalErrorToThreads(ctx); + return arg; + } + job->compressedSize += ret; + remaining -= actualBlockSize; + srcPos += actualBlockSize; + dstPos += ret; + blockNum++; + + /* update completion */ + pthread_mutex_lock(&ctx->compressionCompletion_mutex.pMutex); + ctx->compressionCompletion = 1 - (double)remaining/job->src.size; + pthread_mutex_unlock(&ctx->compressionCompletion_mutex.pMutex); + } + } while (remaining != 0); + job->dst.size = job->compressedSize; + } + pthread_mutex_lock(&ctx->jobCompressed_mutex.pMutex); + ctx->jobCompressedID++; + pthread_cond_broadcast(&ctx->jobCompressed_cond.pCond); + pthread_mutex_unlock(&ctx->jobCompressed_mutex.pMutex); + if (job->lastJobPlusOne == currJob + 1 || ctx->threadError) { + /* finished compressing all jobs */ + break; + } + DEBUG(2, "finished compressing job %u\n", currJob); + currJob++; + } + return arg; +} + +static void displayProgress(unsigned cLevel, unsigned last) +{ + if (!g_useProgressBar) return; + UTIL_time_t currTime; + UTIL_getTime(&currTime); + double const timeElapsed = (double)(UTIL_getSpanTimeMicro(g_ticksPerSecond, g_startTime, currTime) / 1000.0); + double const sizeMB = (double)g_streamedSize / (1 << 20); + double const avgCompRate = sizeMB * 1000 / timeElapsed; + fprintf(stderr, "\r| Comp. Level: %2u | Time Elapsed: %7.2f s | Data Size: %7.1f MB | Avg Comp. 
Rate: %6.2f MB/s |", cLevel, timeElapsed/1000.0, sizeMB, avgCompRate); + if (last) { + fprintf(stderr, "\n"); + } + else { + fflush(stderr); + } +} + +static void* outputThread(void* arg) +{ + outputThreadArg* const otArg = (outputThreadArg*)arg; + adaptCCtx* const ctx = otArg->ctx; + FILE* const dstFile = otArg->dstFile; + + unsigned currJob = 0; + for ( ; ; ) { + unsigned const currJobIndex = currJob % ctx->numJobs; + jobDescription* const job = &ctx->jobs[currJobIndex]; + unsigned willWaitForCompress = 0; + DEBUG(2, "starting write for job %u\n", currJob); + + pthread_mutex_lock(&ctx->jobCompressed_mutex.pMutex); + if (currJob + 1 > ctx->jobCompressedID) willWaitForCompress = 1; + pthread_mutex_unlock(&ctx->jobCompressed_mutex.pMutex); + + + pthread_mutex_lock(&ctx->compressionCompletion_mutex.pMutex); + if (willWaitForCompress) { + /* write thread is waiting on compression thread */ + ctx->writeWaitCompressionCompletion = ctx->compressionCompletion; + DEBUG(2, "writer thread waiting for nextJob: %u, writeWaitCompressionCompletion %f\n", currJob, ctx->writeWaitCompressionCompletion); + } + else { + ctx->writeWaitCompressionCompletion = 1; + } + pthread_mutex_unlock(&ctx->compressionCompletion_mutex.pMutex); + + pthread_mutex_lock(&ctx->jobCompressed_mutex.pMutex); + while (currJob + 1 > ctx->jobCompressedID && !ctx->threadError) { + pthread_cond_wait(&ctx->jobCompressed_cond.pCond, &ctx->jobCompressed_mutex.pMutex); + } + pthread_mutex_unlock(&ctx->jobCompressed_mutex.pMutex); + + /* reset write completion */ + pthread_mutex_lock(&ctx->writeCompletion_mutex.pMutex); + ctx->writeCompletion = 0; + pthread_mutex_unlock(&ctx->writeCompletion_mutex.pMutex); + + { + size_t const compressedSize = job->compressedSize; + size_t remaining = compressedSize; + if (ZSTD_isError(compressedSize)) { + DISPLAY("Error: an error occurred during compression\n"); + signalErrorToThreads(ctx); + return arg; + } + { + size_t const blockSize = MAX(compressedSize >> 7, 1 << 10); + size_t pos = 0; + for ( ; ; ) { + size_t const writeSize = MIN(remaining, blockSize); + size_t const ret = fwrite(job->dst.start + pos, 1, writeSize, dstFile); + if (ret != writeSize) break; + pos += ret; + remaining -= ret; + + /* update completion variable for writing */ + pthread_mutex_lock(&ctx->writeCompletion_mutex.pMutex); + ctx->writeCompletion = 1 - (double)remaining/compressedSize; + pthread_mutex_unlock(&ctx->writeCompletion_mutex.pMutex); + + if (remaining == 0) break; + } + if (pos != compressedSize) { + DISPLAY("Error: an error occurred during file write operation\n"); + signalErrorToThreads(ctx); + return arg; + } + } + } + displayProgress(ctx->compressionLevel, job->lastJobPlusOne == currJob + 1); + pthread_mutex_lock(&ctx->jobWrite_mutex.pMutex); + ctx->jobWriteID++; + pthread_cond_signal(&ctx->jobWrite_cond.pCond); + pthread_mutex_unlock(&ctx->jobWrite_mutex.pMutex); + + if (job->lastJobPlusOne == currJob + 1 || ctx->threadError) { + /* finished with all jobs */ + pthread_mutex_lock(&ctx->allJobsCompleted_mutex.pMutex); + ctx->allJobsCompleted = 1; + pthread_cond_signal(&ctx->allJobsCompleted_cond.pCond); + pthread_mutex_unlock(&ctx->allJobsCompleted_mutex.pMutex); + break; + } + DEBUG(2, "finished writing job %u\n", currJob); + currJob++; + + } + return arg; +} + +static int createCompressionJob(adaptCCtx* ctx, size_t srcSize, int last) +{ + unsigned const nextJob = ctx->nextJobID; + unsigned const nextJobIndex = nextJob % ctx->numJobs; + jobDescription* const job = &ctx->jobs[nextJobIndex]; + + + 
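+    /* Hand the freshly read chunk to this job by swapping src buffers with the shared
+     * input buffer; job->dictSize records how much of the previous chunk sits in front
+     * of the new data to serve as a dictionary. Unless this is the last job, the new
+     * chunk is also copied back into the input buffer so the next job can use it as
+     * its dictionary. */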
job->compressionLevel = ctx->compressionLevel; + job->src.size = srcSize; + job->jobID = nextJob; + if (last) job->lastJobPlusOne = nextJob + 1; + { + /* swap buffer */ + void* const copy = job->src.start; + job->src.start = ctx->input.buffer.start; + ctx->input.buffer.start = copy; + } + job->dictSize = ctx->lastDictSize; + + ctx->nextJobID++; + /* if not on the last job, reuse data as dictionary in next job */ + if (!last) { + size_t const oldDictSize = ctx->lastDictSize; + memcpy(ctx->input.buffer.start, job->src.start + oldDictSize, srcSize); + ctx->lastDictSize = srcSize; + ctx->input.filled = srcSize; + } + + /* signal job ready */ + pthread_mutex_lock(&ctx->jobReady_mutex.pMutex); + ctx->jobReadyID++; + pthread_cond_signal(&ctx->jobReady_cond.pCond); + pthread_mutex_unlock(&ctx->jobReady_mutex.pMutex); + + return 0; +} + +static int performCompression(adaptCCtx* ctx, FILE* const srcFile, outputThreadArg* otArg) +{ + /* early error check to exit */ + if (!ctx || !srcFile || !otArg) { + return 1; + } + + /* create output thread */ + { + pthread_t out; + if (pthread_create(&out, NULL, &outputThread, otArg)) { + DISPLAY("Error: could not create output thread\n"); + signalErrorToThreads(ctx); + return 1; + } + } + + /* create compression thread */ + { + pthread_t compression; + if (pthread_create(&compression, NULL, &compressionThread, ctx)) { + DISPLAY("Error: could not create compression thread\n"); + signalErrorToThreads(ctx); + return 1; + } + } + { + unsigned currJob = 0; + /* creating jobs */ + for ( ; ; ) { + size_t pos = 0; + size_t const readBlockSize = 1 << 15; + size_t remaining = FILE_CHUNK_SIZE; + unsigned const nextJob = ctx->nextJobID; + unsigned willWaitForCompress = 0; + DEBUG(2, "starting creation of job %u\n", currJob); + + pthread_mutex_lock(&ctx->jobCompressed_mutex.pMutex); + if (nextJob - ctx->jobCompressedID >= ctx->numJobs) willWaitForCompress = 1; + pthread_mutex_unlock(&ctx->jobCompressed_mutex.pMutex); + + pthread_mutex_lock(&ctx->compressionCompletion_mutex.pMutex); + if (willWaitForCompress) { + /* creation thread is waiting, take measurement of completion */ + ctx->createWaitCompressionCompletion = ctx->compressionCompletion; + DEBUG(2, "create thread waiting for nextJob: %u, createWaitCompressionCompletion %f\n", nextJob, ctx->createWaitCompressionCompletion); + } + else { + ctx->createWaitCompressionCompletion = 1; + } + pthread_mutex_unlock(&ctx->compressionCompletion_mutex.pMutex); + + /* wait until the job has been compressed */ + pthread_mutex_lock(&ctx->jobCompressed_mutex.pMutex); + while (nextJob - ctx->jobCompressedID >= ctx->numJobs && !ctx->threadError) { + pthread_cond_wait(&ctx->jobCompressed_cond.pCond, &ctx->jobCompressed_mutex.pMutex); + } + pthread_mutex_unlock(&ctx->jobCompressed_mutex.pMutex); + + /* reset create completion */ + pthread_mutex_lock(&ctx->createCompletion_mutex.pMutex); + ctx->createCompletion = 0; + pthread_mutex_unlock(&ctx->createCompletion_mutex.pMutex); + + while (remaining != 0 && !feof(srcFile)) { + size_t const ret = fread(ctx->input.buffer.start + ctx->input.filled + pos, 1, readBlockSize, srcFile); + if (ret != readBlockSize && !feof(srcFile)) { + /* error could not read correct number of bytes */ + DISPLAY("Error: problem occurred during read from src file\n"); + signalErrorToThreads(ctx); + return 1; + } + pos += ret; + remaining -= ret; + pthread_mutex_lock(&ctx->createCompletion_mutex.pMutex); + ctx->createCompletion = 1 - (double)remaining/((size_t)FILE_CHUNK_SIZE); + 
pthread_mutex_unlock(&ctx->createCompletion_mutex.pMutex); + } + if (remaining != 0 && !feof(srcFile)) { + DISPLAY("Error: problem occurred during read from src file\n"); + signalErrorToThreads(ctx); + return 1; + } + g_streamedSize += pos; + /* reading was fine, now create the compression job */ + { + int const last = feof(srcFile); + int const error = createCompressionJob(ctx, pos, last); + if (error != 0) { + signalErrorToThreads(ctx); + return error; + } + } + DEBUG(2, "finished creating job %u\n", currJob); + currJob++; + if (feof(srcFile)) { + break; + } + } + } + /* success -- created all jobs */ + return 0; +} + +static fcResources createFileCompressionResources(const char* const srcFilename, const char* const dstFilenameOrNull) +{ + fcResources fcr; + unsigned const stdinUsed = !strcmp(srcFilename, stdinmark); + FILE* const srcFile = stdinUsed ? stdin : fopen(srcFilename, "rb"); + const char* const outFilenameIntermediate = (stdinUsed && !dstFilenameOrNull) ? stdoutmark : dstFilenameOrNull; + const char* outFilename = outFilenameIntermediate; + char fileAndSuffix[MAX_PATH]; + size_t const numJobs = MAX_NUM_JOBS; + + memset(&fcr, 0, sizeof(fcr)); + + if (!outFilenameIntermediate) { + if (snprintf(fileAndSuffix, MAX_PATH, "%s.zst", srcFilename) + 1 > MAX_PATH) { + DISPLAY("Error: output filename is too long\n"); + return fcr; + } + outFilename = fileAndSuffix; + } + + { + unsigned const stdoutUsed = !strcmp(outFilename, stdoutmark); + FILE* const dstFile = stdoutUsed ? stdout : fopen(outFilename, "wb"); + fcr.otArg = malloc(sizeof(outputThreadArg)); + if (!fcr.otArg) { + DISPLAY("Error: could not allocate space for output thread argument\n"); + return fcr; + } + fcr.otArg->dstFile = dstFile; + } + /* checking for errors */ + if (!fcr.otArg->dstFile || !srcFile) { + DISPLAY("Error: some file(s) could not be opened\n"); + return fcr; + } + + /* creating context */ + fcr.ctx = createCCtx(numJobs); + fcr.otArg->ctx = fcr.ctx; + fcr.srcFile = srcFile; + return fcr; +} + +static int freeFileCompressionResources(fcResources* fcr) +{ + int ret = 0; + waitUntilAllJobsCompleted(fcr->ctx); + ret |= (fcr->srcFile != NULL) ? fclose(fcr->srcFile) : 0; + ret |= (fcr->ctx != NULL) ? freeCCtx(fcr->ctx) : 0; + if (fcr->otArg) { + ret |= (fcr->otArg->dstFile != stdout) ? 
fclose(fcr->otArg->dstFile) : 0; + free(fcr->otArg); + /* no need to freeCCtx() on otArg->ctx because it should be the same context */ + } + return ret; +} + +static int compressFilename(const char* const srcFilename, const char* const dstFilenameOrNull) +{ + int ret = 0; + UTIL_getTime(&g_startTime); + g_streamedSize = 0; + fcResources fcr = createFileCompressionResources(srcFilename, dstFilenameOrNull); + ret |= performCompression(fcr.ctx, fcr.srcFile, fcr.otArg); + ret |= freeFileCompressionResources(&fcr); + return ret; +} + +static int compressFilenames(const char** filenameTable, unsigned numFiles, unsigned forceStdout) +{ + int ret = 0; + unsigned fileNum; + for (fileNum=0; fileNum MAX_UINT */ +static unsigned readU32FromChar(const char** stringPtr) +{ + unsigned result = 0; + while ((**stringPtr >='0') && (**stringPtr <='9')) + result *= 10, result += **stringPtr - '0', (*stringPtr)++ ; + if ((**stringPtr=='K') || (**stringPtr=='M')) { + result <<= 10; + if (**stringPtr=='M') result <<= 10; + (*stringPtr)++ ; + if (**stringPtr=='i') (*stringPtr)++; + if (**stringPtr=='B') (*stringPtr)++; + } + return result; +} + +static void help() +{ + PRINT("Usage:\n"); + PRINT(" ./multi [options] [file(s)]\n"); + PRINT("\n"); + PRINT("Options:\n"); + PRINT(" -oFILE : specify the output file name\n"); + PRINT(" -i# : provide initial compression level\n"); + PRINT(" -h : display help/information\n"); + PRINT(" -f : force the compression level to stay constant\n"); + PRINT(" -c : force write to stdout\n"); + PRINT(" -p : hide progress bar\n"); + PRINT(" -q : quiet mode -- do not show progress bar or other information\n"); +} +/* return 0 if successful, else return error */ +int main(int argCount, const char* argv[]) +{ + const char* outFilename = NULL; + const char** filenameTable = (const char**)malloc(argCount*sizeof(const char*)); + unsigned filenameIdx = 0; + filenameTable[0] = stdinmark; + unsigned forceStdout = 0; + int ret = 0; + int argNum; + + UTIL_initTimer(&g_ticksPerSecond); + + if (filenameTable == NULL) { + DISPLAY("Error: could not allocate sapce for filename table.\n"); + return 1; + } + + for (argNum=1; argNum 1) { + switch (argument[1]) { + case 'o': + argument += 2; + outFilename = argument; + break; + case 'i': + argument += 2; + g_compressionLevel = readU32FromChar(&argument); + break; + case 'h': + help(); + goto _main_exit; + case 'p': + g_useProgressBar = 0; + break; + case 'c': + forceStdout = 1; + outFilename = stdoutmark; + break; + case 'f': + g_forceCompressionLevel = 1; + break; + case 'q': + g_useProgressBar = 0; + g_displayLevel = 0; + break; + default: + DISPLAY("Error: invalid argument provided\n"); + ret = 1; + goto _main_exit; + } + continue; + } + + /* regular files to be compressed */ + filenameTable[filenameIdx++] = argument; + } + + /* error checking with number of files */ + if (filenameIdx > 1 && (outFilename != NULL && strcmp(outFilename, stdoutmark))) { + DISPLAY("Error: multiple input files provided, cannot use specified output file\n"); + ret = 1; + goto _main_exit; + } + + /* compress files */ + if (filenameIdx <= 1) { + ret |= compressFilename(filenameTable[0], outFilename); + } + else { + ret |= compressFilenames(filenameTable, filenameIdx, forceStdout); + } +_main_exit: + free(filenameTable); + return ret; +} diff --git a/contrib/adaptive-compression/datagencli.c b/contrib/adaptive-compression/datagencli.c new file mode 100644 index 00000000..8a81939d --- /dev/null +++ b/contrib/adaptive-compression/datagencli.c @@ -0,0 +1,129 @@ +/** + * Copyright 
(c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + + +/*-************************************ +* Dependencies +**************************************/ +#include "util.h" /* Compiler options */ +#include /* fprintf, stderr */ +#include "datagen.h" /* RDG_generate */ + + +/*-************************************ +* Constants +**************************************/ +#define KB *(1 <<10) +#define MB *(1 <<20) +#define GB *(1U<<30) + +#define SIZE_DEFAULT ((64 KB) + 1) +#define SEED_DEFAULT 0 +#define COMPRESSIBILITY_DEFAULT 50 + + +/*-************************************ +* Macros +**************************************/ +#define DISPLAY(...) fprintf(stderr, __VA_ARGS__) +#define DISPLAYLEVEL(l, ...) if (displayLevel>=l) { DISPLAY(__VA_ARGS__); } +static unsigned displayLevel = 2; + + +/*-******************************************************* +* Command line +*********************************************************/ +static int usage(const char* programName) +{ + DISPLAY( "Compressible data generator\n"); + DISPLAY( "Usage :\n"); + DISPLAY( " %s [args]\n", programName); + DISPLAY( "\n"); + DISPLAY( "Arguments :\n"); + DISPLAY( " -g# : generate # data (default:%i)\n", SIZE_DEFAULT); + DISPLAY( " -s# : Select seed (default:%i)\n", SEED_DEFAULT); + DISPLAY( " -P# : Select compressibility in %% (default:%i%%)\n", + COMPRESSIBILITY_DEFAULT); + DISPLAY( " -h : display help and exit\n"); + return 0; +} + + +int main(int argc, const char** argv) +{ + unsigned probaU32 = COMPRESSIBILITY_DEFAULT; + double litProba = 0.0; + U64 size = SIZE_DEFAULT; + U32 seed = SEED_DEFAULT; + const char* const programName = argv[0]; + + int argNb; + for(argNb=1; argNb='0') && (*argument<='9')) + size *= 10, size += *argument++ - '0'; + if (*argument=='K') { size <<= 10; argument++; } + if (*argument=='M') { size <<= 20; argument++; } + if (*argument=='G') { size <<= 30; argument++; } + if (*argument=='B') { argument++; } + break; + case 's': + argument++; + seed=0; + while ((*argument>='0') && (*argument<='9')) + seed *= 10, seed += *argument++ - '0'; + break; + case 'P': + argument++; + probaU32 = 0; + while ((*argument>='0') && (*argument<='9')) + probaU32 *= 10, probaU32 += *argument++ - '0'; + if (probaU32>100) probaU32 = 100; + break; + case 'L': /* hidden argument : Literal distribution probability */ + argument++; + litProba=0.; + while ((*argument>='0') && (*argument<='9')) + litProba *= 10, litProba += *argument++ - '0'; + if (litProba>100.) 
litProba=100.; + litProba /= 100.; + break; + case 'v': + displayLevel = 4; + argument++; + break; + default: + return usage(programName); + } + } } } /* for(argNb=1; argNb tmp +./adapt -otmp.zst tmp +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s2 -g500MB > tmp +./adapt -otmp.zst tmp +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s3 -g250MB > tmp +./adapt -otmp.zst tmp +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s4 -g125MB > tmp +./adapt -otmp.zst tmp +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s5 -g50MB > tmp +./adapt -otmp.zst tmp +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s6 -g25MB > tmp +./adapt -otmp.zst tmp +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s7 -g10MB > tmp +./adapt -otmp.zst tmp +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s8 -g5MB > tmp +./adapt -otmp.zst tmp +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s9 -g500KB > tmp +./adapt -otmp.zst tmp +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +echo -e "\ncorrectness tests -- streaming" +./datagen -s10 -g1GB > tmp +cat tmp | ./adapt > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s11 -g100MB > tmp +cat tmp | ./adapt > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s12 -g10MB > tmp +cat tmp | ./adapt > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s13 -g1MB > tmp +cat tmp | ./adapt > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s14 -g100KB > tmp +cat tmp | ./adapt > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s15 -g10KB > tmp +cat tmp | ./adapt > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +echo -e "\ncorrectness tests -- read limit" +./datagen -s16 -g1GB > tmp +pv -L 50m -q tmp | ./adapt > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s17 -g100MB > tmp +pv -L 50m -q tmp | ./adapt > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s18 -g10MB > tmp +pv -L 50m -q tmp | ./adapt > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s19 -g1MB > tmp +pv -L 50m -q tmp | ./adapt > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s20 -g100KB > tmp +pv -L 50m -q tmp | ./adapt > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s21 -g10KB > tmp +pv -L 50m -q tmp | ./adapt > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +echo -e "\ncorrectness tests -- write limit" +./datagen -s22 -g1GB > tmp +pv -q tmp | ./adapt | pv -L 5m -q > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s23 -g100MB > tmp +pv -q tmp | ./adapt | pv -L 5m -q > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s24 -g10MB > tmp +pv -q tmp | ./adapt | pv -L 5m -q > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s25 -g1MB > tmp +pv -q tmp | ./adapt | pv -L 5m -q > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s26 -g100KB > tmp +pv -q tmp | ./adapt | pv -L 5m -q > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s27 -g10KB > tmp +pv -q tmp | ./adapt | pv -L 5m -q > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +echo -e "\ncorrectness tests -- read and write limits" +./datagen -s28 
-g1GB > tmp +pv -L 50m -q tmp | ./adapt | pv -L 5m -q > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s29 -g100MB > tmp +pv -L 50m -q tmp | ./adapt | pv -L 5m -q > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s30 -g10MB > tmp +pv -L 50m -q tmp | ./adapt | pv -L 5m -q > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s31 -g1MB > tmp +pv -L 50m -q tmp | ./adapt | pv -L 5m -q > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s32 -g100KB > tmp +pv -L 50m -q tmp | ./adapt | pv -L 5m -q > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s33 -g10KB > tmp +pv -L 50m -q tmp | ./adapt | pv -L 5m -q > tmp.zst +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +echo -e "\ncorrectness tests -- forced compression level" +./datagen -s34 -g1GB > tmp +./adapt tmp -otmp.zst -i11 -f +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s35 -g100MB > tmp +./adapt tmp -otmp.zst -i11 -f +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s36 -g10MB > tmp +./adapt tmp -otmp.zst -i11 -f +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s37 -g1MB > tmp +./adapt tmp -otmp.zst -i11 -f +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s38 -g100KB > tmp +./adapt tmp -otmp.zst -i11 -f +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +./datagen -s39 -g10KB > tmp +./adapt tmp -otmp.zst -i11 -f +zstd -d tmp.zst -o tmp2 +diff -s -q tmp tmp2 +rm tmp* + +echo -e "\ncorrectness tests -- window size test" +./datagen -s39 -g1GB | pv -L 25m -q | ./adapt -i1 | pv -q > tmp.zst +zstd -d tmp.zst +rm tmp* +make clean diff --git a/contrib/adaptive-compression/test-performance.sh b/contrib/adaptive-compression/test-performance.sh new file mode 100755 index 00000000..958cb3cc --- /dev/null +++ b/contrib/adaptive-compression/test-performance.sh @@ -0,0 +1,59 @@ +echo "testing time -- no limits set" +./datagen -s1 -g1GB > tmp +time ./adapt -otmp1.zst tmp +time zstd -1 -o tmp2.zst tmp +rm tmp* + +./datagen -s2 -g2GB > tmp +time ./adapt -otmp1.zst tmp +time zstd -1 -o tmp2.zst tmp +rm tmp* + +./datagen -s3 -g4GB > tmp +time ./adapt -otmp1.zst tmp +time zstd -1 -o tmp2.zst tmp +rm tmp* + +echo -e "\ntesting compression ratio -- no limits set" +./datagen -s4 -g1GB > tmp +time ./adapt -otmp1.zst tmp +time zstd -1 -o tmp2.zst tmp +ls -l tmp1.zst tmp2.zst +rm tmp* + +./datagen -s5 -g2GB > tmp +time ./adapt -otmp1.zst tmp +time zstd -1 -o tmp2.zst tmp +ls -l tmp1.zst tmp2.zst +rm tmp* + +./datagen -s6 -g4GB > tmp +time ./adapt -otmp1.zst tmp +time zstd -1 -o tmp2.zst tmp +ls -l tmp1.zst tmp2.zst +rm tmp* + +echo e "\ntesting performance at various compression levels -- no limits set" +./datagen -s7 -g1GB > tmp +echo "adapt" +time ./adapt -i5 -f tmp -otmp1.zst +echo "zstdcli" +time zstd -5 tmp -o tmp2.zst +ls -l tmp1.zst tmp2.zst +rm tmp* + +./datagen -s8 -g1GB > tmp +echo "adapt" +time ./adapt -i10 -f tmp -otmp1.zst +echo "zstdcli" +time zstd -10 tmp -o tmp2.zst +ls -l tmp1.zst tmp2.zst +rm tmp* + +./datagen -s9 -g1GB > tmp +echo "adapt" +time ./adapt -i15 -f tmp -otmp1.zst +echo "zstdcli" +time zstd -15 tmp -o tmp2.zst +ls -l tmp1.zst tmp2.zst +rm tmp* diff --git a/lib/compress/zstd_compress.c b/lib/compress/zstd_compress.c index 998fb3d4..a73763d2 100644 --- a/lib/compress/zstd_compress.c +++ b/lib/compress/zstd_compress.c @@ -2992,7 +2992,6 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx, return 
fhSize; } - size_t ZSTD_compressContinue (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) diff --git a/lib/zstd.h b/lib/zstd.h index a2a756df..dadfe74c 100644 --- a/lib/zstd.h +++ b/lib/zstd.h @@ -802,7 +802,6 @@ ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstC ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); - /*- Buffer-less streaming decompression (synchronous mode)