diff --git a/contrib/linux-kernel/README.md b/contrib/linux-kernel/README.md
index 16bc2d48..214c520b 100644
--- a/contrib/linux-kernel/README.md
+++ b/contrib/linux-kernel/README.md
@@ -1,21 +1,33 @@
 # Linux Kernel Patch
 
-There are three pieces, the `zstd_compress` and `zstd_decompress` kernel modules, the BtrFS patch, and the SquashFS patch.
+There are four pieces: the `xxhash` kernel module, the `zstd_compress` and `zstd_decompress` kernel modules, the BtrFS patch, and the SquashFS patch.
 The patches are based off of the linux kernel master branch (version 4.10).
 
+## xxHash kernel module
+
+* The patch is located in `xxhash.diff`.
+* The header is in `include/linux/xxhash.h`.
+* The source is in `lib/xxhash.c`.
+* `test/XXHashUserLandTest.cpp` contains tests for the patch in userland by mocking the kernel headers.
+  I tested the tests by commenting out a line of each branch in `xxhash.c` one at a time, and made sure the tests failed.
+  It can be run with the following commands:
+  ```
+  cd test && make googletest && make XXHashUserLandTest && ./XXHashUserLandTest
+  ```
+* I also benchmarked the `xxhash` module against upstream xxHash, and made sure that they ran at the same speed.
+
 ## Zstd Kernel modules
 
+* The (large) patch is located in `zstd.diff`, which depends on `xxhash.diff`.
 * The header is in `include/linux/zstd.h`.
 * It is split up into `zstd_compress` and `zstd_decompress`, which can be loaded independently.
 * Source files are in `lib/zstd/`.
 * `lib/Kconfig` and `lib/Makefile` need to be modified by applying `lib/Kconfig.diff` and `lib/Makefile.diff` respectively.
+  These changes are also included in `zstd.diff`.
 * `test/UserlandTest.cpp` contains tests for the patch in userland by mocking the kernel headers.
   It can be run with the following commands:
   ```
-  cd test
-  make googletest
-  make UserlandTest
-  ./UserlandTest
+  cd test && make googletest && make UserlandTest && ./UserlandTest
   ```
 
 ## BtrFS
diff --git a/contrib/linux-kernel/lib/Kconfig.diff b/contrib/linux-kernel/lib/Kconfig.diff
new file mode 100644
index 00000000..227c6e2c
--- /dev/null
+++ b/contrib/linux-kernel/lib/Kconfig.diff
@@ -0,0 +1,19 @@
+diff --git a/lib/Kconfig b/lib/Kconfig
+index b6009d7..f00ddab 100644
+--- a/lib/Kconfig
++++ b/lib/Kconfig
+@@ -241,6 +241,14 @@ config LZ4HC_COMPRESS
+ config LZ4_DECOMPRESS
+ 	tristate
+ 
++config ZSTD_COMPRESS
++	select XXHASH
++	tristate
++
++config ZSTD_DECOMPRESS
++	select XXHASH
++	tristate
++
+ source "lib/xz/Kconfig"
+ 
+ #
diff --git a/contrib/linux-kernel/lib/Makefile.diff b/contrib/linux-kernel/lib/Makefile.diff
new file mode 100644
index 00000000..f92efe8e
--- /dev/null
+++ b/contrib/linux-kernel/lib/Makefile.diff
@@ -0,0 +1,13 @@
+diff --git a/lib/Makefile b/lib/Makefile
+index e16f94a..0cfd529 100644
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -115,6 +115,8 @@ obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
+ obj-$(CONFIG_LZ4_COMPRESS) += lz4/
+ obj-$(CONFIG_LZ4HC_COMPRESS) += lz4/
+ obj-$(CONFIG_LZ4_DECOMPRESS) += lz4/
++obj-$(CONFIG_ZSTD_COMPRESS) += zstd/
++obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd/
+ obj-$(CONFIG_XZ_DEC) += xz/
+ obj-$(CONFIG_RAID6_PQ) += raid6/
+ 
diff --git a/contrib/linux-kernel/zstd.diff b/contrib/linux-kernel/zstd.diff
new file mode 100644
index 00000000..c2775bbb
--- /dev/null
+++ b/contrib/linux-kernel/zstd.diff
@@ -0,0 +1,12547 @@
+diff --git a/include/linux/zstd.h b/include/linux/zstd.h
+new file mode 100644
+index 0000000..ee7bd82
+--- /dev/null
++++ b/include/linux/zstd.h
+@@ -0,0 +1,1150 @@
++/*
++ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
++ * All rights reserved.
++ *
++ * This source code is licensed under the BSD-style license found in the
++ * LICENSE file in the root directory of this source tree. An additional grant
++ * of patent rights can be found in the PATENTS file in the same directory.
++ */
++
++#ifndef ZSTD_H
++#define ZSTD_H
++
++/* ======   Dependency   ======*/
++#include <linux/types.h>   /* size_t */
++
++
++/*-*****************************************************************************
++ * Introduction
++ *
++ * zstd, short for Zstandard, is a fast lossless compression algorithm,
++ * targeting real-time compression scenarios at zlib-level and better
++ * compression ratios. The zstd compression library provides in-memory
++ * compression and decompression functions. The library supports compression
++ * levels from 1 up to ZSTD_maxCLevel(), which is 22. Levels >= 20, labeled
++ * ultra, should be used with caution, as they require more memory.
++ * Compression can be done in:
++ *  - a single step, reusing a context (described as Explicit memory management)
++ *  - unbounded multiple steps (described as Streaming compression)
++ * The compression ratio achievable on small data can be greatly improved using
++ * compression with a dictionary in:
++ *  - a single step (described as Simple dictionary API)
++ *  - a single step, reusing a dictionary (described as Fast dictionary API)
++ ******************************************************************************/
++
++/*======  Helper functions  ======*/
++
++/**
++ * enum ZSTD_ErrorCode - zstd error codes
++ *
++ * Functions that return size_t can be checked for errors using ZSTD_isError()
++ * and the ZSTD_ErrorCode can be extracted using ZSTD_getErrorCode().
++ */
++typedef enum {
++	ZSTD_error_no_error,
++	ZSTD_error_GENERIC,
++	ZSTD_error_prefix_unknown,
++	ZSTD_error_version_unsupported,
++	ZSTD_error_parameter_unknown,
++	ZSTD_error_frameParameter_unsupported,
++	ZSTD_error_frameParameter_unsupportedBy32bits,
++	ZSTD_error_frameParameter_windowTooLarge,
++	ZSTD_error_compressionParameter_unsupported,
++	ZSTD_error_init_missing,
++	ZSTD_error_memory_allocation,
++	ZSTD_error_stage_wrong,
++	ZSTD_error_dstSize_tooSmall,
++	ZSTD_error_srcSize_wrong,
++	ZSTD_error_corruption_detected,
++	ZSTD_error_checksum_wrong,
++	ZSTD_error_tableLog_tooLarge,
++	ZSTD_error_maxSymbolValue_tooLarge,
++	ZSTD_error_maxSymbolValue_tooSmall,
++	ZSTD_error_dictionary_corrupted,
++	ZSTD_error_dictionary_wrong,
++	ZSTD_error_dictionaryCreation_failed,
++	ZSTD_error_maxCode
++} ZSTD_ErrorCode;
++
++/**
++ * ZSTD_maxCLevel() - maximum compression level available
++ *
++ * Return: Maximum compression level available.
++ */
++int ZSTD_maxCLevel(void);
++/**
++ * ZSTD_compressBound() - maximum compressed size in worst case scenario
++ * @srcSize: The size of the data to compress.
++ *
++ * Return: The maximum compressed size in the worst case scenario.
++ */
++size_t ZSTD_compressBound(size_t srcSize);
++/**
++ * ZSTD_isError() - tells if a size_t function result is an error code
++ * @code: The function result to check for error.
++ *
++ * Return: Non-zero iff the code is an error.
++ */
++static __attribute__((unused)) unsigned int ZSTD_isError(size_t code)
++{
++	return code > (size_t)-ZSTD_error_maxCode;
++}
++/**
++ * ZSTD_getErrorCode() - translates an error function result to a ZSTD_ErrorCode
++ * @functionResult: The result of a function for which ZSTD_isError() is true.
++ * ++ * Return: The ZSTD_ErrorCode corresponding to the functionResult or 0 ++ * if the functionResult isn't an error. ++ */ ++static __attribute__((unused)) ZSTD_ErrorCode ZSTD_getErrorCode( ++ size_t functionResult) ++{ ++ if (!ZSTD_isError(functionResult)) ++ return (ZSTD_ErrorCode)0; ++ return (ZSTD_ErrorCode)(0 - functionResult); ++} ++ ++/** ++ * enum ZSTD_strategy - zstd compression search strategy ++ * ++ * From faster to stronger. ++ */ ++typedef enum { ++ ZSTD_fast, ++ ZSTD_dfast, ++ ZSTD_greedy, ++ ZSTD_lazy, ++ ZSTD_lazy2, ++ ZSTD_btlazy2, ++ ZSTD_btopt, ++ ZSTD_btopt2 ++} ZSTD_strategy; ++ ++/** ++ * struct ZSTD_compressionParameters - zstd compression parameters ++ * @windowLog: Log of the largest match distance. Larger means more ++ * compression, and more memory needed during decompression. ++ * @chainLog: Fully searched segment. Larger means more compression, slower, ++ * and more memory (useless for fast). ++ * @hashLog: Dispatch table. Larger means more compression, ++ * slower, and more memory. ++ * @searchLog: Number of searches. Larger means more compression and slower. ++ * @searchLength: Match length searched. Larger means faster decompression, ++ * sometimes less compression. ++ * @targetLength: Acceptable match size for optimal parser (only). Larger means ++ * more compression, and slower. ++ * @strategy: The zstd compression strategy. ++ */ ++typedef struct { ++ unsigned int windowLog; ++ unsigned int chainLog; ++ unsigned int hashLog; ++ unsigned int searchLog; ++ unsigned int searchLength; ++ unsigned int targetLength; ++ ZSTD_strategy strategy; ++} ZSTD_compressionParameters; ++ ++/** ++ * struct ZSTD_frameParameters - zstd frame parameters ++ * @contentSizeFlag: Controls whether content size will be present in the frame ++ * header (when known). ++ * @checksumFlag: Controls whether a 32-bit checksum is generated at the end ++ * of the frame for error detection. ++ * @noDictIDFlag: Controls whether dictID will be saved into the frame header ++ * when using dictionary compression. ++ * ++ * The default value is all fields set to 0. ++ */ ++typedef struct { ++ unsigned int contentSizeFlag; ++ unsigned int checksumFlag; ++ unsigned int noDictIDFlag; ++} ZSTD_frameParameters; ++ ++/** ++ * struct ZSTD_parameters - zstd parameters ++ * @cParams: The compression parameters. ++ * @fParams: The frame parameters. ++ */ ++typedef struct { ++ ZSTD_compressionParameters cParams; ++ ZSTD_frameParameters fParams; ++} ZSTD_parameters; ++ ++/** ++ * ZSTD_getCParams() - returns ZSTD_compressionParameters for selected level ++ * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel(). ++ * @estimatedSrcSize: The estimated source size to compress or 0 if unknown. ++ * @dictSize: The dictionary size or 0 if a dictionary isn't being used. ++ * ++ * Return: The selected ZSTD_compressionParameters. ++ */ ++ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, ++ unsigned long long estimatedSrcSize, size_t dictSize); ++ ++/** ++ * ZSTD_getParams() - returns ZSTD_parameters for selected level ++ * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel(). ++ * @estimatedSrcSize: The estimated source size to compress or 0 if unknown. ++ * @dictSize: The dictionary size or 0 if a dictionary isn't being used. ++ * ++ * The same as ZSTD_getCParams() except also selects the default frame ++ * parameters (all zero). ++ * ++ * Return: The selected ZSTD_parameters. 
++ */ ++ZSTD_parameters ZSTD_getParams(int compressionLevel, ++ unsigned long long estimatedSrcSize, size_t dictSize); ++ ++/*-************************************* ++ * Explicit memory management ++ **************************************/ ++ ++/** ++ * ZSTD_CCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_CCtx ++ * @cParams: The compression parameters to be used for compression. ++ * ++ * If multiple compression parameters might be used, the caller must call ++ * ZSTD_CCtxWorkspaceBound() for each set of parameters and use the maximum ++ * size. ++ * ++ * Return: A lower bound on the size of the workspace that is passed to ++ * ZSTD_initCCtx(). ++ */ ++size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams); ++ ++/** ++ * struct ZSTD_CCtx - the zstd compression context ++ * ++ * When compressing many times it is recommended to allocate a context just once ++ * and reuse it for each successive compression operation. ++ */ ++typedef struct ZSTD_CCtx_s ZSTD_CCtx; ++/** ++ * ZSTD_initCCtx() - initialize a zstd compression context ++ * @workspace: The workspace to emplace the context into. It must outlive ++ * the returned context. ++ * @workspaceSize: The size of workspace. Use ZSTD_CCtxWorkspaceBound() to ++ * determine how large the workspace must be. ++ * ++ * Return: A compression context emplaced into workspace. ++ */ ++ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize); ++ ++/** ++ * ZSTD_compressCCtx() - compress src into dst ++ * @ctx: The context. Must have been initialized with a workspace at ++ * least as large as ZSTD_CCtxWorkspaceBound(params.cParams). ++ * @dst: The buffer to compress src into. ++ * @dstCapacity: The size of the destination buffer. May be any size, but ++ * ZSTD_compressBound(srcSize) is guaranteed to be large enough. ++ * @src: The data to compress. ++ * @srcSize: The size of the data to compress. ++ * @params: The parameters to use for compression. See ZSTD_getParams(). ++ * ++ * Return: The compressed size or an error, which can be checked using ++ * ZSTD_isError(). ++ */ ++size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, ++ const void *src, size_t srcSize, ZSTD_parameters params); ++ ++/** ++ * ZSTD_DCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_DCtx ++ * ++ * Return: A lower bound on the size of the workspace that is passed to ++ * ZSTD_initDCtx(). ++ */ ++size_t ZSTD_DCtxWorkspaceBound(void); ++ ++/** ++ * struct ZSTD_DCtx - the zstd decompression context ++ * ++ * When decompressing many times it is recommended to allocate a context just ++ * once and reuse it for each successive decompression operation. ++ */ ++typedef struct ZSTD_DCtx_s ZSTD_DCtx; ++/** ++ * ZSTD_initDCtx() - initialize a zstd decompression context ++ * @workspace: The workspace to emplace the context into. It must outlive ++ * the returned context. ++ * @workspaceSize: The size of workspace. Use ZSTD_DCtxWorkspaceBound() to ++ * determine how large the workspace must be. ++ * ++ * Return: A decompression context emplaced into workspace. ++ */ ++ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize); ++ ++/** ++ * ZSTD_decompressDCtx() - decompress zstd compressed src into dst ++ * @ctx: The decompression context. ++ * @dst: The buffer to decompress src into. ++ * @dstCapacity: The size of the destination buffer. Must be at least as large ++ * as the decompressed size. If the caller cannot upper bound the ++ * decompressed size, then it's better to use the streaming API. 
++ * @src: The zstd compressed data to decompress. Multiple concatenated ++ * frames and skippable frames are allowed. ++ * @srcSize: The exact size of the data to decompress. ++ * ++ * Return: The decompressed size or an error, which can be checked using ++ * ZSTD_isError(). ++ */ ++size_t ZSTD_decompressDCtx(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity, ++ const void *src, size_t srcSize); ++ ++/*-************************ ++ * Simple dictionary API ++ **************************/ ++ ++/** ++ * ZSTD_compress_usingDict() - compress src into dst using a dictionary ++ * @ctx: The context. Must have been initialized with a workspace at ++ * least as large as ZSTD_CCtxWorkspaceBound(params.cParams). ++ * @dst: The buffer to compress src into. ++ * @dstCapacity: The size of the destination buffer. May be any size, but ++ * ZSTD_compressBound(srcSize) is guaranteed to be large enough. ++ * @src: The data to compress. ++ * @srcSize: The size of the data to compress. ++ * @dict: The dictionary to use for compression. ++ * @dictSize: The size of the dictionary. ++ * @params: The parameters to use for compression. See ZSTD_getParams(). ++ * ++ * Compression using a predefined dictionary. The same dictionary must be used ++ * during decompression. ++ * ++ * Return: The compressed size or an error, which can be checked using ++ * ZSTD_isError(). ++ */ ++size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, ++ const void *src, size_t srcSize, const void *dict, size_t dictSize, ++ ZSTD_parameters params); ++ ++/** ++ * ZSTD_decompress_usingDict() - decompress src into dst using a dictionary ++ * @ctx: The decompression context. ++ * @dst: The buffer to decompress src into. ++ * @dstCapacity: The size of the destination buffer. Must be at least as large ++ * as the decompressed size. If the caller cannot upper bound the ++ * decompressed size, then it's better to use the streaming API. ++ * @src: The zstd compressed data to decompress. Multiple concatenated ++ * frames and skippable frames are allowed. ++ * @srcSize: The exact size of the data to decompress. ++ * @dict: The dictionary to use for decompression. The same dictionary ++ * must've been used to compress the data. ++ * @dictSize: The size of the dictionary. ++ * ++ * Return: The decompressed size or an error, which can be checked using ++ * ZSTD_isError(). ++ */ ++size_t ZSTD_decompress_usingDict(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity, ++ const void *src, size_t srcSize, const void *dict, size_t dictSize); ++ ++/*-************************** ++ * Fast dictionary API ++ ***************************/ ++ ++/** ++ * ZSTD_CDictWorkspaceBound() - memory needed to initialize a ZSTD_CDict ++ * @cParams: The compression parameters to be used for compression. ++ * ++ * Return: A lower bound on the size of the workspace that is passed to ++ * ZSTD_initCDict(). ++ */ ++size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams); ++ ++/** ++ * struct ZSTD_CDict - a digested dictionary to be used for compression ++ */ ++typedef struct ZSTD_CDict_s ZSTD_CDict; ++ ++/** ++ * ZSTD_initCDict() - initialize a digested dictionary for compression ++ * @dictBuffer: The dictionary to digest. The buffer is referenced by the ++ * ZSTD_CDict so it must outlive the returned ZSTD_CDict. ++ * @dictSize: The size of the dictionary. ++ * @params: The parameters to use for compression. See ZSTD_getParams(). ++ * @workspace: The workspace. It must outlive the returned ZSTD_CDict. ++ * @workspaceSize: The workspace size. 
Must be at least
++ *                 ZSTD_CDictWorkspaceBound(params.cParams).
++ *
++ * When compressing multiple messages / blocks with the same dictionary it is
++ * recommended to load it just once. The ZSTD_CDict merely references the
++ * dictBuffer, so it must outlive the returned ZSTD_CDict.
++ *
++ * Return: The digested dictionary emplaced into workspace.
++ */
++ZSTD_CDict *ZSTD_initCDict(const void *dictBuffer, size_t dictSize,
++	ZSTD_parameters params, void *workspace, size_t workspaceSize);
++
++/**
++ * ZSTD_compress_usingCDict() - compress src into dst using a ZSTD_CDict
++ * @cctx:        The context. Must have been initialized with a workspace at
++ *               least as large as ZSTD_CCtxWorkspaceBound(cParams) where
++ *               cParams are the compression parameters used to initialize the
++ *               cdict.
++ * @dst:         The buffer to compress src into.
++ * @dstCapacity: The size of the destination buffer. May be any size, but
++ *               ZSTD_compressBound(srcSize) is guaranteed to be large enough.
++ * @src:         The data to compress.
++ * @srcSize:     The size of the data to compress.
++ * @cdict:       The digested dictionary to use for compression.
++ *
++ * Compression using a digested dictionary. The same dictionary must be used
++ * during decompression.
++ *
++ * Return: The compressed size or an error, which can be checked using
++ *         ZSTD_isError().
++ */
++size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
++	const void *src, size_t srcSize, const ZSTD_CDict *cdict);
++
++
++/**
++ * ZSTD_DDictWorkspaceBound() - memory needed to initialize a ZSTD_DDict
++ *
++ * Return: A lower bound on the size of the workspace that is passed to
++ *         ZSTD_initDDict().
++ */
++size_t ZSTD_DDictWorkspaceBound(void);
++
++/**
++ * struct ZSTD_DDict - a digested dictionary to be used for decompression
++ */
++typedef struct ZSTD_DDict_s ZSTD_DDict;
++
++/**
++ * ZSTD_initDDict() - initialize a digested dictionary for decompression
++ * @dictBuffer:    The dictionary to digest. The buffer is referenced by the
++ *                 ZSTD_DDict so it must outlive the returned ZSTD_DDict.
++ * @dictSize:      The size of the dictionary.
++ * @workspace:     The workspace. It must outlive the returned ZSTD_DDict.
++ * @workspaceSize: The workspace size. Must be at least
++ *                 ZSTD_DDictWorkspaceBound().
++ *
++ * When decompressing multiple messages / blocks with the same dictionary it is
++ * recommended to load it just once. The ZSTD_DDict merely references the
++ * dictBuffer, so it must outlive the returned ZSTD_DDict.
++ *
++ * Return: The digested dictionary emplaced into workspace.
++ */
++ZSTD_DDict *ZSTD_initDDict(const void *dictBuffer, size_t dictSize,
++	void *workspace, size_t workspaceSize);
++
++/**
++ * ZSTD_decompress_usingDDict() - decompress src into dst using a ZSTD_DDict
++ * @dctx:        The decompression context.
++ * @dst:         The buffer to decompress src into.
++ * @dstCapacity: The size of the destination buffer. Must be at least as large
++ *               as the decompressed size. If the caller cannot upper bound the
++ *               decompressed size, then it's better to use the streaming API.
++ * @src:         The zstd compressed data to decompress. Multiple concatenated
++ *               frames and skippable frames are allowed.
++ * @srcSize:     The exact size of the data to decompress.
++ * @ddict:       The digested dictionary to use for decompression. The same
++ *               dictionary must've been used to compress the data.
++ *
++ * Return: The decompressed size or an error, which can be checked using
++ *         ZSTD_isError().
++ */
++size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst,
++	size_t dstCapacity, const void *src, size_t srcSize,
++	const ZSTD_DDict *ddict);
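As a point of reference for the dictionary API above, here is a minimal usage sketch. It is not part of the patch: the function name, the choice of vmalloc()/vfree(), and compression level 3 are illustrative assumptions, and error handling is reduced to the zstd error-code convention.

```
#include <linux/vmalloc.h>
#include <linux/zstd.h>

/* Illustrative only: one-shot compression with a digested dictionary.
 * Returns the compressed size or a zstd error code (see ZSTD_isError()). */
static size_t example_compress_with_cdict(void *dst, size_t dstCapacity,
		const void *src, size_t srcSize,
		const void *dict, size_t dictSize)
{
	ZSTD_parameters const params = ZSTD_getParams(3, srcSize, dictSize);
	size_t const cdictBound = ZSTD_CDictWorkspaceBound(params.cParams);
	size_t const cctxBound = ZSTD_CCtxWorkspaceBound(params.cParams);
	void *cdictWksp = vmalloc(cdictBound);
	void *cctxWksp = vmalloc(cctxBound);
	ZSTD_CDict *cdict;
	ZSTD_CCtx *cctx;
	size_t ret = (size_t)-ZSTD_error_memory_allocation;

	if (!cdictWksp || !cctxWksp)
		goto out;
	/* The ZSTD_CDict references dict, so dict must outlive it. */
	cdict = ZSTD_initCDict(dict, dictSize, params, cdictWksp, cdictBound);
	cctx = ZSTD_initCCtx(cctxWksp, cctxBound);
	if (!cdict || !cctx)
		goto out;
	ret = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, src, srcSize,
			cdict);
out:
	vfree(cdictWksp);
	vfree(cctxWksp);
	return ret;
}
```

In a real module the two workspaces would typically be allocated once and reused across calls, which is the point of the digested-dictionary API.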
++
++
++/*-**************************
++ * Streaming
++ ***************************/
++
++/**
++ * struct ZSTD_inBuffer - input buffer for streaming
++ * @src:  Start of the input buffer.
++ * @size: Size of the input buffer.
++ * @pos:  Position where reading stopped. Will be updated.
++ *        Necessarily 0 <= pos <= size.
++ */
++typedef struct ZSTD_inBuffer_s {
++	const void *src;
++	size_t size;
++	size_t pos;
++} ZSTD_inBuffer;
++
++/**
++ * struct ZSTD_outBuffer - output buffer for streaming
++ * @dst:  Start of the output buffer.
++ * @size: Size of the output buffer.
++ * @pos:  Position where writing stopped. Will be updated.
++ *        Necessarily 0 <= pos <= size.
++ */
++typedef struct ZSTD_outBuffer_s {
++	void *dst;
++	size_t size;
++	size_t pos;
++} ZSTD_outBuffer;
++
++
++
++/*-*****************************************************************************
++ * Streaming compression - HowTo
++ *
++ * A ZSTD_CStream object is required to track streaming operation.
++ * Use ZSTD_initCStream() to initialize a ZSTD_CStream object.
++ * ZSTD_CStream objects can be reused multiple times on consecutive compression
++ * operations. It is recommended to re-use a ZSTD_CStream in situations where
++ * many streaming operations will be performed consecutively. Use one separate
++ * ZSTD_CStream per thread for parallel execution.
++ *
++ * Use ZSTD_compressStream() repetitively to consume the input stream.
++ * The function will automatically update both `pos` fields.
++ * Note that it may not consume the entire input, in which case `pos < size`,
++ * and it's up to the caller to present the remaining data again.
++ * It returns a hint for the preferred number of bytes to use as an input for
++ * the next function call.
++ *
++ * At any moment, it's possible to flush whatever data remains within the
++ * internal buffer, using ZSTD_flushStream(). `output->pos` will be updated.
++ * There might still be some content left within the internal buffer if
++ * `output->size` is too small. It returns the number of bytes left in the
++ * internal buffer and must be called until it returns 0.
++ *
++ * ZSTD_endStream() finishes a frame. It will perform a flush and write the
++ * frame epilogue. The epilogue is required for decoders to consider a frame
++ * completed. Similar to ZSTD_flushStream(), it may not be able to flush the
++ * full content if `output->size` is too small. In which case, call
++ * ZSTD_endStream() again to complete the flush. It returns the number of
++ * bytes left in the internal buffer and must be called until it returns 0.
++ ******************************************************************************/
++
++/**
++ * ZSTD_CStreamWorkspaceBound() - memory needed to initialize a ZSTD_CStream
++ * @cParams: The compression parameters to be used for compression.
++ *
++ * Return: A lower bound on the size of the workspace that is passed to
++ *         ZSTD_initCStream() and ZSTD_initCStream_usingCDict().
++ */ ++size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams); ++ ++/** ++ * struct ZSTD_CStream - the zstd streaming compression context ++ */ ++typedef struct ZSTD_CStream_s ZSTD_CStream; ++ ++/*===== ZSTD_CStream management functions =====*/ ++/** ++ * ZSTD_initCStream() - initialize a zstd streaming compression context ++ * @params: The zstd compression parameters. ++ * @pledgedSrcSize: If params.fParams.contentSizeFlag == 1 then the caller must ++ * pass the source size (zero means empty source). Otherwise, ++ * the caller may optionally pass the source size, or zero if ++ * unknown. ++ * @workspace: The workspace to emplace the context into. It must outlive ++ * the returned context. ++ * @workspaceSize: The size of workspace. ++ * Use ZSTD_CStreamWorkspaceBound(params.cParams) to determine ++ * how large the workspace must be. ++ * ++ * Return: The zstd streaming compression context. ++ */ ++ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params, ++ unsigned long long pledgedSrcSize, void *workspace, ++ size_t workspaceSize); ++ ++/** ++ * ZSTD_initCStream_usingCDict() - initialize a streaming compression context ++ * @cdict: The digested dictionary to use for compression. ++ * @pledgedSrcSize: Optionally the source size, or zero if unknown. ++ * @workspace: The workspace to emplace the context into. It must outlive ++ * the returned context. ++ * @workspaceSize: The size of workspace. Call ZSTD_CStreamWorkspaceBound() ++ * with the cParams used to initialize the cdict to determine ++ * how large the workspace must be. ++ * ++ * Return: The zstd streaming compression context. ++ */ ++ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict, ++ unsigned long long pledgedSrcSize, void *workspace, ++ size_t workspaceSize); ++ ++/*===== Streaming compression functions =====*/ ++/** ++ * ZSTD_resetCStream() - reset the context using parameters from creation ++ * @zcs: The zstd streaming compression context to reset. ++ * @pledgedSrcSize: Optionally the source size, or zero if unknown. ++ * ++ * Resets the context using the parameters from creation. Skips dictionary ++ * loading, since it can be reused. If `pledgedSrcSize` is non-zero the frame ++ * content size is always written into the frame header. ++ * ++ * Return: Zero or an error, which can be checked using ZSTD_isError(). ++ */ ++size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize); ++/** ++ * ZSTD_compressStream() - streaming compress some of input into output ++ * @zcs: The zstd streaming compression context. ++ * @output: Destination buffer. `output->pos` is updated to indicate how much ++ * compressed data was written. ++ * @input: Source buffer. `input->pos` is updated to indicate how much data was ++ * read. Note that it may not consume the entire input, in which case ++ * `input->pos < input->size`, and it's up to the caller to present ++ * remaining data again. ++ * ++ * The `input` and `output` buffers may be any size. Guaranteed to make some ++ * forward progress if `input` and `output` are not empty. ++ * ++ * Return: A hint for the number of bytes to use as the input for the next ++ * function call or an error, which can be checked using ++ * ZSTD_isError(). ++ */ ++size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output, ++ ZSTD_inBuffer *input); ++/** ++ * ZSTD_flushStream() - flush internal buffers into output ++ * @zcs: The zstd streaming compression context. ++ * @output: Destination buffer. 
`output->pos` is updated to indicate how much compressed data was written.
++ *
++ * ZSTD_flushStream() must be called until it returns 0, meaning all the data
++ * has been flushed. Since ZSTD_flushStream() causes a block to be ended,
++ * calling it too often will degrade the compression ratio.
++ *
++ * Return: The number of bytes still present within internal buffers or an
++ *         error, which can be checked using ZSTD_isError().
++ */
++size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output);
++/**
++ * ZSTD_endStream() - flush internal buffers into output and end the frame
++ * @zcs:    The zstd streaming compression context.
++ * @output: Destination buffer. `output->pos` is updated to indicate how much
++ *          compressed data was written.
++ *
++ * ZSTD_endStream() must be called until it returns 0, meaning all the data has
++ * been flushed and the frame epilogue has been written.
++ *
++ * Return: The number of bytes still present within internal buffers or an
++ *         error, which can be checked using ZSTD_isError().
++ */
++size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output);
++
++/**
++ * ZSTD_CStreamInSize() - recommended size for the input buffer
++ *
++ * Return: The recommended size for the input buffer.
++ */
++size_t ZSTD_CStreamInSize(void);
++/**
++ * ZSTD_CStreamOutSize() - recommended size for the output buffer
++ *
++ * When the output buffer is at least this large, it is guaranteed to be large
++ * enough to flush at least one complete compressed block.
++ *
++ * Return: The recommended size for the output buffer.
++ */
++size_t ZSTD_CStreamOutSize(void);
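Tying the streaming compression pieces together, a sketch of a whole-buffer compression loop might look as follows. It is not part of the patch: the function name and level 3 are assumptions, and `dstCapacity` is assumed to be at least ZSTD_compressBound(srcSize) so every call can make forward progress.

```
#include <linux/vmalloc.h>
#include <linux/zstd.h>

/* Illustrative only: compress a whole buffer as one frame.
 * Returns the compressed size or a zstd error code. */
static size_t example_stream_compress(void *dst, size_t dstCapacity,
		const void *src, size_t srcSize)
{
	ZSTD_parameters const params = ZSTD_getParams(3, srcSize, 0);
	size_t const wkspSize = ZSTD_CStreamWorkspaceBound(params.cParams);
	void *wksp = vmalloc(wkspSize);
	ZSTD_CStream *zcs = ZSTD_initCStream(params, srcSize, wksp, wkspSize);
	ZSTD_inBuffer in = { src, srcSize, 0 };
	ZSTD_outBuffer out = { dst, dstCapacity, 0 };
	size_t ret = (size_t)-ZSTD_error_memory_allocation;

	if (!zcs)
		goto done;
	while (in.pos < in.size) {
		ret = ZSTD_compressStream(zcs, &out, &in);
		if (ZSTD_isError(ret))
			goto done;
	}
	do {	/* 0 means the frame epilogue has been fully written */
		ret = ZSTD_endStream(zcs, &out);
		if (ZSTD_isError(ret))
			goto done;
	} while (ret != 0);
	ret = out.pos;
done:
	vfree(wksp);
	return ret;
}
```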
++
++
++
++/*-*****************************************************************************
++ * Streaming decompression - HowTo
++ *
++ * A ZSTD_DStream object is required to track streaming operations.
++ * Use ZSTD_initDStream() to initialize a ZSTD_DStream object.
++ * ZSTD_DStream objects can be re-used multiple times.
++ *
++ * Use ZSTD_decompressStream() repetitively to consume your input.
++ * The function will update both `pos` fields.
++ * If `input->pos < input->size`, some input has not been consumed.
++ * It's up to the caller to present the remaining data again.
++ * If `output->pos < output->size`, the decoder has flushed everything it
++ * could.
++ * Returns 0 iff a frame is completely decoded and fully flushed.
++ * Otherwise it returns a suggested next input size that will never request
++ * bytes beyond the current frame.
++ ******************************************************************************/
++
++/**
++ * ZSTD_DStreamWorkspaceBound() - memory needed to initialize a ZSTD_DStream
++ * @maxWindowSize: The maximum window size allowed for compressed frames.
++ *
++ * Return: A lower bound on the size of the workspace that is passed to
++ *         ZSTD_initDStream() and ZSTD_initDStream_usingDDict().
++ */
++size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize);
++
++/**
++ * struct ZSTD_DStream - the zstd streaming decompression context
++ */
++typedef struct ZSTD_DStream_s ZSTD_DStream;
++/*===== ZSTD_DStream management functions =====*/
++/**
++ * ZSTD_initDStream() - initialize a zstd streaming decompression context
++ * @maxWindowSize: The maximum window size allowed for compressed frames.
++ * @workspace:     The workspace to emplace the context into. It must outlive
++ *                 the returned context.
++ * @workspaceSize: The size of workspace.
++ *                 Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine
++ *                 how large the workspace must be.
++ *
++ * Return: The zstd streaming decompression context.
++ */
++ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace,
++	size_t workspaceSize);
++/**
++ * ZSTD_initDStream_usingDDict() - initialize streaming decompression context
++ * @maxWindowSize: The maximum window size allowed for compressed frames.
++ * @ddict:         The digested dictionary to use for decompression.
++ * @workspace:     The workspace to emplace the context into. It must outlive
++ *                 the returned context.
++ * @workspaceSize: The size of workspace.
++ *                 Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine
++ *                 how large the workspace must be.
++ *
++ * Return: The zstd streaming decompression context.
++ */
++ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize,
++	const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize);
++
++/*===== Streaming decompression functions =====*/
++/**
++ * ZSTD_resetDStream() - reset the context using parameters from creation
++ * @zds: The zstd streaming decompression context to reset.
++ *
++ * Resets the context using the parameters from creation. Skips dictionary
++ * loading, since it can be reused.
++ *
++ * Return: Zero or an error, which can be checked using ZSTD_isError().
++ */
++size_t ZSTD_resetDStream(ZSTD_DStream *zds);
++/**
++ * ZSTD_decompressStream() - streaming decompress some of input into output
++ * @zds:    The zstd streaming decompression context.
++ * @output: Destination buffer. `output->pos` is updated to indicate how much
++ *          decompressed data was written.
++ * @input:  Source buffer. `input->pos` is updated to indicate how much data
++ *          was read. Note that it may not consume the entire input, in which
++ *          case `input->pos < input->size`, and it's up to the caller to
++ *          present the remaining data again.
++ *
++ * The `input` and `output` buffers may be any size. Guaranteed to make some
++ * forward progress if `input` and `output` are not empty.
++ * ZSTD_decompressStream() will not consume the last byte of the frame until
++ * the entire frame is flushed.
++ *
++ * Return: Returns 0 iff a frame is completely decoded and fully flushed.
++ *         Otherwise returns a hint for the number of bytes to use as the input
++ *         for the next function call or an error, which can be checked using
++ *         ZSTD_isError(). The size hint will never request bytes beyond the
++ *         current frame.
++ */
++size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output,
++	ZSTD_inBuffer *input);
++
++/**
++ * ZSTD_DStreamInSize() - recommended size for the input buffer
++ *
++ * Return: The recommended size for the input buffer.
++ */
++size_t ZSTD_DStreamInSize(void);
++/**
++ * ZSTD_DStreamOutSize() - recommended size for the output buffer
++ *
++ * When the output buffer is at least this large, it is guaranteed to be large
++ * enough to flush at least one complete decompressed block.
++ *
++ * Return: The recommended size for the output buffer.
++ */
++size_t ZSTD_DStreamOutSize(void);
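And the decompression counterpart, under the same caveats: an illustrative sketch only, assuming `dst` is large enough for the whole frame and that the frame was produced with a window of at most `maxWindowSize`.

```
#include <linux/vmalloc.h>
#include <linux/zstd.h>

/* Illustrative only: decompress a single frame via the streaming API.
 * Returns the decompressed size or a zstd error code. */
static size_t example_stream_decompress(void *dst, size_t dstCapacity,
		const void *src, size_t srcSize, size_t maxWindowSize)
{
	size_t const wkspSize = ZSTD_DStreamWorkspaceBound(maxWindowSize);
	void *wksp = vmalloc(wkspSize);
	ZSTD_DStream *zds = ZSTD_initDStream(maxWindowSize, wksp, wkspSize);
	ZSTD_inBuffer in = { src, srcSize, 0 };
	ZSTD_outBuffer out = { dst, dstCapacity, 0 };
	size_t ret = (size_t)-ZSTD_error_memory_allocation;

	if (!zds)
		goto done;
	do {	/* 0 means the frame is complete and fully flushed */
		ret = ZSTD_decompressStream(zds, &out, &in);
		if (ZSTD_isError(ret))
			goto done;
	} while (ret != 0);
	ret = out.pos;
done:
	vfree(wksp);
	return ret;
}
```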
++
++
++/* --- Constants ---*/
++#define ZSTD_MAGICNUMBER 0xFD2FB528 /* >= v0.8.0 */
++#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50U
++
++#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
++#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
++
++#define ZSTD_WINDOWLOG_MAX_32 27
++#define ZSTD_WINDOWLOG_MAX_64 27
++#define ZSTD_WINDOWLOG_MAX \
++	((unsigned int)(sizeof(size_t) == 4 \
++		? ZSTD_WINDOWLOG_MAX_32 \
++		: ZSTD_WINDOWLOG_MAX_64))
++#define ZSTD_WINDOWLOG_MIN 10
++#define ZSTD_HASHLOG_MAX ZSTD_WINDOWLOG_MAX
++#define ZSTD_HASHLOG_MIN 6
++#define ZSTD_CHAINLOG_MAX (ZSTD_WINDOWLOG_MAX+1)
++#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN
++#define ZSTD_HASHLOG3_MAX 17
++#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1)
++#define ZSTD_SEARCHLOG_MIN 1
++/* only for ZSTD_fast, other strategies are limited to 6 */
++#define ZSTD_SEARCHLENGTH_MAX 7
++/* only for ZSTD_btopt, other strategies are limited to 4 */
++#define ZSTD_SEARCHLENGTH_MIN 3
++#define ZSTD_TARGETLENGTH_MIN 4
++#define ZSTD_TARGETLENGTH_MAX 999
++
++/* for static allocation */
++#define ZSTD_FRAMEHEADERSIZE_MAX 18
++#define ZSTD_FRAMEHEADERSIZE_MIN 6
++static const size_t ZSTD_frameHeaderSize_prefix = 5;
++static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN;
++static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX;
++/* magic number + skippable frame length */
++static const size_t ZSTD_skippableHeaderSize = 8;
++
++
++/*-*************************************
++ * Compressed size functions
++ **************************************/
++
++/**
++ * ZSTD_findFrameCompressedSize() - returns the size of a compressed frame
++ * @src:     Source buffer. It should point to the start of a zstd encoded
++ *           frame or a skippable frame.
++ * @srcSize: The size of the source buffer. It must be at least as large as the
++ *           size of the frame.
++ *
++ * Return: The compressed size of the frame pointed to by `src` or an error,
++ *         which can be checked with ZSTD_isError(). Suitable to pass to
++ *         ZSTD_decompressDCtx() or similar functions.
++ */
++size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize);
++
++/*-*************************************
++ * Decompressed size functions
++ **************************************/
++/**
++ * ZSTD_getFrameContentSize() - returns the content size in a zstd frame header
++ * @src:     It should point to the start of a zstd encoded frame.
++ * @srcSize: The size of the source buffer. It must be at least as large as the
++ *           frame header. `ZSTD_frameHeaderSize_max` is always large enough.
++ *
++ * Return: The frame content size stored in the frame header if known.
++ *         `ZSTD_CONTENTSIZE_UNKNOWN` if the content size isn't stored in the
++ *         frame header. `ZSTD_CONTENTSIZE_ERROR` on invalid input.
++ */
++unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
++
++/**
++ * ZSTD_findDecompressedSize() - returns decompressed size of a series of frames
++ * @src:     It should point to the start of a series of zstd encoded and/or
++ *           skippable frames.
++ * @srcSize: The exact size of the series of frames.
++ *
++ * If any zstd encoded frame in the series doesn't have the frame content size
++ * set, `ZSTD_CONTENTSIZE_UNKNOWN` is returned. But the frame content size is
++ * always set when using ZSTD_compressCCtx(). The decompressed size can be very
++ * large. If the source is untrusted, the decompressed size could be wrong or
++ * intentionally modified. Always ensure the result fits within the
++ * application's authorized limits. ZSTD_findDecompressedSize() handles multiple
++ * frames, and so it must traverse the input to read each frame header. This is
++ * efficient as most of the data is skipped; however, it does mean that all
++ * frame data must be present and valid.
++ *
++ * Return: Decompressed size of all the data contained in the frames if known.
++ *         `ZSTD_CONTENTSIZE_UNKNOWN` if the decompressed size is unknown.
++ *         `ZSTD_CONTENTSIZE_ERROR` if an error occurred.
++ */
++unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize);
++
++/*-*************************************
++ * Advanced compression functions
++ **************************************/
++/**
++ * ZSTD_checkCParams() - ensure parameter values remain within authorized range
++ * @cParams: The zstd compression parameters.
++ *
++ * Return: Zero or an error, which can be checked using ZSTD_isError().
++ */
++size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams);
++
++/**
++ * ZSTD_adjustCParams() - optimize parameters for a given srcSize and dictSize
++ * @cParams:  The compression parameters to optimize.
++ * @srcSize:  Optionally the estimated source size, or zero if unknown.
++ * @dictSize: Optionally the estimated dictionary size, or zero if unknown.
++ *
++ * Return: The optimized parameters.
++ */
++ZSTD_compressionParameters ZSTD_adjustCParams(
++	ZSTD_compressionParameters cParams, unsigned long long srcSize,
++	size_t dictSize);
++
++/*--- Advanced decompression functions ---*/
++
++/**
++ * ZSTD_isFrame() - returns true iff the buffer starts with a valid frame
++ * @buffer: The source buffer to check.
++ * @size:   The size of the source buffer, must be at least 4 bytes.
++ *
++ * Return: True iff the buffer starts with a zstd or skippable frame identifier.
++ */
++unsigned int ZSTD_isFrame(const void *buffer, size_t size);
++
++/**
++ * ZSTD_getDictID_fromDict() - returns the dictionary id stored in a dictionary
++ * @dict:     The dictionary buffer.
++ * @dictSize: The size of the dictionary buffer.
++ *
++ * Return: The dictionary id stored within the dictionary or 0 if the
++ *         dictionary is not a zstd dictionary. If it returns 0 the
++ *         dictionary can still be loaded as a content-only dictionary.
++ */
++unsigned int ZSTD_getDictID_fromDict(const void *dict, size_t dictSize);
++
++/**
++ * ZSTD_getDictID_fromDDict() - returns the dictionary id stored in a ZSTD_DDict
++ * @ddict: The ddict to find the id of.
++ *
++ * Return: The dictionary id stored within `ddict` or 0 if the dictionary is not
++ *         a zstd dictionary. If it returns 0 `ddict` will be loaded as a
++ *         content-only dictionary.
++ */
++unsigned int ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict);
++
++/**
++ * ZSTD_getDictID_fromFrame() - returns the dictionary id stored in a zstd frame
++ * @src:     Source buffer. It must be a zstd encoded frame.
++ * @srcSize: The size of the source buffer. It must be at least as large as the
++ *           frame header. `ZSTD_frameHeaderSize_max` is always large enough.
++ *
++ * Return: The dictionary id required to decompress the frame stored within
++ *         `src` or 0 if the dictionary id could not be decoded. It can return
++ *         0 if the frame does not require a dictionary, the dictionary id
++ *         wasn't stored in the frame, `src` is not a zstd frame, or `srcSize`
++ *         is too small.
++ */
++unsigned int ZSTD_getDictID_fromFrame(const void *src, size_t srcSize);
++
++/**
++ * struct ZSTD_frameParams - zstd frame parameters stored in the frame header
++ * @frameContentSize: The frame content size, or 0 if not present.
++ * @windowSize:       The window size, or 0 if the frame is a skippable frame.
++ * @dictID:           The dictionary id, or 0 if not present.
++ * @checksumFlag:     Whether a checksum was used.
++ */
++typedef struct {
++	unsigned long long frameContentSize;
++	unsigned int windowSize;
++	unsigned int dictID;
++	unsigned int checksumFlag;
++} ZSTD_frameParams;
++
++/**
++ * ZSTD_getFrameParams() - extracts parameters from a zstd or skippable frame
++ * @fparamsPtr: On success the frame parameters are written here.
++ * @src:        The source buffer. It must point to a zstd or skippable frame.
++ * @srcSize:    The size of the source buffer. `ZSTD_frameHeaderSize_max` is
++ *              always large enough to succeed.
++ *
++ * Return: 0 on success. If more data is required it returns how many bytes
++ *         must be provided to make forward progress. Otherwise it returns
++ *         an error, which can be checked using ZSTD_isError().
++ */
++size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src,
++	size_t srcSize);
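A hypothetical caller might use ZSTD_getFrameParams() to vet a frame before sizing a decompression workspace. This sketch is not part of the patch; the function name is illustrative, and the 8 MB cap (SZ_8M) is just the interoperability minimum suggested later in this header.

```
#include <linux/errno.h>
#include <linux/sizes.h>
#include <linux/zstd.h>

/* Illustrative only: validate a frame header and compute the workspace
 * size needed to stream-decompress it, capping the window at 8 MB. */
static int example_check_frame(const void *src, size_t srcSize,
		size_t *wkspSize)
{
	ZSTD_frameParams fparams;
	size_t const ret = ZSTD_getFrameParams(&fparams, src, srcSize);

	if (ZSTD_isError(ret))
		return -EINVAL;	/* corrupted or not a zstd frame */
	if (ret != 0)
		return -EAGAIN;	/* need at least ret bytes of header */
	if (fparams.windowSize > SZ_8M)
		return -E2BIG;	/* enforce a local window size limit */
	*wkspSize = ZSTD_DStreamWorkspaceBound(fparams.windowSize);
	return 0;
}
```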
++
++/*-*****************************************************************************
++ * Buffer-less and synchronous inner streaming functions
++ *
++ * This is an advanced API, giving full control over buffer management, for
++ * users which need direct control over memory.
++ * But it's also a complex one, with many restrictions (documented below).
++ * Prefer using the normal streaming API for an easier experience.
++ ******************************************************************************/
++
++/*-*****************************************************************************
++ * Buffer-less streaming compression (synchronous mode)
++ *
++ * A ZSTD_CCtx object is required to track streaming operations.
++ * Use ZSTD_initCCtx() to initialize a context.
++ * A ZSTD_CCtx object can be re-used multiple times within successive
++ * compression operations.
++ *
++ * Start by initializing a context.
++ * Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary
++ * compression, or ZSTD_compressBegin_advanced() for finer parameter control.
++ * It's also possible to duplicate a reference context which has already been
++ * initialized, using ZSTD_copyCCtx().
++ *
++ * Then, consume your input using ZSTD_compressContinue().
++ * There are some important considerations to keep in mind when using this
++ * advanced function:
++ * - ZSTD_compressContinue() has no internal buffer. It uses externally
++ *   provided buffers only.
++ * - The interface is synchronous: input is consumed entirely and produces one
++ *   or more compressed blocks.
++ * - The caller must ensure there is enough space in `dst` to store the
++ *   compressed data under the worst case scenario. Worst case evaluation is
++ *   provided by ZSTD_compressBound().
++ *   ZSTD_compressContinue() doesn't guarantee recovery after a failed
++ *   compression.
++ * - ZSTD_compressContinue() presumes prior input ***is still accessible and
++ *   unmodified*** (up to the maximum distance size, see WindowLog).
++ *   It remembers all previous contiguous blocks, plus one separated memory
++ *   segment (which can itself consist of multiple contiguous blocks).
++ * - ZSTD_compressContinue() detects that prior input has been overwritten when
++ *   the `src` buffer overlaps it. In which case, it will "discard" the
++ *   relevant memory section from its history.
++ *
++ * Finish a frame with ZSTD_compressEnd(), which will write the last block(s)
++ * and optional checksum. It's possible to use srcSize==0, in which case it
++ * will write a final empty block to end the frame. Without the last block
++ * mark, frames will be considered unfinished (corrupted) by decoders.
++ *
++ * A `ZSTD_CCtx` object can be re-used (via ZSTD_compressBegin()) to compress
++ * a new frame.
++ ******************************************************************************/
++
++/*===== Buffer-less streaming compression functions =====*/
++size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel);
++size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict,
++	size_t dictSize, int compressionLevel);
++size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict,
++	size_t dictSize, ZSTD_parameters params,
++	unsigned long long pledgedSrcSize);
++size_t ZSTD_copyCCtx(ZSTD_CCtx *cctx, const ZSTD_CCtx *preparedCCtx,
++	unsigned long long pledgedSrcSize);
++size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict,
++	unsigned long long pledgedSrcSize);
++size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
++	const void *src, size_t srcSize);
++size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
++	const void *src, size_t srcSize);
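For orientation, a minimal sketch of the bufferless compression flow above, splitting one contiguous buffer into two chunks. It is not part of the patch; it assumes an already-initialized cctx, level 3, and dstCapacity >= ZSTD_compressBound(srcSize).

```
#include <linux/zstd.h>

/* Illustrative only: bufferless compression of one contiguous buffer in
 * two chunks. Returns the compressed size or a zstd error code. */
static size_t example_bufferless_compress(ZSTD_CCtx *cctx,
		void *dst, size_t dstCapacity,
		const void *src, size_t srcSize)
{
	size_t const half = srcSize / 2;
	char *op = dst;
	size_t ret = ZSTD_compressBegin(cctx, 3);

	if (ZSTD_isError(ret))
		return ret;
	/* Prior input must stay readable and unmodified: both chunks
	 * live in the same src buffer, so contiguity is preserved. */
	ret = ZSTD_compressContinue(cctx, op, dstCapacity, src, half);
	if (ZSTD_isError(ret))
		return ret;
	op += ret;
	ret = ZSTD_compressEnd(cctx, op,
			dstCapacity - (size_t)(op - (char *)dst),
			(const char *)src + half, srcSize - half);
	if (ZSTD_isError(ret))
		return ret;
	op += ret;
	return (size_t)(op - (char *)dst);
}
```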
++
++
++
++/*-*****************************************************************************
++ * Buffer-less streaming decompression (synchronous mode)
++ *
++ * A ZSTD_DCtx object is required to track streaming operations.
++ * Use ZSTD_initDCtx() to initialize a context.
++ * A ZSTD_DCtx object can be re-used multiple times.
++ *
++ * The first typical operation is to retrieve frame parameters, using
++ * ZSTD_getFrameParams(). It fills a ZSTD_frameParams structure which provides
++ * important information to correctly decode the frame, such as the minimum
++ * rolling buffer size to allocate to decompress data (`windowSize`), and the
++ * dictionary ID used.
++ * Note: content size is optional, it may not be present. 0 means unknown.
++ * Note that these values could be wrong, either because of data corruption,
++ * or because an attacker is deliberately spoofing false information. As a
++ * consequence, check that values remain within valid application range,
++ * especially `windowSize`, before allocation. Each application can set its own
++ * limit, depending on local restrictions. For extended interoperability, it is
++ * recommended to support at least 8 MB.
++ * Frame parameters are extracted from the beginning of the compressed frame.
++ * The data fragment must be large enough to ensure successful decoding,
++ * typically `ZSTD_frameHeaderSize_max` bytes.
++ * Result: 0: successful decoding, the `ZSTD_frameParams` structure is filled.
++ *        >0: `srcSize` is too small, provide at least this many bytes.
++ *         errorCode, which can be tested using ZSTD_isError().
++ *
++ * Start decompression, with ZSTD_decompressBegin() or
++ * ZSTD_decompressBegin_usingDict(). Alternatively, you can copy a prepared
++ * context, using ZSTD_copyDCtx().
++ *
++ * Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue()
++ * alternately.
++ * ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize'
++ * to ZSTD_decompressContinue().
++ * ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will
++ * fail.
++ *
++ * The result of ZSTD_decompressContinue() is the number of bytes regenerated
++ * within 'dst' (necessarily <= dstCapacity). It can be zero, which is not an
++ * error; it just means ZSTD_decompressContinue() has decoded some metadata
++ * item. It can also be an error code, which can be tested with ZSTD_isError().
++ *
++ * ZSTD_decompressContinue() needs previous data blocks during decompression,
++ * up to `windowSize`. They should preferably be located contiguously, prior to
++ * the current block. Alternatively, a round buffer of sufficient size is also
++ * possible. Sufficient size is determined by frame parameters.
++ * ZSTD_decompressContinue() is very sensitive to contiguity: if two blocks
++ * don't follow each other, make sure that either the compressor breaks
++ * contiguity at the same place, or that the previous contiguous segment is
++ * large enough to properly handle the maximum back-reference.
++ *
++ * A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
++ * The context can then be reset to start a new decompression.
++ *
++ * Note: it's possible to know whether the next input to present is a header or
++ * a block, using ZSTD_nextInputType(). This information is not required to
++ * properly decode a frame.
++ *
++ * == Special case: skippable frames ==
++ *
++ * Skippable frames allow integration of user-defined data into a flow of
++ * concatenated frames. Skippable frames will be ignored (skipped) by a
++ * decompressor. The format of skippable frames is as follows:
++ * a) Skippable frame ID - 4 Bytes, Little endian format, any value from
++ *    0x184D2A50 to 0x184D2A5F
++ * b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
++ * c) Frame Content - any content (User Data) of length equal to Frame Size
++ * For skippable frames ZSTD_decompressContinue() always returns 0.
++ * For skippable frames ZSTD_getFrameParams() returns
++ * fparamsPtr->windowSize==0, which means that the frame is skippable.
++ * Note: If fparamsPtr->frameContentSize==0, it is ambiguous: the frame might
++ *       actually be a zstd encoded frame with no content. For purposes of
++ *       decompression, it is valid in both cases to skip the frame using
++ *       ZSTD_findFrameCompressedSize() to find its size in bytes.
++ * It also returns the frame size in fparamsPtr->frameContentSize.
++ ******************************************************************************/
++
++/*===== Buffer-less streaming decompression functions =====*/
++size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx);
++size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict,
++	size_t dictSize);
++void ZSTD_copyDCtx(ZSTD_DCtx *dctx, const ZSTD_DCtx *preparedDCtx);
++size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx);
++size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
++	const void *src, size_t srcSize);
++typedef enum {
++	ZSTDnit_frameHeader,
++	ZSTDnit_blockHeader,
++	ZSTDnit_block,
++	ZSTDnit_lastBlock,
++	ZSTDnit_checksum,
++	ZSTDnit_skippableFrame
++} ZSTD_nextInputType_e;
++ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx);
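The corresponding decode loop, again as an illustrative sketch only (whole frame in `src`, `dst` assumed large enough, dctx already initialized):

```
#include <linux/zstd.h>

/* Illustrative only: the bufferless decode loop described above.
 * Returns the decompressed size or a zstd error code. */
static size_t example_bufferless_decompress(ZSTD_DCtx *dctx,
		void *dst, size_t dstCapacity,
		const void *src, size_t srcSize)
{
	const char *ip = src;
	char *op = dst;
	size_t need, ret = ZSTD_decompressBegin(dctx);

	if (ZSTD_isError(ret))
		return ret;
	while ((need = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
		if (need > srcSize - (size_t)(ip - (const char *)src))
			return (size_t)-ZSTD_error_srcSize_wrong;
		/* Feed exactly `need` bytes; a zero result just means a
		 * metadata item (e.g. a block header) was decoded. */
		ret = ZSTD_decompressContinue(dctx, op,
				dstCapacity - (size_t)(op - (char *)dst),
				ip, need);
		if (ZSTD_isError(ret))
			return ret;
		ip += need;
		op += ret;
	}
	return (size_t)(op - (char *)dst);
}
```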
++
++/*-*****************************************************************************
++ * Block functions
++ *
++ * Block functions produce and decode raw zstd blocks, without frame metadata.
++ * Frame metadata cost is typically ~18 bytes, which can be non-negligible for
++ * very small blocks (< 100 bytes). The user will have to save the information
++ * required to regenerate the data, such as the compressed and content sizes.
++ *
++ * A few rules to respect:
++ * - Compressing and decompressing require a context structure
++ *   + Use ZSTD_initCCtx() and ZSTD_initDCtx()
++ * - It is necessary to init the context before starting
++ *   + compression : ZSTD_compressBegin()
++ *   + decompression : ZSTD_decompressBegin()
++ *   + variants _usingDict() are also allowed
++ *   + copyCCtx() and copyDCtx() work too
++ * - Block size is limited, it must be <= ZSTD_getBlockSizeMax()
++ *   + If you need to compress more, cut data into multiple blocks
++ *   + Consider using the regular ZSTD_compressCCtx() instead, as frame
++ *     metadata costs become negligible when source size is large.
++ * - When a block is considered not compressible enough, the result of
++ *   ZSTD_compressBlock() will be zero, in which case nothing is written
++ *   into `dst`.
++ *   + The user must test for such an outcome and deal directly with the
++ *     uncompressed data
++ *   + ZSTD_decompressBlock() doesn't accept uncompressed data as input!!!
++ *   + In the case of multiple successive blocks, the decoder must be informed
++ *     of the uncompressed block's existence to maintain proper history. Use
++ *     ZSTD_insertBlock() in such a case.
++ ******************************************************************************/
++
++/* Define for static allocation */
++#define ZSTD_BLOCKSIZE_ABSOLUTEMAX (128 * 1024)
++/*===== Raw zstd block functions =====*/
++size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx);
++size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
++	const void *src, size_t srcSize);
++size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
++	const void *src, size_t srcSize);
++size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart,
++	size_t blockSize);
++
++#endif  /* ZSTD_H */
+diff --git a/lib/Kconfig b/lib/Kconfig
+index b6009d7..f00ddab 100644
+--- a/lib/Kconfig
++++ b/lib/Kconfig
+@@ -241,6 +241,14 @@ config LZ4HC_COMPRESS
+ config LZ4_DECOMPRESS
+ 	tristate
+ 
++config ZSTD_COMPRESS
++	select XXHASH
++	tristate
++
++config ZSTD_DECOMPRESS
++	select XXHASH
++	tristate
++
+ source "lib/xz/Kconfig"
+ 
+ #
+diff --git a/lib/Makefile b/lib/Makefile
+index e16f94a..0cfd529 100644
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -115,6 +115,8 @@ obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
+ obj-$(CONFIG_LZ4_COMPRESS) += lz4/
+ obj-$(CONFIG_LZ4HC_COMPRESS) += lz4/
+ obj-$(CONFIG_LZ4_DECOMPRESS) += lz4/
++obj-$(CONFIG_ZSTD_COMPRESS) += zstd/
++obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd/
+ obj-$(CONFIG_XZ_DEC) += xz/
+ obj-$(CONFIG_RAID6_PQ) += raid6/
+ 
+diff --git a/lib/zstd/Makefile b/lib/zstd/Makefile
+new file mode 100644
+index 0000000..aa5eb4d
+--- /dev/null
++++ b/lib/zstd/Makefile
+@@ -0,0 +1,9 @@
++obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o
++obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o
++
++ccflags-y += -O3
++
++zstd_compress-y := entropy_common.o fse_decompress.o zstd_common.o \
++	fse_compress.o huf_compress.o compress.o
++zstd_decompress-y := entropy_common.o fse_decompress.o zstd_common.o \
++	huf_decompress.o decompress.o
+diff --git a/lib/zstd/bitstream.h b/lib/zstd/bitstream.h
+new file mode 100644
+index 0000000..9d21540
+--- /dev/null
++++ b/lib/zstd/bitstream.h
+@@ -0,0 +1,391 @@
++/* ******************************************************************
++   bitstream
++   Part of FSE library
++   header file (to include)
++   Copyright (C) 2013-2016, Yann Collet.
++ ++ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions are ++ met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above ++ copyright notice, this list of conditions and the following disclaimer ++ in the documentation and/or other materials provided with the ++ distribution. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++ You can contact the author at : ++ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy ++****************************************************************** */ ++#ifndef BITSTREAM_H_MODULE ++#define BITSTREAM_H_MODULE ++ ++/* ++* This API consists of small unitary functions, which must be inlined for best performance. ++* Since link-time-optimization is not available for all compilers, ++* these functions are defined into a .h to be included. ++*/ ++ ++/*-**************************************** ++* Dependencies ++******************************************/ ++#include "mem.h" /* unaligned access routines */ ++#include "error_private.h" /* error codes and messages */ ++ ++ ++/*========================================= ++* Target specific ++=========================================*/ ++#define STREAM_ACCUMULATOR_MIN_32 25 ++#define STREAM_ACCUMULATOR_MIN_64 57 ++#define STREAM_ACCUMULATOR_MIN ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64)) ++ ++/*-****************************************** ++* bitStream encoding API (write forward) ++********************************************/ ++/* bitStream can mix input from multiple sources. ++* A critical property of these streams is that they encode and decode in **reverse** direction. ++* So the first bit sequence you add will be the last to be read, like a LIFO stack. ++*/ ++typedef struct ++{ ++ size_t bitContainer; ++ int bitPos; ++ char* startPtr; ++ char* ptr; ++ char* endPtr; ++} BIT_CStream_t; ++ ++MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity); ++MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits); ++MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC); ++MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC); ++ ++/* Start with initCStream, providing the size of buffer to write into. ++* bitStream will never write outside of this buffer. ++* `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code. ++* ++* bits are first added to a local register. 
++* Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems. ++* Writing data into memory is an explicit operation, performed by the flushBits function. ++* Hence keep track how many bits are potentially stored into local register to avoid register overflow. ++* After a flushBits, a maximum of 7 bits might still be stored into local register. ++* ++* Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers. ++* ++* Last operation is to close the bitStream. ++* The function returns the final size of CStream in bytes. ++* If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable) ++*/ ++ ++ ++/*-******************************************** ++* bitStream decoding API (read backward) ++**********************************************/ ++typedef struct ++{ ++ size_t bitContainer; ++ unsigned bitsConsumed; ++ const char* ptr; ++ const char* start; ++} BIT_DStream_t; ++ ++typedef enum { BIT_DStream_unfinished = 0, ++ BIT_DStream_endOfBuffer = 1, ++ BIT_DStream_completed = 2, ++ BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */ ++ /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */ ++ ++MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize); ++MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits); ++MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD); ++MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD); ++ ++ ++/* Start by invoking BIT_initDStream(). ++* A chunk of the bitStream is then stored into a local register. ++* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t). ++* You can then retrieve bitFields stored into the local register, **in reverse order**. ++* Local register is explicitly reloaded from memory by the BIT_reloadDStream() method. ++* A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished. ++* Otherwise, it can be less than that, so proceed accordingly. ++* Checking if DStream has reached its end can be performed with BIT_endOfDStream(). 
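++*
++* Putting it together, a minimal decode-loop sketch (nbBits stands for whatever
++* field width the caller expects at each step) :
++*
++*     BIT_DStream_t bitD;
++*     if (ERR_isError(BIT_initDStream(&bitD, srcBuffer, srcSize))) return error;
++*     do {
++*         size_t const field = BIT_readBits(&bitD, nbBits);
++*         (use field ...)
++*     } while (BIT_reloadDStream(&bitD) == BIT_DStream_unfinished);
++*     if (!BIT_endOfDStream(&bitD)) return error;    (some bits were left unread)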
++*/ ++ ++ ++/*-**************************************** ++* unsafe API ++******************************************/ ++MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits); ++/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */ ++ ++MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC); ++/* unsafe version; does not check buffer overflow */ ++ ++MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); ++/* faster, but works only if nbBits >= 1 */ ++ ++ ++ ++/*-************************************************************** ++* Internal functions ++****************************************************************/ ++MEM_STATIC unsigned BIT_highbit32 (register U32 val) ++{ ++# if defined(_MSC_VER) /* Visual */ ++ unsigned long r=0; ++ _BitScanReverse ( &r, val ); ++ return (unsigned) r; ++# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ ++ return 31 - __builtin_clz (val); ++# else /* Software version */ ++ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; ++ U32 v = val; ++ v |= v >> 1; ++ v |= v >> 2; ++ v |= v >> 4; ++ v |= v >> 8; ++ v |= v >> 16; ++ return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; ++# endif ++} ++ ++/*===== Local Constants =====*/ ++static const unsigned BIT_mask[] = { 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF, 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF }; /* up to 26 bits */ ++ ++ ++/*-************************************************************** ++* bitStream encoding ++****************************************************************/ ++/*! BIT_initCStream() : ++ * `dstCapacity` must be > sizeof(void*) ++ * @return : 0 if success, ++ otherwise an error code (can be tested using ERR_isError() ) */ ++MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* startPtr, size_t dstCapacity) ++{ ++ bitC->bitContainer = 0; ++ bitC->bitPos = 0; ++ bitC->startPtr = (char*)startPtr; ++ bitC->ptr = bitC->startPtr; ++ bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->ptr); ++ if (dstCapacity <= sizeof(bitC->ptr)) return ERROR(dstSize_tooSmall); ++ return 0; ++} ++ ++/*! BIT_addBits() : ++ can add up to 26 bits into `bitC`. ++ Does not check for register overflow ! */ ++MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits) ++{ ++ bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos; ++ bitC->bitPos += nbBits; ++} ++ ++/*! BIT_addBitsFast() : ++ * works only if `value` is _clean_, meaning all high bits above nbBits are 0 */ ++MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits) ++{ ++ bitC->bitContainer |= value << bitC->bitPos; ++ bitC->bitPos += nbBits; ++} ++ ++/*! BIT_flushBitsFast() : ++ * unsafe version; does not check buffer overflow */ ++MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC) ++{ ++ size_t const nbBytes = bitC->bitPos >> 3; ++ MEM_writeLEST(bitC->ptr, bitC->bitContainer); ++ bitC->ptr += nbBytes; ++ bitC->bitPos &= 7; ++ bitC->bitContainer >>= nbBytes*8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */ ++} ++ ++/*! BIT_flushBits() : ++ * safe version; check for buffer overflow, and prevents it. ++ * note : does not signal buffer overflow. 
This will be revealed later on using BIT_closeCStream() */ ++MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC) ++{ ++ size_t const nbBytes = bitC->bitPos >> 3; ++ MEM_writeLEST(bitC->ptr, bitC->bitContainer); ++ bitC->ptr += nbBytes; ++ if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr; ++ bitC->bitPos &= 7; ++ bitC->bitContainer >>= nbBytes*8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */ ++} ++ ++/*! BIT_closeCStream() : ++ * @return : size of CStream, in bytes, ++ or 0 if it could not fit into dstBuffer */ ++MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC) ++{ ++ BIT_addBitsFast(bitC, 1, 1); /* endMark */ ++ BIT_flushBits(bitC); ++ ++ if (bitC->ptr >= bitC->endPtr) return 0; /* doesn't fit within authorized budget : cancel */ ++ ++ return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0); ++} ++ ++ ++/*-******************************************************** ++* bitStream decoding ++**********************************************************/ ++/*! BIT_initDStream() : ++* Initialize a BIT_DStream_t. ++* `bitD` : a pointer to an already allocated BIT_DStream_t structure. ++* `srcSize` must be the *exact* size of the bitStream, in bytes. ++* @return : size of stream (== srcSize) or an errorCode if a problem is detected ++*/ ++MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize) ++{ ++ if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); } ++ ++ if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */ ++ bitD->start = (const char*)srcBuffer; ++ bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer); ++ bitD->bitContainer = MEM_readLEST(bitD->ptr); ++ { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; ++ bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */ ++ if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ } ++ } else { ++ bitD->start = (const char*)srcBuffer; ++ bitD->ptr = bitD->start; ++ bitD->bitContainer = *(const BYTE*)(bitD->start); ++ switch(srcSize) ++ { ++ case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16); ++ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24); ++ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32); ++ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; ++ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; ++ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; ++ default:; ++ } ++ { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; ++ bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; ++ if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ } ++ bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8; ++ } ++ ++ return srcSize; ++} ++ ++MEM_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start) ++{ ++ return bitContainer >> start; ++} ++ ++MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits) ++{ ++ return (bitContainer >> start) & BIT_mask[nbBits]; ++} ++ ++MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) ++{ ++ return bitContainer & BIT_mask[nbBits]; ++} ++ ++/*! BIT_lookBits() : ++ * Provides next n bits from local register. ++ * local register is not modified. 
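++ * A look is a peek : pair it with BIT_skipBits() to consume,
++ * or use BIT_readBits(), which combines both.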
++ * On 32-bits, maxNbBits==24. ++ * On 64-bits, maxNbBits==56. ++ * @return : value extracted ++ */ ++ MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits) ++{ ++ U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1; ++ return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask); ++} ++ ++/*! BIT_lookBitsFast() : ++* unsafe version; only works only if nbBits >= 1 */ ++MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits) ++{ ++ U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1; ++ return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask); ++} ++ ++MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) ++{ ++ bitD->bitsConsumed += nbBits; ++} ++ ++/*! BIT_readBits() : ++ * Read (consume) next n bits from local register and update. ++ * Pay attention to not read more than nbBits contained into local register. ++ * @return : extracted value. ++ */ ++MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits) ++{ ++ size_t const value = BIT_lookBits(bitD, nbBits); ++ BIT_skipBits(bitD, nbBits); ++ return value; ++} ++ ++/*! BIT_readBitsFast() : ++* unsafe version; only works only if nbBits >= 1 */ ++MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits) ++{ ++ size_t const value = BIT_lookBitsFast(bitD, nbBits); ++ BIT_skipBits(bitD, nbBits); ++ return value; ++} ++ ++/*! BIT_reloadDStream() : ++* Refill `bitD` from buffer previously set in BIT_initDStream() . ++* This function is safe, it guarantees it will not read beyond src buffer. ++* @return : status of `BIT_DStream_t` internal register. ++ if status == BIT_DStream_unfinished, internal register is filled with >= (sizeof(bitD->bitContainer)*8 - 7) bits */ ++MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) ++{ ++ if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should not happen => corruption detected */ ++ return BIT_DStream_overflow; ++ ++ if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) { ++ bitD->ptr -= bitD->bitsConsumed >> 3; ++ bitD->bitsConsumed &= 7; ++ bitD->bitContainer = MEM_readLEST(bitD->ptr); ++ return BIT_DStream_unfinished; ++ } ++ if (bitD->ptr == bitD->start) { ++ if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer; ++ return BIT_DStream_completed; ++ } ++ { U32 nbBytes = bitD->bitsConsumed >> 3; ++ BIT_DStream_status result = BIT_DStream_unfinished; ++ if (bitD->ptr - nbBytes < bitD->start) { ++ nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ ++ result = BIT_DStream_endOfBuffer; ++ } ++ bitD->ptr -= nbBytes; ++ bitD->bitsConsumed -= nbBytes*8; ++ bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */ ++ return result; ++ } ++} ++ ++/*! BIT_endOfDStream() : ++* @return Tells if DStream has exactly reached its end (all bits consumed). ++*/ ++MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) ++{ ++ return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8)); ++} ++ ++#endif /* BITSTREAM_H_MODULE */ +diff --git a/lib/zstd/compress.c b/lib/zstd/compress.c +new file mode 100644 +index 0000000..79c3207 +--- /dev/null ++++ b/lib/zstd/compress.c +@@ -0,0 +1,3384 @@ ++/** ++ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. ++ * All rights reserved. ++ * ++ * This source code is licensed under the BSD-style license found in the ++ * LICENSE file in the root directory of this source tree. 
An additional grant ++ * of patent rights can be found in the PATENTS file in the same directory. ++ */ ++ ++ ++/*-************************************* ++* Dependencies ++***************************************/ ++#include ++#include ++#include /* memset */ ++#include "mem.h" ++#include "fse.h" ++#include "huf.h" ++#include "zstd_internal.h" /* includes zstd.h */ ++ ++#ifdef current ++# undef current ++#endif ++ ++/*-************************************* ++* Constants ++***************************************/ ++static const U32 g_searchStrength = 8; /* control skip over incompressible data */ ++#define HASH_READ_SIZE 8 ++typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e; ++ ++ ++/*-************************************* ++* Helper functions ++***************************************/ ++#define ZSTD_STATIC_ASSERT(c) { enum { ZSTD_static_assert = 1/(int)(!!(c)) }; } ++size_t ZSTD_compressBound(size_t srcSize) { return FSE_compressBound(srcSize) + 12; } ++ ++ ++/*-************************************* ++* Sequence storage ++***************************************/ ++static void ZSTD_resetSeqStore(seqStore_t* ssPtr) ++{ ++ ssPtr->lit = ssPtr->litStart; ++ ssPtr->sequences = ssPtr->sequencesStart; ++ ssPtr->longLengthID = 0; ++} ++ ++ ++/*-************************************* ++* Context memory management ++***************************************/ ++struct ZSTD_CCtx_s { ++ const BYTE* nextSrc; /* next block here to continue on current prefix */ ++ const BYTE* base; /* All regular indexes relative to this position */ ++ const BYTE* dictBase; /* extDict indexes relative to this position */ ++ U32 dictLimit; /* below that point, need extDict */ ++ U32 lowLimit; /* below that point, no more data */ ++ U32 nextToUpdate; /* index from which to continue dictionary update */ ++ U32 nextToUpdate3; /* index from which to continue dictionary update */ ++ U32 hashLog3; /* dispatch table : larger == faster, more memory */ ++ U32 loadedDictEnd; /* index of end of dictionary */ ++ U32 forceWindow; /* force back-references to respect limit of 1<3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog); ++ size_t const h3Size = ((size_t)1) << hashLog3; ++ size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); ++ size_t const optSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<customMem = customMem; ++ return cctx; ++} ++ ++ZSTD_CCtx* ZSTD_initCCtx(void* workspace, size_t workspaceSize) ++{ ++ ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); ++ ZSTD_CCtx* cctx = ZSTD_createCCtx_advanced(stackMem); ++ if (cctx) { ++ cctx->workSpace = ZSTD_stackAllocAll(cctx->customMem.opaque, &cctx->workSpaceSize); ++ } ++ return cctx; ++} ++ ++size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx) ++{ ++ if (cctx==NULL) return 0; /* support free on NULL */ ++ ZSTD_free(cctx->workSpace, cctx->customMem); ++ ZSTD_free(cctx, cctx->customMem); ++ return 0; /* reserved as a potential error code in the future */ ++} ++ ++const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) /* hidden interface */ ++{ ++ return &(ctx->seqStore); ++} ++ ++static ZSTD_parameters ZSTD_getParamsFromCCtx(const ZSTD_CCtx* cctx) ++{ ++ return cctx->params; ++} ++ ++ ++/** ZSTD_checkParams() : ++ ensure param values remain within authorized range. 
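++ Each field is checked against its ZSTD_*_MIN / ZSTD_*_MAX bound.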
++ @return : 0, or an error code if one value is beyond authorized range */ ++size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams) ++{ ++# define CLAMPCHECK(val,min,max) { if ((valmax)) return ERROR(compressionParameter_unsupported); } ++ CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX); ++ CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX); ++ CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX); ++ CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX); ++ CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX); ++ CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX); ++ if ((U32)(cParams.strategy) > (U32)ZSTD_btopt2) return ERROR(compressionParameter_unsupported); ++ return 0; ++} ++ ++ ++/** ZSTD_cycleLog() : ++ * condition for correct operation : hashLog > 1 */ ++static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat) ++{ ++ U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2); ++ return hashLog - btScale; ++} ++ ++/** ZSTD_adjustCParams() : ++ optimize `cPar` for a given input (`srcSize` and `dictSize`). ++ mostly downsizing to reduce memory consumption and initialization. ++ Both `srcSize` and `dictSize` are optional (use 0 if unknown), ++ but if both are 0, no optimization can be done. ++ Note : cPar is considered validated at this stage. Use ZSTD_checkParams() to ensure that. */ ++ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize) ++{ ++ if (srcSize+dictSize == 0) return cPar; /* no size information available : no adjustment */ ++ ++ /* resize params, to use less memory when necessary */ ++ { U32 const minSrcSize = (srcSize==0) ? 500 : 0; ++ U64 const rSize = srcSize + dictSize + minSrcSize; ++ if (rSize < ((U64)1< srcLog) cPar.windowLog = srcLog; ++ } } ++ if (cPar.hashLog > cPar.windowLog) cPar.hashLog = cPar.windowLog; ++ { U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy); ++ if (cycleLog > cPar.windowLog) cPar.chainLog -= (cycleLog - cPar.windowLog); ++ } ++ ++ if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */ ++ ++ return cPar; ++} ++ ++ ++static U32 ZSTD_equivalentParams(ZSTD_parameters param1, ZSTD_parameters param2) ++{ ++ return (param1.cParams.hashLog == param2.cParams.hashLog) ++ & (param1.cParams.chainLog == param2.cParams.chainLog) ++ & (param1.cParams.strategy == param2.cParams.strategy) ++ & ((param1.cParams.searchLength==3) == (param2.cParams.searchLength==3)); ++} ++ ++/*! ZSTD_continueCCtx() : ++ reuse CCtx without reset (note : requires no dictionary) */ ++static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_parameters params, U64 frameContentSize) ++{ ++ U32 const end = (U32)(cctx->nextSrc - cctx->base); ++ cctx->params = params; ++ cctx->frameContentSize = frameContentSize; ++ cctx->lowLimit = end; ++ cctx->dictLimit = end; ++ cctx->nextToUpdate = end+1; ++ cctx->stage = ZSTDcs_init; ++ cctx->dictID = 0; ++ cctx->loadedDictEnd = 0; ++ { int i; for (i=0; irep[i] = repStartValue[i]; } ++ cctx->seqStore.litLengthSum = 0; /* force reset of btopt stats */ ++ xxh64_reset(&cctx->xxhState, 0); ++ return 0; ++} ++ ++typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset, ZSTDcrp_fullReset } ZSTD_compResetPolicy_e; ++ ++/*! 
ZSTD_resetCCtx_advanced() : ++ note : `params` must be validated */ ++static size_t ZSTD_resetCCtx_advanced (ZSTD_CCtx* zc, ++ ZSTD_parameters params, U64 frameContentSize, ++ ZSTD_compResetPolicy_e const crp) ++{ ++ if (crp == ZSTDcrp_continue) ++ if (ZSTD_equivalentParams(params, zc->params)) { ++ zc->flagStaticTables = 0; ++ zc->flagStaticHufTable = HUF_repeat_none; ++ return ZSTD_continueCCtx(zc, params, frameContentSize); ++ } ++ ++ { size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog); ++ U32 const divider = (params.cParams.searchLength==3) ? 3 : 4; ++ size_t const maxNbSeq = blockSize / divider; ++ size_t const tokenSpace = blockSize + 11*maxNbSeq; ++ size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ? 0 : (1 << params.cParams.chainLog); ++ size_t const hSize = ((size_t)1) << params.cParams.hashLog; ++ U32 const hashLog3 = (params.cParams.searchLength>3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog); ++ size_t const h3Size = ((size_t)1) << hashLog3; ++ size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); ++ void* ptr; ++ ++ /* Check if workSpace is large enough, alloc a new one if needed */ ++ { size_t const optSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<workSpaceSize < neededSpace) { ++ ZSTD_free(zc->workSpace, zc->customMem); ++ zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem); ++ if (zc->workSpace == NULL) return ERROR(memory_allocation); ++ zc->workSpaceSize = neededSpace; ++ } } ++ ++ if (crp!=ZSTDcrp_noMemset) memset(zc->workSpace, 0, tableSpace); /* reset tables only */ ++ xxh64_reset(&zc->xxhState, 0); ++ zc->hashLog3 = hashLog3; ++ zc->hashTable = (U32*)(zc->workSpace); ++ zc->chainTable = zc->hashTable + hSize; ++ zc->hashTable3 = zc->chainTable + chainSize; ++ ptr = zc->hashTable3 + h3Size; ++ zc->hufTable = (HUF_CElt*)ptr; ++ zc->flagStaticTables = 0; ++ zc->flagStaticHufTable = HUF_repeat_none; ++ ptr = ((U32*)ptr) + 256; /* note : HUF_CElt* is incomplete type, size is simulated using U32 */ ++ ++ zc->nextToUpdate = 1; ++ zc->nextSrc = NULL; ++ zc->base = NULL; ++ zc->dictBase = NULL; ++ zc->dictLimit = 0; ++ zc->lowLimit = 0; ++ zc->params = params; ++ zc->blockSize = blockSize; ++ zc->frameContentSize = frameContentSize; ++ { int i; for (i=0; irep[i] = repStartValue[i]; } ++ ++ if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) { ++ zc->seqStore.litFreq = (U32*)ptr; ++ zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1<seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL+1); ++ zc->seqStore.offCodeFreq = zc->seqStore.matchLengthFreq + (MaxML+1); ++ ptr = zc->seqStore.offCodeFreq + (MaxOff+1); ++ zc->seqStore.matchTable = (ZSTD_match_t*)ptr; ++ ptr = zc->seqStore.matchTable + ZSTD_OPT_NUM+1; ++ zc->seqStore.priceTable = (ZSTD_optimal_t*)ptr; ++ ptr = zc->seqStore.priceTable + ZSTD_OPT_NUM+1; ++ zc->seqStore.litLengthSum = 0; ++ } ++ zc->seqStore.sequencesStart = (seqDef*)ptr; ++ ptr = zc->seqStore.sequencesStart + maxNbSeq; ++ zc->seqStore.llCode = (BYTE*) ptr; ++ zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq; ++ zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq; ++ zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq; ++ ++ zc->stage = ZSTDcs_init; ++ zc->dictID = 0; ++ zc->loadedDictEnd = 0; ++ ++ return 0; ++ } ++} ++ ++/* ZSTD_invalidateRepCodes() : ++ * ensures next compression will not use repcodes from previous block. ++ * Note : only works with regular variant; ++ * do not use with extDict variant ! 
*/ ++void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) { ++ int i; ++ for (i=0; irep[i] = 0; ++} ++ ++/*! ZSTD_copyCCtx() : ++* Duplicate an existing context `srcCCtx` into another one `dstCCtx`. ++* Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). ++* @return : 0, or an error code */ ++size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize) ++{ ++ if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong); ++ ++ ++ memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem)); ++ { ZSTD_parameters params = srcCCtx->params; ++ params.fParams.contentSizeFlag = (pledgedSrcSize > 0); ++ ZSTD_resetCCtx_advanced(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset); ++ } ++ ++ /* copy tables */ ++ { size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog); ++ size_t const hSize = ((size_t)1) << srcCCtx->params.cParams.hashLog; ++ size_t const h3Size = (size_t)1 << srcCCtx->hashLog3; ++ size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); ++ memcpy(dstCCtx->workSpace, srcCCtx->workSpace, tableSpace); ++ } ++ ++ /* copy dictionary offsets */ ++ dstCCtx->nextToUpdate = srcCCtx->nextToUpdate; ++ dstCCtx->nextToUpdate3= srcCCtx->nextToUpdate3; ++ dstCCtx->nextSrc = srcCCtx->nextSrc; ++ dstCCtx->base = srcCCtx->base; ++ dstCCtx->dictBase = srcCCtx->dictBase; ++ dstCCtx->dictLimit = srcCCtx->dictLimit; ++ dstCCtx->lowLimit = srcCCtx->lowLimit; ++ dstCCtx->loadedDictEnd= srcCCtx->loadedDictEnd; ++ dstCCtx->dictID = srcCCtx->dictID; ++ ++ /* copy entropy tables */ ++ dstCCtx->flagStaticTables = srcCCtx->flagStaticTables; ++ dstCCtx->flagStaticHufTable = srcCCtx->flagStaticHufTable; ++ if (srcCCtx->flagStaticTables) { ++ memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, sizeof(dstCCtx->litlengthCTable)); ++ memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, sizeof(dstCCtx->matchlengthCTable)); ++ memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, sizeof(dstCCtx->offcodeCTable)); ++ } ++ if (srcCCtx->flagStaticHufTable) { ++ memcpy(dstCCtx->hufTable, srcCCtx->hufTable, 256*4); ++ } ++ ++ return 0; ++} ++ ++ ++/*! ZSTD_reduceTable() : ++* reduce table indexes by `reducerValue` */ ++static void ZSTD_reduceTable (U32* const table, U32 const size, U32 const reducerValue) ++{ ++ U32 u; ++ for (u=0 ; u < size ; u++) { ++ if (table[u] < reducerValue) table[u] = 0; ++ else table[u] -= reducerValue; ++ } ++} ++ ++/*! ZSTD_reduceIndex() : ++* rescale all indexes to avoid future overflow (indexes are U32) */ ++static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue) ++{ ++ { U32 const hSize = 1 << zc->params.cParams.hashLog; ++ ZSTD_reduceTable(zc->hashTable, hSize, reducerValue); } ++ ++ { U32 const chainSize = (zc->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << zc->params.cParams.chainLog); ++ ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue); } ++ ++ { U32 const h3Size = (zc->hashLog3) ? 
1 << zc->hashLog3 : 0; ++ ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue); } ++} ++ ++ ++/*-******************************************************* ++* Block entropic compression ++*********************************************************/ ++ ++/* See doc/zstd_compression_format.md for detailed format description */ ++ ++size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize) ++{ ++ if (srcSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall); ++ memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize); ++ MEM_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw); ++ return ZSTD_blockHeaderSize+srcSize; ++} ++ ++ ++static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize) ++{ ++ BYTE* const ostart = (BYTE* const)dst; ++ U32 const flSize = 1 + (srcSize>31) + (srcSize>4095); ++ ++ if (srcSize + flSize > dstCapacity) return ERROR(dstSize_tooSmall); ++ ++ switch(flSize) ++ { ++ case 1: /* 2 - 1 - 5 */ ++ ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3)); ++ break; ++ case 2: /* 2 - 2 - 12 */ ++ MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4))); ++ break; ++ default: /*note : should not be necessary : flSize is within {1,2,3} */ ++ case 3: /* 2 - 2 - 20 */ ++ MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4))); ++ break; ++ } ++ ++ memcpy(ostart + flSize, src, srcSize); ++ return srcSize + flSize; ++} ++ ++static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize) ++{ ++ BYTE* const ostart = (BYTE* const)dst; ++ U32 const flSize = 1 + (srcSize>31) + (srcSize>4095); ++ ++ (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */ ++ ++ switch(flSize) ++ { ++ case 1: /* 2 - 1 - 5 */ ++ ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3)); ++ break; ++ case 2: /* 2 - 2 - 12 */ ++ MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4))); ++ break; ++ default: /*note : should not be necessary : flSize is necessarily within {1,2,3} */ ++ case 3: /* 2 - 2 - 20 */ ++ MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4))); ++ break; ++ } ++ ++ ostart[flSize] = *(const BYTE*)src; ++ return flSize+1; ++} ++ ++ ++static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; } ++ ++static size_t ZSTD_compressLiterals (ZSTD_CCtx* zc, ++ void* dst, size_t dstCapacity, ++ const void* src, size_t srcSize) ++{ ++ size_t const minGain = ZSTD_minGain(srcSize); ++ size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB); ++ BYTE* const ostart = (BYTE*)dst; ++ U32 singleStream = srcSize < 256; ++ symbolEncodingType_e hType = set_compressed; ++ size_t cLitSize; ++ ++ ++ /* small ? don't even attempt compression (speed opt) */ ++# define LITERAL_NOENTROPY 63 ++ { size_t const minLitSize = zc->flagStaticHufTable == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY; ++ if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); ++ } ++ ++ if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall); /* not enough space for compression */ ++ { HUF_repeat repeat = zc->flagStaticHufTable; ++ int const preferRepeat = zc->params.cParams.strategy < ZSTD_lazy ? srcSize <= 1024 : 0; ++ if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1; ++ cLitSize = singleStream ? 
HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, zc->tmpCounters, sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat) ++ : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, zc->tmpCounters, sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat); ++ if (repeat != HUF_repeat_none) { hType = set_repeat; } /* reused the existing table */ ++ else { zc->flagStaticHufTable = HUF_repeat_check; } /* now have a table to reuse */ ++ } ++ ++ if ((cLitSize==0) | (cLitSize >= srcSize - minGain)) { ++ zc->flagStaticHufTable = HUF_repeat_none; ++ return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); ++ } ++ if (cLitSize==1) { ++ zc->flagStaticHufTable = HUF_repeat_none; ++ return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); ++ } ++ ++ /* Build header */ ++ switch(lhSize) ++ { ++ case 3: /* 2 - 2 - 10 - 10 */ ++ { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14); ++ MEM_writeLE24(ostart, lhc); ++ break; ++ } ++ case 4: /* 2 - 2 - 14 - 14 */ ++ { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18); ++ MEM_writeLE32(ostart, lhc); ++ break; ++ } ++ default: /* should not be necessary, lhSize is only {3,4,5} */ ++ case 5: /* 2 - 2 - 18 - 18 */ ++ { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22); ++ MEM_writeLE32(ostart, lhc); ++ ostart[4] = (BYTE)(cLitSize >> 10); ++ break; ++ } ++ } ++ return lhSize+cLitSize; ++} ++ ++static const BYTE LL_Code[64] = { 0, 1, 2, 3, 4, 5, 6, 7, ++ 8, 9, 10, 11, 12, 13, 14, 15, ++ 16, 16, 17, 17, 18, 18, 19, 19, ++ 20, 20, 20, 20, 21, 21, 21, 21, ++ 22, 22, 22, 22, 22, 22, 22, 22, ++ 23, 23, 23, 23, 23, 23, 23, 23, ++ 24, 24, 24, 24, 24, 24, 24, 24, ++ 24, 24, 24, 24, 24, 24, 24, 24 }; ++ ++static const BYTE ML_Code[128] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, ++ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ++ 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, ++ 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, ++ 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, ++ 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, ++ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, ++ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 }; ++ ++ ++void ZSTD_seqToCodes(const seqStore_t* seqStorePtr) ++{ ++ BYTE const LL_deltaCode = 19; ++ BYTE const ML_deltaCode = 36; ++ const seqDef* const sequences = seqStorePtr->sequencesStart; ++ BYTE* const llCodeTable = seqStorePtr->llCode; ++ BYTE* const ofCodeTable = seqStorePtr->ofCode; ++ BYTE* const mlCodeTable = seqStorePtr->mlCode; ++ U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); ++ U32 u; ++ for (u=0; u 63) ? (BYTE)ZSTD_highbit32(llv) + LL_deltaCode : LL_Code[llv]; ++ ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset); ++ mlCodeTable[u] = (mlv>127) ? 
(BYTE)ZSTD_highbit32(mlv) + ML_deltaCode : ML_Code[mlv]; ++ } ++ if (seqStorePtr->longLengthID==1) ++ llCodeTable[seqStorePtr->longLengthPos] = MaxLL; ++ if (seqStorePtr->longLengthID==2) ++ mlCodeTable[seqStorePtr->longLengthPos] = MaxML; ++} ++ ++MEM_STATIC size_t ZSTD_compressSequences (ZSTD_CCtx* zc, ++ void* dst, size_t dstCapacity, ++ size_t srcSize) ++{ ++ const int longOffsets = zc->params.cParams.windowLog > STREAM_ACCUMULATOR_MIN; ++ const seqStore_t* seqStorePtr = &(zc->seqStore); ++ U32 count[MaxSeq+1]; ++ S16 norm[MaxSeq+1]; ++ FSE_CTable* CTable_LitLength = zc->litlengthCTable; ++ FSE_CTable* CTable_OffsetBits = zc->offcodeCTable; ++ FSE_CTable* CTable_MatchLength = zc->matchlengthCTable; ++ U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */ ++ const seqDef* const sequences = seqStorePtr->sequencesStart; ++ const BYTE* const ofCodeTable = seqStorePtr->ofCode; ++ const BYTE* const llCodeTable = seqStorePtr->llCode; ++ const BYTE* const mlCodeTable = seqStorePtr->mlCode; ++ BYTE* const ostart = (BYTE*)dst; ++ BYTE* const oend = ostart + dstCapacity; ++ BYTE* op = ostart; ++ size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart; ++ BYTE* seqHead; ++ BYTE scratchBuffer[1<litStart; ++ size_t const litSize = seqStorePtr->lit - literals; ++ size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize); ++ if (ZSTD_isError(cSize)) return cSize; ++ op += cSize; ++ } ++ ++ /* Sequences Header */ ++ if ((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead */) return ERROR(dstSize_tooSmall); ++ if (nbSeq < 0x7F) *op++ = (BYTE)nbSeq; ++ else if (nbSeq < LONGNBSEQ) op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2; ++ else op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3; ++ if (nbSeq==0) goto _check_compressibility; ++ ++ /* seqHead : flags for FSE encoding type */ ++ seqHead = op++; ++ ++#define MIN_SEQ_FOR_DYNAMIC_FSE 64 ++#define MAX_SEQ_FOR_STATIC_FSE 1000 ++ ++ /* convert length/distances into codes */ ++ ZSTD_seqToCodes(seqStorePtr); ++ ++ /* CTable for Literal Lengths */ ++ { U32 max = MaxLL; ++ size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, zc->tmpCounters); ++ if ((mostFrequent == nbSeq) && (nbSeq > 2)) { ++ *op++ = llCodeTable[0]; ++ FSE_buildCTable_rle(CTable_LitLength, (BYTE)max); ++ LLtype = set_rle; ++ } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) { ++ LLtype = set_repeat; ++ } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog-1)))) { ++ FSE_buildCTable_wksp(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog, scratchBuffer, sizeof(scratchBuffer)); ++ LLtype = set_basic; ++ } else { ++ size_t nbSeq_1 = nbSeq; ++ const U32 tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max); ++ if (count[llCodeTable[nbSeq-1]]>1) { count[llCodeTable[nbSeq-1]]--; nbSeq_1--; } ++ FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max); ++ { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */ ++ if (FSE_isError(NCountSize)) return NCountSize; ++ op += NCountSize; } ++ FSE_buildCTable_wksp(CTable_LitLength, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer)); ++ LLtype = set_compressed; ++ } } ++ ++ /* CTable for Offsets */ ++ { U32 max = MaxOff; ++ size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, zc->tmpCounters); ++ if ((mostFrequent == nbSeq) && (nbSeq > 2)) { ++ *op++ = ofCodeTable[0]; ++ FSE_buildCTable_rle(CTable_OffsetBits, 
(BYTE)max); ++ Offtype = set_rle; ++ } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) { ++ Offtype = set_repeat; ++ } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog-1)))) { ++ FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, scratchBuffer, sizeof(scratchBuffer)); ++ Offtype = set_basic; ++ } else { ++ size_t nbSeq_1 = nbSeq; ++ const U32 tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max); ++ if (count[ofCodeTable[nbSeq-1]]>1) { count[ofCodeTable[nbSeq-1]]--; nbSeq_1--; } ++ FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max); ++ { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */ ++ if (FSE_isError(NCountSize)) return NCountSize; ++ op += NCountSize; } ++ FSE_buildCTable_wksp(CTable_OffsetBits, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer)); ++ Offtype = set_compressed; ++ } } ++ ++ /* CTable for MatchLengths */ ++ { U32 max = MaxML; ++ size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, zc->tmpCounters); ++ if ((mostFrequent == nbSeq) && (nbSeq > 2)) { ++ *op++ = *mlCodeTable; ++ FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max); ++ MLtype = set_rle; ++ } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) { ++ MLtype = set_repeat; ++ } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog-1)))) { ++ FSE_buildCTable_wksp(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog, scratchBuffer, sizeof(scratchBuffer)); ++ MLtype = set_basic; ++ } else { ++ size_t nbSeq_1 = nbSeq; ++ const U32 tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max); ++ if (count[mlCodeTable[nbSeq-1]]>1) { count[mlCodeTable[nbSeq-1]]--; nbSeq_1--; } ++ FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max); ++ { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */ ++ if (FSE_isError(NCountSize)) return NCountSize; ++ op += NCountSize; } ++ FSE_buildCTable_wksp(CTable_MatchLength, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer)); ++ MLtype = set_compressed; ++ } } ++ ++ *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2)); ++ zc->flagStaticTables = 0; ++ ++ /* Encoding Sequences */ ++ { BIT_CStream_t blockStream; ++ FSE_CState_t stateMatchLength; ++ FSE_CState_t stateOffsetBits; ++ FSE_CState_t stateLitLength; ++ ++ CHECK_E(BIT_initCStream(&blockStream, op, oend-op), dstSize_tooSmall); /* not enough space remaining */ ++ ++ /* first symbols */ ++ FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]); ++ FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]); ++ FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]); ++ BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]); ++ if (MEM_32bits()) BIT_flushBits(&blockStream); ++ BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]); ++ if (MEM_32bits()) BIT_flushBits(&blockStream); ++ if (longOffsets) { ++ U32 const ofBits = ofCodeTable[nbSeq-1]; ++ int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); ++ if (extraBits) { ++ BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits); ++ BIT_flushBits(&blockStream); ++ } ++ BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits, ++ ofBits - extraBits); ++ } else { ++ BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]); 
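++            /* note : in the longOffsets case above, the offset is emitted in two
++             * BIT_addBits() calls with a flush in between, so that no single call
++             * pushes more than STREAM_ACCUMULATOR_MIN-1 bits into the accumulator. */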
++ } ++ BIT_flushBits(&blockStream); ++ ++ { size_t n; ++ for (n=nbSeq-2 ; n= 64-7-(LLFSELog+MLFSELog+OffFSELog))) ++ BIT_flushBits(&blockStream); /* (7)*/ ++ BIT_addBits(&blockStream, sequences[n].litLength, llBits); ++ if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream); ++ BIT_addBits(&blockStream, sequences[n].matchLength, mlBits); ++ if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/ ++ if (longOffsets) { ++ int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); ++ if (extraBits) { ++ BIT_addBits(&blockStream, sequences[n].offset, extraBits); ++ BIT_flushBits(&blockStream); /* (7)*/ ++ } ++ BIT_addBits(&blockStream, sequences[n].offset >> extraBits, ++ ofBits - extraBits); /* 31 */ ++ } else { ++ BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */ ++ } ++ BIT_flushBits(&blockStream); /* (7)*/ ++ } } ++ ++ FSE_flushCState(&blockStream, &stateMatchLength); ++ FSE_flushCState(&blockStream, &stateOffsetBits); ++ FSE_flushCState(&blockStream, &stateLitLength); ++ ++ { size_t const streamSize = BIT_closeCStream(&blockStream); ++ if (streamSize==0) return ERROR(dstSize_tooSmall); /* not enough space */ ++ op += streamSize; ++ } } ++ ++ /* check compressibility */ ++_check_compressibility: ++ { size_t const minGain = ZSTD_minGain(srcSize); ++ size_t const maxCSize = srcSize - minGain; ++ if ((size_t)(op-ostart) >= maxCSize) { ++ zc->flagStaticHufTable = HUF_repeat_none; ++ return 0; ++ } } ++ ++ /* confirm repcodes */ ++ { int i; for (i=0; irep[i] = zc->repToConfirm[i]; } ++ ++ return op - ostart; ++} ++ ++#if 0 /* for debug */ ++# define STORESEQ_DEBUG ++U32 g_startDebug = 0; ++const BYTE* g_start = NULL; ++#endif ++ ++/*! ZSTD_storeSeq() : ++ Store a sequence (literal length, literals, offset code and match length code) into seqStore_t. ++ `offsetCode` : distance to match, or 0 == repCode. 
++ `matchCode` : matchLength - MINMATCH ++*/ ++MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t matchCode) ++{ ++#ifdef STORESEQ_DEBUG ++ if (g_startDebug) { ++ const U32 pos = (U32)((const BYTE*)literals - g_start); ++ if (g_start==NULL) g_start = (const BYTE*)literals; ++ if ((pos > 1895000) && (pos < 1895300)) ++ fprintf(stderr, "Cpos %6u :%5u literals & match %3u bytes at distance %6u \n", ++ pos, (U32)litLength, (U32)matchCode+MINMATCH, (U32)offsetCode); ++ } ++#endif ++ /* copy Literals */ ++ ZSTD_wildcopy(seqStorePtr->lit, literals, litLength); ++ seqStorePtr->lit += litLength; ++ ++ /* literal Length */ ++ if (litLength>0xFFFF) { seqStorePtr->longLengthID = 1; seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); } ++ seqStorePtr->sequences[0].litLength = (U16)litLength; ++ ++ /* match offset */ ++ seqStorePtr->sequences[0].offset = offsetCode + 1; ++ ++ /* match Length */ ++ if (matchCode>0xFFFF) { seqStorePtr->longLengthID = 2; seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); } ++ seqStorePtr->sequences[0].matchLength = (U16)matchCode; ++ ++ seqStorePtr->sequences++; ++} ++ ++ ++/*-************************************* ++* Match length counter ++***************************************/ ++static unsigned ZSTD_NbCommonBytes (register size_t val) ++{ ++ if (MEM_isLittleEndian()) { ++ if (MEM_64bits()) { ++# if defined(_MSC_VER) && defined(_WIN64) ++ unsigned long r = 0; ++ _BitScanForward64( &r, (U64)val ); ++ return (unsigned)(r>>3); ++# elif defined(__GNUC__) && (__GNUC__ >= 3) ++ return (__builtin_ctzll((U64)val) >> 3); ++# else ++ static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 }; ++ return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; ++# endif ++ } else { /* 32 bits */ ++# if defined(_MSC_VER) ++ unsigned long r=0; ++ _BitScanForward( &r, (U32)val ); ++ return (unsigned)(r>>3); ++# elif defined(__GNUC__) && (__GNUC__ >= 3) ++ return (__builtin_ctz((U32)val) >> 3); ++# else ++ static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; ++ return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; ++# endif ++ } ++ } else { /* Big Endian CPU */ ++ if (MEM_64bits()) { ++# if defined(_MSC_VER) && defined(_WIN64) ++ unsigned long r = 0; ++ _BitScanReverse64( &r, val ); ++ return (unsigned)(r>>3); ++# elif defined(__GNUC__) && (__GNUC__ >= 3) ++ return (__builtin_clzll(val) >> 3); ++# else ++ unsigned r; ++ const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */ ++ if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; } ++ if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } ++ r += (!val); ++ return r; ++# endif ++ } else { /* 32 bits */ ++# if defined(_MSC_VER) ++ unsigned long r = 0; ++ _BitScanReverse( &r, (unsigned long)val ); ++ return (unsigned)(r>>3); ++# elif defined(__GNUC__) && (__GNUC__ >= 3) ++ return (__builtin_clz((U32)val) >> 3); ++# else ++ unsigned r; ++ if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } ++ r += (!val); ++ return r; ++# endif ++ } } ++} ++ ++ ++static size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit) ++{ ++ const 
BYTE* const pStart = pIn; ++ const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1); ++ ++ while (pIn < pInLoopLimit) { ++ size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn); ++ if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; } ++ pIn += ZSTD_NbCommonBytes(diff); ++ return (size_t)(pIn - pStart); ++ } ++ if (MEM_64bits()) if ((pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; } ++ if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; } ++ if ((pIn> (32-h) ; } ++MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */ ++ ++static const U32 prime4bytes = 2654435761U; ++static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; } ++static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); } ++ ++static const U64 prime5bytes = 889523592379ULL; ++static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; } ++static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); } ++ ++static const U64 prime6bytes = 227718039650203ULL; ++static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; } ++static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); } ++ ++static const U64 prime7bytes = 58295818150454627ULL; ++static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; } ++static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); } ++ ++static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL; ++static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; } ++static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); } ++ ++static size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls) ++{ ++ switch(mls) ++ { ++ //case 3: return ZSTD_hash3Ptr(p, hBits); ++ default: ++ case 4: return ZSTD_hash4Ptr(p, hBits); ++ case 5: return ZSTD_hash5Ptr(p, hBits); ++ case 6: return ZSTD_hash6Ptr(p, hBits); ++ case 7: return ZSTD_hash7Ptr(p, hBits); ++ case 8: return ZSTD_hash8Ptr(p, hBits); ++ } ++} ++ ++ ++/*-************************************* ++* Fast Scan ++***************************************/ ++static void ZSTD_fillHashTable (ZSTD_CCtx* zc, const void* end, const U32 mls) ++{ ++ U32* const hashTable = zc->hashTable; ++ U32 const hBits = zc->params.cParams.hashLog; ++ const BYTE* const base = zc->base; ++ const BYTE* ip = base + zc->nextToUpdate; ++ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; ++ const size_t fastHashFillStep = 3; ++ ++ while(ip <= iend) { ++ hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base); ++ ip += fastHashFillStep; ++ } ++} ++ ++ ++FORCE_INLINE ++void ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx, ++ const void* src, size_t srcSize, ++ const U32 mls) ++{ ++ U32* const hashTable = cctx->hashTable; ++ U32 const hBits = cctx->params.cParams.hashLog; ++ seqStore_t* seqStorePtr = &(cctx->seqStore); ++ const BYTE* const base = cctx->base; ++ const BYTE* const istart = (const BYTE*)src; ++ const BYTE* ip = istart; ++ const BYTE* anchor = istart; ++ const U32 lowestIndex = cctx->dictLimit; ++ const BYTE* const lowest = base + lowestIndex; ++ const BYTE* const iend = istart + srcSize; ++ const BYTE* const ilimit = iend - HASH_READ_SIZE; ++ U32 offset_1=cctx->rep[0], 
offset_2=cctx->rep[1]; ++ U32 offsetSaved = 0; ++ ++ /* init */ ++ ip += (ip==lowest); ++ { U32 const maxRep = (U32)(ip-lowest); ++ if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; ++ if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; ++ } ++ ++ /* Main Search Loop */ ++ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ ++ size_t mLength; ++ size_t const h = ZSTD_hashPtr(ip, hBits, mls); ++ U32 const current = (U32)(ip-base); ++ U32 const matchIndex = hashTable[h]; ++ const BYTE* match = base + matchIndex; ++ hashTable[h] = current; /* update hash table */ ++ ++ if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { ++ mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; ++ ip++; ++ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH); ++ } else { ++ U32 offset; ++ if ( (matchIndex <= lowestIndex) || (MEM_read32(match) != MEM_read32(ip)) ) { ++ ip += ((ip-anchor) >> g_searchStrength) + 1; ++ continue; ++ } ++ mLength = ZSTD_count(ip+4, match+4, iend) + 4; ++ offset = (U32)(ip-match); ++ while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ ++ offset_2 = offset_1; ++ offset_1 = offset; ++ ++ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); ++ } ++ ++ /* match found */ ++ ip += mLength; ++ anchor = ip; ++ ++ if (ip <= ilimit) { ++ /* Fill Table */ ++ hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2; /* here because current+2 could be > iend-8 */ ++ hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base); ++ /* check immediate repcode */ ++ while ( (ip <= ilimit) ++ && ( (offset_2>0) ++ & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { ++ /* store sequence */ ++ size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; ++ { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */ ++ hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip-base); ++ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH); ++ ip += rLength; ++ anchor = ip; ++ continue; /* faster when present ... (?) */ ++ } } } ++ ++ /* save reps for next block */ ++ cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved; ++ cctx->repToConfirm[1] = offset_2 ? 
offset_2 : offsetSaved; ++ ++ /* Last Literals */ ++ { size_t const lastLLSize = iend - anchor; ++ memcpy(seqStorePtr->lit, anchor, lastLLSize); ++ seqStorePtr->lit += lastLLSize; ++ } ++} ++ ++ ++static void ZSTD_compressBlock_fast(ZSTD_CCtx* ctx, ++ const void* src, size_t srcSize) ++{ ++ const U32 mls = ctx->params.cParams.searchLength; ++ switch(mls) ++ { ++ default: /* includes case 3 */ ++ case 4 : ++ ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); return; ++ case 5 : ++ ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5); return; ++ case 6 : ++ ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6); return; ++ case 7 : ++ ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7); return; ++ } ++} ++ ++ ++static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx, ++ const void* src, size_t srcSize, ++ const U32 mls) ++{ ++ U32* hashTable = ctx->hashTable; ++ const U32 hBits = ctx->params.cParams.hashLog; ++ seqStore_t* seqStorePtr = &(ctx->seqStore); ++ const BYTE* const base = ctx->base; ++ const BYTE* const dictBase = ctx->dictBase; ++ const BYTE* const istart = (const BYTE*)src; ++ const BYTE* ip = istart; ++ const BYTE* anchor = istart; ++ const U32 lowestIndex = ctx->lowLimit; ++ const BYTE* const dictStart = dictBase + lowestIndex; ++ const U32 dictLimit = ctx->dictLimit; ++ const BYTE* const lowPrefixPtr = base + dictLimit; ++ const BYTE* const dictEnd = dictBase + dictLimit; ++ const BYTE* const iend = istart + srcSize; ++ const BYTE* const ilimit = iend - 8; ++ U32 offset_1=ctx->rep[0], offset_2=ctx->rep[1]; ++ ++ /* Search Loop */ ++ while (ip < ilimit) { /* < instead of <=, because (ip+1) */ ++ const size_t h = ZSTD_hashPtr(ip, hBits, mls); ++ const U32 matchIndex = hashTable[h]; ++ const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base; ++ const BYTE* match = matchBase + matchIndex; ++ const U32 current = (U32)(ip-base); ++ const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */ ++ const BYTE* repBase = repIndex < dictLimit ? dictBase : base; ++ const BYTE* repMatch = repBase + repIndex; ++ size_t mLength; ++ hashTable[h] = current; /* update hash table */ ++ ++ if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) ++ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { ++ const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend; ++ mLength = ZSTD_count_2segments(ip+1+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repMatchEnd, lowPrefixPtr) + EQUAL_READ32; ++ ip++; ++ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH); ++ } else { ++ if ( (matchIndex < lowestIndex) || ++ (MEM_read32(match) != MEM_read32(ip)) ) { ++ ip += ((ip-anchor) >> g_searchStrength) + 1; ++ continue; ++ } ++ { const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend; ++ const BYTE* lowMatchPtr = matchIndex < dictLimit ? 
dictStart : lowPrefixPtr; ++ U32 offset; ++ mLength = ZSTD_count_2segments(ip+EQUAL_READ32, match+EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32; ++ while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ ++ offset = current - matchIndex; ++ offset_2 = offset_1; ++ offset_1 = offset; ++ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); ++ } } ++ ++ /* found a match : store it */ ++ ip += mLength; ++ anchor = ip; ++ ++ if (ip <= ilimit) { ++ /* Fill Table */ ++ hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2; ++ hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base); ++ /* check immediate repcode */ ++ while (ip <= ilimit) { ++ U32 const current2 = (U32)(ip-base); ++ U32 const repIndex2 = current2 - offset_2; ++ const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2; ++ if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */ ++ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { ++ const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend; ++ size_t repLength2 = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch2+EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32; ++ U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ ++ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH); ++ hashTable[ZSTD_hashPtr(ip, hBits, mls)] = current2; ++ ip += repLength2; ++ anchor = ip; ++ continue; ++ } ++ break; ++ } } } ++ ++ /* save reps for next block */ ++ ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2; ++ ++ /* Last Literals */ ++ { size_t const lastLLSize = iend - anchor; ++ memcpy(seqStorePtr->lit, anchor, lastLLSize); ++ seqStorePtr->lit += lastLLSize; ++ } ++} ++ ++ ++static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx* ctx, ++ const void* src, size_t srcSize) ++{ ++ U32 const mls = ctx->params.cParams.searchLength; ++ switch(mls) ++ { ++ default: /* includes case 3 */ ++ case 4 : ++ ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); return; ++ case 5 : ++ ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5); return; ++ case 6 : ++ ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6); return; ++ case 7 : ++ ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7); return; ++ } ++} ++ ++ ++/*-************************************* ++* Double Fast ++***************************************/ ++static void ZSTD_fillDoubleHashTable (ZSTD_CCtx* cctx, const void* end, const U32 mls) ++{ ++ U32* const hashLarge = cctx->hashTable; ++ U32 const hBitsL = cctx->params.cParams.hashLog; ++ U32* const hashSmall = cctx->chainTable; ++ U32 const hBitsS = cctx->params.cParams.chainLog; ++ const BYTE* const base = cctx->base; ++ const BYTE* ip = base + cctx->nextToUpdate; ++ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; ++ const size_t fastHashFillStep = 3; ++ ++ while(ip <= iend) { ++ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base); ++ hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base); ++ ip += fastHashFillStep; ++ } ++} ++ ++ ++FORCE_INLINE ++void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx, ++ const void* src, size_t srcSize, ++ const U32 mls) ++{ ++ U32* const hashLong = cctx->hashTable; ++ const U32 hBitsL = cctx->params.cParams.hashLog; ++ U32* const hashSmall = cctx->chainTable; ++ const U32 hBitsS = cctx->params.cParams.chainLog; ++ 
seqStore_t* seqStorePtr = &(cctx->seqStore); ++ const BYTE* const base = cctx->base; ++ const BYTE* const istart = (const BYTE*)src; ++ const BYTE* ip = istart; ++ const BYTE* anchor = istart; ++ const U32 lowestIndex = cctx->dictLimit; ++ const BYTE* const lowest = base + lowestIndex; ++ const BYTE* const iend = istart + srcSize; ++ const BYTE* const ilimit = iend - HASH_READ_SIZE; ++ U32 offset_1=cctx->rep[0], offset_2=cctx->rep[1]; ++ U32 offsetSaved = 0; ++ ++ /* init */ ++ ip += (ip==lowest); ++ { U32 const maxRep = (U32)(ip-lowest); ++ if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; ++ if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; ++ } ++ ++ /* Main Search Loop */ ++ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ ++ size_t mLength; ++ size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8); ++ size_t const h = ZSTD_hashPtr(ip, hBitsS, mls); ++ U32 const current = (U32)(ip-base); ++ U32 const matchIndexL = hashLong[h2]; ++ U32 const matchIndexS = hashSmall[h]; ++ const BYTE* matchLong = base + matchIndexL; ++ const BYTE* match = base + matchIndexS; ++ hashLong[h2] = hashSmall[h] = current; /* update hash tables */ ++ ++ if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { /* note : by construction, offset_1 <= current */ ++ mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; ++ ip++; ++ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH); ++ } else { ++ U32 offset; ++ if ( (matchIndexL > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip)) ) { ++ mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8; ++ offset = (U32)(ip-matchLong); ++ while (((ip>anchor) & (matchLong>lowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ ++ } else if ( (matchIndexS > lowestIndex) && (MEM_read32(match) == MEM_read32(ip)) ) { ++ size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8); ++ U32 const matchIndex3 = hashLong[h3]; ++ const BYTE* match3 = base + matchIndex3; ++ hashLong[h3] = current + 1; ++ if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) { ++ mLength = ZSTD_count(ip+9, match3+8, iend) + 8; ++ ip++; ++ offset = (U32)(ip-match3); ++ while (((ip>anchor) & (match3>lowest)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */ ++ } else { ++ mLength = ZSTD_count(ip+4, match+4, iend) + 4; ++ offset = (U32)(ip-match); ++ while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ ++ } ++ } else { ++ ip += ((ip-anchor) >> g_searchStrength) + 1; ++ continue; ++ } ++ ++ offset_2 = offset_1; ++ offset_1 = offset; ++ ++ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); ++ } ++ ++ /* match found */ ++ ip += mLength; ++ anchor = ip; ++ ++ if (ip <= ilimit) { ++ /* Fill Table */ ++ hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = ++ hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2; /* here because current+2 could be > iend-8 */ ++ hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = ++ hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base); ++ ++ /* check immediate repcode */ ++ while ( (ip <= ilimit) ++ && ( (offset_2>0) ++ & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { ++ /* store sequence */ ++ size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; ++ { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */ ++ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base); ++ 
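/* mirror the position into the long-hash table as well, so both tables stay current */ ++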
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base); ++ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH); ++ ip += rLength; ++ anchor = ip; ++ continue; /* faster when present ... (?) */ ++ } } } ++ ++ /* save reps for next block */ ++ cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved; ++ cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved; ++ ++ /* Last Literals */ ++ { size_t const lastLLSize = iend - anchor; ++ memcpy(seqStorePtr->lit, anchor, lastLLSize); ++ seqStorePtr->lit += lastLLSize; ++ } ++} ++ ++ ++static void ZSTD_compressBlock_doubleFast(ZSTD_CCtx* ctx, const void* src, size_t srcSize) ++{ ++ const U32 mls = ctx->params.cParams.searchLength; ++ switch(mls) ++ { ++ default: /* includes case 3 */ ++ case 4 : ++ ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); return; ++ case 5 : ++ ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5); return; ++ case 6 : ++ ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6); return; ++ case 7 : ++ ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7); return; ++ } ++} ++ ++ ++static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx, ++ const void* src, size_t srcSize, ++ const U32 mls) ++{ ++ U32* const hashLong = ctx->hashTable; ++ U32 const hBitsL = ctx->params.cParams.hashLog; ++ U32* const hashSmall = ctx->chainTable; ++ U32 const hBitsS = ctx->params.cParams.chainLog; ++ seqStore_t* seqStorePtr = &(ctx->seqStore); ++ const BYTE* const base = ctx->base; ++ const BYTE* const dictBase = ctx->dictBase; ++ const BYTE* const istart = (const BYTE*)src; ++ const BYTE* ip = istart; ++ const BYTE* anchor = istart; ++ const U32 lowestIndex = ctx->lowLimit; ++ const BYTE* const dictStart = dictBase + lowestIndex; ++ const U32 dictLimit = ctx->dictLimit; ++ const BYTE* const lowPrefixPtr = base + dictLimit; ++ const BYTE* const dictEnd = dictBase + dictLimit; ++ const BYTE* const iend = istart + srcSize; ++ const BYTE* const ilimit = iend - 8; ++ U32 offset_1=ctx->rep[0], offset_2=ctx->rep[1]; ++ ++ /* Search Loop */ ++ while (ip < ilimit) { /* < instead of <=, because (ip+1) */ ++ const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls); ++ const U32 matchIndex = hashSmall[hSmall]; ++ const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base; ++ const BYTE* match = matchBase + matchIndex; ++ ++ const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8); ++ const U32 matchLongIndex = hashLong[hLong]; ++ const BYTE* matchLongBase = matchLongIndex < dictLimit ? dictBase : base; ++ const BYTE* matchLong = matchLongBase + matchLongIndex; ++ ++ const U32 current = (U32)(ip-base); ++ const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */ ++ const BYTE* repBase = repIndex < dictLimit ? dictBase : base; ++ const BYTE* repMatch = repBase + repIndex; ++ size_t mLength; ++ hashSmall[hSmall] = hashLong[hLong] = current; /* update hash table */ ++ ++ if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) ++ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { ++ const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend; ++ mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4; ++ ip++; ++ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH); ++ } else { ++ if ((matchLongIndex > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) { ++ const BYTE* matchEnd = matchLongIndex < dictLimit ? dictEnd : iend; ++ const BYTE* lowMatchPtr = matchLongIndex < dictLimit ? 
dictStart : lowPrefixPtr; ++ U32 offset; ++ mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, lowPrefixPtr) + 8; ++ offset = current - matchLongIndex; ++ while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ ++ offset_2 = offset_1; ++ offset_1 = offset; ++ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); ++ ++ } else if ((matchIndex > lowestIndex) && (MEM_read32(match) == MEM_read32(ip))) { ++ size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8); ++ U32 const matchIndex3 = hashLong[h3]; ++ const BYTE* const match3Base = matchIndex3 < dictLimit ? dictBase : base; ++ const BYTE* match3 = match3Base + matchIndex3; ++ U32 offset; ++ hashLong[h3] = current + 1; ++ if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) { ++ const BYTE* matchEnd = matchIndex3 < dictLimit ? dictEnd : iend; ++ const BYTE* lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr; ++ mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, lowPrefixPtr) + 8; ++ ip++; ++ offset = current+1 - matchIndex3; ++ while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */ ++ } else { ++ const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend; ++ const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr; ++ mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4; ++ offset = current - matchIndex; ++ while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ ++ } ++ offset_2 = offset_1; ++ offset_1 = offset; ++ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); ++ ++ } else { ++ ip += ((ip-anchor) >> g_searchStrength) + 1; ++ continue; ++ } } ++ ++ /* found a match : store it */ ++ ip += mLength; ++ anchor = ip; ++ ++ if (ip <= ilimit) { ++ /* Fill Table */ ++ hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2; ++ hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = current+2; ++ hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base); ++ hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base); ++ /* check immediate repcode */ ++ while (ip <= ilimit) { ++ U32 const current2 = (U32)(ip-base); ++ U32 const repIndex2 = current2 - offset_2; ++ const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2; ++ if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */ ++ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { ++ const BYTE* const repEnd2 = repIndex2 < dictLimit ? 
dictEnd : iend; ++ size_t const repLength2 = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch2+EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32; ++ U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ ++ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH); ++ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; ++ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; ++ ip += repLength2; ++ anchor = ip; ++ continue; ++ } ++ break; ++ } } } ++ ++ /* save reps for next block */ ++ ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2; ++ ++ /* Last Literals */ ++ { size_t const lastLLSize = iend - anchor; ++ memcpy(seqStorePtr->lit, anchor, lastLLSize); ++ seqStorePtr->lit += lastLLSize; ++ } ++} ++ ++ ++static void ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx* ctx, ++ const void* src, size_t srcSize) ++{ ++ U32 const mls = ctx->params.cParams.searchLength; ++ switch(mls) ++ { ++ default: /* includes case 3 */ ++ case 4 : ++ ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); return; ++ case 5 : ++ ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5); return; ++ case 6 : ++ ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6); return; ++ case 7 : ++ ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7); return; ++ } ++} ++ ++ ++/*-************************************* ++* Binary Tree search ++***************************************/ ++/** ZSTD_insertBt1() : add one or multiple positions to tree. ++* ip : assumed <= iend-8 . ++* @return : nb of positions added */ ++static U32 ZSTD_insertBt1(ZSTD_CCtx* zc, const BYTE* const ip, const U32 mls, const BYTE* const iend, U32 nbCompares, ++ U32 extDict) ++{ ++ U32* const hashTable = zc->hashTable; ++ U32 const hashLog = zc->params.cParams.hashLog; ++ size_t const h = ZSTD_hashPtr(ip, hashLog, mls); ++ U32* const bt = zc->chainTable; ++ U32 const btLog = zc->params.cParams.chainLog - 1; ++ U32 const btMask = (1 << btLog) - 1; ++ U32 matchIndex = hashTable[h]; ++ size_t commonLengthSmaller=0, commonLengthLarger=0; ++ const BYTE* const base = zc->base; ++ const BYTE* const dictBase = zc->dictBase; ++ const U32 dictLimit = zc->dictLimit; ++ const BYTE* const dictEnd = dictBase + dictLimit; ++ const BYTE* const prefixStart = base + dictLimit; ++ const BYTE* match; ++ const U32 current = (U32)(ip-base); ++ const U32 btLow = btMask >= current ? 
0 : current - btMask; ++ U32* smallerPtr = bt + 2*(current&btMask); ++ U32* largerPtr = smallerPtr + 1; ++ U32 dummy32; /* to be nullified at the end */ ++ U32 const windowLow = zc->lowLimit; ++ U32 matchEndIdx = current+8; ++ size_t bestLength = 8; ++#ifdef ZSTD_C_PREDICT ++ U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0); ++ U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1); ++ predictedSmall += (predictedSmall>0); ++ predictedLarge += (predictedLarge>0); ++#endif /* ZSTD_C_PREDICT */ ++ ++ hashTable[h] = current; /* Update Hash Table */ ++ ++ while (nbCompares-- && (matchIndex > windowLow)) { ++ U32* const nextPtr = bt + 2*(matchIndex & btMask); ++ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ ++ ++#ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */ ++ const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */ ++ if (matchIndex == predictedSmall) { ++ /* no need to check length, result known */ ++ *smallerPtr = matchIndex; ++ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ ++ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ ++ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ ++ predictedSmall = predictPtr[1] + (predictPtr[1]>0); ++ continue; ++ } ++ if (matchIndex == predictedLarge) { ++ *largerPtr = matchIndex; ++ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ ++ largerPtr = nextPtr; ++ matchIndex = nextPtr[0]; ++ predictedLarge = predictPtr[0] + (predictPtr[0]>0); ++ continue; ++ } ++#endif ++ if ((!extDict) || (matchIndex+matchLength >= dictLimit)) { ++ match = base + matchIndex; ++ if (match[matchLength] == ip[matchLength]) ++ matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iend) +1; ++ } else { ++ match = dictBase + matchIndex; ++ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); ++ if (matchIndex+matchLength >= dictLimit) ++ match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ ++ } ++ ++ if (matchLength > bestLength) { ++ bestLength = matchLength; ++ if (matchLength > matchEndIdx - matchIndex) ++ matchEndIdx = matchIndex + (U32)matchLength; ++ } ++ ++ if (ip+matchLength == iend) /* equal : no way to know if inf or sup */ ++ break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt the tree */ ++ ++ if (match[matchLength] < ip[matchLength]) { /* necessarily within correct buffer */ ++ /* match is smaller than current */ ++ *smallerPtr = matchIndex; /* update smaller idx */ ++ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ ++ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ ++ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ ++ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ ++ } else { ++ /* match is larger than current */ ++ *largerPtr = matchIndex; ++ commonLengthLarger = matchLength; ++ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ ++ largerPtr = nextPtr; ++ matchIndex = nextPtr[0]; ++ } } ++ ++ *smallerPtr = *largerPtr = 0; ++ if (bestLength > 384) return MIN(192, (U32)(bestLength - 384)); /* speed optimization */ ++ if (matchEndIdx > current 
+ 8) return matchEndIdx - current - 8; ++ return 1; ++} ++ ++ ++static size_t ZSTD_insertBtAndFindBestMatch ( ++ ZSTD_CCtx* zc, ++ const BYTE* const ip, const BYTE* const iend, ++ size_t* offsetPtr, ++ U32 nbCompares, const U32 mls, ++ U32 extDict) ++{ ++ U32* const hashTable = zc->hashTable; ++ U32 const hashLog = zc->params.cParams.hashLog; ++ size_t const h = ZSTD_hashPtr(ip, hashLog, mls); ++ U32* const bt = zc->chainTable; ++ U32 const btLog = zc->params.cParams.chainLog - 1; ++ U32 const btMask = (1 << btLog) - 1; ++ U32 matchIndex = hashTable[h]; ++ size_t commonLengthSmaller=0, commonLengthLarger=0; ++ const BYTE* const base = zc->base; ++ const BYTE* const dictBase = zc->dictBase; ++ const U32 dictLimit = zc->dictLimit; ++ const BYTE* const dictEnd = dictBase + dictLimit; ++ const BYTE* const prefixStart = base + dictLimit; ++ const U32 current = (U32)(ip-base); ++ const U32 btLow = btMask >= current ? 0 : current - btMask; ++ const U32 windowLow = zc->lowLimit; ++ U32* smallerPtr = bt + 2*(current&btMask); ++ U32* largerPtr = bt + 2*(current&btMask) + 1; ++ U32 matchEndIdx = current+8; ++ U32 dummy32; /* to be nullified at the end */ ++ size_t bestLength = 0; ++ ++ hashTable[h] = current; /* Update Hash Table */ ++ ++ while (nbCompares-- && (matchIndex > windowLow)) { ++ U32* const nextPtr = bt + 2*(matchIndex & btMask); ++ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ ++ const BYTE* match; ++ ++ if ((!extDict) || (matchIndex+matchLength >= dictLimit)) { ++ match = base + matchIndex; ++ if (match[matchLength] == ip[matchLength]) ++ matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iend) +1; ++ } else { ++ match = dictBase + matchIndex; ++ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); ++ if (matchIndex+matchLength >= dictLimit) ++ match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ ++ } ++ ++ if (matchLength > bestLength) { ++ if (matchLength > matchEndIdx - matchIndex) ++ matchEndIdx = matchIndex + (U32)matchLength; ++ if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) ++ bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex; ++ if (ip+matchLength == iend) /* equal : no way to know if inf or sup */ ++ break; /* drop, to guarantee consistency (miss a little bit of compression) */ ++ } ++ ++ if (match[matchLength] < ip[matchLength]) { ++ /* match is smaller than current */ ++ *smallerPtr = matchIndex; /* update smaller idx */ ++ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ ++ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ ++ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ ++ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ ++ } else { ++ /* match is larger than current */ ++ *largerPtr = matchIndex; ++ commonLengthLarger = matchLength; ++ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ ++ largerPtr = nextPtr; ++ matchIndex = nextPtr[0]; ++ } } ++ ++ *smallerPtr = *largerPtr = 0; ++ ++ zc->nextToUpdate = (matchEndIdx > current + 8) ? 
matchEndIdx - 8 : current+1; ++ return bestLength; ++} ++ ++ ++static void ZSTD_updateTree(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls) ++{ ++ const BYTE* const base = zc->base; ++ const U32 target = (U32)(ip - base); ++ U32 idx = zc->nextToUpdate; ++ ++ while(idx < target) ++ idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares, 0); ++} ++ ++/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */ ++static size_t ZSTD_BtFindBestMatch ( ++ ZSTD_CCtx* zc, ++ const BYTE* const ip, const BYTE* const iLimit, ++ size_t* offsetPtr, ++ const U32 maxNbAttempts, const U32 mls) ++{ ++ if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */ ++ ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls); ++ return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0); ++} ++ ++ ++static size_t ZSTD_BtFindBestMatch_selectMLS ( ++ ZSTD_CCtx* zc, /* Index table will be updated */ ++ const BYTE* ip, const BYTE* const iLimit, ++ size_t* offsetPtr, ++ const U32 maxNbAttempts, const U32 matchLengthSearch) ++{ ++ switch(matchLengthSearch) ++ { ++ default : /* includes case 3 */ ++ case 4 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4); ++ case 5 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5); ++ case 7 : ++ case 6 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6); ++ } ++} ++ ++ ++static void ZSTD_updateTree_extDict(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls) ++{ ++ const BYTE* const base = zc->base; ++ const U32 target = (U32)(ip - base); ++ U32 idx = zc->nextToUpdate; ++ ++ while (idx < target) idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares, 1); ++} ++ ++ ++/** Tree updater, providing best match */ ++static size_t ZSTD_BtFindBestMatch_extDict ( ++ ZSTD_CCtx* zc, ++ const BYTE* const ip, const BYTE* const iLimit, ++ size_t* offsetPtr, ++ const U32 maxNbAttempts, const U32 mls) ++{ ++ if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */ ++ ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls); ++ return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1); ++} ++ ++ ++static size_t ZSTD_BtFindBestMatch_selectMLS_extDict ( ++ ZSTD_CCtx* zc, /* Index table will be updated */ ++ const BYTE* ip, const BYTE* const iLimit, ++ size_t* offsetPtr, ++ const U32 maxNbAttempts, const U32 matchLengthSearch) ++{ ++ switch(matchLengthSearch) ++ { ++ default : /* includes case 3 */ ++ case 4 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4); ++ case 5 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5); ++ case 7 : ++ case 6 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6); ++ } ++} ++ ++ ++ ++/* ********************************* ++* Hash Chain ++***********************************/ ++#define NEXT_IN_CHAIN(d, mask) chainTable[(d) & mask] ++ ++/* Update chains up to ip (excluded) ++ Assumption : always within prefix (i.e. 
not within extDict) */ ++FORCE_INLINE ++U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls) ++{ ++ U32* const hashTable = zc->hashTable; ++ const U32 hashLog = zc->params.cParams.hashLog; ++ U32* const chainTable = zc->chainTable; ++ const U32 chainMask = (1 << zc->params.cParams.chainLog) - 1; ++ const BYTE* const base = zc->base; ++ const U32 target = (U32)(ip - base); ++ U32 idx = zc->nextToUpdate; ++ ++ while(idx < target) { /* catch up */ ++ size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls); ++ NEXT_IN_CHAIN(idx, chainMask) = hashTable[h]; ++ hashTable[h] = idx; ++ idx++; ++ } ++ ++ zc->nextToUpdate = target; ++ return hashTable[ZSTD_hashPtr(ip, hashLog, mls)]; ++} ++ ++ ++ ++FORCE_INLINE /* inlining is important to hardwire a hot branch (template emulation) */ ++size_t ZSTD_HcFindBestMatch_generic ( ++ ZSTD_CCtx* zc, /* Index table will be updated */ ++ const BYTE* const ip, const BYTE* const iLimit, ++ size_t* offsetPtr, ++ const U32 maxNbAttempts, const U32 mls, const U32 extDict) ++{ ++ U32* const chainTable = zc->chainTable; ++ const U32 chainSize = (1 << zc->params.cParams.chainLog); ++ const U32 chainMask = chainSize-1; ++ const BYTE* const base = zc->base; ++ const BYTE* const dictBase = zc->dictBase; ++ const U32 dictLimit = zc->dictLimit; ++ const BYTE* const prefixStart = base + dictLimit; ++ const BYTE* const dictEnd = dictBase + dictLimit; ++ const U32 lowLimit = zc->lowLimit; ++ const U32 current = (U32)(ip-base); ++ const U32 minChain = current > chainSize ? current - chainSize : 0; ++ int nbAttempts=maxNbAttempts; ++ size_t ml=EQUAL_READ32-1; ++ ++ /* HC4 match finder */ ++ U32 matchIndex = ZSTD_insertAndFindFirstIndex (zc, ip, mls); ++ ++ for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) { ++ const BYTE* match; ++ size_t currentMl=0; ++ if ((!extDict) || matchIndex >= dictLimit) { ++ match = base + matchIndex; ++ if (match[ml] == ip[ml]) /* potentially better */ ++ currentMl = ZSTD_count(ip, match, iLimit); ++ } else { ++ match = dictBase + matchIndex; ++ if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */ ++ currentMl = ZSTD_count_2segments(ip+EQUAL_READ32, match+EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32; ++ } ++ ++ /* save best solution */ ++ if (currentMl > ml) { ml = currentMl; *offsetPtr = current - matchIndex + ZSTD_REP_MOVE; if (ip+currentMl == iLimit) break; /* best possible, and avoid read overflow*/ } ++ ++ if (matchIndex <= minChain) break; ++ matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask); ++ } ++ ++ return ml; ++} ++ ++ ++FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS ( ++ ZSTD_CCtx* zc, ++ const BYTE* ip, const BYTE* const iLimit, ++ size_t* offsetPtr, ++ const U32 maxNbAttempts, const U32 matchLengthSearch) ++{ ++ switch(matchLengthSearch) ++ { ++ default : /* includes case 3 */ ++ case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0); ++ case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0); ++ case 7 : ++ case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0); ++ } ++} ++ ++ ++FORCE_INLINE size_t ZSTD_HcFindBestMatch_extDict_selectMLS ( ++ ZSTD_CCtx* zc, ++ const BYTE* ip, const BYTE* const iLimit, ++ size_t* offsetPtr, ++ const U32 maxNbAttempts, const U32 matchLengthSearch) ++{ ++ switch(matchLengthSearch) ++ { ++ default : /* includes case 3 */ ++ case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, 
offsetPtr, maxNbAttempts, 4, 1); ++ case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1); ++ case 7 : ++ case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1); ++ } ++} ++ ++ ++/* ******************************* ++* Common parser - lazy strategy ++*********************************/ ++FORCE_INLINE ++void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx, ++ const void* src, size_t srcSize, ++ const U32 searchMethod, const U32 depth) ++{ ++ seqStore_t* seqStorePtr = &(ctx->seqStore); ++ const BYTE* const istart = (const BYTE*)src; ++ const BYTE* ip = istart; ++ const BYTE* anchor = istart; ++ const BYTE* const iend = istart + srcSize; ++ const BYTE* const ilimit = iend - 8; ++ const BYTE* const base = ctx->base + ctx->dictLimit; ++ ++ U32 const maxSearches = 1 << ctx->params.cParams.searchLog; ++ U32 const mls = ctx->params.cParams.searchLength; ++ ++ typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit, ++ size_t* offsetPtr, ++ U32 maxNbAttempts, U32 matchLengthSearch); ++ searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS; ++ U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1], savedOffset=0; ++ ++ /* init */ ++ ip += (ip==base); ++ ctx->nextToUpdate3 = ctx->nextToUpdate; ++ { U32 const maxRep = (U32)(ip-base); ++ if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0; ++ if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0; ++ } ++ ++ /* Match Loop */ ++ while (ip < ilimit) { ++ size_t matchLength=0; ++ size_t offset=0; ++ const BYTE* start=ip+1; ++ ++ /* check repCode */ ++ if ((offset_1>0) & (MEM_read32(ip+1) == MEM_read32(ip+1 - offset_1))) { ++ /* repcode : we take it */ ++ matchLength = ZSTD_count(ip+1+EQUAL_READ32, ip+1+EQUAL_READ32-offset_1, iend) + EQUAL_READ32; ++ if (depth==0) goto _storeSequence; ++ } ++ ++ /* first search (depth 0) */ ++ { size_t offsetFound = 99999999; ++ size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls); ++ if (ml2 > matchLength) ++ matchLength = ml2, start = ip, offset=offsetFound; ++ } ++ ++ if (matchLength < EQUAL_READ32) { ++ ip += ((ip-anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */ ++ continue; ++ } ++ ++ /* let's try to find a better solution */ ++ if (depth>=1) ++ while (ip < ilimit) { ++ ip ++; ++ if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { ++ size_t const mlRep = ZSTD_count(ip+EQUAL_READ32, ip+EQUAL_READ32-offset_1, iend) + EQUAL_READ32; ++ int const gain2 = (int)(mlRep * 3); ++ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); ++ if ((mlRep >= EQUAL_READ32) && (gain2 > gain1)) ++ matchLength = mlRep, offset = 0, start = ip; ++ } ++ { size_t offset2=99999999; ++ size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); ++ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ ++ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); ++ if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) { ++ matchLength = ml2, offset = offset2, start = ip; ++ continue; /* search a better one */ ++ } } ++ ++ /* let's find an even better one */ ++ if ((depth==2) && (ip < ilimit)) { ++ ip ++; ++ if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { ++ size_t const ml2 = ZSTD_count(ip+EQUAL_READ32, ip+EQUAL_READ32-offset_1, iend) + EQUAL_READ32; ++ int const gain2 = (int)(ml2 * 4); ++ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); ++ if ((ml2 >= EQUAL_READ32) && (gain2 > 
gain1)) ++ matchLength = ml2, offset = 0, start = ip; ++ } ++ { size_t offset2=99999999; ++ size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); ++ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ ++ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); ++ if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) { ++ matchLength = ml2, offset = offset2, start = ip; ++ continue; ++ } } } ++ break; /* nothing found : store previous solution */ ++ } ++ ++ /* catch up */ ++ if (offset) { ++ while ((start>anchor) && (start>base+offset-ZSTD_REP_MOVE) && (start[-1] == start[-1-offset+ZSTD_REP_MOVE])) /* only search for offset within prefix */ ++ { start--; matchLength++; } ++ offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE); ++ } ++ ++ /* store sequence */ ++_storeSequence: ++ { size_t const litLength = start - anchor; ++ ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH); ++ anchor = ip = start + matchLength; ++ } ++ ++ /* check immediate repcode */ ++ while ( (ip <= ilimit) ++ && ((offset_2>0) ++ & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { ++ /* store sequence */ ++ matchLength = ZSTD_count(ip+EQUAL_READ32, ip+EQUAL_READ32-offset_2, iend) + EQUAL_READ32; ++ offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */ ++ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH); ++ ip += matchLength; ++ anchor = ip; ++ continue; /* faster when present ... (?) */ ++ } } ++ ++ /* Save reps for next block */ ++ ctx->repToConfirm[0] = offset_1 ? offset_1 : savedOffset; ++ ctx->repToConfirm[1] = offset_2 ? offset_2 : savedOffset; ++ ++ /* Last Literals */ ++ { size_t const lastLLSize = iend - anchor; ++ memcpy(seqStorePtr->lit, anchor, lastLLSize); ++ seqStorePtr->lit += lastLLSize; ++ } ++} ++ ++ ++static void ZSTD_compressBlock_btlazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize) ++{ ++ ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2); ++} ++ ++static void ZSTD_compressBlock_lazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize) ++{ ++ ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2); ++} ++ ++static void ZSTD_compressBlock_lazy(ZSTD_CCtx* ctx, const void* src, size_t srcSize) ++{ ++ ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1); ++} ++ ++static void ZSTD_compressBlock_greedy(ZSTD_CCtx* ctx, const void* src, size_t srcSize) ++{ ++ ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0); ++} ++ ++ ++FORCE_INLINE ++void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx, ++ const void* src, size_t srcSize, ++ const U32 searchMethod, const U32 depth) ++{ ++ seqStore_t* seqStorePtr = &(ctx->seqStore); ++ const BYTE* const istart = (const BYTE*)src; ++ const BYTE* ip = istart; ++ const BYTE* anchor = istart; ++ const BYTE* const iend = istart + srcSize; ++ const BYTE* const ilimit = iend - 8; ++ const BYTE* const base = ctx->base; ++ const U32 dictLimit = ctx->dictLimit; ++ const U32 lowestIndex = ctx->lowLimit; ++ const BYTE* const prefixStart = base + dictLimit; ++ const BYTE* const dictBase = ctx->dictBase; ++ const BYTE* const dictEnd = dictBase + dictLimit; ++ const BYTE* const dictStart = dictBase + ctx->lowLimit; ++ ++ const U32 maxSearches = 1 << ctx->params.cParams.searchLog; ++ const U32 mls = ctx->params.cParams.searchLength; ++ ++ typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit, ++ size_t* offsetPtr, ++ U32 maxNbAttempts, U32 matchLengthSearch); ++ searchMax_f searchMax = 
searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS; ++ ++ U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1]; ++ ++ /* init */ ++ ctx->nextToUpdate3 = ctx->nextToUpdate; ++ ip += (ip == prefixStart); ++ ++ /* Match Loop */ ++ while (ip < ilimit) { ++ size_t matchLength=0; ++ size_t offset=0; ++ const BYTE* start=ip+1; ++ U32 current = (U32)(ip-base); ++ ++ /* check repCode */ ++ { const U32 repIndex = (U32)(current+1 - offset_1); ++ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; ++ const BYTE* const repMatch = repBase + repIndex; ++ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ ++ if (MEM_read32(ip+1) == MEM_read32(repMatch)) { ++ /* repcode detected we should take it */ ++ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; ++ matchLength = ZSTD_count_2segments(ip+1+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32; ++ if (depth==0) goto _storeSequence; ++ } } ++ ++ /* first search (depth 0) */ ++ { size_t offsetFound = 99999999; ++ size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls); ++ if (ml2 > matchLength) ++ matchLength = ml2, start = ip, offset=offsetFound; ++ } ++ ++ if (matchLength < EQUAL_READ32) { ++ ip += ((ip-anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */ ++ continue; ++ } ++ ++ /* let's try to find a better solution */ ++ if (depth>=1) ++ while (ip < ilimit) { ++ ip ++; ++ current++; ++ /* check repCode */ ++ { const U32 repIndex = (U32)(current - offset_1); ++ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; ++ const BYTE* const repMatch = repBase + repIndex; ++ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ ++ if (MEM_read32(ip) == MEM_read32(repMatch)) { ++ /* repcode detected */ ++ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; ++ size_t const repLength = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32; ++ int const gain2 = (int)(repLength * 3); ++ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); ++ if ((repLength >= EQUAL_READ32) && (gain2 > gain1)) ++ matchLength = repLength, offset = 0, start = ip; ++ } } ++ ++ /* search match, depth 1 */ ++ { size_t offset2=99999999; ++ size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); ++ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ ++ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); ++ if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) { ++ matchLength = ml2, offset = offset2, start = ip; ++ continue; /* search a better one */ ++ } } ++ ++ /* let's find an even better one */ ++ if ((depth==2) && (ip < ilimit)) { ++ ip ++; ++ current++; ++ /* check repCode */ ++ { const U32 repIndex = (U32)(current - offset_1); ++ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; ++ const BYTE* const repMatch = repBase + repIndex; ++ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ ++ if (MEM_read32(ip) == MEM_read32(repMatch)) { ++ /* repcode detected */ ++ const BYTE* const repEnd = repIndex < dictLimit ? 
dictEnd : iend; ++ size_t repLength = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32; ++ int gain2 = (int)(repLength * 4); ++ int gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); ++ if ((repLength >= EQUAL_READ32) && (gain2 > gain1)) ++ matchLength = repLength, offset = 0, start = ip; ++ } } ++ ++ /* search match, depth 2 */ ++ { size_t offset2=99999999; ++ size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); ++ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ ++ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); ++ if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) { ++ matchLength = ml2, offset = offset2, start = ip; ++ continue; ++ } } } ++ break; /* nothing found : store previous solution */ ++ } ++ ++ /* catch up */ ++ if (offset) { ++ U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE)); ++ const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex; ++ const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart; ++ while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */ ++ offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE); ++ } ++ ++ /* store sequence */ ++_storeSequence: ++ { size_t const litLength = start - anchor; ++ ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH); ++ anchor = ip = start + matchLength; ++ } ++ ++ /* check immediate repcode */ ++ while (ip <= ilimit) { ++ const U32 repIndex = (U32)((ip-base) - offset_2); ++ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; ++ const BYTE* const repMatch = repBase + repIndex; ++ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ ++ if (MEM_read32(ip) == MEM_read32(repMatch)) { ++ /* repcode detected we should take it */ ++ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; ++ matchLength = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32; ++ offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset history */ ++ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH); ++ ip += matchLength; ++ anchor = ip; ++ continue; /* faster when present ... (?) 
*/ ++ } ++ break; ++ } } ++ ++ /* Save reps for next block */ ++ ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2; ++ ++ /* Last Literals */ ++ { size_t const lastLLSize = iend - anchor; ++ memcpy(seqStorePtr->lit, anchor, lastLLSize); ++ seqStorePtr->lit += lastLLSize; ++ } ++} ++ ++ ++void ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) ++{ ++ ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0); ++} ++ ++static void ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) ++{ ++ ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1); ++} ++ ++static void ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) ++{ ++ ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2); ++} ++ ++static void ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) ++{ ++ ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2); ++} ++ ++ ++/* The optimal parser */ ++#include "zstd_opt.h" ++ ++static void ZSTD_compressBlock_btopt(ZSTD_CCtx* ctx, const void* src, size_t srcSize) ++{ ++#ifdef ZSTD_OPT_H_91842398743 ++ ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0); ++#else ++ (void)ctx; (void)src; (void)srcSize; ++ return; ++#endif ++} ++ ++static void ZSTD_compressBlock_btopt2(ZSTD_CCtx* ctx, const void* src, size_t srcSize) ++{ ++#ifdef ZSTD_OPT_H_91842398743 ++ ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1); ++#else ++ (void)ctx; (void)src; (void)srcSize; ++ return; ++#endif ++} ++ ++static void ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) ++{ ++#ifdef ZSTD_OPT_H_91842398743 ++ ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0); ++#else ++ (void)ctx; (void)src; (void)srcSize; ++ return; ++#endif ++} ++ ++static void ZSTD_compressBlock_btopt2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) ++{ ++#ifdef ZSTD_OPT_H_91842398743 ++ ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1); ++#else ++ (void)ctx; (void)src; (void)srcSize; ++ return; ++#endif ++} ++ ++ ++typedef void (*ZSTD_blockCompressor) (ZSTD_CCtx* ctx, const void* src, size_t srcSize); ++ ++static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict) ++{ ++ static const ZSTD_blockCompressor blockCompressor[2][8] = { ++ { ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt, ZSTD_compressBlock_btopt2 }, ++ { ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict,ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btopt2_extDict } ++ }; ++ ++ return blockCompressor[extDict][(U32)strat]; ++} ++ ++ ++static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, const void* src, size_t srcSize) ++{ ++ ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->params.cParams.strategy, zc->lowLimit < zc->dictLimit); ++ const BYTE* const base = zc->base; ++ const BYTE* const istart = (const BYTE*)src; ++ const U32 current = (U32)(istart-base); ++ if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) return 0; /* don't even attempt compression below a certain srcSize */ ++ ZSTD_resetSeqStore(&(zc->seqStore)); ++ if (current > zc->nextToUpdate + 384) ++ 
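/* companion to the bestLength > 384 shortcut in ZSTD_insertBt1() : clamp the deferred tree catch-up to at most 192 positions */ ++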
zc->nextToUpdate = current - MIN(192, (U32)(current - zc->nextToUpdate - 384)); /* update tree not updated after finding very long rep matches */ ++ blockCompressor(zc, src, srcSize); ++ return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize); ++} ++ ++ ++/*! ZSTD_compress_generic() : ++* Compress a chunk of data into one or multiple blocks. ++* All blocks will be terminated, all input will be consumed. ++* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content. ++* Frame is supposed already started (header already produced) ++* @return : compressed size, or an error code ++*/ ++static size_t ZSTD_compress_generic (ZSTD_CCtx* cctx, ++ void* dst, size_t dstCapacity, ++ const void* src, size_t srcSize, ++ U32 lastFrameChunk) ++{ ++ size_t blockSize = cctx->blockSize; ++ size_t remaining = srcSize; ++ const BYTE* ip = (const BYTE*)src; ++ BYTE* const ostart = (BYTE*)dst; ++ BYTE* op = ostart; ++ U32 const maxDist = 1 << cctx->params.cParams.windowLog; ++ ++ if (cctx->params.fParams.checksumFlag && srcSize) ++ xxh64_update(&cctx->xxhState, src, srcSize); ++ ++ while (remaining) { ++ U32 const lastBlock = lastFrameChunk & (blockSize >= remaining); ++ size_t cSize; ++ ++ if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE) return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */ ++ if (remaining < blockSize) blockSize = remaining; ++ ++ /* preemptive overflow correction */ ++ if (cctx->lowLimit > (3U<<29)) { ++ U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->params.cParams.hashLog, cctx->params.cParams.strategy)) - 1; ++ U32 const current = (U32)(ip - cctx->base); ++ U32 const newCurrent = (current & cycleMask) + (1 << cctx->params.cParams.windowLog); ++ U32 const correction = current - newCurrent; ++ ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30); ++ ZSTD_reduceIndex(cctx, correction); ++ cctx->base += correction; ++ cctx->dictBase += correction; ++ cctx->lowLimit -= correction; ++ cctx->dictLimit -= correction; ++ if (cctx->nextToUpdate < correction) cctx->nextToUpdate = 0; ++ else cctx->nextToUpdate -= correction; ++ } ++ ++ if ((U32)(ip+blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) { ++ /* enforce maxDist */ ++ U32 const newLowLimit = (U32)(ip+blockSize - cctx->base) - maxDist; ++ if (cctx->lowLimit < newLowLimit) cctx->lowLimit = newLowLimit; ++ if (cctx->dictLimit < cctx->lowLimit) cctx->dictLimit = cctx->lowLimit; ++ } ++ ++ cSize = ZSTD_compressBlock_internal(cctx, op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize, ip, blockSize); ++ if (ZSTD_isError(cSize)) return cSize; ++ ++ if (cSize == 0) { /* block is not compressible */ ++ U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(blockSize << 3); ++ if (blockSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall); ++ MEM_writeLE32(op, cBlockHeader24); /* no pb, 4th byte will be overwritten */ ++ memcpy(op + ZSTD_blockHeaderSize, ip, blockSize); ++ cSize = ZSTD_blockHeaderSize+blockSize; ++ } else { ++ U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); ++ MEM_writeLE24(op, cBlockHeader24); ++ cSize += ZSTD_blockHeaderSize; ++ } ++ ++ remaining -= blockSize; ++ dstCapacity -= cSize; ++ ip += blockSize; ++ op += cSize; ++ } ++ ++ if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending; ++ return op-ostart; ++} ++ ++ ++static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity, ++ ZSTD_parameters params, U64 pledgedSrcSize, U32 dictID) ++{ BYTE* const op = 
(BYTE*)dst; ++ U32 const dictIDSizeCode = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */ ++ U32 const checksumFlag = params.fParams.checksumFlag>0; ++ U32 const windowSize = 1U << params.cParams.windowLog; ++ U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize); ++ BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3); ++ U32 const fcsCode = params.fParams.contentSizeFlag ? ++ (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : /* 0-3 */ ++ 0; ++ BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) ); ++ size_t pos; ++ ++ if (dstCapacity < ZSTD_frameHeaderSize_max) return ERROR(dstSize_tooSmall); ++ ++ MEM_writeLE32(dst, ZSTD_MAGICNUMBER); ++ op[4] = frameHeaderDescriptionByte; pos=5; ++ if (!singleSegment) op[pos++] = windowLogByte; ++ switch(dictIDSizeCode) ++ { ++ default: /* impossible */ ++ case 0 : break; ++ case 1 : op[pos] = (BYTE)(dictID); pos++; break; ++ case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break; ++ case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break; ++ } ++ switch(fcsCode) ++ { ++ default: /* impossible */ ++ case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break; ++ case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break; ++ case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break; ++ case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break; ++ } ++ return pos; ++} ++ ++ ++static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx, ++ void* dst, size_t dstCapacity, ++ const void* src, size_t srcSize, ++ U32 frame, U32 lastFrameChunk) ++{ ++ const BYTE* const ip = (const BYTE*) src; ++ size_t fhSize = 0; ++ ++ if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */ ++ ++ if (frame && (cctx->stage==ZSTDcs_init)) { ++ fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, cctx->frameContentSize, cctx->dictID); ++ if (ZSTD_isError(fhSize)) return fhSize; ++ dstCapacity -= fhSize; ++ dst = (char*)dst + fhSize; ++ cctx->stage = ZSTDcs_ongoing; ++ } ++ ++ /* Check if blocks follow each other */ ++ if (src != cctx->nextSrc) { ++ /* not contiguous */ ++ ptrdiff_t const delta = cctx->nextSrc - ip; ++ cctx->lowLimit = cctx->dictLimit; ++ cctx->dictLimit = (U32)(cctx->nextSrc - cctx->base); ++ cctx->dictBase = cctx->base; ++ cctx->base -= delta; ++ cctx->nextToUpdate = cctx->dictLimit; ++ if (cctx->dictLimit - cctx->lowLimit < HASH_READ_SIZE) cctx->lowLimit = cctx->dictLimit; /* too small extDict */ ++ } ++ ++ /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */ ++ if ((ip+srcSize > cctx->dictBase + cctx->lowLimit) & (ip < cctx->dictBase + cctx->dictLimit)) { ++ ptrdiff_t const highInputIdx = (ip + srcSize) - cctx->dictBase; ++ U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)cctx->dictLimit) ? cctx->dictLimit : (U32)highInputIdx; ++ cctx->lowLimit = lowLimitMax; ++ } ++ ++ cctx->nextSrc = ip + srcSize; ++ ++ if (srcSize) { ++ size_t const cSize = frame ? 
++ ZSTD_compress_generic (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) : ++ ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize); ++ if (ZSTD_isError(cSize)) return cSize; ++ return cSize + fhSize; ++ } else ++ return fhSize; ++} ++ ++ ++size_t ZSTD_compressContinue (ZSTD_CCtx* cctx, ++ void* dst, size_t dstCapacity, ++ const void* src, size_t srcSize) ++{ ++ return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0); ++} ++ ++ ++size_t ZSTD_getBlockSizeMax(ZSTD_CCtx* cctx) ++{ ++ return MIN (ZSTD_BLOCKSIZE_ABSOLUTEMAX, 1 << cctx->params.cParams.windowLog); ++} ++ ++size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) ++{ ++ size_t const blockSizeMax = ZSTD_getBlockSizeMax(cctx); ++ if (srcSize > blockSizeMax) return ERROR(srcSize_wrong); ++ return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0, 0); ++} ++ ++/*! ZSTD_loadDictionaryContent() : ++ * @return : 0, or an error code ++ */ ++static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx* zc, const void* src, size_t srcSize) ++{ ++ const BYTE* const ip = (const BYTE*) src; ++ const BYTE* const iend = ip + srcSize; ++ ++ /* input becomes current prefix */ ++ zc->lowLimit = zc->dictLimit; ++ zc->dictLimit = (U32)(zc->nextSrc - zc->base); ++ zc->dictBase = zc->base; ++ zc->base += ip - zc->nextSrc; ++ zc->nextToUpdate = zc->dictLimit; ++ zc->loadedDictEnd = zc->forceWindow ? 0 : (U32)(iend - zc->base); ++ ++ zc->nextSrc = iend; ++ if (srcSize <= HASH_READ_SIZE) return 0; ++ ++ switch(zc->params.cParams.strategy) ++ { ++ case ZSTD_fast: ++ ZSTD_fillHashTable (zc, iend, zc->params.cParams.searchLength); ++ break; ++ ++ case ZSTD_dfast: ++ ZSTD_fillDoubleHashTable (zc, iend, zc->params.cParams.searchLength); ++ break; ++ ++ case ZSTD_greedy: ++ case ZSTD_lazy: ++ case ZSTD_lazy2: ++ if (srcSize >= HASH_READ_SIZE) ++ ZSTD_insertAndFindFirstIndex(zc, iend-HASH_READ_SIZE, zc->params.cParams.searchLength); ++ break; ++ ++ case ZSTD_btlazy2: ++ case ZSTD_btopt: ++ case ZSTD_btopt2: ++ if (srcSize >= HASH_READ_SIZE) ++ ZSTD_updateTree(zc, iend-HASH_READ_SIZE, iend, 1 << zc->params.cParams.searchLog, zc->params.cParams.searchLength); ++ break; ++ ++ default: ++ return ERROR(GENERIC); /* strategy doesn't exist; impossible */ ++ } ++ ++ zc->nextToUpdate = (U32)(iend - zc->base); ++ return 0; ++} ++ ++ ++/* Dictionaries that assign zero probability to symbols that do show up cause problems ++ during FSE encoding. Refuse dictionaries that assign zero probability to symbols ++ that we may encounter during compression. ++ NOTE: This behavior is not standard and could be improved in the future. */ ++static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) { ++ U32 s; ++ if (dictMaxSymbolValue < maxSymbolValue) return ERROR(dictionary_corrupted); ++ for (s = 0; s <= maxSymbolValue; ++s) { ++ if (normalizedCounter[s] == 0) return ERROR(dictionary_corrupted); ++ } ++ return 0; ++} ++ ++ ++/* Dictionary format : ++ * See : ++ * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format ++ */ ++/*! 
ZSTD_loadZstdDictionary() : ++ * @return : 0, or an error code ++ * assumptions : magic number supposed already checked ++ * dictSize supposed > 8 ++ */ ++static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize) ++{ ++ const BYTE* dictPtr = (const BYTE*)dict; ++ const BYTE* const dictEnd = dictPtr + dictSize; ++ short offcodeNCount[MaxOff+1]; ++ unsigned offcodeMaxValue = MaxOff; ++ BYTE scratchBuffer[1<<MAX(MLFSELog, LLFSELog)]; ++ ++ dictPtr += 4; /* skip magic number */ ++ cctx->dictID = cctx->params.fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr); ++ dictPtr += 4; ++ ++ { size_t const hufHeaderSize = HUF_readCTable(cctx->hufTable, 255, dictPtr, dictEnd-dictPtr); ++ if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted); ++ dictPtr += hufHeaderSize; ++ } ++ ++ { unsigned offcodeLog; ++ size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); ++ if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted); ++ if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted); ++ /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */ ++ CHECK_E (FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, scratchBuffer, sizeof(scratchBuffer)), dictionary_corrupted); ++ dictPtr += offcodeHeaderSize; ++ } ++ ++ { short matchlengthNCount[MaxML+1]; ++ unsigned matchlengthMaxValue = MaxML, matchlengthLog; ++ size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); ++ if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted); ++ if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted); ++ /* Every match length code must have non-zero probability */ ++ CHECK_F (ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML)); ++ CHECK_E (FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, scratchBuffer, sizeof(scratchBuffer)), dictionary_corrupted); ++ dictPtr += matchlengthHeaderSize; ++ } ++ ++ { short litlengthNCount[MaxLL+1]; ++ unsigned litlengthMaxValue = MaxLL, litlengthLog; ++ size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); ++ if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted); ++ if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted); ++ /* Every literal length code must have non-zero probability */ ++ CHECK_F (ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL)); ++ CHECK_E(FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, scratchBuffer, sizeof(scratchBuffer)), dictionary_corrupted); ++ dictPtr += litlengthHeaderSize; ++ } ++ ++ if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted); ++ cctx->rep[0] = MEM_readLE32(dictPtr+0); ++ cctx->rep[1] = MEM_readLE32(dictPtr+4); ++ cctx->rep[2] = MEM_readLE32(dictPtr+8); ++ dictPtr += 12; ++ ++ { size_t const dictContentSize = (size_t)(dictEnd - dictPtr); ++ U32 offcodeMax = MaxOff; ++ if (dictContentSize <= ((U32)-1) - 128 KB) { ++ U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */ ++ offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */ ++ } ++ /* All offset values <= dictContentSize + 128 KB must be representable */ ++ CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff))); ++ /* 
All repCodes must be <= dictContentSize and != 0*/ ++ { U32 u; ++ for (u=0; u<3; u++) { ++ if (cctx->rep[u] == 0) return ERROR(dictionary_corrupted); ++ if (cctx->rep[u] > dictContentSize) return ERROR(dictionary_corrupted); ++ } } ++ ++ cctx->flagStaticTables = 1; ++ cctx->flagStaticHufTable = HUF_repeat_valid; ++ return ZSTD_loadDictionaryContent(cctx, dictPtr, dictContentSize); ++ } ++} ++ ++/** ZSTD_compress_insertDictionary() : ++* @return : 0, or an error code */ ++static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize) ++{ ++ if ((dict==NULL) || (dictSize<=8)) return 0; ++ ++ /* dict as pure content */ ++ if ((MEM_readLE32(dict) != ZSTD_DICT_MAGIC) || (cctx->forceRawDict)) ++ return ZSTD_loadDictionaryContent(cctx, dict, dictSize); ++ ++ /* dict as zstd dictionary */ ++ return ZSTD_loadZstdDictionary(cctx, dict, dictSize); ++} ++ ++/*! ZSTD_compressBegin_internal() : ++* @return : 0, or an error code */ ++static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx, ++ const void* dict, size_t dictSize, ++ ZSTD_parameters params, U64 pledgedSrcSize) ++{ ++ ZSTD_compResetPolicy_e const crp = dictSize ? ZSTDcrp_fullReset : ZSTDcrp_continue; ++ CHECK_F(ZSTD_resetCCtx_advanced(cctx, params, pledgedSrcSize, crp)); ++ return ZSTD_compress_insertDictionary(cctx, dict, dictSize); ++} ++ ++ ++/*! ZSTD_compressBegin_advanced() : ++* @return : 0, or an error code */ ++size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, ++ const void* dict, size_t dictSize, ++ ZSTD_parameters params, unsigned long long pledgedSrcSize) ++{ ++ /* compression parameters verification and optimization */ ++ CHECK_F(ZSTD_checkCParams(params.cParams)); ++ return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, pledgedSrcSize); ++} ++ ++ ++size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel) ++{ ++ ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize); ++ return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, 0); ++} ++ ++ ++size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel) ++{ ++ return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); ++} ++ ++ ++/*! ZSTD_writeEpilogue() : ++* Ends a frame. 
++
++/*! ZSTD_writeEpilogue() :
++*   Ends a frame.
++*   @return : nb of bytes written into dst (or an error code) */
++static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
++{
++    BYTE* const ostart = (BYTE*)dst;
++    BYTE* op = ostart;
++    size_t fhSize = 0;
++
++    if (cctx->stage == ZSTDcs_created) return ERROR(stage_wrong);   /* init missing */
++
++    /* special case : empty frame */
++    if (cctx->stage == ZSTDcs_init) {
++        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, 0, 0);
++        if (ZSTD_isError(fhSize)) return fhSize;
++        dstCapacity -= fhSize;
++        op += fhSize;
++        cctx->stage = ZSTDcs_ongoing;
++    }
++
++    if (cctx->stage != ZSTDcs_ending) {
++        /* write one last empty block, make it the "last" block */
++        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
++        if (dstCapacity<4) return ERROR(dstSize_tooSmall);
++        MEM_writeLE32(op, cBlockHeader24);
++        op += ZSTD_blockHeaderSize;
++        dstCapacity -= ZSTD_blockHeaderSize;
++    }
++
++    if (cctx->params.fParams.checksumFlag) {
++        U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
++        if (dstCapacity<4) return ERROR(dstSize_tooSmall);
++        MEM_writeLE32(op, checksum);
++        op += 4;
++    }
++
++    cctx->stage = ZSTDcs_created;   /* return to "created but no init" status */
++    return op-ostart;
++}
++
++
++size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
++                         void* dst, size_t dstCapacity,
++                   const void* src, size_t srcSize)
++{
++    size_t endResult;
++    size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 1);
++    if (ZSTD_isError(cSize)) return cSize;
++    endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
++    if (ZSTD_isError(endResult)) return endResult;
++    return cSize + endResult;
++}
++
++
++static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,
++                               void* dst, size_t dstCapacity,
++                         const void* src, size_t srcSize,
++                         const void* dict,size_t dictSize,
++                               ZSTD_parameters params)
++{
++    CHECK_F(ZSTD_compressBegin_internal(cctx, dict, dictSize, params, srcSize));
++    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
++}
++
++size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize, ZSTD_parameters params)
++{
++    return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
++}
++
++
++size_t ZSTD_compressCCtx(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, ZSTD_parameters params)
++{
++    return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, NULL, 0, params);
++}
++
++
++/* =====  Dictionary API  ===== */
++
++struct ZSTD_CDict_s {
++    void* dictBuffer;
++    const void* dictContent;
++    size_t dictContentSize;
++    ZSTD_CCtx* refContext;
++};  /* typedef'd to ZSTD_CDict within "zstd.h" */
++
++size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams)
++{
++    return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CDict));
++}
++
++static ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize, unsigned byReference,
++                                             ZSTD_parameters params, ZSTD_customMem customMem)
++{
++    if (!customMem.customAlloc || !customMem.customFree) return NULL;
++
++    {   ZSTD_CDict* const cdict = (ZSTD_CDict*) ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
++        ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(customMem);
++
++        if (!cdict || !cctx) {
++            ZSTD_free(cdict, customMem);
++            ZSTD_freeCCtx(cctx);
++            return NULL;
++        }
++
++        if ((byReference) || (!dictBuffer) || (!dictSize)) {
++            cdict->dictBuffer = NULL;
++            
cdict->dictContent = dictBuffer; ++ } else { ++ void* const internalBuffer = ZSTD_malloc(dictSize, customMem); ++ if (!internalBuffer) { ZSTD_free(cctx, customMem); ZSTD_free(cdict, customMem); return NULL; } ++ memcpy(internalBuffer, dictBuffer, dictSize); ++ cdict->dictBuffer = internalBuffer; ++ cdict->dictContent = internalBuffer; ++ } ++ ++ { size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0); ++ if (ZSTD_isError(errorCode)) { ++ ZSTD_free(cdict->dictBuffer, customMem); ++ ZSTD_free(cdict, customMem); ++ ZSTD_freeCCtx(cctx); ++ return NULL; ++ } } ++ ++ cdict->refContext = cctx; ++ cdict->dictContentSize = dictSize; ++ return cdict; ++ } ++} ++ ++ZSTD_CDict* ZSTD_initCDict(const void* dict, size_t dictSize, ZSTD_parameters params, void* workspace, size_t workspaceSize) ++{ ++ ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); ++ return ZSTD_createCDict_advanced(dict, dictSize, 1, params, stackMem); ++} ++ ++size_t ZSTD_freeCDict(ZSTD_CDict* cdict) ++{ ++ if (cdict==NULL) return 0; /* support free on NULL */ ++ { ZSTD_customMem const cMem = cdict->refContext->customMem; ++ ZSTD_freeCCtx(cdict->refContext); ++ ZSTD_free(cdict->dictBuffer, cMem); ++ ZSTD_free(cdict, cMem); ++ return 0; ++ } ++} ++ ++static ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict* cdict) { ++ return ZSTD_getParamsFromCCtx(cdict->refContext); ++} ++ ++size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, unsigned long long pledgedSrcSize) ++{ ++ if (cdict->dictContentSize) CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize)) ++ else { ++ ZSTD_parameters params = cdict->refContext->params; ++ params.fParams.contentSizeFlag = (pledgedSrcSize > 0); ++ CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, params, pledgedSrcSize)); ++ } ++ return 0; ++} ++ ++/*! ZSTD_compress_usingCDict() : ++* Compression using a digested Dictionary. ++* Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times. 
++* Note that compression level is decided during dictionary creation */ ++size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx, ++ void* dst, size_t dstCapacity, ++ const void* src, size_t srcSize, ++ const ZSTD_CDict* cdict) ++{ ++ CHECK_F(ZSTD_compressBegin_usingCDict(cctx, cdict, srcSize)); ++ ++ if (cdict->refContext->params.fParams.contentSizeFlag==1) { ++ cctx->params.fParams.contentSizeFlag = 1; ++ cctx->frameContentSize = srcSize; ++ } else { ++ cctx->params.fParams.contentSizeFlag = 0; ++ } ++ ++ return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); ++} ++ ++ ++ ++/* ****************************************************************** ++* Streaming ++********************************************************************/ ++ ++typedef enum { zcss_init, zcss_load, zcss_flush, zcss_final } ZSTD_cStreamStage; ++ ++struct ZSTD_CStream_s { ++ ZSTD_CCtx* cctx; ++ ZSTD_CDict* cdictLocal; ++ const ZSTD_CDict* cdict; ++ char* inBuff; ++ size_t inBuffSize; ++ size_t inToCompress; ++ size_t inBuffPos; ++ size_t inBuffTarget; ++ size_t blockSize; ++ char* outBuff; ++ size_t outBuffSize; ++ size_t outBuffContentSize; ++ size_t outBuffFlushedSize; ++ ZSTD_cStreamStage stage; ++ U32 checksum; ++ U32 frameEnded; ++ U64 pledgedSrcSize; ++ U64 inputProcessed; ++ ZSTD_parameters params; ++ ZSTD_customMem customMem; ++}; /* typedef'd to ZSTD_CStream within "zstd.h" */ ++ ++size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams) ++{ ++ size_t const inBuffSize = (size_t)1 << cParams.windowLog; ++ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, inBuffSize); ++ size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1; ++ ++ return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize); ++} ++ ++ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem) ++{ ++ ZSTD_CStream* zcs; ++ ++ if (!customMem.customAlloc || !customMem.customFree) return NULL; ++ ++ zcs = (ZSTD_CStream*)ZSTD_malloc(sizeof(ZSTD_CStream), customMem); ++ if (zcs==NULL) return NULL; ++ memset(zcs, 0, sizeof(ZSTD_CStream)); ++ memcpy(&zcs->customMem, &customMem, sizeof(ZSTD_customMem)); ++ zcs->cctx = ZSTD_createCCtx_advanced(customMem); ++ if (zcs->cctx == NULL) { ZSTD_freeCStream(zcs); return NULL; } ++ return zcs; ++} ++ ++size_t ZSTD_freeCStream(ZSTD_CStream* zcs) ++{ ++ if (zcs==NULL) return 0; /* support free on NULL */ ++ { ZSTD_customMem const cMem = zcs->customMem; ++ ZSTD_freeCCtx(zcs->cctx); ++ zcs->cctx = NULL; ++ ZSTD_freeCDict(zcs->cdictLocal); ++ zcs->cdictLocal = NULL; ++ ZSTD_free(zcs->inBuff, cMem); ++ zcs->inBuff = NULL; ++ ZSTD_free(zcs->outBuff, cMem); ++ zcs->outBuff = NULL; ++ ZSTD_free(zcs, cMem); ++ return 0; ++ } ++} ++ ++ ++/*====== Initialization ======*/ ++ ++size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; } ++size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_ABSOLUTEMAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ; } ++ ++static size_t ZSTD_resetCStream_internal(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize) ++{ ++ if (zcs->inBuffSize==0) return ERROR(stage_wrong); /* zcs has not been init at least once => can't reset */ ++ ++ if (zcs->cdict) CHECK_F(ZSTD_compressBegin_usingCDict(zcs->cctx, zcs->cdict, pledgedSrcSize)) ++ else CHECK_F(ZSTD_compressBegin_advanced(zcs->cctx, NULL, 0, zcs->params, pledgedSrcSize)); ++ ++ zcs->inToCompress = 0; ++ zcs->inBuffPos = 0; ++ zcs->inBuffTarget = zcs->blockSize; ++ zcs->outBuffContentSize = zcs->outBuffFlushedSize 
= 0; ++ zcs->stage = zcss_load; ++ zcs->frameEnded = 0; ++ zcs->pledgedSrcSize = pledgedSrcSize; ++ zcs->inputProcessed = 0; ++ return 0; /* ready to go */ ++} ++ ++size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize) ++{ ++ ++ zcs->params.fParams.contentSizeFlag = (pledgedSrcSize > 0); ++ ++ return ZSTD_resetCStream_internal(zcs, pledgedSrcSize); ++} ++ ++static size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, ++ const void* dict, size_t dictSize, ++ ZSTD_parameters params, unsigned long long pledgedSrcSize) ++{ ++ /* allocate buffers */ ++ { size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog; ++ if (zcs->inBuffSize < neededInBuffSize) { ++ zcs->inBuffSize = neededInBuffSize; ++ ZSTD_free(zcs->inBuff, zcs->customMem); ++ zcs->inBuff = (char*) ZSTD_malloc(neededInBuffSize, zcs->customMem); ++ if (zcs->inBuff == NULL) return ERROR(memory_allocation); ++ } ++ zcs->blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, neededInBuffSize); ++ } ++ if (zcs->outBuffSize < ZSTD_compressBound(zcs->blockSize)+1) { ++ zcs->outBuffSize = ZSTD_compressBound(zcs->blockSize)+1; ++ ZSTD_free(zcs->outBuff, zcs->customMem); ++ zcs->outBuff = (char*) ZSTD_malloc(zcs->outBuffSize, zcs->customMem); ++ if (zcs->outBuff == NULL) return ERROR(memory_allocation); ++ } ++ ++ if (dict && dictSize >= 8) { ++ ZSTD_freeCDict(zcs->cdictLocal); ++ zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, 0, params, zcs->customMem); ++ if (zcs->cdictLocal == NULL) return ERROR(memory_allocation); ++ zcs->cdict = zcs->cdictLocal; ++ } else zcs->cdict = NULL; ++ ++ zcs->checksum = params.fParams.checksumFlag > 0; ++ zcs->params = params; ++ ++ return ZSTD_resetCStream_internal(zcs, pledgedSrcSize); ++} ++ ++ZSTD_CStream* ZSTD_initCStream(ZSTD_parameters params, unsigned long long pledgedSrcSize, void* workspace, size_t workspaceSize) ++{ ++ ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); ++ ZSTD_CStream* const zcs = ZSTD_createCStream_advanced(stackMem); ++ if (zcs) { ++ size_t const code = ZSTD_initCStream_advanced(zcs, NULL, 0, params, pledgedSrcSize); ++ if (ZSTD_isError(code)) { return NULL; } ++ } ++ return zcs; ++} ++ ++ZSTD_CStream* ZSTD_initCStream_usingCDict(const ZSTD_CDict* cdict, unsigned long long pledgedSrcSize, void* workspace, size_t workspaceSize) ++{ ++ ZSTD_parameters const params = ZSTD_getParamsFromCDict(cdict); ++ ZSTD_CStream* const zcs = ZSTD_initCStream(params, pledgedSrcSize, workspace, workspaceSize); ++ if (zcs) { ++ zcs->cdict = cdict; ++ if (ZSTD_isError(ZSTD_resetCStream_internal(zcs, pledgedSrcSize))) { ++ return NULL; ++ } ++ } ++ return zcs; ++} ++ ++/*====== Compression ======*/ ++ ++typedef enum { zsf_gather, zsf_flush, zsf_end } ZSTD_flush_e; ++ ++MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize) ++{ ++ size_t const length = MIN(dstCapacity, srcSize); ++ memcpy(dst, src, length); ++ return length; ++} ++ ++static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, ++ void* dst, size_t* dstCapacityPtr, ++ const void* src, size_t* srcSizePtr, ++ ZSTD_flush_e const flush) ++{ ++ U32 someMoreWork = 1; ++ const char* const istart = (const char*)src; ++ const char* const iend = istart + *srcSizePtr; ++ const char* ip = istart; ++ char* const ostart = (char*)dst; ++ char* const oend = ostart + *dstCapacityPtr; ++ char* op = ostart; ++ ++ while (someMoreWork) { ++ switch(zcs->stage) ++ { ++ case zcss_init: return ERROR(init_missing); /* call ZBUFF_compressInit() first ! 
*/ ++ ++ case zcss_load: ++ /* complete inBuffer */ ++ { size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos; ++ size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend-ip); ++ zcs->inBuffPos += loaded; ++ ip += loaded; ++ if ( (zcs->inBuffPos==zcs->inToCompress) || (!flush && (toLoad != loaded)) ) { ++ someMoreWork = 0; break; /* not enough input to get a full block : stop there, wait for more */ ++ } } ++ /* compress current block (note : this stage cannot be stopped in the middle) */ ++ { void* cDst; ++ size_t cSize; ++ size_t const iSize = zcs->inBuffPos - zcs->inToCompress; ++ size_t oSize = oend-op; ++ if (oSize >= ZSTD_compressBound(iSize)) ++ cDst = op; /* compress directly into output buffer (avoid flush stage) */ ++ else ++ cDst = zcs->outBuff, oSize = zcs->outBuffSize; ++ cSize = (flush == zsf_end) ? ++ ZSTD_compressEnd(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize) : ++ ZSTD_compressContinue(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize); ++ if (ZSTD_isError(cSize)) return cSize; ++ if (flush == zsf_end) zcs->frameEnded = 1; ++ /* prepare next block */ ++ zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize; ++ if (zcs->inBuffTarget > zcs->inBuffSize) ++ zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; /* note : inBuffSize >= blockSize */ ++ zcs->inToCompress = zcs->inBuffPos; ++ if (cDst == op) { op += cSize; break; } /* no need to flush */ ++ zcs->outBuffContentSize = cSize; ++ zcs->outBuffFlushedSize = 0; ++ zcs->stage = zcss_flush; /* pass-through to flush stage */ ++ } ++ ++ case zcss_flush: ++ { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; ++ size_t const flushed = ZSTD_limitCopy(op, oend-op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush); ++ op += flushed; ++ zcs->outBuffFlushedSize += flushed; ++ if (toFlush!=flushed) { someMoreWork = 0; break; } /* dst too small to store flushed data : stop there */ ++ zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; ++ zcs->stage = zcss_load; ++ break; ++ } ++ ++ case zcss_final: ++ someMoreWork = 0; /* do nothing */ ++ break; ++ ++ default: ++ return ERROR(GENERIC); /* impossible */ ++ } ++ } ++ ++ *srcSizePtr = ip - istart; ++ *dstCapacityPtr = op - ostart; ++ zcs->inputProcessed += *srcSizePtr; ++ if (zcs->frameEnded) return 0; ++ { size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos; ++ if (hintInSize==0) hintInSize = zcs->blockSize; ++ return hintInSize; ++ } ++} ++ ++size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input) ++{ ++ size_t sizeRead = input->size - input->pos; ++ size_t sizeWritten = output->size - output->pos; ++ size_t const result = ZSTD_compressStream_generic(zcs, ++ (char*)(output->dst) + output->pos, &sizeWritten, ++ (const char*)(input->src) + input->pos, &sizeRead, zsf_gather); ++ input->pos += sizeRead; ++ output->pos += sizeWritten; ++ return result; ++} ++ ++ ++/*====== Finalize ======*/ ++ ++/*! 
ZSTD_flushStream() : ++* @return : amount of data remaining to flush */ ++size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) ++{ ++ size_t srcSize = 0; ++ size_t sizeWritten = output->size - output->pos; ++ size_t const result = ZSTD_compressStream_generic(zcs, ++ (char*)(output->dst) + output->pos, &sizeWritten, ++ &srcSize, &srcSize, /* use a valid src address instead of NULL */ ++ zsf_flush); ++ output->pos += sizeWritten; ++ if (ZSTD_isError(result)) return result; ++ return zcs->outBuffContentSize - zcs->outBuffFlushedSize; /* remaining to flush */ ++} ++ ++ ++size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) ++{ ++ BYTE* const ostart = (BYTE*)(output->dst) + output->pos; ++ BYTE* const oend = (BYTE*)(output->dst) + output->size; ++ BYTE* op = ostart; ++ ++ if ((zcs->pledgedSrcSize) && (zcs->inputProcessed != zcs->pledgedSrcSize)) ++ return ERROR(srcSize_wrong); /* pledgedSrcSize not respected */ ++ ++ if (zcs->stage != zcss_final) { ++ /* flush whatever remains */ ++ size_t srcSize = 0; ++ size_t sizeWritten = output->size - output->pos; ++ size_t const notEnded = ZSTD_compressStream_generic(zcs, ostart, &sizeWritten, &srcSize, &srcSize, zsf_end); /* use a valid src address instead of NULL */ ++ size_t const remainingToFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; ++ op += sizeWritten; ++ if (remainingToFlush) { ++ output->pos += sizeWritten; ++ return remainingToFlush + ZSTD_BLOCKHEADERSIZE /* final empty block */ + (zcs->checksum * 4); ++ } ++ /* create epilogue */ ++ zcs->stage = zcss_final; ++ zcs->outBuffContentSize = !notEnded ? 0 : ++ ZSTD_compressEnd(zcs->cctx, zcs->outBuff, zcs->outBuffSize, NULL, 0); /* write epilogue, including final empty block, into outBuff */ ++ } ++ ++ /* flush epilogue */ ++ { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; ++ size_t const flushed = ZSTD_limitCopy(op, oend-op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush); ++ op += flushed; ++ zcs->outBuffFlushedSize += flushed; ++ output->pos += op-ostart; ++ if (toFlush==flushed) zcs->stage = zcss_init; /* end reached */ ++ return toFlush - flushed; ++ } ++} ++ ++ ++ ++/*-===== Pre-defined compression levels =====-*/ ++ ++#define ZSTD_DEFAULT_CLEVEL 1 ++#define ZSTD_MAX_CLEVEL 22 ++int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; } ++ ++static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = { ++{ /* "default" */ ++ /* W, C, H, S, L, TL, strat */ ++ { 18, 12, 12, 1, 7, 16, ZSTD_fast }, /* level 0 - never used */ ++ { 19, 13, 14, 1, 7, 16, ZSTD_fast }, /* level 1 */ ++ { 19, 15, 16, 1, 6, 16, ZSTD_fast }, /* level 2 */ ++ { 20, 16, 17, 1, 5, 16, ZSTD_dfast }, /* level 3.*/ ++ { 20, 18, 18, 1, 5, 16, ZSTD_dfast }, /* level 4.*/ ++ { 20, 15, 18, 3, 5, 16, ZSTD_greedy }, /* level 5 */ ++ { 21, 16, 19, 2, 5, 16, ZSTD_lazy }, /* level 6 */ ++ { 21, 17, 20, 3, 5, 16, ZSTD_lazy }, /* level 7 */ ++ { 21, 18, 20, 3, 5, 16, ZSTD_lazy2 }, /* level 8 */ ++ { 21, 20, 20, 3, 5, 16, ZSTD_lazy2 }, /* level 9 */ ++ { 21, 19, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 10 */ ++ { 22, 20, 22, 4, 5, 16, ZSTD_lazy2 }, /* level 11 */ ++ { 22, 20, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 12 */ ++ { 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 13 */ ++ { 22, 21, 22, 6, 5, 16, ZSTD_lazy2 }, /* level 14 */ ++ { 22, 21, 21, 5, 5, 16, ZSTD_btlazy2 }, /* level 15 */ ++ { 23, 22, 22, 5, 5, 16, ZSTD_btlazy2 }, /* level 16 */ ++ { 23, 21, 22, 4, 5, 24, ZSTD_btopt }, /* level 17 */ ++ { 23, 23, 22, 6, 5, 32, ZSTD_btopt }, /* level 18 */ 
++ { 23, 23, 22, 6, 3, 48, ZSTD_btopt }, /* level 19 */ ++ { 25, 25, 23, 7, 3, 64, ZSTD_btopt2 }, /* level 20 */ ++ { 26, 26, 23, 7, 3,256, ZSTD_btopt2 }, /* level 21 */ ++ { 27, 27, 25, 9, 3,512, ZSTD_btopt2 }, /* level 22 */ ++}, ++{ /* for srcSize <= 256 KB */ ++ /* W, C, H, S, L, T, strat */ ++ { 0, 0, 0, 0, 0, 0, ZSTD_fast }, /* level 0 - not used */ ++ { 18, 13, 14, 1, 6, 8, ZSTD_fast }, /* level 1 */ ++ { 18, 14, 13, 1, 5, 8, ZSTD_dfast }, /* level 2 */ ++ { 18, 16, 15, 1, 5, 8, ZSTD_dfast }, /* level 3 */ ++ { 18, 15, 17, 1, 5, 8, ZSTD_greedy }, /* level 4.*/ ++ { 18, 16, 17, 4, 5, 8, ZSTD_greedy }, /* level 5.*/ ++ { 18, 16, 17, 3, 5, 8, ZSTD_lazy }, /* level 6.*/ ++ { 18, 17, 17, 4, 4, 8, ZSTD_lazy }, /* level 7 */ ++ { 18, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ ++ { 18, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ ++ { 18, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ ++ { 18, 18, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 11.*/ ++ { 18, 18, 17, 7, 4, 8, ZSTD_lazy2 }, /* level 12.*/ ++ { 18, 19, 17, 6, 4, 8, ZSTD_btlazy2 }, /* level 13 */ ++ { 18, 18, 18, 4, 4, 16, ZSTD_btopt }, /* level 14.*/ ++ { 18, 18, 18, 4, 3, 16, ZSTD_btopt }, /* level 15.*/ ++ { 18, 19, 18, 6, 3, 32, ZSTD_btopt }, /* level 16.*/ ++ { 18, 19, 18, 8, 3, 64, ZSTD_btopt }, /* level 17.*/ ++ { 18, 19, 18, 9, 3,128, ZSTD_btopt }, /* level 18.*/ ++ { 18, 19, 18, 10, 3,256, ZSTD_btopt }, /* level 19.*/ ++ { 18, 19, 18, 11, 3,512, ZSTD_btopt2 }, /* level 20.*/ ++ { 18, 19, 18, 12, 3,512, ZSTD_btopt2 }, /* level 21.*/ ++ { 18, 19, 18, 13, 3,512, ZSTD_btopt2 }, /* level 22.*/ ++}, ++{ /* for srcSize <= 128 KB */ ++ /* W, C, H, S, L, T, strat */ ++ { 17, 12, 12, 1, 7, 8, ZSTD_fast }, /* level 0 - not used */ ++ { 17, 12, 13, 1, 6, 8, ZSTD_fast }, /* level 1 */ ++ { 17, 13, 16, 1, 5, 8, ZSTD_fast }, /* level 2 */ ++ { 17, 16, 16, 2, 5, 8, ZSTD_dfast }, /* level 3 */ ++ { 17, 13, 15, 3, 4, 8, ZSTD_greedy }, /* level 4 */ ++ { 17, 15, 17, 4, 4, 8, ZSTD_greedy }, /* level 5 */ ++ { 17, 16, 17, 3, 4, 8, ZSTD_lazy }, /* level 6 */ ++ { 17, 15, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 7 */ ++ { 17, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ ++ { 17, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ ++ { 17, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ ++ { 17, 17, 17, 7, 4, 8, ZSTD_lazy2 }, /* level 11 */ ++ { 17, 17, 17, 8, 4, 8, ZSTD_lazy2 }, /* level 12 */ ++ { 17, 18, 17, 6, 4, 8, ZSTD_btlazy2 }, /* level 13.*/ ++ { 17, 17, 17, 7, 3, 8, ZSTD_btopt }, /* level 14.*/ ++ { 17, 17, 17, 7, 3, 16, ZSTD_btopt }, /* level 15.*/ ++ { 17, 18, 17, 7, 3, 32, ZSTD_btopt }, /* level 16.*/ ++ { 17, 18, 17, 7, 3, 64, ZSTD_btopt }, /* level 17.*/ ++ { 17, 18, 17, 7, 3,256, ZSTD_btopt }, /* level 18.*/ ++ { 17, 18, 17, 8, 3,256, ZSTD_btopt }, /* level 19.*/ ++ { 17, 18, 17, 9, 3,256, ZSTD_btopt2 }, /* level 20.*/ ++ { 17, 18, 17, 10, 3,256, ZSTD_btopt2 }, /* level 21.*/ ++ { 17, 18, 17, 11, 3,512, ZSTD_btopt2 }, /* level 22.*/ ++}, ++{ /* for srcSize <= 16 KB */ ++ /* W, C, H, S, L, T, strat */ ++ { 14, 12, 12, 1, 7, 6, ZSTD_fast }, /* level 0 - not used */ ++ { 14, 14, 14, 1, 6, 6, ZSTD_fast }, /* level 1 */ ++ { 14, 14, 14, 1, 4, 6, ZSTD_fast }, /* level 2 */ ++ { 14, 14, 14, 1, 4, 6, ZSTD_dfast }, /* level 3.*/ ++ { 14, 14, 14, 4, 4, 6, ZSTD_greedy }, /* level 4.*/ ++ { 14, 14, 14, 3, 4, 6, ZSTD_lazy }, /* level 5.*/ ++ { 14, 14, 14, 4, 4, 6, ZSTD_lazy2 }, /* level 6 */ ++ { 14, 14, 14, 5, 4, 6, ZSTD_lazy2 }, /* level 7 */ ++ { 14, 14, 14, 6, 4, 6, ZSTD_lazy2 }, /* level 8.*/ ++ { 14, 15, 14, 6, 4, 6, ZSTD_btlazy2 }, /* level 9.*/ ++ { 14, 
15, 14, 3, 3, 6, ZSTD_btopt }, /* level 10.*/ ++ { 14, 15, 14, 6, 3, 8, ZSTD_btopt }, /* level 11.*/ ++ { 14, 15, 14, 6, 3, 16, ZSTD_btopt }, /* level 12.*/ ++ { 14, 15, 14, 6, 3, 24, ZSTD_btopt }, /* level 13.*/ ++ { 14, 15, 15, 6, 3, 48, ZSTD_btopt }, /* level 14.*/ ++ { 14, 15, 15, 6, 3, 64, ZSTD_btopt }, /* level 15.*/ ++ { 14, 15, 15, 6, 3, 96, ZSTD_btopt }, /* level 16.*/ ++ { 14, 15, 15, 6, 3,128, ZSTD_btopt }, /* level 17.*/ ++ { 14, 15, 15, 6, 3,256, ZSTD_btopt }, /* level 18.*/ ++ { 14, 15, 15, 7, 3,256, ZSTD_btopt }, /* level 19.*/ ++ { 14, 15, 15, 8, 3,256, ZSTD_btopt2 }, /* level 20.*/ ++ { 14, 15, 15, 9, 3,256, ZSTD_btopt2 }, /* level 21.*/ ++ { 14, 15, 15, 10, 3,256, ZSTD_btopt2 }, /* level 22.*/ ++}, ++}; ++ ++/*! ZSTD_getCParams() : ++* @return ZSTD_compressionParameters structure for a selected compression level, `srcSize` and `dictSize`. ++* Size values are optional, provide 0 if not known or unused */ ++ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSize, size_t dictSize) ++{ ++ ZSTD_compressionParameters cp; ++ size_t const addedSize = srcSize ? 0 : 500; ++ U64 const rSize = srcSize+dictSize ? srcSize+dictSize+addedSize : (U64)-1; ++ U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); /* intentional underflow for srcSizeHint == 0 */ ++ if (compressionLevel <= 0) compressionLevel = ZSTD_DEFAULT_CLEVEL; /* 0 == default; no negative compressionLevel yet */ ++ if (compressionLevel > ZSTD_MAX_CLEVEL) compressionLevel = ZSTD_MAX_CLEVEL; ++ cp = ZSTD_defaultCParameters[tableID][compressionLevel]; ++ if (MEM_32bits()) { /* auto-correction, for 32-bits mode */ ++ if (cp.windowLog > ZSTD_WINDOWLOG_MAX) cp.windowLog = ZSTD_WINDOWLOG_MAX; ++ if (cp.chainLog > ZSTD_CHAINLOG_MAX) cp.chainLog = ZSTD_CHAINLOG_MAX; ++ if (cp.hashLog > ZSTD_HASHLOG_MAX) cp.hashLog = ZSTD_HASHLOG_MAX; ++ } ++ cp = ZSTD_adjustCParams(cp, srcSize, dictSize); ++ return cp; ++} ++ ++/*! ZSTD_getParams() : ++* same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`). 
++*   All fields of `ZSTD_frameParameters` are set to default (0) */
++ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSize, size_t dictSize) {
++    ZSTD_parameters params;
++    ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSize, dictSize);
++    memset(&params, 0, sizeof(params));
++    params.cParams = cParams;
++    return params;
++}
++
++EXPORT_SYMBOL(ZSTD_maxCLevel);
++EXPORT_SYMBOL(ZSTD_compressBound);
++
++EXPORT_SYMBOL(ZSTD_CCtxWorkspaceBound);
++EXPORT_SYMBOL(ZSTD_initCCtx);
++EXPORT_SYMBOL(ZSTD_compressCCtx);
++EXPORT_SYMBOL(ZSTD_compress_usingDict);
++
++EXPORT_SYMBOL(ZSTD_CDictWorkspaceBound);
++EXPORT_SYMBOL(ZSTD_initCDict);
++EXPORT_SYMBOL(ZSTD_compress_usingCDict);
++
++EXPORT_SYMBOL(ZSTD_CStreamWorkspaceBound);
++EXPORT_SYMBOL(ZSTD_initCStream);
++EXPORT_SYMBOL(ZSTD_initCStream_usingCDict);
++EXPORT_SYMBOL(ZSTD_resetCStream);
++EXPORT_SYMBOL(ZSTD_compressStream);
++EXPORT_SYMBOL(ZSTD_flushStream);
++EXPORT_SYMBOL(ZSTD_endStream);
++EXPORT_SYMBOL(ZSTD_CStreamInSize);
++EXPORT_SYMBOL(ZSTD_CStreamOutSize);
++
++EXPORT_SYMBOL(ZSTD_getCParams);
++EXPORT_SYMBOL(ZSTD_getParams);
++EXPORT_SYMBOL(ZSTD_checkCParams);
++EXPORT_SYMBOL(ZSTD_adjustCParams);
++
++EXPORT_SYMBOL(ZSTD_compressBegin);
++EXPORT_SYMBOL(ZSTD_compressBegin_usingDict);
++EXPORT_SYMBOL(ZSTD_compressBegin_advanced);
++EXPORT_SYMBOL(ZSTD_copyCCtx);
++EXPORT_SYMBOL(ZSTD_compressBegin_usingCDict);
++EXPORT_SYMBOL(ZSTD_compressContinue);
++EXPORT_SYMBOL(ZSTD_compressEnd);
++
++EXPORT_SYMBOL(ZSTD_getBlockSizeMax);
++EXPORT_SYMBOL(ZSTD_compressBlock);
++
++MODULE_LICENSE("BSD");
++MODULE_DESCRIPTION("Zstd Compressor");
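++
++/*
++ * Module usage sketch (illustrative only; assumes a kernel caller that can
++ * vmalloc() the workspace, and a hypothetical -ENOMEM error path):
++ *
++ *   ZSTD_parameters const params = ZSTD_getParams(3, srcSize, 0);
++ *   size_t const wkspSize = ZSTD_CCtxWorkspaceBound(params.cParams);
++ *   void *wksp = vmalloc(wkspSize);
++ *   ZSTD_CCtx *cctx = wksp ? ZSTD_initCCtx(wksp, wkspSize) : NULL;
++ *   if (!cctx) { vfree(wksp); return -ENOMEM; }
++ *   size_t const cSize = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, params);
++ *   vfree(wksp);
++ *   // cSize is the compressed frame size, or an error per ZSTD_isError()
++ */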
+diff --git a/lib/zstd/decompress.c b/lib/zstd/decompress.c
+new file mode 100644
+index 0000000..98508b1
+--- /dev/null
++++ b/lib/zstd/decompress.c
+@@ -0,0 +1,2377 @@
++/**
++ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
++ * All rights reserved.
++ *
++ * This source code is licensed under the BSD-style license found in the
++ * LICENSE file in the root directory of this source tree. An additional grant
++ * of patent rights can be found in the PATENTS file in the same directory.
++ */
++
++
++/* ***************************************************************
++*  Tuning parameters
++*****************************************************************/
++/*!
++*  MAXWINDOWSIZE_DEFAULT :
++*  maximum window size accepted by DStream, by default.
++*  Frames requiring more memory will be rejected.
++*/
++#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
++#  define ZSTD_MAXWINDOWSIZE_DEFAULT ((1 << ZSTD_WINDOWLOG_MAX) + 1)   /* defined within zstd.h */
++#endif
++
++
++/*-*******************************************************
++*  Dependencies
++*********************************************************/
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/string.h>   /* memcpy, memmove, memset */
++#include "mem.h"            /* low level memory routines */
++#include "fse.h"
++#include "huf.h"
++#include "zstd_internal.h"
++
++#define ZSTD_PREFETCH(ptr)   __builtin_prefetch(ptr, 0, 0)
++
++/*-*************************************
++*  Macros
++***************************************/
++#define ZSTD_isError ERR_isError   /* for inlining */
++#define FSE_isError  ERR_isError
++#define HUF_isError  ERR_isError
++
++
++/*_*******************************************************
++*  Memory operations
++**********************************************************/
++static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
++
++
++/*-*************************************************************
++*   Context management
++***************************************************************/
++typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
++               ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
++               ZSTDds_decompressLastBlock, ZSTDds_checkChecksum,
++               ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage;
++
++typedef struct {
++    FSE_DTable LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
++    FSE_DTable OFTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
++    FSE_DTable MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
++    HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)];   /* can accommodate HUF_decompress4X */
++    U32 rep[ZSTD_REP_NUM];
++} ZSTD_entropyTables_t;
++
++struct ZSTD_DCtx_s
++{
++    const FSE_DTable* LLTptr;
++    const FSE_DTable* MLTptr;
++    const FSE_DTable* OFTptr;
++    const HUF_DTable* HUFptr;
++    ZSTD_entropyTables_t entropy;
++    const void* previousDstEnd;   /* detect continuity */
++    const void* base;             /* start of current segment */
++    const void* vBase;            /* virtual start of previous segment if it was just before current one */
++    const void* dictEnd;          /* end of previous segment */
++    size_t expected;
++    ZSTD_frameParams fParams;
++    blockType_e bType;   /* used in ZSTD_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
++    ZSTD_dStage stage;
++    U32 litEntropy;
++    U32 fseEntropy;
++    struct xxh64_state xxhState;
++    size_t headerSize;
++    U32 dictID;
++    const BYTE* litPtr;
++    ZSTD_customMem customMem;
++    size_t litSize;
++    size_t rleSize;
++    BYTE litBuffer[ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH];
++    BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
++};  /* typedef'd to ZSTD_DCtx within "zstd.h" */
++
++size_t ZSTD_DCtxWorkspaceBound(void)
++{
++    return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DCtx));
++}
++
++size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
++{
++    dctx->expected = ZSTD_frameHeaderSize_prefix;
++    dctx->stage = ZSTDds_getFrameHeaderSize;
++    dctx->previousDstEnd = NULL;
++    dctx->base = NULL;
++    dctx->vBase = NULL;
++    dctx->dictEnd = NULL;
++    dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);   /* cover both little and big endian */
++    dctx->litEntropy = dctx->fseEntropy = 0;
++    dctx->dictID = 0;
++    MEM_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
++    memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue));   /* initial repcodes */
++    dctx->LLTptr = dctx->entropy.LLTable;
++    dctx->MLTptr = dctx->entropy.MLTable;
++    dctx->OFTptr = dctx->entropy.OFTable;
++    dctx->HUFptr = dctx->entropy.hufTable;
++    return 0;
++}
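++
++/*
++ * Decompression counterpart of the compressor's usage sketch (illustrative;
++ * assumes the decompressed size is known to the caller and that
++ * ZSTD_decompressDCtx(), defined further down in this file, is used as the
++ * one-shot entry point; the -ENOMEM error path is hypothetical):
++ *
++ *   size_t const wkspSize = ZSTD_DCtxWorkspaceBound();
++ *   void *wksp = vmalloc(wkspSize);
++ *   ZSTD_DCtx *dctx = wksp ? ZSTD_initDCtx(wksp, wkspSize) : NULL;
++ *   if (!dctx) { vfree(wksp); return -ENOMEM; }
++ *   size_t const dSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
++ *   vfree(wksp);
++ *   // dSize is the regenerated size, or an error per ZSTD_isError()
++ */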
++
++ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
++{
++    ZSTD_DCtx* dctx;
++
++    if (!customMem.customAlloc || !customMem.customFree) return NULL;
++
++    dctx = (ZSTD_DCtx*)ZSTD_malloc(sizeof(ZSTD_DCtx), customMem);
++    if (!dctx) return NULL;
++    memcpy(&dctx->customMem, &customMem, sizeof(customMem));
++    ZSTD_decompressBegin(dctx);
++    return dctx;
++}
++
++ZSTD_DCtx* ZSTD_initDCtx(void* workspace, size_t workspaceSize)
++{
++    ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
++    return ZSTD_createDCtx_advanced(stackMem);
++}
++
++size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
++{
++    if (dctx==NULL) return 0;   /* support free on NULL */
++    ZSTD_free(dctx, dctx->customMem);
++    return 0;   /* reserved as a potential error code in the future */
++}
++
++void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
++{
++    size_t const workSpaceSize = (ZSTD_BLOCKSIZE_ABSOLUTEMAX+WILDCOPY_OVERLENGTH) + ZSTD_frameHeaderSize_max;
++    memcpy(dstDCtx, srcDCtx, sizeof(ZSTD_DCtx) - workSpaceSize);   /* no need to copy workspace */
++}
++
++#if 0
++/* deprecated */
++static void ZSTD_refDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
++{
++    ZSTD_decompressBegin(dstDCtx);   /* init */
++    if (srcDCtx) {   /* support refDCtx on NULL */
++        dstDCtx->dictEnd = srcDCtx->dictEnd;
++        dstDCtx->vBase = srcDCtx->vBase;
++        dstDCtx->base = srcDCtx->base;
++        dstDCtx->previousDstEnd = srcDCtx->previousDstEnd;
++        dstDCtx->dictID = srcDCtx->dictID;
++        dstDCtx->litEntropy = srcDCtx->litEntropy;
++        dstDCtx->fseEntropy = srcDCtx->fseEntropy;
++        dstDCtx->LLTptr = srcDCtx->entropy.LLTable;
++        dstDCtx->MLTptr = srcDCtx->entropy.MLTable;
++        dstDCtx->OFTptr = srcDCtx->entropy.OFTable;
++        dstDCtx->HUFptr = srcDCtx->entropy.hufTable;
++        dstDCtx->entropy.rep[0] = srcDCtx->entropy.rep[0];
++        dstDCtx->entropy.rep[1] = srcDCtx->entropy.rep[1];
++        dstDCtx->entropy.rep[2] = srcDCtx->entropy.rep[2];
++    }
++}
++#endif
++
++static void ZSTD_refDDict(ZSTD_DCtx* dstDCtx, const ZSTD_DDict* ddict);
++
++
++/*-*************************************************************
++*   Decompression section
++***************************************************************/
++
++/*! ZSTD_isFrame() :
++ *  Tells if the content of `buffer` starts with a valid Frame Identifier.
++ *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
++ *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
++ *  Note 3 : Skippable Frame Identifiers are considered valid. */
++unsigned ZSTD_isFrame(const void* buffer, size_t size)
++{
++    if (size < 4) return 0;
++    { U32 const magic = MEM_readLE32(buffer);
++      if (magic == ZSTD_MAGICNUMBER) return 1;
++      if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
++    }
++    return 0;
++}
++
++
++/** ZSTD_frameHeaderSize() :
++*   srcSize must be >= ZSTD_frameHeaderSize_prefix.
++*   @return : size of the Frame Header */
++static size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
++{
++    if (srcSize < ZSTD_frameHeaderSize_prefix) return ERROR(srcSize_wrong);
++    {   BYTE const fhd = ((const BYTE*)src)[4];
++        U32 const dictID= fhd & 3;
++        U32 const singleSegment = (fhd >> 5) & 1;
++        U32 const fcsId = fhd >> 6;
++        return ZSTD_frameHeaderSize_prefix + !singleSegment + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId]
++               + (singleSegment && !fcsId);
++    }
++}
++
++
++/** ZSTD_getFrameParams() :
++*   decode Frame Header, or require larger `srcSize`.
++* @return : 0, `fparamsPtr` is correctly filled, ++* >0, `srcSize` is too small, result is expected `srcSize`, ++* or an error code, which can be tested using ZSTD_isError() */ ++size_t ZSTD_getFrameParams(ZSTD_frameParams* fparamsPtr, const void* src, size_t srcSize) ++{ ++ const BYTE* ip = (const BYTE*)src; ++ ++ if (srcSize < ZSTD_frameHeaderSize_prefix) return ZSTD_frameHeaderSize_prefix; ++ if (MEM_readLE32(src) != ZSTD_MAGICNUMBER) { ++ if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { ++ if (srcSize < ZSTD_skippableHeaderSize) return ZSTD_skippableHeaderSize; /* magic number + skippable frame length */ ++ memset(fparamsPtr, 0, sizeof(*fparamsPtr)); ++ fparamsPtr->frameContentSize = MEM_readLE32((const char *)src + 4); ++ fparamsPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */ ++ return 0; ++ } ++ return ERROR(prefix_unknown); ++ } ++ ++ /* ensure there is enough `srcSize` to fully read/decode frame header */ ++ { size_t const fhsize = ZSTD_frameHeaderSize(src, srcSize); ++ if (srcSize < fhsize) return fhsize; } ++ ++ { BYTE const fhdByte = ip[4]; ++ size_t pos = 5; ++ U32 const dictIDSizeCode = fhdByte&3; ++ U32 const checksumFlag = (fhdByte>>2)&1; ++ U32 const singleSegment = (fhdByte>>5)&1; ++ U32 const fcsID = fhdByte>>6; ++ U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; ++ U32 windowSize = 0; ++ U32 dictID = 0; ++ U64 frameContentSize = 0; ++ if ((fhdByte & 0x08) != 0) return ERROR(frameParameter_unsupported); /* reserved bits, which must be zero */ ++ if (!singleSegment) { ++ BYTE const wlByte = ip[pos++]; ++ U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN; ++ if (windowLog > ZSTD_WINDOWLOG_MAX) return ERROR(frameParameter_windowTooLarge); /* avoids issue with 1 << windowLog */ ++ windowSize = (1U << windowLog); ++ windowSize += (windowSize >> 3) * (wlByte&7); ++ } ++ ++ switch(dictIDSizeCode) ++ { ++ default: /* impossible */ ++ case 0 : break; ++ case 1 : dictID = ip[pos]; pos++; break; ++ case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break; ++ case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break; ++ } ++ switch(fcsID) ++ { ++ default: /* impossible */ ++ case 0 : if (singleSegment) frameContentSize = ip[pos]; break; ++ case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break; ++ case 2 : frameContentSize = MEM_readLE32(ip+pos); break; ++ case 3 : frameContentSize = MEM_readLE64(ip+pos); break; ++ } ++ if (!windowSize) windowSize = (U32)frameContentSize; ++ if (windowSize > windowSizeMax) return ERROR(frameParameter_windowTooLarge); ++ fparamsPtr->frameContentSize = frameContentSize; ++ fparamsPtr->windowSize = windowSize; ++ fparamsPtr->dictID = dictID; ++ fparamsPtr->checksumFlag = checksumFlag; ++ } ++ return 0; ++} ++ ++/** ZSTD_getFrameContentSize() : ++* compatible with legacy mode ++* @return : decompressed size of the single frame pointed to be `src` if known, otherwise ++* - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined ++* - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. 
invalid magic number, srcSize too small) */ ++unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize) ++{ ++ { ++ ZSTD_frameParams fParams; ++ if (ZSTD_getFrameParams(&fParams, src, srcSize) != 0) return ZSTD_CONTENTSIZE_ERROR; ++ if (fParams.windowSize == 0) { ++ /* Either skippable or empty frame, size == 0 either way */ ++ return 0; ++ } else if (fParams.frameContentSize != 0) { ++ return fParams.frameContentSize; ++ } else { ++ return ZSTD_CONTENTSIZE_UNKNOWN; ++ } ++ } ++} ++ ++/** ZSTD_findDecompressedSize() : ++ * compatible with legacy mode ++ * `srcSize` must be the exact length of some number of ZSTD compressed and/or ++ * skippable frames ++ * @return : decompressed size of the frames contained */ ++unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize) ++{ ++ { ++ unsigned long long totalDstSize = 0; ++ while (srcSize >= ZSTD_frameHeaderSize_prefix) { ++ const U32 magicNumber = MEM_readLE32(src); ++ ++ if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { ++ size_t skippableSize; ++ if (srcSize < ZSTD_skippableHeaderSize) ++ return ERROR(srcSize_wrong); ++ skippableSize = MEM_readLE32((const BYTE *)src + 4) + ++ ZSTD_skippableHeaderSize; ++ if (srcSize < skippableSize) { ++ return ZSTD_CONTENTSIZE_ERROR; ++ } ++ ++ src = (const BYTE *)src + skippableSize; ++ srcSize -= skippableSize; ++ continue; ++ } ++ ++ { ++ unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize); ++ if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret; ++ ++ /* check for overflow */ ++ if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR; ++ totalDstSize += ret; ++ } ++ { ++ size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize); ++ if (ZSTD_isError(frameSrcSize)) { ++ return ZSTD_CONTENTSIZE_ERROR; ++ } ++ ++ src = (const BYTE *)src + frameSrcSize; ++ srcSize -= frameSrcSize; ++ } ++ } ++ ++ if (srcSize) { ++ return ZSTD_CONTENTSIZE_ERROR; ++ } ++ ++ return totalDstSize; ++ } ++} ++ ++/** ZSTD_decodeFrameHeader() : ++* `headerSize` must be the size provided by ZSTD_frameHeaderSize(). ++* @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */ ++static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize) ++{ ++ size_t const result = ZSTD_getFrameParams(&(dctx->fParams), src, headerSize); ++ if (ZSTD_isError(result)) return result; /* invalid header */ ++ if (result>0) return ERROR(srcSize_wrong); /* headerSize too small */ ++ if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID)) return ERROR(dictionary_wrong); ++ if (dctx->fParams.checksumFlag) xxh64_reset(&dctx->xxhState, 0); ++ return 0; ++} ++ ++ ++typedef struct ++{ ++ blockType_e blockType; ++ U32 lastBlock; ++ U32 origSize; ++} blockProperties_t; ++ ++/*! 
ZSTD_getcBlockSize() : ++* Provides the size of compressed block from block header `src` */ ++size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr) ++{ ++ if (srcSize < ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); ++ { U32 const cBlockHeader = MEM_readLE24(src); ++ U32 const cSize = cBlockHeader >> 3; ++ bpPtr->lastBlock = cBlockHeader & 1; ++ bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3); ++ bpPtr->origSize = cSize; /* only useful for RLE */ ++ if (bpPtr->blockType == bt_rle) return 1; ++ if (bpPtr->blockType == bt_reserved) return ERROR(corruption_detected); ++ return cSize; ++ } ++} ++ ++ ++static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize) ++{ ++ if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall); ++ memcpy(dst, src, srcSize); ++ return srcSize; ++} ++ ++ ++static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize, size_t regenSize) ++{ ++ if (srcSize != 1) return ERROR(srcSize_wrong); ++ if (regenSize > dstCapacity) return ERROR(dstSize_tooSmall); ++ memset(dst, *(const BYTE*)src, regenSize); ++ return regenSize; ++} ++ ++/*! ZSTD_decodeLiteralsBlock() : ++ @return : nb of bytes read from src (< srcSize ) */ ++size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, ++ const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */ ++{ ++ if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected); ++ ++ { const BYTE* const istart = (const BYTE*) src; ++ symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3); ++ ++ switch(litEncType) ++ { ++ case set_repeat: ++ if (dctx->litEntropy==0) return ERROR(dictionary_corrupted); ++ /* fall-through */ ++ case set_compressed: ++ if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */ ++ { size_t lhSize, litSize, litCSize; ++ U32 singleStream=0; ++ U32 const lhlCode = (istart[0] >> 2) & 3; ++ U32 const lhc = MEM_readLE32(istart); ++ switch(lhlCode) ++ { ++ case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */ ++ /* 2 - 2 - 10 - 10 */ ++ singleStream = !lhlCode; ++ lhSize = 3; ++ litSize = (lhc >> 4) & 0x3FF; ++ litCSize = (lhc >> 14) & 0x3FF; ++ break; ++ case 2: ++ /* 2 - 2 - 14 - 14 */ ++ lhSize = 4; ++ litSize = (lhc >> 4) & 0x3FFF; ++ litCSize = lhc >> 18; ++ break; ++ case 3: ++ /* 2 - 2 - 18 - 18 */ ++ lhSize = 5; ++ litSize = (lhc >> 4) & 0x3FFFF; ++ litCSize = (lhc >> 22) + (istart[4] << 10); ++ break; ++ } ++ if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX) return ERROR(corruption_detected); ++ if (litCSize + lhSize > srcSize) return ERROR(corruption_detected); ++ ++ if (HUF_isError((litEncType==set_repeat) ? ++ ( singleStream ? ++ HUF_decompress1X_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr) : ++ HUF_decompress4X_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr) ) : ++ ( singleStream ? 
HUF_decompress1X2_DCtx(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize) :
++                        HUF_decompress4X_hufOnly (dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize)) ))
++                    return ERROR(corruption_detected);
++
++                dctx->litPtr = dctx->litBuffer;
++                dctx->litSize = litSize;
++                dctx->litEntropy = 1;
++                if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
++                memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
++                return litCSize + lhSize;
++            }
++
++        case set_basic:
++            {   size_t litSize, lhSize;
++                U32 const lhlCode = ((istart[0]) >> 2) & 3;
++                switch(lhlCode)
++                {
++                case 0: case 2: default:   /* note : default is impossible, since lhlCode into [0..3] */
++                    lhSize = 1;
++                    litSize = istart[0] >> 3;
++                    break;
++                case 1:
++                    lhSize = 2;
++                    litSize = MEM_readLE16(istart) >> 4;
++                    break;
++                case 3:
++                    lhSize = 3;
++                    litSize = MEM_readLE24(istart) >> 4;
++                    break;
++                }
++
++                if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {   /* risk reading beyond src buffer with wildcopy */
++                    if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
++                    memcpy(dctx->litBuffer, istart+lhSize, litSize);
++                    dctx->litPtr = dctx->litBuffer;
++                    dctx->litSize = litSize;
++                    memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
++                    return lhSize+litSize;
++                }
++                /* direct reference into compressed stream */
++                dctx->litPtr = istart+lhSize;
++                dctx->litSize = litSize;
++                return lhSize+litSize;
++            }
++
++        case set_rle:
++            {   U32 const lhlCode = ((istart[0]) >> 2) & 3;
++                size_t litSize, lhSize;
++                switch(lhlCode)
++                {
++                case 0: case 2: default:   /* note : default is impossible, since lhlCode into [0..3] */
++                    lhSize = 1;
++                    litSize = istart[0] >> 3;
++                    break;
++                case 1:
++                    lhSize = 2;
++                    litSize = MEM_readLE16(istart) >> 4;
++                    break;
++                case 3:
++                    lhSize = 3;
++                    litSize = MEM_readLE24(istart) >> 4;
++                    if (srcSize<4) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
++                    break;
++                }
++                if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX) return ERROR(corruption_detected);
++                memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
++                dctx->litPtr = dctx->litBuffer;
++                dctx->litSize = litSize;
++                return lhSize+1;
++            }
++        default:
++            return ERROR(corruption_detected);   /* impossible */
++        }
++    }
++}
++
++
++typedef union {
++    FSE_decode_t realData;
++    U32 alignedBy4;
++} FSE_decode_t4;
++
++/* Default FSE distribution tables for the predefined literal length, offset
++   and match length codes; the table bodies are large and omitted here. */
++static const FSE_decode_t4 LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = { /* ... */ };
++static const FSE_decode_t4 OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = { /* ... */ };
++static const FSE_decode_t4 ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = { /* ... */ };
++
++/*! ZSTD_buildSeqTable() :
++    @return : nb bytes read from src,
++              or an error code if it fails, testable with ZSTD_isError() */
++static size_t ZSTD_buildSeqTable(FSE_DTable* DTableSpace, const FSE_DTable** DTablePtr,
++                                 symbolEncodingType_e type, U32 max, U32 maxLog,
++                                 const void* src, size_t srcSize,
++                                 const FSE_decode_t4* defaultTable, U32 flagRepeatTable)
++{
++    const void* const tmpPtr = defaultTable;   /* bypass strict aliasing */
++    switch(type)
++    {
++    case set_rle :
++        if (!srcSize) return ERROR(srcSize_wrong);
++        if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);
++        FSE_buildDTable_rle(DTableSpace, *(const BYTE*)src);
++        *DTablePtr = DTableSpace;
++        return 1;
++    case set_basic :
++        *DTablePtr = (const FSE_DTable*)tmpPtr;
++        return 0;
++    case set_repeat:
++        if (!flagRepeatTable) return ERROR(corruption_detected);
++        return 0;
++    default :   /* impossible */
++    case set_compressed :
++        {   U32 tableLog;
++            S16 norm[MaxSeq+1];
++            size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
++            if (FSE_isError(headerSize)) return ERROR(corruption_detected);
++            if (tableLog > maxLog) return ERROR(corruption_detected);
++            FSE_buildDTable(DTableSpace, norm, max, tableLog);
++            *DTablePtr = DTableSpace;
++            return headerSize;
++    }   }
++}
++
++size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
++                             const void* src, size_t srcSize)
++{
++    const BYTE* const istart = (const BYTE* const)src;
++    const BYTE* const iend = istart + srcSize;
++    const BYTE* ip = istart;
++
++    /* check */
++    if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
++
++    /* SeqHead */
++    {   int 
nbSeq = *ip++; ++ if (!nbSeq) { *nbSeqPtr=0; return 1; } ++ if (nbSeq > 0x7F) { ++ if (nbSeq == 0xFF) { ++ if (ip+2 > iend) return ERROR(srcSize_wrong); ++ nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2; ++ } else { ++ if (ip >= iend) return ERROR(srcSize_wrong); ++ nbSeq = ((nbSeq-0x80)<<8) + *ip++; ++ } ++ } ++ *nbSeqPtr = nbSeq; ++ } ++ ++ /* FSE table descriptors */ ++ if (ip+4 > iend) return ERROR(srcSize_wrong); /* minimum possible size */ ++ { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6); ++ symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3); ++ symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3); ++ ip++; ++ ++ /* Build DTables */ ++ { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, ++ LLtype, MaxLL, LLFSELog, ++ ip, iend-ip, LL_defaultDTable, dctx->fseEntropy); ++ if (ZSTD_isError(llhSize)) return ERROR(corruption_detected); ++ ip += llhSize; ++ } ++ { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, ++ OFtype, MaxOff, OffFSELog, ++ ip, iend-ip, OF_defaultDTable, dctx->fseEntropy); ++ if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected); ++ ip += ofhSize; ++ } ++ { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, ++ MLtype, MaxML, MLFSELog, ++ ip, iend-ip, ML_defaultDTable, dctx->fseEntropy); ++ if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected); ++ ip += mlhSize; ++ } ++ } ++ ++ return ip-istart; ++} ++ ++ ++typedef struct { ++ size_t litLength; ++ size_t matchLength; ++ size_t offset; ++ const BYTE* match; ++} seq_t; ++ ++typedef struct { ++ BIT_DStream_t DStream; ++ FSE_DState_t stateLL; ++ FSE_DState_t stateOffb; ++ FSE_DState_t stateML; ++ size_t prevOffset[ZSTD_REP_NUM]; ++ const BYTE* base; ++ size_t pos; ++ uPtrDiff gotoDict; ++} seqState_t; ++ ++ ++FORCE_NOINLINE ++size_t ZSTD_execSequenceLast7(BYTE* op, ++ BYTE* const oend, seq_t sequence, ++ const BYTE** litPtr, const BYTE* const litLimit, ++ const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd) ++{ ++ BYTE* const oLitEnd = op + sequence.litLength; ++ size_t const sequenceLength = sequence.litLength + sequence.matchLength; ++ BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ ++ BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; ++ const BYTE* const iLitEnd = *litPtr + sequence.litLength; ++ const BYTE* match = oLitEnd - sequence.offset; ++ ++ /* check */ ++ if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ ++ if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ ++ if (oLitEnd <= oend_w) return ERROR(GENERIC); /* Precondition */ ++ ++ /* copy literals */ ++ if (op < oend_w) { ++ ZSTD_wildcopy(op, *litPtr, oend_w - op); ++ *litPtr += oend_w - op; ++ op = oend_w; ++ } ++ while (op < oLitEnd) *op++ = *(*litPtr)++; ++ ++ /* copy Match */ ++ if (sequence.offset > (size_t)(oLitEnd - base)) { ++ /* offset beyond prefix */ ++ if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected); ++ match = dictEnd - (base-match); ++ if (match + sequence.matchLength <= dictEnd) { ++ memmove(oLitEnd, match, sequence.matchLength); ++ return sequenceLength; ++ } ++ /* span extDict & currentPrefixSegment */ ++ { size_t const length1 = dictEnd - match; ++ memmove(oLitEnd, match, length1); ++ op = oLitEnd + length1; ++ sequence.matchLength -= length1; ++ match = 
base; ++ } } ++ while (op < oMatchEnd) *op++ = *match++; ++ return sequenceLength; ++} ++ ++ ++ ++ ++static seq_t ZSTD_decodeSequence(seqState_t* seqState) ++{ ++ seq_t seq; ++ ++ U32 const llCode = FSE_peekSymbol(&seqState->stateLL); ++ U32 const mlCode = FSE_peekSymbol(&seqState->stateML); ++ U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */ ++ ++ U32 const llBits = LL_bits[llCode]; ++ U32 const mlBits = ML_bits[mlCode]; ++ U32 const ofBits = ofCode; ++ U32 const totalBits = llBits+mlBits+ofBits; ++ ++ static const U32 LL_base[MaxLL+1] = { ++ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, ++ 16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, ++ 0x2000, 0x4000, 0x8000, 0x10000 }; ++ ++ static const U32 ML_base[MaxML+1] = { ++ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, ++ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, ++ 35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, ++ 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 }; ++ ++ static const U32 OF_base[MaxOff+1] = { ++ 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, ++ 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, ++ 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, ++ 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD }; ++ ++ /* sequence */ ++ { size_t offset; ++ if (!ofCode) ++ offset = 0; ++ else { ++ offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ ++ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); ++ } ++ ++ if (ofCode <= 1) { ++ offset += (llCode==0); ++ if (offset) { ++ size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; ++ temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ ++ if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; ++ seqState->prevOffset[1] = seqState->prevOffset[0]; ++ seqState->prevOffset[0] = offset = temp; ++ } else { ++ offset = seqState->prevOffset[0]; ++ } ++ } else { ++ seqState->prevOffset[2] = seqState->prevOffset[1]; ++ seqState->prevOffset[1] = seqState->prevOffset[0]; ++ seqState->prevOffset[0] = offset; ++ } ++ seq.offset = offset; ++ } ++ ++ seq.matchLength = ML_base[mlCode] + ((mlCode>31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */ ++ if (MEM_32bits() && (mlBits+llBits>24)) BIT_reloadDStream(&seqState->DStream); ++ ++ seq.litLength = LL_base[llCode] + ((llCode>15) ? 
BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */ ++ if (MEM_32bits() || ++ (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BIT_reloadDStream(&seqState->DStream); ++ ++ /* ANS state update */ ++ FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ ++ FSE_updateState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ ++ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ ++ FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ ++ ++ return seq; ++} ++ ++ ++FORCE_INLINE ++size_t ZSTD_execSequence(BYTE* op, ++ BYTE* const oend, seq_t sequence, ++ const BYTE** litPtr, const BYTE* const litLimit, ++ const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd) ++{ ++ BYTE* const oLitEnd = op + sequence.litLength; ++ size_t const sequenceLength = sequence.litLength + sequence.matchLength; ++ BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ ++ BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; ++ const BYTE* const iLitEnd = *litPtr + sequence.litLength; ++ const BYTE* match = oLitEnd - sequence.offset; ++ ++ /* check */ ++ if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ ++ if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ ++ if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd); ++ ++ /* copy Literals */ ++ ZSTD_copy8(op, *litPtr); ++ if (sequence.litLength > 8) ++ ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ ++ op = oLitEnd; ++ *litPtr = iLitEnd; /* update for next sequence */ ++ ++ /* copy Match */ ++ if (sequence.offset > (size_t)(oLitEnd - base)) { ++ /* offset beyond prefix */ ++ if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected); ++ match = dictEnd + (match - base); ++ if (match + sequence.matchLength <= dictEnd) { ++ memmove(oLitEnd, match, sequence.matchLength); ++ return sequenceLength; ++ } ++ /* span extDict & currentPrefixSegment */ ++ { size_t const length1 = dictEnd - match; ++ memmove(oLitEnd, match, length1); ++ op = oLitEnd + length1; ++ sequence.matchLength -= length1; ++ match = base; ++ if (op > oend_w || sequence.matchLength < MINMATCH) { ++ U32 i; ++ for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i]; ++ return sequenceLength; ++ } ++ } } ++ /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */ ++ ++ /* match within prefix */ ++ if (sequence.offset < 8) { ++ /* close range match, overlap */ ++ static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */ ++ static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */ ++ int const sub2 = dec64table[sequence.offset]; ++ op[0] = match[0]; ++ op[1] = match[1]; ++ op[2] = match[2]; ++ op[3] = match[3]; ++ match += dec32table[sequence.offset]; ++ ZSTD_copy4(op+4, match); ++ match -= sub2; ++ } else { ++ ZSTD_copy8(op, match); ++ } ++ op += 8; match += 8; ++ ++ if (oMatchEnd > oend-(16-MINMATCH)) { ++ if (op < oend_w) { ++ ZSTD_wildcopy(op, match, oend_w - op); ++ match += oend_w - op; ++ op = oend_w; ++ } ++ while (op < oMatchEnd) *op++ = *match++; ++ } else { ++ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */ ++ } ++ return sequenceLength; ++} ++ ++ ++static size_t 
ZSTD_decompressSequences(
++                           ZSTD_DCtx* dctx,
++                           void* dst, size_t maxDstSize,
++                     const void* seqStart, size_t seqSize)
++{
++    const BYTE* ip = (const BYTE*)seqStart;
++    const BYTE* const iend = ip + seqSize;
++    BYTE* const ostart = (BYTE* const)dst;
++    BYTE* const oend = ostart + maxDstSize;
++    BYTE* op = ostart;
++    const BYTE* litPtr = dctx->litPtr;
++    const BYTE* const litEnd = litPtr + dctx->litSize;
++    const BYTE* const base = (const BYTE*) (dctx->base);
++    const BYTE* const vBase = (const BYTE*) (dctx->vBase);
++    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
++    int nbSeq;
++
++    /* Build Decoding Tables */
++    {   size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
++        if (ZSTD_isError(seqHSize)) return seqHSize;
++        ip += seqHSize;
++    }
++
++    /* Regen sequences */
++    if (nbSeq) {
++        seqState_t seqState;
++        dctx->fseEntropy = 1;
++        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
++        CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
++        FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
++        FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
++        FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
++
++        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) {
++            nbSeq--;
++            {   seq_t const sequence = ZSTD_decodeSequence(&seqState);
++                size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
++                if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
++                op += oneSeqSize;
++        }   }
++
++        /* check if reached exact end */
++        if (nbSeq) return ERROR(corruption_detected);
++        /* save reps for next block */
++        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
++    }
++
++    /* last literal segment */
++    {   size_t const lastLLSize = litEnd - litPtr;
++        if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
++        memcpy(op, litPtr, lastLLSize);
++        op += lastLLSize;
++    }
++
++    return op-ostart;
++}
++
++
++FORCE_INLINE seq_t ZSTD_decodeSequenceLong_generic(seqState_t* seqState, int const longOffsets)
++{
++    seq_t seq;
++
++    U32 const llCode = FSE_peekSymbol(&seqState->stateLL);
++    U32 const mlCode = FSE_peekSymbol(&seqState->stateML);
++    U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb);   /* <= maxOff, by table construction */
++
++    U32 const llBits = LL_bits[llCode];
++    U32 const mlBits = ML_bits[mlCode];
++    U32 const ofBits = ofCode;
++    U32 const totalBits = llBits+mlBits+ofBits;
++
++    static const U32 LL_base[MaxLL+1] = {
++        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
++        16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
++        0x2000, 0x4000, 0x8000, 0x10000 };
++
++    static const U32 ML_base[MaxML+1] = {
++        3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
++        19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
++        35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
++        0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
++
++    static const U32 OF_base[MaxOff+1] = {
++        0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D,
++        0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD,
++        0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
++        0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD };
++
++    /* sequence */
++    {   size_t offset;
++        if (!ofCode)
++            offset = 0;
++        else {
++            if (longOffsets) {
++                int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN);
++                offset = OF_base[ofCode] + (BIT_readBitsFast(&seqState->DStream, 
ofBits - extraBits) << extraBits); ++ if (MEM_32bits() || extraBits) BIT_reloadDStream(&seqState->DStream); ++ if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits); ++ } else { ++ offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ ++ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); ++ } ++ } ++ ++ if (ofCode <= 1) { ++ offset += (llCode==0); ++ if (offset) { ++ size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; ++ temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ ++ if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; ++ seqState->prevOffset[1] = seqState->prevOffset[0]; ++ seqState->prevOffset[0] = offset = temp; ++ } else { ++ offset = seqState->prevOffset[0]; ++ } ++ } else { ++ seqState->prevOffset[2] = seqState->prevOffset[1]; ++ seqState->prevOffset[1] = seqState->prevOffset[0]; ++ seqState->prevOffset[0] = offset; ++ } ++ seq.offset = offset; ++ } ++ ++ seq.matchLength = ML_base[mlCode] + ((mlCode>31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */ ++ if (MEM_32bits() && (mlBits+llBits>24)) BIT_reloadDStream(&seqState->DStream); ++ ++ seq.litLength = LL_base[llCode] + ((llCode>15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */ ++ if (MEM_32bits() || ++ (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BIT_reloadDStream(&seqState->DStream); ++ ++ { size_t const pos = seqState->pos + seq.litLength; ++ seq.match = seqState->base + pos - seq.offset; /* single memory segment */ ++ if (seq.offset > pos) seq.match += seqState->gotoDict; /* separate memory segment */ ++ seqState->pos = pos + seq.matchLength; ++ } ++ ++ /* ANS state update */ ++ FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ ++ FSE_updateState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ ++ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ ++ FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ ++ ++ return seq; ++} ++ ++static seq_t ZSTD_decodeSequenceLong(seqState_t* seqState, unsigned const windowSize) { ++ if (ZSTD_highbit32(windowSize) > STREAM_ACCUMULATOR_MIN) { ++ return ZSTD_decodeSequenceLong_generic(seqState, 1); ++ } else { ++ return ZSTD_decodeSequenceLong_generic(seqState, 0); ++ } ++} ++ ++FORCE_INLINE ++size_t ZSTD_execSequenceLong(BYTE* op, ++ BYTE* const oend, seq_t sequence, ++ const BYTE** litPtr, const BYTE* const litLimit, ++ const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd) ++{ ++ BYTE* const oLitEnd = op + sequence.litLength; ++ size_t const sequenceLength = sequence.litLength + sequence.matchLength; ++ BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ ++ BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; ++ const BYTE* const iLitEnd = *litPtr + sequence.litLength; ++ const BYTE* match = sequence.match; ++ ++ /* check */ ++#if 1 ++ if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ ++ if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ ++ if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd); ++#endif ++ ++ /* copy Literals */ ++ ZSTD_copy8(op, *litPtr); ++ if (sequence.litLength > 8) ++ ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8); /* note : since oLitEnd 
<= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ ++ op = oLitEnd; ++ *litPtr = iLitEnd; /* update for next sequence */ ++ ++ /* copy Match */ ++#if 1 ++ if (sequence.offset > (size_t)(oLitEnd - base)) { ++ /* offset beyond prefix */ ++ if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected); ++ if (match + sequence.matchLength <= dictEnd) { ++ memmove(oLitEnd, match, sequence.matchLength); ++ return sequenceLength; ++ } ++ /* span extDict & currentPrefixSegment */ ++ { size_t const length1 = dictEnd - match; ++ memmove(oLitEnd, match, length1); ++ op = oLitEnd + length1; ++ sequence.matchLength -= length1; ++ match = base; ++ if (op > oend_w || sequence.matchLength < MINMATCH) { ++ U32 i; ++ for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i]; ++ return sequenceLength; ++ } ++ } } ++ /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */ ++#endif ++ ++ /* match within prefix */ ++ if (sequence.offset < 8) { ++ /* close range match, overlap */ ++ static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */ ++ static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */ ++ int const sub2 = dec64table[sequence.offset]; ++ op[0] = match[0]; ++ op[1] = match[1]; ++ op[2] = match[2]; ++ op[3] = match[3]; ++ match += dec32table[sequence.offset]; ++ ZSTD_copy4(op+4, match); ++ match -= sub2; ++ } else { ++ ZSTD_copy8(op, match); ++ } ++ op += 8; match += 8; ++ ++ if (oMatchEnd > oend-(16-MINMATCH)) { ++ if (op < oend_w) { ++ ZSTD_wildcopy(op, match, oend_w - op); ++ match += oend_w - op; ++ op = oend_w; ++ } ++ while (op < oMatchEnd) *op++ = *match++; ++ } else { ++ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */ ++ } ++ return sequenceLength; ++} ++ ++static size_t ZSTD_decompressSequencesLong( ++ ZSTD_DCtx* dctx, ++ void* dst, size_t maxDstSize, ++ const void* seqStart, size_t seqSize) ++{ ++ const BYTE* ip = (const BYTE*)seqStart; ++ const BYTE* const iend = ip + seqSize; ++ BYTE* const ostart = (BYTE* const)dst; ++ BYTE* const oend = ostart + maxDstSize; ++ BYTE* op = ostart; ++ const BYTE* litPtr = dctx->litPtr; ++ const BYTE* const litEnd = litPtr + dctx->litSize; ++ const BYTE* const base = (const BYTE*) (dctx->base); ++ const BYTE* const vBase = (const BYTE*) (dctx->vBase); ++ const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); ++ unsigned const windowSize = dctx->fParams.windowSize; ++ int nbSeq; ++ ++ /* Build Decoding Tables */ ++ { size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize); ++ if (ZSTD_isError(seqHSize)) return seqHSize; ++ ip += seqHSize; ++ } ++ ++ /* Regen sequences */ ++ if (nbSeq) { ++#define STORED_SEQS 4 ++#define STOSEQ_MASK (STORED_SEQS-1) ++#define ADVANCED_SEQS 4 ++ seq_t sequences[STORED_SEQS]; ++ int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS); ++ seqState_t seqState; ++ int seqNb; ++ dctx->fseEntropy = 1; ++ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } ++ seqState.base = base; ++ seqState.pos = (size_t)(op-base); ++ seqState.gotoDict = (uPtrDiff)dictEnd - (uPtrDiff)base; /* cast to avoid undefined behaviour */ ++ CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected); ++ FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); ++ FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); ++ FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); ++ ++ /* prepare in advance */ ++ for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <=
BIT_DStream_completed) && seqNb<seqAdvance; seqNb++) { ++ sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, windowSize); ++ } ++ if (seqNb<seqAdvance) return ERROR(corruption_detected); ++ ++ /* decode and decompress */ ++ for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && seqNb<nbSeq ; seqNb++) { ++ seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, windowSize); ++ size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd); ++ if (ZSTD_isError(oneSeqSize)) return oneSeqSize; ++ ZSTD_PREFETCH(sequence.match); ++ sequences[seqNb&STOSEQ_MASK] = sequence; ++ op += oneSeqSize; ++ } ++ if (seqNb<nbSeq) return ERROR(corruption_detected); ++ ++ /* finish queue */ ++ seqNb -= seqAdvance; ++ for ( ; seqNb<nbSeq ; seqNb++) { ++ size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb&STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd); ++ if (ZSTD_isError(oneSeqSize)) return oneSeqSize; ++ op += oneSeqSize; ++ } ++ ++ /* save reps for next block */ ++ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); } ++ } ++ ++ /* last literal segment */ ++ { size_t const lastLLSize = litEnd - litPtr; ++ if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall); ++ memcpy(op, litPtr, lastLLSize); ++ op += lastLLSize; ++ } ++ ++ return op-ostart; ++} ++ ++ ++static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, ++ void* dst, size_t dstCapacity, ++ const void* src, size_t srcSize) ++{ /* blockType == blockCompressed */ ++ const BYTE* ip = (const BYTE*)src; ++ ++ if (srcSize >= ZSTD_BLOCKSIZE_ABSOLUTEMAX) return ERROR(srcSize_wrong); ++ ++ /* Decode literals section */ ++ { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize); ++ if (ZSTD_isError(litCSize)) return litCSize; ++ ip += litCSize; ++ srcSize -= litCSize; ++ } ++ if (sizeof(size_t) > 4) /* do not enable prefetching on 32-bits x86, as it's performance detrimental */ ++ /* likely because of register pressure */ ++ /* if that's the correct cause, then 32-bits ARM should be affected differently */ ++ /* it would be good to test this on ARM real hardware, to see if prefetch version improves speed */ ++ if (dctx->fParams.windowSize > (1<<23)) ++ return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize); ++ return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize); ++} ++ ++ ++static void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst) ++{ ++ if (dst != dctx->previousDstEnd) { /* not contiguous */ ++ dctx->dictEnd = dctx->previousDstEnd; ++ dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base)); ++ dctx->base = dst; ++ dctx->previousDstEnd = dst; ++ } ++} ++ ++size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, ++ void* dst, size_t dstCapacity, ++ const void* src, size_t srcSize) ++{ ++ size_t dSize; ++ ZSTD_checkContinuity(dctx, dst); ++ dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); ++ dctx->previousDstEnd = (char*)dst + dSize; ++ return dSize; ++} ++ ++ ++/** ZSTD_insertBlock() : ++ insert `src` block into `dctx` history. Useful to track uncompressed blocks.
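++ As an illustrative sketch only (hypothetical caller, not part of this patch), a block-level user that stores some blocks uncompressed would do : ++ if (blockIsStoredRaw) { memcpy(dst, src, blockSize); ZSTD_insertBlock(dctx, dst, blockSize); } ++ else { ZSTD_decompressBlock(dctx, dst, dstCapacity, src, srcSize); } ++ so that later matches may still reference data from the raw block.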
*/ ++size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize) ++{ ++ ZSTD_checkContinuity(dctx, blockStart); ++ dctx->previousDstEnd = (const char*)blockStart + blockSize; ++ return blockSize; ++} ++ ++ ++size_t ZSTD_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length) ++{ ++ if (length > dstCapacity) return ERROR(dstSize_tooSmall); ++ memset(dst, byte, length); ++ return length; ++} ++ ++/** ZSTD_findFrameCompressedSize() : ++ * compatible with legacy mode ++ * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame ++ * `srcSize` must be at least as large as the frame contained ++ * @return : the compressed size of the frame starting at `src` */ ++size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize) ++{ ++ if (srcSize >= ZSTD_skippableHeaderSize && ++ (MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { ++ return ZSTD_skippableHeaderSize + MEM_readLE32((const BYTE*)src + 4); ++ } else { ++ const BYTE* ip = (const BYTE*)src; ++ const BYTE* const ipstart = ip; ++ size_t remainingSize = srcSize; ++ ZSTD_frameParams fParams; ++ ++ size_t const headerSize = ZSTD_frameHeaderSize(ip, remainingSize); ++ if (ZSTD_isError(headerSize)) return headerSize; ++ ++ /* Frame Header */ ++ { size_t const ret = ZSTD_getFrameParams(&fParams, ip, remainingSize); ++ if (ZSTD_isError(ret)) return ret; ++ if (ret > 0) return ERROR(srcSize_wrong); ++ } ++ ++ ip += headerSize; ++ remainingSize -= headerSize; ++ ++ /* Loop on each block */ ++ while (1) { ++ blockProperties_t blockProperties; ++ size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); ++ if (ZSTD_isError(cBlockSize)) return cBlockSize; ++ ++ if (ZSTD_blockHeaderSize + cBlockSize > remainingSize) return ERROR(srcSize_wrong); ++ ++ ip += ZSTD_blockHeaderSize + cBlockSize; ++ remainingSize -= ZSTD_blockHeaderSize + cBlockSize; ++ ++ if (blockProperties.lastBlock) break; ++ } ++ ++ if (fParams.checksumFlag) { /* Frame content checksum */ ++ if (remainingSize < 4) return ERROR(srcSize_wrong); ++ ip += 4; ++ remainingSize -= 4; ++ } ++ ++ return ip - ipstart; ++ } ++} ++ ++/*! 
ZSTD_decompressFrame() : ++* @dctx must be properly initialized */ ++static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, ++ void* dst, size_t dstCapacity, ++ const void** srcPtr, size_t *srcSizePtr) ++{ ++ const BYTE* ip = (const BYTE*)(*srcPtr); ++ BYTE* const ostart = (BYTE* const)dst; ++ BYTE* const oend = ostart + dstCapacity; ++ BYTE* op = ostart; ++ size_t remainingSize = *srcSizePtr; ++ ++ /* check */ ++ if (remainingSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); ++ ++ /* Frame Header */ ++ { size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix); ++ if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize; ++ if (remainingSize < frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); ++ CHECK_F(ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize)); ++ ip += frameHeaderSize; remainingSize -= frameHeaderSize; ++ } ++ ++ /* Loop on each block */ ++ while (1) { ++ size_t decodedSize; ++ blockProperties_t blockProperties; ++ size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); ++ if (ZSTD_isError(cBlockSize)) return cBlockSize; ++ ++ ip += ZSTD_blockHeaderSize; ++ remainingSize -= ZSTD_blockHeaderSize; ++ if (cBlockSize > remainingSize) return ERROR(srcSize_wrong); ++ ++ switch(blockProperties.blockType) ++ { ++ case bt_compressed: ++ decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize); ++ break; ++ case bt_raw : ++ decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize); ++ break; ++ case bt_rle : ++ decodedSize = ZSTD_generateNxBytes(op, oend-op, *ip, blockProperties.origSize); ++ break; ++ case bt_reserved : ++ default: ++ return ERROR(corruption_detected); ++ } ++ ++ if (ZSTD_isError(decodedSize)) return decodedSize; ++ if (dctx->fParams.checksumFlag) xxh64_update(&dctx->xxhState, op, decodedSize); ++ op += decodedSize; ++ ip += cBlockSize; ++ remainingSize -= cBlockSize; ++ if (blockProperties.lastBlock) break; ++ } ++ ++ if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */ ++ U32 const checkCalc = (U32)xxh64_digest(&dctx->xxhState); ++ U32 checkRead; ++ if (remainingSize<4) return ERROR(checksum_wrong); ++ checkRead = MEM_readLE32(ip); ++ if (checkRead != checkCalc) return ERROR(checksum_wrong); ++ ip += 4; ++ remainingSize -= 4; ++ } ++ ++ /* Allow caller to get size read */ ++ *srcPtr = ip; ++ *srcSizePtr = remainingSize; ++ return op-ostart; ++} ++ ++static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict); ++static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict); ++ ++static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, ++ void* dst, size_t dstCapacity, ++ const void* src, size_t srcSize, ++ const void *dict, size_t dictSize, ++ const ZSTD_DDict* ddict) ++{ ++ void* const dststart = dst; ++ ++ if (ddict) { ++ if (dict) { ++ /* programmer error, these two cases should be mutually exclusive */ ++ return ERROR(GENERIC); ++ } ++ ++ dict = ZSTD_DDictDictContent(ddict); ++ dictSize = ZSTD_DDictDictSize(ddict); ++ } ++ ++ while (srcSize >= ZSTD_frameHeaderSize_prefix) { ++ U32 magicNumber; ++ ++ magicNumber = MEM_readLE32(src); ++ if (magicNumber != ZSTD_MAGICNUMBER) { ++ if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { ++ size_t skippableSize; ++ if (srcSize < ZSTD_skippableHeaderSize) ++ return ERROR(srcSize_wrong); ++ skippableSize = MEM_readLE32((const BYTE *)src + 4) + ++ ZSTD_skippableHeaderSize; ++ if (srcSize < skippableSize) { ++ return ERROR(srcSize_wrong); ++ } ++ ++ src = (const BYTE 
*)src + skippableSize; ++ srcSize -= skippableSize; ++ continue; ++ } else { ++ return ERROR(prefix_unknown); ++ } ++ } ++ ++ if (ddict) { ++ /* we were called from ZSTD_decompress_usingDDict */ ++ ZSTD_refDDict(dctx, ddict); ++ } else { ++ /* this will initialize correctly with no dict if dict == NULL, so ++ * use this in all cases but ddict */ ++ CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize)); ++ } ++ ZSTD_checkContinuity(dctx, dst); ++ ++ { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, ++ &src, &srcSize); ++ if (ZSTD_isError(res)) return res; ++ /* don't need to bounds check this, ZSTD_decompressFrame will have ++ * already */ ++ dst = (BYTE*)dst + res; ++ dstCapacity -= res; ++ } ++ } ++ ++ if (srcSize) return ERROR(srcSize_wrong); /* input not entirely consumed */ ++ ++ return (BYTE*)dst - (BYTE*)dststart; ++} ++ ++size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void *dict, size_t dictSize) ++{ ++ return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL); ++} ++ ++ ++size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) ++{ ++ return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0); ++} ++ ++ ++/*-************************************** ++* Advanced Streaming Decompression API ++* Bufferless and synchronous ++****************************************/ ++size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; } ++ ++ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) { ++ switch(dctx->stage) ++ { ++ default: /* should not happen */ ++ case ZSTDds_getFrameHeaderSize: ++ case ZSTDds_decodeFrameHeader: ++ return ZSTDnit_frameHeader; ++ case ZSTDds_decodeBlockHeader: ++ return ZSTDnit_blockHeader; ++ case ZSTDds_decompressBlock: ++ return ZSTDnit_block; ++ case ZSTDds_decompressLastBlock: ++ return ZSTDnit_lastBlock; ++ case ZSTDds_checkChecksum: ++ return ZSTDnit_checksum; ++ case ZSTDds_decodeSkippableHeader: ++ case ZSTDds_skipFrame: ++ return ZSTDnit_skippableFrame; ++ } ++} ++ ++int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; } /* for zbuff */ ++ ++/** ZSTD_decompressContinue() : ++* @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity) ++* or an error code, which can be tested using ZSTD_isError() */ ++size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) ++{ ++ /* Sanity check */ ++ if (srcSize != dctx->expected) return ERROR(srcSize_wrong); ++ if (dstCapacity) ZSTD_checkContinuity(dctx, dst); ++ ++ switch (dctx->stage) ++ { ++ case ZSTDds_getFrameHeaderSize : ++ if (srcSize != ZSTD_frameHeaderSize_prefix) return ERROR(srcSize_wrong); /* impossible */ ++ if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ ++ memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix); ++ dctx->expected = ZSTD_skippableHeaderSize - ZSTD_frameHeaderSize_prefix; /* magic number + skippable frame length */ ++ dctx->stage = ZSTDds_decodeSkippableHeader; ++ return 0; ++ } ++ dctx->headerSize = ZSTD_frameHeaderSize(src, ZSTD_frameHeaderSize_prefix); ++ if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize; ++ memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix); ++ if (dctx->headerSize > ZSTD_frameHeaderSize_prefix) { ++ dctx->expected = dctx->headerSize - ZSTD_frameHeaderSize_prefix; ++ dctx->stage = 
ZSTDds_decodeFrameHeader; ++ return 0; ++ } ++ dctx->expected = 0; /* not necessary to copy more */ ++ ++ case ZSTDds_decodeFrameHeader: ++ memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected); ++ CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize)); ++ dctx->expected = ZSTD_blockHeaderSize; ++ dctx->stage = ZSTDds_decodeBlockHeader; ++ return 0; ++ ++ case ZSTDds_decodeBlockHeader: ++ { blockProperties_t bp; ++ size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); ++ if (ZSTD_isError(cBlockSize)) return cBlockSize; ++ dctx->expected = cBlockSize; ++ dctx->bType = bp.blockType; ++ dctx->rleSize = bp.origSize; ++ if (cBlockSize) { ++ dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock; ++ return 0; ++ } ++ /* empty block */ ++ if (bp.lastBlock) { ++ if (dctx->fParams.checksumFlag) { ++ dctx->expected = 4; ++ dctx->stage = ZSTDds_checkChecksum; ++ } else { ++ dctx->expected = 0; /* end of frame */ ++ dctx->stage = ZSTDds_getFrameHeaderSize; ++ } ++ } else { ++ dctx->expected = 3; /* go directly to next header */ ++ dctx->stage = ZSTDds_decodeBlockHeader; ++ } ++ return 0; ++ } ++ case ZSTDds_decompressLastBlock: ++ case ZSTDds_decompressBlock: ++ { size_t rSize; ++ switch(dctx->bType) ++ { ++ case bt_compressed: ++ rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); ++ break; ++ case bt_raw : ++ rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); ++ break; ++ case bt_rle : ++ rSize = ZSTD_setRleBlock(dst, dstCapacity, src, srcSize, dctx->rleSize); ++ break; ++ case bt_reserved : /* should never happen */ ++ default: ++ return ERROR(corruption_detected); ++ } ++ if (ZSTD_isError(rSize)) return rSize; ++ if (dctx->fParams.checksumFlag) xxh64_update(&dctx->xxhState, dst, rSize); ++ ++ if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */ ++ if (dctx->fParams.checksumFlag) { /* another round for frame checksum */ ++ dctx->expected = 4; ++ dctx->stage = ZSTDds_checkChecksum; ++ } else { ++ dctx->expected = 0; /* ends here */ ++ dctx->stage = ZSTDds_getFrameHeaderSize; ++ } ++ } else { ++ dctx->stage = ZSTDds_decodeBlockHeader; ++ dctx->expected = ZSTD_blockHeaderSize; ++ dctx->previousDstEnd = (char*)dst + rSize; ++ } ++ return rSize; ++ } ++ case ZSTDds_checkChecksum: ++ { U32 const h32 = (U32)xxh64_digest(&dctx->xxhState); ++ U32 const check32 = MEM_readLE32(src); /* srcSize == 4, guaranteed by dctx->expected */ ++ if (check32 != h32) return ERROR(checksum_wrong); ++ dctx->expected = 0; ++ dctx->stage = ZSTDds_getFrameHeaderSize; ++ return 0; ++ } ++ case ZSTDds_decodeSkippableHeader: ++ { memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected); ++ dctx->expected = MEM_readLE32(dctx->headerBuffer + 4); ++ dctx->stage = ZSTDds_skipFrame; ++ return 0; ++ } ++ case ZSTDds_skipFrame: ++ { dctx->expected = 0; ++ dctx->stage = ZSTDds_getFrameHeaderSize; ++ return 0; ++ } ++ default: ++ return ERROR(GENERIC); /* impossible */ ++ } ++} ++ ++ ++static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) ++{ ++ dctx->dictEnd = dctx->previousDstEnd; ++ dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base)); ++ dctx->base = dict; ++ dctx->previousDstEnd = (const char*)dict + dictSize; ++ return 0; ++} ++ ++/* ZSTD_loadEntropy() : ++ * dict : must point at beginning of a valid zstd dictionary ++ * @return : size of entropy tables read */ ++static size_t 
ZSTD_loadEntropy(ZSTD_entropyTables_t* entropy, const void* const dict, size_t const dictSize) ++{ ++ const BYTE* dictPtr = (const BYTE*)dict; ++ const BYTE* const dictEnd = dictPtr + dictSize; ++ ++ if (dictSize <= 8) return ERROR(dictionary_corrupted); ++ dictPtr += 8; /* skip header = magic + dictID */ ++ ++ ++ { size_t const hSize = HUF_readDTableX4(entropy->hufTable, dictPtr, dictEnd-dictPtr); ++ if (HUF_isError(hSize)) return ERROR(dictionary_corrupted); ++ dictPtr += hSize; ++ } ++ ++ { short offcodeNCount[MaxOff+1]; ++ U32 offcodeMaxValue = MaxOff, offcodeLog; ++ size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); ++ if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted); ++ if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted); ++ CHECK_E(FSE_buildDTable(entropy->OFTable, offcodeNCount, offcodeMaxValue, offcodeLog), dictionary_corrupted); ++ dictPtr += offcodeHeaderSize; ++ } ++ ++ { short matchlengthNCount[MaxML+1]; ++ unsigned matchlengthMaxValue = MaxML, matchlengthLog; ++ size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); ++ if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted); ++ if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted); ++ CHECK_E(FSE_buildDTable(entropy->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog), dictionary_corrupted); ++ dictPtr += matchlengthHeaderSize; ++ } ++ ++ { short litlengthNCount[MaxLL+1]; ++ unsigned litlengthMaxValue = MaxLL, litlengthLog; ++ size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); ++ if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted); ++ if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted); ++ CHECK_E(FSE_buildDTable(entropy->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog), dictionary_corrupted); ++ dictPtr += litlengthHeaderSize; ++ } ++ ++ if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted); ++ { int i; ++ size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12)); ++ for (i=0; i<3; i++) { ++ U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4; ++ if (rep==0 || rep >= dictContentSize) return ERROR(dictionary_corrupted); ++ entropy->rep[i] = rep; ++ } } ++ ++ return dictPtr - (const BYTE*)dict; ++} ++ ++static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) ++{ ++ if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize); ++ { U32 const magic = MEM_readLE32(dict); ++ if (magic != ZSTD_DICT_MAGIC) { ++ return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */ ++ } } ++ dctx->dictID = MEM_readLE32((const char*)dict + 4); ++ ++ /* load entropy tables */ ++ { size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize); ++ if (ZSTD_isError(eSize)) return ERROR(dictionary_corrupted); ++ dict = (const char*)dict + eSize; ++ dictSize -= eSize; ++ } ++ dctx->litEntropy = dctx->fseEntropy = 1; ++ ++ /* reference dictionary content */ ++ return ZSTD_refDictContent(dctx, dict, dictSize); ++} ++ ++size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) ++{ ++ CHECK_F(ZSTD_decompressBegin(dctx)); ++ if (dict && dictSize) CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted); ++ return 0; ++} ++ ++ ++/* ====== ZSTD_DDict ====== */ ++ ++struct 
ZSTD_DDict_s { ++ void* dictBuffer; ++ const void* dictContent; ++ size_t dictSize; ++ ZSTD_entropyTables_t entropy; ++ U32 dictID; ++ U32 entropyPresent; ++ ZSTD_customMem cMem; ++}; /* typedef'd to ZSTD_DDict within "zstd.h" */ ++ ++size_t ZSTD_DDictWorkspaceBound(void) ++{ ++ return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DDict)); ++} ++ ++static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict) ++{ ++ return ddict->dictContent; ++} ++ ++static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict) ++{ ++ return ddict->dictSize; ++} ++ ++static void ZSTD_refDDict(ZSTD_DCtx* dstDCtx, const ZSTD_DDict* ddict) ++{ ++ ZSTD_decompressBegin(dstDCtx); /* init */ ++ if (ddict) { /* support refDDict on NULL */ ++ dstDCtx->dictID = ddict->dictID; ++ dstDCtx->base = ddict->dictContent; ++ dstDCtx->vBase = ddict->dictContent; ++ dstDCtx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize; ++ dstDCtx->previousDstEnd = dstDCtx->dictEnd; ++ if (ddict->entropyPresent) { ++ dstDCtx->litEntropy = 1; ++ dstDCtx->fseEntropy = 1; ++ dstDCtx->LLTptr = ddict->entropy.LLTable; ++ dstDCtx->MLTptr = ddict->entropy.MLTable; ++ dstDCtx->OFTptr = ddict->entropy.OFTable; ++ dstDCtx->HUFptr = ddict->entropy.hufTable; ++ dstDCtx->entropy.rep[0] = ddict->entropy.rep[0]; ++ dstDCtx->entropy.rep[1] = ddict->entropy.rep[1]; ++ dstDCtx->entropy.rep[2] = ddict->entropy.rep[2]; ++ } else { ++ dstDCtx->litEntropy = 0; ++ dstDCtx->fseEntropy = 0; ++ } ++ } ++} ++ ++static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict* ddict) ++{ ++ ddict->dictID = 0; ++ ddict->entropyPresent = 0; ++ if (ddict->dictSize < 8) return 0; ++ { U32 const magic = MEM_readLE32(ddict->dictContent); ++ if (magic != ZSTD_DICT_MAGIC) return 0; /* pure content mode */ ++ } ++ ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + 4); ++ ++ /* load entropy tables */ ++ CHECK_E( ZSTD_loadEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize), dictionary_corrupted ); ++ ddict->entropyPresent = 1; ++ return 0; ++} ++ ++ ++static ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, unsigned byReference, ZSTD_customMem customMem) ++{ ++ if (!customMem.customAlloc || !customMem.customFree) return NULL; ++ ++ { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem); ++ if (!ddict) return NULL; ++ ddict->cMem = customMem; ++ ++ if ((byReference) || (!dict) || (!dictSize)) { ++ ddict->dictBuffer = NULL; ++ ddict->dictContent = dict; ++ } else { ++ void* const internalBuffer = ZSTD_malloc(dictSize, customMem); ++ if (!internalBuffer) { ZSTD_freeDDict(ddict); return NULL; } ++ memcpy(internalBuffer, dict, dictSize); ++ ddict->dictBuffer = internalBuffer; ++ ddict->dictContent = internalBuffer; ++ } ++ ddict->dictSize = dictSize; ++ ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ ++ /* parse dictionary content */ ++ { size_t const errorCode = ZSTD_loadEntropy_inDDict(ddict); ++ if (ZSTD_isError(errorCode)) { ++ ZSTD_freeDDict(ddict); ++ return NULL; ++ } } ++ ++ return ddict; ++ } ++} ++ ++/*! ZSTD_initDDict() : ++* Create a digested dictionary, to start decompression without startup delay. ++* `dict` content is copied inside DDict. 
++* Consequently, `dict` can be released after `ZSTD_DDict` creation */ ++ZSTD_DDict* ZSTD_initDDict(const void* dict, size_t dictSize, void* workspace, size_t workspaceSize) ++{ ++ ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); ++ return ZSTD_createDDict_advanced(dict, dictSize, 1, stackMem); ++} ++ ++ ++size_t ZSTD_freeDDict(ZSTD_DDict* ddict) ++{ ++ if (ddict==NULL) return 0; /* support free on NULL */ ++ { ZSTD_customMem const cMem = ddict->cMem; ++ ZSTD_free(ddict->dictBuffer, cMem); ++ ZSTD_free(ddict, cMem); ++ return 0; ++ } ++} ++ ++/*! ZSTD_getDictID_fromDict() : ++ * Provides the dictID stored within dictionary. ++ * if @return == 0, the dictionary is not conformant with Zstandard specification. ++ * It can still be loaded, but as a content-only dictionary. */ ++unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize) ++{ ++ if (dictSize < 8) return 0; ++ if (MEM_readLE32(dict) != ZSTD_DICT_MAGIC) return 0; ++ return MEM_readLE32((const char*)dict + 4); ++} ++ ++/*! ZSTD_getDictID_fromDDict() : ++ * Provides the dictID of the dictionary loaded into `ddict`. ++ * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. ++ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ ++unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict) ++{ ++ if (ddict==NULL) return 0; ++ return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize); ++} ++ ++/*! ZSTD_getDictID_fromFrame() : ++ * Provides the dictID required to decompress the frame stored within `src`. ++ * If @return == 0, the dictID could not be decoded. ++ * This could be for one of the following reasons : ++ * - The frame does not require a dictionary to be decoded (most common case). ++ * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information. ++ * Note : this use case also happens when using a non-conformant dictionary. ++ * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`). ++ * - This is not a Zstandard frame. ++ * When identifying the exact failure cause, it's possible to use ZSTD_getFrameParams(), which will provide a more precise error code. */ ++unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize) ++{ ++ ZSTD_frameParams zfp = { 0 , 0 , 0 , 0 }; ++ size_t const hError = ZSTD_getFrameParams(&zfp, src, srcSize); ++ if (ZSTD_isError(hError)) return 0; ++ return zfp.dictID; ++} ++ ++ ++/*! ZSTD_decompress_usingDDict() : ++* Decompression using a pre-digested Dictionary ++* Use dictionary without significant overhead.
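++* An illustrative usage sketch (buffer names dictBuf, wksp, dst and src are hypothetical, not part of this patch) ; create the DDict once, then reuse it for every frame : ++* size_t const wkspSize = ZSTD_DDictWorkspaceBound(); ++* ZSTD_DDict* const ddict = ZSTD_initDDict(dictBuf, dictSize, wksp, wkspSize); ++* size_t const dSize = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ddict);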
*/ ++size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx, ++ void* dst, size_t dstCapacity, ++ const void* src, size_t srcSize, ++ const ZSTD_DDict* ddict) ++{ ++ /* pass content and size in case legacy frames are encountered */ ++ return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, ++ NULL, 0, ++ ddict); ++} ++ ++ ++/*===================================== ++* Streaming decompression ++*====================================*/ ++ ++typedef enum { zdss_init, zdss_loadHeader, ++ zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage; ++ ++/* *** Resource management *** */ ++struct ZSTD_DStream_s { ++ ZSTD_DCtx* dctx; ++ ZSTD_DDict* ddictLocal; ++ const ZSTD_DDict* ddict; ++ ZSTD_frameParams fParams; ++ ZSTD_dStreamStage stage; ++ char* inBuff; ++ size_t inBuffSize; ++ size_t inPos; ++ size_t maxWindowSize; ++ char* outBuff; ++ size_t outBuffSize; ++ size_t outStart; ++ size_t outEnd; ++ size_t blockSize; ++ BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; /* tmp buffer to store frame header */ ++ size_t lhSize; ++ ZSTD_customMem customMem; ++ void* legacyContext; ++ U32 previousLegacyVersion; ++ U32 legacyVersion; ++ U32 hostageByte; ++}; /* typedef'd to ZSTD_DStream within "zstd.h" */ ++ ++size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize) { ++ size_t const blockSize = MIN(maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX); ++ size_t const inBuffSize = blockSize; ++ size_t const outBuffSize = maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2; ++ return ZSTD_DCtxWorkspaceBound() + ZSTD_ALIGN(sizeof(ZSTD_DStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize); ++} ++ ++static ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem) ++{ ++ ZSTD_DStream* zds; ++ ++ if (!customMem.customAlloc || !customMem.customFree) return NULL; ++ ++ zds = (ZSTD_DStream*) ZSTD_malloc(sizeof(ZSTD_DStream), customMem); ++ if (zds==NULL) return NULL; ++ memset(zds, 0, sizeof(ZSTD_DStream)); ++ memcpy(&zds->customMem, &customMem, sizeof(ZSTD_customMem)); ++ zds->dctx = ZSTD_createDCtx_advanced(customMem); ++ if (zds->dctx == NULL) { ZSTD_freeDStream(zds); return NULL; } ++ zds->stage = zdss_init; ++ zds->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT; ++ return zds; ++} ++ ++ZSTD_DStream* ZSTD_initDStream(size_t maxWindowSize, void* workspace, size_t workspaceSize) ++{ ++ ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); ++ ZSTD_DStream* zds = ZSTD_createDStream_advanced(stackMem); ++ if (!zds) { return NULL; } ++ ++ zds->maxWindowSize = maxWindowSize; ++ zds->stage = zdss_loadHeader; ++ zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; ++ ZSTD_freeDDict(zds->ddictLocal); ++ zds->ddictLocal = NULL; ++ zds->ddict = zds->ddictLocal; ++ zds->legacyVersion = 0; ++ zds->hostageByte = 0; ++ return zds; ++} ++ ++ZSTD_DStream* ZSTD_initDStream_usingDDict(size_t maxWindowSize, const ZSTD_DDict* ddict, void* workspace, size_t workspaceSize) ++{ ++ ZSTD_DStream* zds = ZSTD_initDStream(maxWindowSize, workspace, workspaceSize); ++ if (zds) { ++ zds->ddict = ddict; ++ } ++ return zds; ++} ++ ++size_t ZSTD_freeDStream(ZSTD_DStream* zds) ++{ ++ if (zds==NULL) return 0; /* support free on null */ ++ { ZSTD_customMem const cMem = zds->customMem; ++ ZSTD_freeDCtx(zds->dctx); ++ zds->dctx = NULL; ++ ZSTD_freeDDict(zds->ddictLocal); ++ zds->ddictLocal = NULL; ++ ZSTD_free(zds->inBuff, cMem); ++ zds->inBuff = NULL; ++ ZSTD_free(zds->outBuff, cMem); ++ zds->outBuff = NULL; ++ ZSTD_free(zds, cMem); ++ return 0; ++ } ++} ++ ++ ++/* *** Initialization *** */ ++ ++size_t 
ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX + ZSTD_blockHeaderSize; } ++size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; } ++ ++size_t ZSTD_resetDStream(ZSTD_DStream* zds) ++{ ++ zds->stage = zdss_loadHeader; ++ zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; ++ zds->legacyVersion = 0; ++ zds->hostageByte = 0; ++ return ZSTD_frameHeaderSize_prefix; ++} ++ ++/* ***** Decompression ***** */ ++ ++MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize) ++{ ++ size_t const length = MIN(dstCapacity, srcSize); ++ memcpy(dst, src, length); ++ return length; ++} ++ ++ ++size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input) ++{ ++ const char* const istart = (const char*)(input->src) + input->pos; ++ const char* const iend = (const char*)(input->src) + input->size; ++ const char* ip = istart; ++ char* const ostart = (char*)(output->dst) + output->pos; ++ char* const oend = (char*)(output->dst) + output->size; ++ char* op = ostart; ++ U32 someMoreWork = 1; ++ ++ while (someMoreWork) { ++ switch(zds->stage) ++ { ++ case zdss_init : ++ ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */ ++ /* fall-through */ ++ ++ case zdss_loadHeader : ++ { size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize); ++ if (ZSTD_isError(hSize)) ++ return hSize; ++ if (hSize != 0) { /* need more input */ ++ size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */ ++ if (toLoad > (size_t)(iend-ip)) { /* not enough input to load full header */ ++ memcpy(zds->headerBuffer + zds->lhSize, ip, iend-ip); ++ zds->lhSize += iend-ip; ++ input->pos = input->size; ++ return (MAX(ZSTD_frameHeaderSize_min, hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */ ++ } ++ memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad; ++ break; ++ } } ++ ++ /* check for single-pass mode opportunity */ ++ if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */ ++ && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) { ++ size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend-istart); ++ if (cSize <= (size_t)(iend-istart)) { ++ size_t const decompressedSize = ZSTD_decompress_usingDDict(zds->dctx, op, oend-op, istart, cSize, zds->ddict); ++ if (ZSTD_isError(decompressedSize)) return decompressedSize; ++ ip = istart + cSize; ++ op += decompressedSize; ++ zds->dctx->expected = 0; ++ zds->stage = zdss_init; ++ someMoreWork = 0; ++ break; ++ } } ++ ++ /* Consume header */ ++ ZSTD_refDDict(zds->dctx, zds->ddict); ++ { size_t const h1Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); /* == ZSTD_frameHeaderSize_prefix */ ++ CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer, h1Size)); ++ { size_t const h2Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); ++ CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer+h1Size, h2Size)); ++ } } ++ ++ zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN); ++ if (zds->fParams.windowSize > zds->maxWindowSize) return ERROR(frameParameter_windowTooLarge); ++ ++ /* Adapt buffer sizes to frame header instructions */ ++ { size_t const blockSize = MIN(zds->fParams.windowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX); ++ size_t const neededOutSize = zds->fParams.windowSize + blockSize + WILDCOPY_OVERLENGTH * 2; ++ zds->blockSize = blockSize; ++ if 
(zds->inBuffSize < blockSize) { ++ ZSTD_free(zds->inBuff, zds->customMem); ++ zds->inBuffSize = blockSize; ++ zds->inBuff = (char*)ZSTD_malloc(blockSize, zds->customMem); ++ if (zds->inBuff == NULL) return ERROR(memory_allocation); ++ } ++ if (zds->outBuffSize < neededOutSize) { ++ ZSTD_free(zds->outBuff, zds->customMem); ++ zds->outBuffSize = neededOutSize; ++ zds->outBuff = (char*)ZSTD_malloc(neededOutSize, zds->customMem); ++ if (zds->outBuff == NULL) return ERROR(memory_allocation); ++ } } ++ zds->stage = zdss_read; ++ /* pass-through */ ++ ++ case zdss_read: ++ { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); ++ if (neededInSize==0) { /* end of frame */ ++ zds->stage = zdss_init; ++ someMoreWork = 0; ++ break; ++ } ++ if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */ ++ const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx); ++ size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, ++ zds->outBuff + zds->outStart, (isSkipFrame ? 0 : zds->outBuffSize - zds->outStart), ++ ip, neededInSize); ++ if (ZSTD_isError(decodedSize)) return decodedSize; ++ ip += neededInSize; ++ if (!decodedSize && !isSkipFrame) break; /* this was just a header */ ++ zds->outEnd = zds->outStart + decodedSize; ++ zds->stage = zdss_flush; ++ break; ++ } ++ if (ip==iend) { someMoreWork = 0; break; } /* no more input */ ++ zds->stage = zdss_load; ++ /* pass-through */ ++ } ++ ++ case zdss_load: ++ { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); ++ size_t const toLoad = neededInSize - zds->inPos; /* should always be <= remaining space within inBuff */ ++ size_t loadedSize; ++ if (toLoad > zds->inBuffSize - zds->inPos) return ERROR(corruption_detected); /* should never happen */ ++ loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend-ip); ++ ip += loadedSize; ++ zds->inPos += loadedSize; ++ if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */ ++ ++ /* decode loaded input */ ++ { const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx); ++ size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, ++ zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart, ++ zds->inBuff, neededInSize); ++ if (ZSTD_isError(decodedSize)) return decodedSize; ++ zds->inPos = 0; /* input is consumed */ ++ if (!decodedSize && !isSkipFrame) { zds->stage = zdss_read; break; } /* this was just a header */ ++ zds->outEnd = zds->outStart + decodedSize; ++ zds->stage = zdss_flush; ++ /* pass-through */ ++ } } ++ ++ case zdss_flush: ++ { size_t const toFlushSize = zds->outEnd - zds->outStart; ++ size_t const flushedSize = ZSTD_limitCopy(op, oend-op, zds->outBuff + zds->outStart, toFlushSize); ++ op += flushedSize; ++ zds->outStart += flushedSize; ++ if (flushedSize == toFlushSize) { /* flush completed */ ++ zds->stage = zdss_read; ++ if (zds->outStart + zds->blockSize > zds->outBuffSize) ++ zds->outStart = zds->outEnd = 0; ++ break; ++ } ++ /* cannot complete flush */ ++ someMoreWork = 0; ++ break; ++ } ++ default: return ERROR(GENERIC); /* impossible */ ++ } } ++ ++ /* result */ ++ input->pos += (size_t)(ip-istart); ++ output->pos += (size_t)(op-ostart); ++ { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds->dctx); ++ if (!nextSrcSizeHint) { /* frame fully decoded */ ++ if (zds->outEnd == zds->outStart) { /* output fully flushed */ ++ if (zds->hostageByte) { ++ if (input->pos >= input->size) { zds->stage = zdss_read; return 1; } /* can't release hostage (not present) */ ++ input->pos++; /* release hostage */ 
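++ /* note : this is the "hostage byte" scheme : while decoded data remains unflushed, one input byte is kept unconsumed (see below), which forces the caller to invoke ZSTD_decompressStream() again and drain the internal output buffer before the frame is reported as fully decoded */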
++ } ++ return 0; ++ } ++ if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */ ++ input->pos--; /* note : pos > 0, otherwise, impossible to finish reading last block */ ++ zds->hostageByte=1; ++ } ++ return 1; ++ } ++ nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds->dctx) == ZSTDnit_block); /* preload header of next block */ ++ if (zds->inPos > nextSrcSizeHint) return ERROR(GENERIC); /* should never happen */ ++ nextSrcSizeHint -= zds->inPos; /* already loaded*/ ++ return nextSrcSizeHint; ++ } ++} ++ ++EXPORT_SYMBOL(ZSTD_DCtxWorkspaceBound); ++EXPORT_SYMBOL(ZSTD_initDCtx); ++EXPORT_SYMBOL(ZSTD_decompressDCtx); ++EXPORT_SYMBOL(ZSTD_decompress_usingDict); ++ ++EXPORT_SYMBOL(ZSTD_DDictWorkspaceBound); ++EXPORT_SYMBOL(ZSTD_initDDict); ++EXPORT_SYMBOL(ZSTD_decompress_usingDDict); ++ ++EXPORT_SYMBOL(ZSTD_DStreamWorkspaceBound); ++EXPORT_SYMBOL(ZSTD_initDStream); ++EXPORT_SYMBOL(ZSTD_initDStream_usingDDict); ++EXPORT_SYMBOL(ZSTD_resetDStream); ++EXPORT_SYMBOL(ZSTD_decompressStream); ++EXPORT_SYMBOL(ZSTD_DStreamInSize); ++EXPORT_SYMBOL(ZSTD_DStreamOutSize); ++ ++EXPORT_SYMBOL(ZSTD_findFrameCompressedSize); ++EXPORT_SYMBOL(ZSTD_getFrameContentSize); ++EXPORT_SYMBOL(ZSTD_findDecompressedSize); ++ ++EXPORT_SYMBOL(ZSTD_isFrame); ++EXPORT_SYMBOL(ZSTD_getDictID_fromDict); ++EXPORT_SYMBOL(ZSTD_getDictID_fromDDict); ++EXPORT_SYMBOL(ZSTD_getDictID_fromFrame); ++ ++EXPORT_SYMBOL(ZSTD_getFrameParams); ++EXPORT_SYMBOL(ZSTD_decompressBegin); ++EXPORT_SYMBOL(ZSTD_decompressBegin_usingDict); ++EXPORT_SYMBOL(ZSTD_copyDCtx); ++EXPORT_SYMBOL(ZSTD_nextSrcSizeToDecompress); ++EXPORT_SYMBOL(ZSTD_decompressContinue); ++EXPORT_SYMBOL(ZSTD_nextInputType); ++ ++EXPORT_SYMBOL(ZSTD_decompressBlock); ++EXPORT_SYMBOL(ZSTD_insertBlock); ++ ++MODULE_LICENSE("BSD"); ++MODULE_DESCRIPTION("Zstd Decompressor"); +diff --git a/lib/zstd/entropy_common.c b/lib/zstd/entropy_common.c +new file mode 100644 +index 0000000..68d88082 +--- /dev/null ++++ b/lib/zstd/entropy_common.c +@@ -0,0 +1,217 @@ ++/* ++ Common functions of New Generation Entropy library ++ Copyright (C) 2016, Yann Collet. ++ ++ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions are ++ met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above ++ copyright notice, this list of conditions and the following disclaimer ++ in the documentation and/or other materials provided with the ++ distribution. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++ You can contact the author at : ++ - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy ++ - Public forum : https://groups.google.com/forum/#!forum/lz4c ++*************************************************************************** */ ++ ++/* ************************************* ++* Dependencies ++***************************************/ ++#include "mem.h" ++#include "error_private.h" /* ERR_*, ERROR */ ++#include "fse.h" ++#include "huf.h" ++ ++ ++/*=== Version ===*/ ++unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; } ++ ++ ++/*=== Error Management ===*/ ++unsigned FSE_isError(size_t code) { return ERR_isError(code); } ++ ++unsigned HUF_isError(size_t code) { return ERR_isError(code); } ++ ++ ++/*-************************************************************** ++* FSE NCount encoding-decoding ++****************************************************************/ ++size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, ++ const void* headerBuffer, size_t hbSize) ++{ ++ const BYTE* const istart = (const BYTE*) headerBuffer; ++ const BYTE* const iend = istart + hbSize; ++ const BYTE* ip = istart; ++ int nbBits; ++ int remaining; ++ int threshold; ++ U32 bitStream; ++ int bitCount; ++ unsigned charnum = 0; ++ int previous0 = 0; ++ ++ if (hbSize < 4) return ERROR(srcSize_wrong); ++ bitStream = MEM_readLE32(ip); ++ nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */ ++ if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge); ++ bitStream >>= 4; ++ bitCount = 4; ++ *tableLogPtr = nbBits; ++ remaining = (1<<nbBits)+1; ++ threshold = 1<<nbBits; ++ nbBits++; ++ ++ while ((remaining>1) & (charnum<=*maxSVPtr)) { ++ if (previous0) { ++ unsigned n0 = charnum; ++ while ((bitStream & 0xFFFF) == 0xFFFF) { ++ n0 += 24; ++ if (ip < iend-5) { ++ ip += 2; ++ bitStream = MEM_readLE32(ip) >> bitCount; ++ } else { ++ bitStream >>= 16; ++ bitCount += 16; ++ } } ++ while ((bitStream & 3) == 3) { ++ n0 += 3; ++ bitStream >>= 2; ++ bitCount += 2; ++ } ++ n0 += bitStream & 3; ++ bitCount += 2; ++ if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall); ++ while (charnum < n0) normalizedCounter[charnum++] = 0; ++ if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { ++ ip += bitCount>>3; ++ bitCount &= 7; ++ bitStream = MEM_readLE32(ip) >> bitCount; ++ } else { ++ bitStream >>= 2; ++ } } ++ { int const max = (2*threshold-1) - remaining; ++ int count; ++ ++ if ((bitStream & (threshold-1)) < (U32)max) { ++ count = bitStream & (threshold-1); ++ bitCount += nbBits-1; ++ } else { ++ count = bitStream & (2*threshold-1); ++ if (count >= threshold) count -= max; ++ bitCount += nbBits; ++ } ++ ++ count--; /* extra accuracy */ ++ remaining -= count < 0 ? -count : count; /* -1 means +1 */ ++ normalizedCounter[charnum++] = (short)count; ++ previous0 = !count; ++ while (remaining < threshold) { ++ nbBits--; ++ threshold >>= 1; ++ } ++ ++ if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { ++ ip += bitCount>>3; ++ bitCount &= 7; ++ } else { ++ bitCount -= (int)(8 * (iend - 4 - ip)); ++ ip = iend - 4; ++ } ++ bitStream = MEM_readLE32(ip) >> (bitCount & 31); ++ } } /* while ((remaining>1) & (charnum<=*maxSVPtr)) */ ++ if (remaining != 1) return ERROR(corruption_detected); ++ if (bitCount > 32) return ERROR(corruption_detected); ++ *maxSVPtr = charnum-1; ++ ++ ip += (bitCount+7)>>3; ++ return ip-istart; ++} ++ ++ ++/*! HUF_readStats() : ++ Read compact Huffman tree, saved by HUF_writeCTable(). ++ `huffWeight` is destination buffer.
++ `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32. ++ @return : size read from `src` , or an error Code . ++ Note : Needed by HUF_readCTable() and HUF_readDTableX?() . ++*/ ++size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, ++ U32* nbSymbolsPtr, U32* tableLogPtr, ++ const void* src, size_t srcSize) ++{ ++ U32 weightTotal; ++ const BYTE* ip = (const BYTE*) src; ++ size_t iSize; ++ size_t oSize; ++ ++ if (!srcSize) return ERROR(srcSize_wrong); ++ iSize = ip[0]; ++ /* memset(huffWeight, 0, hwSize); *//* is not necessary, even though some analyzer complain ... */ ++ ++ if (iSize >= 128) { /* special header */ ++ oSize = iSize - 127; ++ iSize = ((oSize+1)/2); ++ if (iSize+1 > srcSize) return ERROR(srcSize_wrong); ++ if (oSize >= hwSize) return ERROR(corruption_detected); ++ ip += 1; ++ { U32 n; ++ for (n=0; n<oSize; n+=2) { ++ huffWeight[n] = ip[n/2] >> 4; ++ huffWeight[n+1] = ip[n/2] & 15; ++ } } } ++ else { /* header compressed with FSE (normal case) */ ++ FSE_DTable fseWorkspace[FSE_DTABLE_SIZE_U32(6)]; /* 6 is max possible tableLog for HUF header (maybe even 5, to be tested) */ ++ if (iSize+1 > srcSize) return ERROR(srcSize_wrong); ++ oSize = FSE_decompress_wksp(huffWeight, hwSize-1, ip+1, iSize, fseWorkspace, 6); /* max (hwSize-1) values decoded, as last one is implied */ ++ if (FSE_isError(oSize)) return oSize; ++ } ++ ++ /* collect weight stats */ ++ memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32)); ++ weightTotal = 0; ++ { U32 n; for (n=0; n<oSize; n++) { ++ if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected); ++ rankStats[huffWeight[n]]++; ++ weightTotal += (1 << huffWeight[n]) >> 1; ++ } } ++ if (weightTotal == 0) return ERROR(corruption_detected); ++ ++ /* get last non-null symbol weight (implied, total must be 2^n) */ ++ { U32 const tableLog = BIT_highbit32(weightTotal) + 1; ++ if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected); ++ *tableLogPtr = tableLog; ++ /* determine last weight */ ++ { U32 const total = 1 << tableLog; ++ U32 const rest = total - weightTotal; ++ U32 const verif = 1 << BIT_highbit32(rest); ++ U32 const lastWeight = BIT_highbit32(rest) + 1; ++ if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */ ++ huffWeight[oSize] = (BYTE)lastWeight; ++ rankStats[lastWeight]++; ++ } } ++ ++ /* check tree construction validity */ ++ if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */ ++ ++ /* results */ ++ *nbSymbolsPtr = (U32)(oSize+1); ++ return iSize+1; ++} +diff --git a/lib/zstd/error_private.h b/lib/zstd/error_private.h +new file mode 100644 +index 0000000..8cf148b +--- /dev/null ++++ b/lib/zstd/error_private.h +@@ -0,0 +1,44 @@ ++/** ++ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. ++ * All rights reserved. ++ * ++ * This source code is licensed under the BSD-style license found in the ++ * LICENSE file in the root directory of this source tree. An additional grant ++ * of patent rights can be found in the PATENTS file in the same directory.
++ */ ++ ++/* Note : this module is expected to remain private, do not expose it */ ++ ++#ifndef ERROR_H_MODULE ++#define ERROR_H_MODULE ++ ++/* **************************************** ++* Dependencies ++******************************************/ ++#include <linux/types.h> /* size_t */ ++#include <linux/zstd.h> /* enum list */ ++ ++ ++/* **************************************** ++* Compiler-specific ++******************************************/ ++#define ERR_STATIC static __attribute__((unused)) ++ ++ ++/*-**************************************** ++* Customization (error_public.h) ++******************************************/ ++typedef ZSTD_ErrorCode ERR_enum; ++#define PREFIX(name) ZSTD_error_##name ++ ++ ++/*-**************************************** ++* Error codes handling ++******************************************/ ++#define ERROR(name) ((size_t)-PREFIX(name)) ++ ++ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); } ++ ++ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); } ++ ++#endif /* ERROR_H_MODULE */ +diff --git a/lib/zstd/fse.h b/lib/zstd/fse.h +new file mode 100644 +index 0000000..14fa439 +--- /dev/null ++++ b/lib/zstd/fse.h +@@ -0,0 +1,606 @@ ++/* ****************************************************************** ++ FSE : Finite State Entropy codec ++ Public Prototypes declaration ++ Copyright (C) 2013-2016, Yann Collet. ++ ++ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions are ++ met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above ++ copyright notice, this list of conditions and the following disclaimer ++ in the documentation and/or other materials provided with the ++ distribution. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ ++ You can contact the author at : ++ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy ++****************************************************************** */ ++#ifndef FSE_H ++#define FSE_H ++ ++ ++/*-***************************************** ++* Dependencies ++******************************************/ ++#include <linux/types.h> /* size_t, ptrdiff_t */ ++ ++ ++/*-***************************************** ++* FSE_PUBLIC_API : control library symbols visibility ++******************************************/ ++#define FSE_PUBLIC_API ++ ++/*------ Version ------*/ ++#define FSE_VERSION_MAJOR 0 ++#define FSE_VERSION_MINOR 9 ++#define FSE_VERSION_RELEASE 0 ++ ++#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE ++#define FSE_QUOTE(str) #str ++#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str) ++#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION) ++ ++#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE) ++FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */ ++ ++/*-***************************************** ++* Tool functions ++******************************************/ ++FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */ ++ ++/* Error Management */ ++FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */ ++ ++ ++/*-***************************************** ++* FSE detailed API ++******************************************/ ++/*! ++FSE_compress() does the following: ++1. count symbol occurrence from source[] into table count[] ++2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog) ++3. save normalized counters to memory buffer using writeNCount() ++4. build encoding table 'CTable' from normalized counters ++5. encode the data stream using encoding table 'CTable' ++ ++FSE_decompress() does the following: ++1. read normalized counters with readNCount() ++2. build decoding table 'DTable' from normalized counters ++3. decode the data stream using decoding table 'DTable' ++ ++The following API allows targeting specific sub-functions for advanced tasks. ++For example, it's possible to compress several blocks using the same 'CTable', ++or to save and provide normalized distribution using external method. ++*/ ++ ++/* *** COMPRESSION *** */ ++/*! FSE_optimalTableLog(): ++ dynamically downsize 'tableLog' when conditions are met. ++ It saves CPU time, by using smaller tables, while preserving or even improving compression ratio. ++ @return : recommended tableLog (necessarily <= 'maxTableLog') */ ++FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); ++ ++/*! FSE_normalizeCount(): ++ normalize counts so that sum(count[]) == Power_of_2 (2^tableLog) ++ 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1). ++ @return : tableLog, ++ or an errorCode, which can be tested using FSE_isError() */ ++FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog, const unsigned* count, size_t srcSize, unsigned maxSymbolValue); ++ ++/*! FSE_NCountWriteBound(): ++ Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'. ++ Typically useful for allocation purpose. */ ++FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog); ++ ++/*!
FSE_writeNCount(): ++ Compactly save 'normalizedCounter' into 'buffer'. ++ @return : size of the compressed table, ++ or an errorCode, which can be tested using FSE_isError(). */ ++FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); ++ ++ ++/*! Constructor and Destructor of FSE_CTable. ++ Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */ ++typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */ ++ ++/*! FSE_compress_usingCTable(): ++ Compress `src` using `ct` into `dst` which must be already allocated. ++ @return : size of compressed data (<= `dstCapacity`), ++ or 0 if compressed data could not fit into `dst`, ++ or an errorCode, which can be tested using FSE_isError() */ ++FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct); ++ ++/*! ++Tutorial : ++---------- ++The first step is to count all symbols. FSE_count() does this job very fast. ++Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells. ++'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0] ++maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value) ++FSE_count() will return the number of occurrence of the most frequent symbol. ++This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility. ++If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()). ++ ++The next step is to normalize the frequencies. ++FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'. ++It also guarantees a minimum of 1 to any Symbol with frequency >= 1. ++You can use 'tableLog'==0 to mean "use default tableLog value". ++If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(), ++which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default"). ++ ++The result of FSE_normalizeCount() will be saved into a table, ++called 'normalizedCounter', which is a table of signed short. ++'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells. ++The return value is tableLog if everything proceeded as expected. ++It is 0 if there is a single symbol within distribution. ++If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()). ++ ++'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount(). ++'buffer' must be already allocated. ++For guaranteed success, buffer size must be at least FSE_headerBound(). ++The result of the function is the number of bytes written into 'buffer'. ++If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small). ++ ++'normalizedCounter' can then be used to create the compression table 'CTable'. ++The space required by 'CTable' must be already allocated, using FSE_createCTable(). ++You can then use FSE_buildCTable() to fill 'CTable'. ++If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()). ++ ++'CTable' can then be used to compress 'src', with FSE_compress_usingCTable(). 
++Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize' ++The function returns the size of compressed data (without header), necessarily <= `dstCapacity`. ++If it returns '0', compressed data could not fit into 'dst'. ++If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()). ++*/ ++ ++ ++/* *** DECOMPRESSION *** */ ++ ++/*! FSE_readNCount(): ++ Read compactly saved 'normalizedCounter' from 'rBuffer'. ++ @return : size read from 'rBuffer', ++ or an errorCode, which can be tested using FSE_isError(). ++ maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */ ++FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize); ++ ++/*! Constructor and Destructor of FSE_DTable. ++ Note that its size depends on 'tableLog' */ ++typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */ ++ ++/*! FSE_buildDTable(): ++ Builds 'dt', which must be already allocated, using FSE_createDTable(). ++ return : 0, or an errorCode, which can be tested using FSE_isError() */ ++FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); ++ ++/*! FSE_decompress_usingDTable(): ++ Decompress compressed source `cSrc` of size `cSrcSize` using `dt` ++ into `dst` which must be already allocated. ++ @return : size of regenerated data (necessarily <= `dstCapacity`), ++ or an errorCode, which can be tested using FSE_isError() */ ++FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt); ++ ++/*! ++Tutorial : ++---------- ++(Note : these functions only decompress FSE-compressed blocks. ++ If block is uncompressed, use memcpy() instead ++ If block is a single repeated byte, use memset() instead ) ++ ++The first step is to obtain the normalized frequencies of symbols. ++This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount(). ++'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short. ++In practice, that means it's necessary to know 'maxSymbolValue' beforehand, ++or size the table to handle worst case situations (typically 256). ++FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'. ++The result of FSE_readNCount() is the number of bytes read from 'rBuffer'. ++Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that. ++If there is an error, the function will return an error code, which can be tested using FSE_isError(). ++ ++The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'. ++This is performed by the function FSE_buildDTable(). ++The space required by 'FSE_DTable' must be already allocated using FSE_createDTable(). ++If there is an error, the function will return an error code, which can be tested using FSE_isError(). ++ ++`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable(). ++`cSrcSize` must be strictly correct, otherwise decompression will fail. ++FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`). ++If there is an error, the function will return an error code, which can be tested using FSE_isError(). 
(ex: dst buffer too small) ++*/ ++ ++ ++/* *** Dependency *** */ ++#include "bitstream.h" ++ ++ ++/* ***************************************** ++* Static allocation ++*******************************************/ ++/* FSE buffer bounds */ ++#define FSE_NCOUNTBOUND 512 ++#define FSE_BLOCKBOUND(size) (size + (size>>7)) ++#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ ++ ++/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */ ++#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2)) ++#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<= `1024` unsigned ++ */ ++size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, ++ const void* source, size_t sourceSize, unsigned* workSpace); ++ ++/* FSE_countFast_wksp() : ++ * Same as FSE_countFast(), but using an externally provided scratch buffer. ++ * `workSpace` must be a table of minimum `1024` unsigned ++ */ ++size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* workSpace); ++ ++/*! FSE_count_simple ++ * Same as FSE_countFast(), but does not use any additional memory (not even on stack). ++ * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`). ++*/ ++size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize); ++ ++ ++ ++unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus); ++/**< same as FSE_optimalTableLog(), which used `minus==2` */ ++ ++/* FSE_compress_wksp() : ++ * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`). ++ * FSE_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable. ++ */ ++#define FSE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) ) ++size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); ++ ++size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits); ++/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */ ++ ++size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue); ++/**< build a fake FSE_CTable, designed to compress always the same symbolValue */ ++ ++/* FSE_buildCTable_wksp() : ++ * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`). ++ * `wkspSize` must be >= `(1<= BIT_DStream_completed ++ ++When it's done, verify decompression is fully completed, by checking both DStream and the relevant states. ++Checking if DStream has reached its end is performed by : ++ BIT_endOfDStream(&DStream); ++Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible. 
++
++    FSE_endOfDState(&DState);
++*/
++
++
++/* *****************************************
++*  FSE unsafe API
++*******************************************/
++static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
++/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
++
++
++/* *****************************************
++*  Implementation of inlined functions
++*******************************************/
++typedef struct {
++    int deltaFindState;
++    U32 deltaNbBits;
++} FSE_symbolCompressionTransform; /* total 8 bytes */
++
++MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
++{
++    const void* ptr = ct;
++    const U16* u16ptr = (const U16*) ptr;
++    const U32 tableLog = MEM_read16(ptr);
++    statePtr->value = (ptrdiff_t)1<<tableLog;
++    statePtr->stateTable = u16ptr+2;
++    statePtr->symbolTT = ((const U32*)ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1));
++    statePtr->stateLog = tableLog;
++}
++
++
++/*! FSE_initCState2() :
++*   Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
++*   uses the smallest state value possible, saving the cost of this symbol */
++MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)
++{
++    FSE_initCState(statePtr, ct);
++    {   const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
++        const U16* stateTable = (const U16*)(statePtr->stateTable);
++        U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16);
++        statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
++        statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
++    }
++}
++
++MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, U32 symbol)
++{
++    const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
++    const U16* const stateTable = (const U16*)(statePtr->stateTable);
++    U32 nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
++    BIT_addBits(bitC, statePtr->value, nbBitsOut);
++    statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
++}
++
++MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
++{
++    BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
++    BIT_flushBits(bitC);
++}
++
++
++/* ====== Decompression ====== */
++
++typedef struct {
++    U16 tableLog;
++    U16 fastMode;
++} FSE_DTableHeader;   /* sizeof U32 */
++
++typedef struct
++{
++    unsigned short newState;
++    unsigned char  symbol;
++    unsigned char  nbBits;
++} FSE_decode_t;   /* size == U32 */
++
++MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
++{
++    const void* ptr = dt;
++    const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
++    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
++    BIT_reloadDStream(bitD);
++    DStatePtr->table = dt + 1;
++}
++
++MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr)
++{
++    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
++    return DInfo.symbol;
++}
++
++MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
++{
++    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
++    U32 const nbBits = DInfo.nbBits;
++    size_t const lowBits = BIT_readBits(bitD, nbBits);
++    DStatePtr->state = DInfo.newState + lowBits;
++}
++
++MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
++{
++    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
++    U32 const nbBits = DInfo.nbBits;
++    BYTE const symbol = DInfo.symbol;
++    size_t const lowBits = BIT_readBits(bitD, nbBits);
++
++    DStatePtr->state = DInfo.newState + lowBits;
++    return symbol;
++}
++
++/*! FSE_decodeSymbolFast() :
++    unsafe, only works if no symbol has a probability > 50% */
++MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
++{
++    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
++    U32 const nbBits = DInfo.nbBits;
++    BYTE const symbol = DInfo.symbol;
++    size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
++
++    DStatePtr->state = DInfo.newState + lowBits;
++    return symbol;
++}
++
++MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
++{
++    return DStatePtr->state == 0;
++}
++
++
++
++#ifndef FSE_COMMONDEFS_ONLY
++
++/* **************************************************************
++*  Tuning parameters
++****************************************************************/
++/*!MEMORY_USAGE :
++*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
++*  Increasing memory usage improves compression ratio
++*  Reduced memory usage can improve speed, due to cache effect
++*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
++#ifndef FSE_MAX_MEMORY_USAGE
++# define FSE_MAX_MEMORY_USAGE 14
++#endif
++#ifndef FSE_DEFAULT_MEMORY_USAGE
++# define FSE_DEFAULT_MEMORY_USAGE 13
++#endif
++
++/*!FSE_MAX_SYMBOL_VALUE :
++*  Maximum symbol value authorized.
++*  Required for proper stack allocation */
++#ifndef FSE_MAX_SYMBOL_VALUE
++# define FSE_MAX_SYMBOL_VALUE 255
++#endif
++
++/* **************************************************************
++*  template functions type & suffix
++****************************************************************/
++#define FSE_FUNCTION_TYPE BYTE
++#define FSE_FUNCTION_EXTENSION
++#define FSE_DECODE_TYPE FSE_decode_t
++
++
++#endif   /* !FSE_COMMONDEFS_ONLY */
++
++
++/* ***************************************************************
++*  Constants
++*****************************************************************/
++#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
++#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
++#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
++#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
++#define FSE_MIN_TABLELOG 5
++
++#define FSE_TABLELOG_ABSOLUTE_MAX 15
++#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
++# error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
++#endif
++
++#define FSE_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)
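++
++/*
++ * Decoding-loop sketch (illustrative only; `dt` is assumed to have been
++ * built with FSE_buildDTable(), `bitD` initialised with BIT_initDStream(),
++ * and `op`/`oend` to describe the output buffer; error checks elided) :
++ *
++ *     FSE_DState_t state;
++ *     FSE_initDState(&state, &bitD, dt);
++ *     while ( (BIT_reloadDStream(&bitD) == BIT_DStream_unfinished)
++ *          && (op < oend) )
++ *         *op++ = FSE_decodeSymbol(&state, &bitD);
++ *     // drain : keep decoding until BIT_endOfDStream(&bitD) and
++ *     // FSE_endOfDState(&state) both report completion
++ */
++
++#endif  /* FSE_H */
+diff --git a/lib/zstd/fse_compress.c b/lib/zstd/fse_compress.c
+new file mode 100644
+index 0000000..b6a6d46
+--- /dev/null
++++ b/lib/zstd/fse_compress.c
+@@ -0,0 +1,788 @@
++/* ******************************************************************
++   FSE : Finite State Entropy encoder
++   Copyright (C) 2013-2015, Yann Collet.
++
++   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
++
++   Redistribution and use in source and binary forms, with or without
++   modification, are permitted provided that the following conditions are
++   met:
++
++     * Redistributions of source code must retain the above copyright
++   notice, this list of conditions and the following disclaimer.
++     * Redistributions in binary form must reproduce the above
++   copyright notice, this list of conditions and the following disclaimer
++   in the documentation and/or other materials provided with the
++   distribution.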
++
++   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++
++   You can contact the author at :
++   - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
++   - Public forum : https://groups.google.com/forum/#!forum/lz4c
++****************************************************************** */
++
++/* **************************************************************
++*  Compiler specifics
++****************************************************************/
++#define FORCE_INLINE static __always_inline
++
++
++/* **************************************************************
++*  Includes
++****************************************************************/
++#include <linux/types.h>
++#include <linux/string.h>   /* memcpy, memset */
++#include "bitstream.h"
++#include "fse.h"
++
++
++/* **************************************************************
++*  Error Management
++****************************************************************/
++#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
++
++
++/* **************************************************************
++*  Templates
++****************************************************************/
++/*
++  designed to be included
++  for type-specific functions (template emulation in C)
++  Objective is to write these functions only once, for improved maintenance
++*/
++
++/* safety checks */
++#ifndef FSE_FUNCTION_EXTENSION
++# error "FSE_FUNCTION_EXTENSION must be defined"
++#endif
++#ifndef FSE_FUNCTION_TYPE
++# error "FSE_FUNCTION_TYPE must be defined"
++#endif
++
++/* Function names */
++#define FSE_CAT(X,Y) X##Y
++#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
++#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
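++
++/*
++ * Expansion sketch (illustrative) : FSE_CAT()/FSE_FUNCTION_NAME() emulate
++ * templates by token-pasting a type-specific suffix onto a function stem,
++ * so the same source can be stamped out once per FSE_FUNCTION_TYPE.
++ * With the empty FSE_FUNCTION_EXTENSION defined in fse.h :
++ *
++ *     FSE_FUNCTION_NAME(FSE_buildCTable, FSE_FUNCTION_EXTENSION)
++ *         -> FSE_CAT(FSE_buildCTable, )
++ *         -> FSE_buildCTable
++ */
++
++
++/* Function templates */
++
++/* FSE_buildCTable_wksp() :
++ * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).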
++ * wkspSize should be sized to handle worst case situation, which is `1<>1 : 1) ; ++ FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT); ++ U32 const step = FSE_TABLESTEP(tableSize); ++ U32 cumul[FSE_MAX_SYMBOL_VALUE+2]; ++ ++ FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)workSpace; ++ U32 highThreshold = tableSize-1; ++ ++ /* CTable header */ ++ if (((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE) > wkspSize) return ERROR(tableLog_tooLarge); ++ tableU16[-2] = (U16) tableLog; ++ tableU16[-1] = (U16) maxSymbolValue; ++ ++ /* For explanations on how to distribute symbol values over the table : ++ * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */ ++ ++ /* symbol start positions */ ++ { U32 u; ++ cumul[0] = 0; ++ for (u=1; u<=maxSymbolValue+1; u++) { ++ if (normalizedCounter[u-1]==-1) { /* Low proba symbol */ ++ cumul[u] = cumul[u-1] + 1; ++ tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1); ++ } else { ++ cumul[u] = cumul[u-1] + normalizedCounter[u-1]; ++ } } ++ cumul[maxSymbolValue+1] = tableSize+1; ++ } ++ ++ /* Spread symbols */ ++ { U32 position = 0; ++ U32 symbol; ++ for (symbol=0; symbol<=maxSymbolValue; symbol++) { ++ int nbOccurences; ++ for (nbOccurences=0; nbOccurences highThreshold) position = (position + step) & tableMask; /* Low proba area */ ++ } } ++ ++ if (position!=0) return ERROR(GENERIC); /* Must have gone through all positions */ ++ } ++ ++ /* Build table */ ++ { U32 u; for (u=0; u> 3) + 3; ++ return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */ ++} ++ ++static size_t FSE_writeNCount_generic (void* header, size_t headerBufferSize, ++ const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, ++ unsigned writeIsSafe) ++{ ++ BYTE* const ostart = (BYTE*) header; ++ BYTE* out = ostart; ++ BYTE* const oend = ostart + headerBufferSize; ++ int nbBits; ++ const int tableSize = 1 << tableLog; ++ int remaining; ++ int threshold; ++ U32 bitStream; ++ int bitCount; ++ unsigned charnum = 0; ++ int previous0 = 0; ++ ++ bitStream = 0; ++ bitCount = 0; ++ /* Table Size */ ++ bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount; ++ bitCount += 4; ++ ++ /* Init */ ++ remaining = tableSize+1; /* +1 for extra accuracy */ ++ threshold = tableSize; ++ nbBits = tableLog+1; ++ ++ while (remaining>1) { /* stops at 1 */ ++ if (previous0) { ++ unsigned start = charnum; ++ while (!normalizedCounter[charnum]) charnum++; ++ while (charnum >= start+24) { ++ start+=24; ++ bitStream += 0xFFFFU << bitCount; ++ if ((!writeIsSafe) && (out > oend-2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ ++ out[0] = (BYTE) bitStream; ++ out[1] = (BYTE)(bitStream>>8); ++ out+=2; ++ bitStream>>=16; ++ } ++ while (charnum >= start+3) { ++ start+=3; ++ bitStream += 3 << bitCount; ++ bitCount += 2; ++ } ++ bitStream += (charnum-start) << bitCount; ++ bitCount += 2; ++ if (bitCount>16) { ++ if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ ++ out[0] = (BYTE)bitStream; ++ out[1] = (BYTE)(bitStream>>8); ++ out += 2; ++ bitStream >>= 16; ++ bitCount -= 16; ++ } } ++ { int count = normalizedCounter[charnum++]; ++ int const max = (2*threshold-1)-remaining; ++ remaining -= count < 0 ? -count : count; ++ count++; /* +1 for extra accuracy */ ++ if (count>=threshold) count += max; /* [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ */ ++ bitStream += count << bitCount; ++ bitCount += nbBits; ++ bitCount -= (count>=1; ++ } ++ if (bitCount>16) { ++ if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ ++ out[0] = (BYTE)bitStream; ++ out[1] = (BYTE)(bitStream>>8); ++ out += 2; ++ bitStream >>= 16; ++ bitCount -= 16; ++ } } ++ ++ /* flush remaining bitStream */ ++ if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ ++ out[0] = (BYTE)bitStream; ++ out[1] = (BYTE)(bitStream>>8); ++ out+= (bitCount+7) /8; ++ ++ if (charnum > maxSymbolValue + 1) return ERROR(GENERIC); ++ ++ return (out-ostart); ++} ++ ++ ++size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) ++{ ++ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported */ ++ if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported */ ++ ++ if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog)) ++ return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0); ++ ++ return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1); ++} ++ ++ ++ ++/*-************************************************************** ++* Counting histogram ++****************************************************************/ ++/*! FSE_count_simple ++ This function counts byte values within `src`, and store the histogram into table `count`. ++ It doesn't use any additional memory. ++ But this function is unsafe : it doesn't check that all values within `src` can fit into `count`. ++ For this reason, prefer using a table `count` with 256 elements. ++ @return : count of most numerous element ++*/ ++size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, ++ const void* src, size_t srcSize) ++{ ++ const BYTE* ip = (const BYTE*)src; ++ const BYTE* const end = ip + srcSize; ++ unsigned maxSymbolValue = *maxSymbolValuePtr; ++ unsigned max=0; ++ ++ memset(count, 0, (maxSymbolValue+1)*sizeof(*count)); ++ if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; } ++ ++ while (ip max) max = count[s]; } ++ ++ return (size_t)max; ++} ++ ++ ++/* FSE_count_parallel_wksp() : ++ * Same as FSE_count_parallel(), but using an externally provided scratch buffer. 
++ * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)`` */ ++static size_t FSE_count_parallel_wksp( ++ unsigned* count, unsigned* maxSymbolValuePtr, ++ const void* source, size_t sourceSize, ++ unsigned checkMax, unsigned* const workSpace) ++{ ++ const BYTE* ip = (const BYTE*)source; ++ const BYTE* const iend = ip+sourceSize; ++ unsigned maxSymbolValue = *maxSymbolValuePtr; ++ unsigned max=0; ++ U32* const Counting1 = workSpace; ++ U32* const Counting2 = Counting1 + 256; ++ U32* const Counting3 = Counting2 + 256; ++ U32* const Counting4 = Counting3 + 256; ++ ++ memset(Counting1, 0, 4*256*sizeof(unsigned)); ++ ++ /* safety checks */ ++ if (!sourceSize) { ++ memset(count, 0, maxSymbolValue + 1); ++ *maxSymbolValuePtr = 0; ++ return 0; ++ } ++ if (!maxSymbolValue) maxSymbolValue = 255; /* 0 == default */ ++ ++ /* by stripes of 16 bytes */ ++ { U32 cached = MEM_read32(ip); ip += 4; ++ while (ip < iend-15) { ++ U32 c = cached; cached = MEM_read32(ip); ip += 4; ++ Counting1[(BYTE) c ]++; ++ Counting2[(BYTE)(c>>8) ]++; ++ Counting3[(BYTE)(c>>16)]++; ++ Counting4[ c>>24 ]++; ++ c = cached; cached = MEM_read32(ip); ip += 4; ++ Counting1[(BYTE) c ]++; ++ Counting2[(BYTE)(c>>8) ]++; ++ Counting3[(BYTE)(c>>16)]++; ++ Counting4[ c>>24 ]++; ++ c = cached; cached = MEM_read32(ip); ip += 4; ++ Counting1[(BYTE) c ]++; ++ Counting2[(BYTE)(c>>8) ]++; ++ Counting3[(BYTE)(c>>16)]++; ++ Counting4[ c>>24 ]++; ++ c = cached; cached = MEM_read32(ip); ip += 4; ++ Counting1[(BYTE) c ]++; ++ Counting2[(BYTE)(c>>8) ]++; ++ Counting3[(BYTE)(c>>16)]++; ++ Counting4[ c>>24 ]++; ++ } ++ ip-=4; ++ } ++ ++ /* finish last symbols */ ++ while (ipmaxSymbolValue; s--) { ++ Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s]; ++ if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall); ++ } } ++ ++ { U32 s; for (s=0; s<=maxSymbolValue; s++) { ++ count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s]; ++ if (count[s] > max) max = count[s]; ++ } } ++ ++ while (!count[maxSymbolValue]) maxSymbolValue--; ++ *maxSymbolValuePtr = maxSymbolValue; ++ return (size_t)max; ++} ++ ++/* FSE_countFast_wksp() : ++ * Same as FSE_countFast(), but using an externally provided scratch buffer. ++ * `workSpace` size must be table of >= `1024` unsigned */ ++size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, ++ const void* source, size_t sourceSize, unsigned* workSpace) ++{ ++ if (sourceSize < 1500) return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize); ++ return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace); ++} ++ ++/* FSE_count_wksp() : ++ * Same as FSE_count(), but using an externally provided scratch buffer. ++ * `workSpace` size must be table of >= `1024` unsigned */ ++size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, ++ const void* source, size_t sourceSize, unsigned* workSpace) ++{ ++ if (*maxSymbolValuePtr < 255) ++ return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace); ++ *maxSymbolValuePtr = 255; ++ return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace); ++} ++ ++ ++/*-************************************************************** ++* FSE Compression Code ++****************************************************************/ ++/*! 
FSE_sizeof_CTable() : ++ FSE_CTable is a variable size structure which contains : ++ `U16 tableLog;` ++ `U16 maxSymbolValue;` ++ `U16 nextStateNumber[1 << tableLog];` // This size is variable ++ `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];` // This size is variable ++Allocation is manual (C standard does not support variable-size structures). ++*/ ++size_t FSE_sizeof_CTable (unsigned maxSymbolValue, unsigned tableLog) ++{ ++ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); ++ return FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32); ++} ++ ++/* provides the minimum logSize to safely represent a distribution */ ++static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue) ++{ ++ U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1; ++ U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2; ++ U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols; ++ return minBits; ++} ++ ++unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus) ++{ ++ U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus; ++ U32 tableLog = maxTableLog; ++ U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue); ++ if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG; ++ if (maxBitsSrc < tableLog) tableLog = maxBitsSrc; /* Accuracy can be reduced */ ++ if (minBits > tableLog) tableLog = minBits; /* Need a minimum to safely represent all symbol values */ ++ if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG; ++ if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG; ++ return tableLog; ++} ++ ++unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) ++{ ++ return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2); ++} ++ ++ ++/* Secondary normalization method. ++ To be used when primary method fails. 
*/ ++ ++static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue) ++{ ++ short const NOT_YET_ASSIGNED = -2; ++ U32 s; ++ U32 distributed = 0; ++ U32 ToDistribute; ++ ++ /* Init */ ++ U32 const lowThreshold = (U32)(total >> tableLog); ++ U32 lowOne = (U32)((total * 3) >> (tableLog + 1)); ++ ++ for (s=0; s<=maxSymbolValue; s++) { ++ if (count[s] == 0) { ++ norm[s]=0; ++ continue; ++ } ++ if (count[s] <= lowThreshold) { ++ norm[s] = -1; ++ distributed++; ++ total -= count[s]; ++ continue; ++ } ++ if (count[s] <= lowOne) { ++ norm[s] = 1; ++ distributed++; ++ total -= count[s]; ++ continue; ++ } ++ ++ norm[s]=NOT_YET_ASSIGNED; ++ } ++ ToDistribute = (1 << tableLog) - distributed; ++ ++ if ((total / ToDistribute) > lowOne) { ++ /* risk of rounding to zero */ ++ lowOne = (U32)((total * 3) / (ToDistribute * 2)); ++ for (s=0; s<=maxSymbolValue; s++) { ++ if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) { ++ norm[s] = 1; ++ distributed++; ++ total -= count[s]; ++ continue; ++ } } ++ ToDistribute = (1 << tableLog) - distributed; ++ } ++ ++ if (distributed == maxSymbolValue+1) { ++ /* all values are pretty poor; ++ probably incompressible data (should have already been detected); ++ find max, then give all remaining points to max */ ++ U32 maxV = 0, maxC = 0; ++ for (s=0; s<=maxSymbolValue; s++) ++ if (count[s] > maxC) maxV=s, maxC=count[s]; ++ norm[maxV] += (short)ToDistribute; ++ return 0; ++ } ++ ++ if (total == 0) { ++ /* all of the symbols were low enough for the lowOne or lowThreshold */ ++ for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1)) ++ if (norm[s] > 0) ToDistribute--, norm[s]++; ++ return 0; ++ } ++ ++ { U64 const vStepLog = 62 - tableLog; ++ U64 const mid = (1ULL << (vStepLog-1)) - 1; ++ U64 const rStep = ((((U64)1<> vStepLog); ++ U32 const sEnd = (U32)(end >> vStepLog); ++ U32 const weight = sEnd - sStart; ++ if (weight < 1) ++ return ERROR(GENERIC); ++ norm[s] = (short)weight; ++ tmpTotal = end; ++ } } } ++ ++ return 0; ++} ++ ++ ++size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog, ++ const unsigned* count, size_t total, ++ unsigned maxSymbolValue) ++{ ++ /* Sanity checks */ ++ if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG; ++ if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported size */ ++ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported size */ ++ if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */ ++ ++ { U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 }; ++ U64 const scale = 62 - tableLog; ++ U64 const step = ((U64)1<<62) / total; /* <== here, one division ! 
*/ ++ U64 const vStep = 1ULL<<(scale-20); ++ int stillToDistribute = 1<> tableLog); ++ ++ for (s=0; s<=maxSymbolValue; s++) { ++ if (count[s] == total) return 0; /* rle special case */ ++ if (count[s] == 0) { normalizedCounter[s]=0; continue; } ++ if (count[s] <= lowThreshold) { ++ normalizedCounter[s] = -1; ++ stillToDistribute--; ++ } else { ++ short proba = (short)((count[s]*step) >> scale); ++ if (proba<8) { ++ U64 restToBeat = vStep * rtbTable[proba]; ++ proba += (count[s]*step) - ((U64)proba< restToBeat; ++ } ++ if (proba > largestP) largestP=proba, largest=s; ++ normalizedCounter[s] = proba; ++ stillToDistribute -= proba; ++ } } ++ if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) { ++ /* corner case, need another normalization method */ ++ size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue); ++ if (FSE_isError(errorCode)) return errorCode; ++ } ++ else normalizedCounter[largest] += (short)stillToDistribute; ++ } ++ ++#if 0 ++ { /* Print Table (debug) */ ++ U32 s; ++ U32 nTotal = 0; ++ for (s=0; s<=maxSymbolValue; s++) ++ printf("%3i: %4i \n", s, normalizedCounter[s]); ++ for (s=0; s<=maxSymbolValue; s++) ++ nTotal += abs(normalizedCounter[s]); ++ if (nTotal != (1U<>1); /* assumption : tableLog >= 1 */ ++ FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT); ++ unsigned s; ++ ++ /* Sanity checks */ ++ if (nbBits < 1) return ERROR(GENERIC); /* min size */ ++ ++ /* header */ ++ tableU16[-2] = (U16) nbBits; ++ tableU16[-1] = (U16) maxSymbolValue; ++ ++ /* Build table */ ++ for (s=0; s FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) { /* test bit 2 */ ++ FSE_encodeSymbol(&bitC, &CState2, *--ip); ++ FSE_encodeSymbol(&bitC, &CState1, *--ip); ++ FSE_FLUSHBITS(&bitC); ++ } ++ ++ /* 2 or 4 encoding per loop */ ++ while ( ip>istart ) { ++ ++ FSE_encodeSymbol(&bitC, &CState2, *--ip); ++ ++ if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 ) /* this test must be static */ ++ FSE_FLUSHBITS(&bitC); ++ ++ FSE_encodeSymbol(&bitC, &CState1, *--ip); ++ ++ if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) { /* this test must be static */ ++ FSE_encodeSymbol(&bitC, &CState2, *--ip); ++ FSE_encodeSymbol(&bitC, &CState1, *--ip); ++ } ++ ++ FSE_FLUSHBITS(&bitC); ++ } ++ ++ FSE_flushCState(&bitC, &CState2); ++ FSE_flushCState(&bitC, &CState1); ++ return BIT_closeCStream(&bitC); ++} ++ ++size_t FSE_compress_usingCTable (void* dst, size_t dstSize, ++ const void* src, size_t srcSize, ++ const FSE_CTable* ct) ++{ ++ unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize)); ++ ++ if (fast) ++ return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1); ++ else ++ return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0); ++} ++ ++ ++size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); } ++ ++#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return f ++#define CHECK_F(f) { CHECK_V_F(_var_err__, f); } ++ ++/* FSE_compress_wksp() : ++ * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`). 
++ * `wkspSize` size must be `(1< not compressible */ ++ if (maxCount < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */ ++ } ++ ++ tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue); ++ CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) ); ++ ++ /* Write table description header */ ++ { CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); ++ op += nc_err; ++ } ++ ++ /* Compress */ ++ CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) ); ++ { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) ); ++ if (cSize == 0) return 0; /* not enough space for compressed data */ ++ op += cSize; ++ } ++ ++ /* check compressibility */ ++ if ( (size_t)(op-ostart) >= srcSize-1 ) return 0; ++ ++ return op-ostart; ++} ++ ++ ++#endif /* FSE_COMMONDEFS_ONLY */ +diff --git a/lib/zstd/fse_decompress.c b/lib/zstd/fse_decompress.c +new file mode 100644 +index 0000000..2a35f17 +--- /dev/null ++++ b/lib/zstd/fse_decompress.c +@@ -0,0 +1,292 @@ ++/* ****************************************************************** ++ FSE : Finite State Entropy decoder ++ Copyright (C) 2013-2015, Yann Collet. ++ ++ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions are ++ met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above ++ copyright notice, this list of conditions and the following disclaimer ++ in the documentation and/or other materials provided with the ++ distribution. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++ You can contact the author at : ++ - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy ++ - Public forum : https://groups.google.com/forum/#!forum/lz4c ++****************************************************************** */ ++ ++ ++/* ************************************************************** ++* Compiler specifics ++****************************************************************/ ++#define FORCE_INLINE static __always_inline ++ ++ ++/* ************************************************************** ++* Includes ++****************************************************************/ ++#include ++#include /* memcpy, memset */ ++#include "bitstream.h" ++#include "fse.h" ++ ++ ++/* ************************************************************** ++* Error Management ++****************************************************************/ ++#define FSE_isError ERR_isError ++#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ ++ ++/* check and forward error code */ ++#define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; } ++ ++ ++/* ************************************************************** ++* Templates ++****************************************************************/ ++/* ++ designed to be included ++ for type-specific functions (template emulation in C) ++ Objective is to write these functions only once, for improved maintenance ++*/ ++ ++/* safety checks */ ++#ifndef FSE_FUNCTION_EXTENSION ++# error "FSE_FUNCTION_EXTENSION must be defined" ++#endif ++#ifndef FSE_FUNCTION_TYPE ++# error "FSE_FUNCTION_TYPE must be defined" ++#endif ++ ++/* Function names */ ++#define FSE_CAT(X,Y) X##Y ++#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) ++#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) ++ ++ ++/* Function templates */ ++ ++size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) ++{ ++ void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */ ++ FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr); ++ U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1]; ++ ++ U32 const maxSV1 = maxSymbolValue + 1; ++ U32 const tableSize = 1 << tableLog; ++ U32 highThreshold = tableSize-1; ++ ++ /* Sanity Checks */ ++ if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge); ++ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); ++ ++ /* Init, lay down lowprob symbols */ ++ { FSE_DTableHeader DTableH; ++ DTableH.tableLog = (U16)tableLog; ++ DTableH.fastMode = 1; ++ { S16 const largeLimit= (S16)(1 << (tableLog-1)); ++ U32 s; ++ for (s=0; s= largeLimit) DTableH.fastMode=0; ++ symbolNext[s] = normalizedCounter[s]; ++ } } } ++ memcpy(dt, &DTableH, sizeof(DTableH)); ++ } ++ ++ /* Spread symbols */ ++ { U32 const tableMask = tableSize-1; ++ U32 const step = FSE_TABLESTEP(tableSize); ++ U32 s, position = 0; ++ for (s=0; s highThreshold) position = (position + step) & tableMask; /* lowprob area */ ++ } } ++ if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ ++ } ++ ++ /* Build Decoding table */ ++ { U32 u; ++ for (u=0; utableLog = 0; ++ DTableH->fastMode = 0; ++ ++ cell->newState = 0; ++ cell->symbol = symbolValue; ++ cell->nbBits = 0; ++ ++ return 0; ++} ++ ++ ++size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) ++{ ++ void* ptr = dt; ++ FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; ++ void* dPtr = dt + 1; ++ 
FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr; ++ const unsigned tableSize = 1 << nbBits; ++ const unsigned tableMask = tableSize - 1; ++ const unsigned maxSV1 = tableMask+1; ++ unsigned s; ++ ++ /* Sanity checks */ ++ if (nbBits < 1) return ERROR(GENERIC); /* min size */ ++ ++ /* Build Decoding Table */ ++ DTableH->tableLog = (U16)nbBits; ++ DTableH->fastMode = 1; ++ for (s=0; s sizeof(bitD.bitContainer)*8) /* This test must be static */ ++ BIT_reloadDStream(&bitD); ++ ++ op[1] = FSE_GETSYMBOL(&state2); ++ ++ if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ ++ { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } } ++ ++ op[2] = FSE_GETSYMBOL(&state1); ++ ++ if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ ++ BIT_reloadDStream(&bitD); ++ ++ op[3] = FSE_GETSYMBOL(&state2); ++ } ++ ++ /* tail */ ++ /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */ ++ while (1) { ++ if (op>(omax-2)) return ERROR(dstSize_tooSmall); ++ *op++ = FSE_GETSYMBOL(&state1); ++ if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) { ++ *op++ = FSE_GETSYMBOL(&state2); ++ break; ++ } ++ ++ if (op>(omax-2)) return ERROR(dstSize_tooSmall); ++ *op++ = FSE_GETSYMBOL(&state2); ++ if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) { ++ *op++ = FSE_GETSYMBOL(&state1); ++ break; ++ } } ++ ++ return op-ostart; ++} ++ ++ ++size_t FSE_decompress_usingDTable(void* dst, size_t originalSize, ++ const void* cSrc, size_t cSrcSize, ++ const FSE_DTable* dt) ++{ ++ const void* ptr = dt; ++ const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr; ++ const U32 fastMode = DTableH->fastMode; ++ ++ /* select fast mode (static) */ ++ if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); ++ return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); ++} ++ ++ ++size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog) ++{ ++ const BYTE* const istart = (const BYTE*)cSrc; ++ const BYTE* ip = istart; ++ short counting[FSE_MAX_SYMBOL_VALUE+1]; ++ unsigned tableLog; ++ unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; ++ ++ /* normal FSE decoding mode */ ++ size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize); ++ if (FSE_isError(NCountLength)) return NCountLength; ++ //if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size; supposed to be already checked in NCountLength, only remaining case : NCountLength==cSrcSize */ ++ if (tableLog > maxLog) return ERROR(tableLog_tooLarge); ++ ip += NCountLength; ++ cSrcSize -= NCountLength; ++ ++ CHECK_F( FSE_buildDTable (workSpace, counting, maxSymbolValue, tableLog) ); ++ ++ return FSE_decompress_usingDTable (dst, dstCapacity, ip, cSrcSize, workSpace); /* always return, even if it is an error code */ ++} ++ ++ ++#endif /* FSE_COMMONDEFS_ONLY */ +diff --git a/lib/zstd/huf.h b/lib/zstd/huf.h +new file mode 100644 +index 0000000..f36aded +--- /dev/null ++++ b/lib/zstd/huf.h +@@ -0,0 +1,203 @@ ++/* ****************************************************************** ++ Huffman coder, part of New Generation Entropy library ++ header file ++ Copyright (C) 2013-2016, Yann Collet. 
++
++   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
++
++   Redistribution and use in source and binary forms, with or without
++   modification, are permitted provided that the following conditions are
++   met:
++
++     * Redistributions of source code must retain the above copyright
++   notice, this list of conditions and the following disclaimer.
++     * Redistributions in binary form must reproduce the above
++   copyright notice, this list of conditions and the following disclaimer
++   in the documentation and/or other materials provided with the
++   distribution.
++
++   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++
++   You can contact the author at :
++   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
++****************************************************************** */
++#ifndef HUF_H_298734234
++#define HUF_H_298734234
++
++
++/* *** Dependencies *** */
++#include <linux/types.h>   /* size_t */
++
++
++/* *** Tool functions *** */
++#define HUF_BLOCKSIZE_MAX (128 * 1024)   /**< maximum input size for a single block compressed with HUF_compress */
++size_t HUF_compressBound(size_t size);   /**< maximum compressed size (worst case) */
++
++/* Error Management */
++unsigned HUF_isError(size_t code);       /**< tells if a return value is an error code */
++
++
++/* *** Advanced function *** */
++
++/** HUF_compress4X_wksp() :
++*   Same as HUF_compress2(), but uses externally allocated `workSpace`, which must be a table of >= 1024 unsigned */
++size_t HUF_compress4X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);   /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
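++
++/*
++ * Usage sketch (illustrative only, error handling elided; `dst`, `src`
++ * and their sizes are caller-provided; HUF_TABLELOG_DEFAULT and
++ * HUF_WORKSPACE_SIZE_U32 are defined further below in this header) :
++ *
++ *     U32 wksp[HUF_WORKSPACE_SIZE_U32];
++ *     size_t const cSize = HUF_compress4X_wksp(dst, dstCapacity, src, srcSize,
++ *                              255, HUF_TABLELOG_DEFAULT,  // maxSymbolValue, tableLog
++ *                              wksp, sizeof(wksp));
++ *     if (HUF_isError(cSize)) { ... }   // cSize==0 means not compressible
++ */
++
++
++/* *** Dependencies *** */
++#include "mem.h"   /* U32 */
++
++
++/* *** Constants *** */
++#define HUF_TABLELOG_MAX  12     /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
++#define HUF_TABLELOG_DEFAULT  11 /* tableLog by default, when not specified */
++#define HUF_SYMBOLVALUE_MAX 255
++
++#define HUF_TABLELOG_ABSOLUTEMAX  15   /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
++#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
++# error "HUF_TABLELOG_MAX is too large !"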
++#endif ++ ++ ++/* **************************************** ++* Static allocation ++******************************************/ ++/* HUF buffer bounds */ ++#define HUF_CTABLEBOUND 129 ++#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true if incompressible pre-filtered with fast heuristic */ ++#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ ++ ++/* static allocation of HUF's Compression Table */ ++#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \ ++ U32 name##hb[maxSymbolValue+1]; \ ++ void* name##hv = &(name##hb); \ ++ HUF_CElt* name = (HUF_CElt*)(name##hv) /* no final ; */ ++ ++/* static allocation of HUF's DTable */ ++typedef U32 HUF_DTable; ++#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog))) ++#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \ ++ HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) } ++#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \ ++ HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) } ++ ++/* The workspace must have alignment at least 4 and be at least this large */ ++#define HUF_WORKSPACE_SIZE (6 << 10) ++#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32)) ++ ++ ++/* **************************************** ++* Advanced decompression functions ++******************************************/ ++size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */ ++size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */ ++size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ ++size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ ++ ++ ++/* **************************************** ++* HUF detailed API ++******************************************/ ++/*! ++HUF_compress() does the following: ++1. count symbol occurrence from source[] into table count[] using FSE_count() ++2. (optional) refine tableLog using HUF_optimalTableLog() ++3. build Huffman table from count using HUF_buildCTable() ++4. save Huffman table to memory buffer using HUF_writeCTable() ++5. encode the data stream using HUF_compress4X_usingCTable() ++ ++The following API allows targeting specific sub-functions for advanced tasks. ++For example, it's possible to compress several blocks using the same 'CTable', ++or to save and regenerate 'CTable' using external methods. ++*/ ++/* FSE_count() : find it within "fse.h" */ ++unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); ++typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */ ++size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog); ++size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); ++ ++typedef enum { ++ HUF_repeat_none, /**< Cannot use the previous table */ ++ HUF_repeat_check, /**< Can use the previous table but it must be checked. 
Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */ ++ HUF_repeat_valid /**< Can use the previous table and it is asumed to be valid */ ++ } HUF_repeat; ++/** HUF_compress4X_repeat() : ++* Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. ++* If it uses hufTable it does not modify hufTable or repeat. ++* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. ++* If preferRepeat then the old table will always be used if valid. */ ++size_t HUF_compress4X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ ++ ++/** HUF_buildCTable_wksp() : ++ * Same as HUF_buildCTable(), but using externally allocated scratch buffer. ++ * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned. ++ */ ++size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize); ++ ++/*! HUF_readStats() : ++ Read compact Huffman tree, saved by HUF_writeCTable(). ++ `huffWeight` is destination buffer. ++ @return : size read from `src` , or an error Code . ++ Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */ ++size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, ++ U32* nbSymbolsPtr, U32* tableLogPtr, ++ const void* src, size_t srcSize); ++ ++/** HUF_readCTable() : ++* Loading a CTable saved with HUF_writeCTable() */ ++size_t HUF_readCTable (HUF_CElt* CTable, unsigned maxSymbolValue, const void* src, size_t srcSize); ++ ++ ++/* ++HUF_decompress() does the following: ++1. select the decompression algorithm (X2, X4) based on pre-computed heuristics ++2. build Huffman table from save, using HUF_readDTableXn() ++3. decode 1 or 4 segments in parallel using HUF_decompressSXn_usingDTable ++*/ ++ ++/** HUF_selectDecoder() : ++* Tells which decoder is likely to decode faster, ++* based on a set of pre-determined metrics. ++* @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 . ++* Assumption : 0 < cSrcSize < dstSize <= 128 KB */ ++U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize); ++ ++size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize); ++size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize); ++ ++size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); ++size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); ++size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); ++ ++ ++/* single stream variants */ ++ ++size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ ++size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); ++/** HUF_compress1X_repeat() : ++* Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. ++* If it uses hufTable it does not modify hufTable or repeat. 
++* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. ++* If preferRepeat then the old table will always be used if valid. */ ++size_t HUF_compress1X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ ++ ++size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); ++size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ ++size_t HUF_decompress1X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ ++ ++size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /**< automatic selection of sing or double symbol decoder, based on DTable */ ++size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); ++size_t HUF_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); ++ ++#endif /* HUF_H_298734234 */ +diff --git a/lib/zstd/huf_compress.c b/lib/zstd/huf_compress.c +new file mode 100644 +index 0000000..a1a1d45 +--- /dev/null ++++ b/lib/zstd/huf_compress.c +@@ -0,0 +1,644 @@ ++/* ****************************************************************** ++ Huffman encoder, part of New Generation Entropy library ++ Copyright (C) 2013-2016, Yann Collet. ++ ++ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions are ++ met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above ++ copyright notice, this list of conditions and the following disclaimer ++ in the documentation and/or other materials provided with the ++ distribution. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++
++   You can contact the author at :
++   - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
++   - Public forum : https://groups.google.com/forum/#!forum/lz4c
++****************************************************************** */
++
++
++/* **************************************************************
++*  Includes
++****************************************************************/
++#include <linux/string.h> /* memcpy, memset */
++#include "bitstream.h"
++#include "fse.h"       /* header compression */
++#include "huf.h"
++
++
++/* **************************************************************
++*  Error Management
++****************************************************************/
++#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
++#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return f
++#define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }
++
++
++/* **************************************************************
++*  Utils
++****************************************************************/
++unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
++{
++    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
++}
++
++
++/* *******************************************************
++*  HUF : Huffman block compression
++*********************************************************/
++/* HUF_compressWeights() :
++ * Same as FSE_compress(), but dedicated to huff0's weights compression.
++ * The use case needs much less stack memory.
++ * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
++ */
++#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
++size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize)
++{
++    BYTE* const ostart = (BYTE*) dst;
++    BYTE* op = ostart;
++    BYTE* const oend = ostart + dstSize;
++
++    U32 maxSymbolValue = HUF_TABLELOG_MAX;
++    U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
++
++    FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
++    BYTE scratchBuffer[1<<MAX_FSE_TABLELOG_FOR_HUFF_HEADER];
++
++    U32 count[HUF_TABLELOG_MAX+1];
++    S16 norm[HUF_TABLELOG_MAX+1];
++
++    /* init conditions */
++    if (wtSize <= 1) return 0;   /* Not compressible */
++
++    /* Scan input and build symbol stats */
++    {   CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize) );
++        if (maxCount == wtSize) return 1;   /* only a single symbol in src : rle */
++        if (maxCount == 1) return 0;        /* each symbol present maximum once => not compressible */
++    }
++
++    tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
++    CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) );
++
++    /* Write table description header */
++    {   CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
++        op += hSize;
++    }
++
++    /* Compress */
++    CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) );
++    {   CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) );
++        if (cSize == 0) return 0;   /* not enough space for compressed data */
++        op += cSize;
++    }
++
++    return op-ostart;
++}
++
++
++struct HUF_CElt_s {
++    U16  val;
++    BYTE nbBits;
++};   /* typedef'd to HUF_CElt within "huf.h" */
++
++/*! HUF_writeCTable() :
++    `CTable` : Huffman tree to save, using huf representation.
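++    Format note (as implemented below) : the tree is serialized as one weight
++    per symbol. HUF_compressWeights() first tries to FSE-compress the weight
++    sequence, storing its size in the leading header byte; when that gains
++    nothing, weights are written raw as 4-bit pairs, with leading header byte
++    128 + (maxSymbolValue-1).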
++    @return : size of saved CTable */
++size_t HUF_writeCTable (void* dst, size_t maxDstSize,
++                        const HUF_CElt* CTable, U32 maxSymbolValue, U32 huffLog)
++{
++    BYTE bitsToWeight[HUF_TABLELOG_MAX + 1];   /* precomputed conversion table */
++    BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
++    BYTE* op = (BYTE*)dst;
++    U32 n;
++
++    /* check conditions */
++    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
++
++    /* convert to weight */
++    bitsToWeight[0] = 0;
++    for (n=1; n<huffLog+1; n++)
++        bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
++    for (n=0; n<maxSymbolValue; n++)
++        huffWeight[n] = bitsToWeight[CTable[n].nbBits];
++
++    /* attempt weights compression by FSE */
++    {   CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) );
++        if ((hSize>1) & (hSize < maxSymbolValue/2)) {   /* FSE compressed */
++            op[0] = (BYTE)hSize;
++            return hSize+1;
++    }   }
++
++    /* write raw values as 4-bits (max : 15) */
++    if (maxSymbolValue > (256-128)) return ERROR(GENERIC);   /* should not happen : likely means source cannot be compressed */
++    if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall);   /* not enough space within dst buffer */
++    op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
++    huffWeight[maxSymbolValue] = 0;   /* to be sure it doesn't cause msan issue in final combination */
++    for (n=0; n<maxSymbolValue; n+=2)
++        op[(n/2)+1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n+1]);
++    return ((maxSymbolValue+1)/2) + 1;
++}
++
++
++size_t HUF_readCTable (HUF_CElt* CTable, U32 maxSymbolValue, const void* src, size_t srcSize)
++{
++    BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];   /* init not required, even though some analyzer complain */
++    U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];   /* large enough for values from 0 to 16 */
++    U32 tableLog = 0;
++    U32 nbSymbols = 0;
++
++    /* get symbol weights */
++    CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
++
++    /* check result */
++    if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
++    if (nbSymbols > maxSymbolValue+1) return ERROR(maxSymbolValue_tooSmall);
++
++    /* Prepare base value per rank */
++    {   U32 n, nextRankStart = 0;
++        for (n=1; n<=tableLog; n++) {
++            U32 current = nextRankStart;
++            nextRankStart += (rankVal[n] << (n-1));
++            rankVal[n] = current;
++    }   }
++
++    /* fill nbBits */
++    {   U32 n; for (n=0; n<nbSymbols; n++) {
++            const U32 w = huffWeight[n];
++            CTable[n].nbBits = (BYTE)(tableLog + 1 - w);
++    }   }
++
++    /* fill val */
++    {   U16 nbPerRank[HUF_TABLELOG_MAX+2]  = {0};  /* support w=0=>n=tableLog+1 */
++        U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
++        { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; }
++        /* determine starting value per rank */
++        valPerRank[tableLog+1] = 0;   /* for w==0 */
++        {   U16 min = 0;
++            U32 n; for (n=tableLog; n>0; n--) {  /* start at n=tablelog <-> w=1 */
++                valPerRank[n] = min;   /* get starting value within each rank */
++                min += nbPerRank[n];
++                min >>= 1;
++        }   }
++        /* assign value within rank, symbol order */
++        { U32 n; for (n=0; n<=maxSymbolValue; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; }
++    }
++
++    return readSize;
++}
++
++
++typedef struct nodeElt_s {
++    U32 count;
++    U16 parent;
++    BYTE byte;
++    BYTE nbBits;
++} nodeElt;
++
++static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
++{
++    const U32 largestBits = huffNode[lastNonNull].nbBits;
++    if (largestBits <= maxNbBits) return largestBits;   /* early exit : no elt > maxNbBits */
++
++    /* there are several too large elements (at least >= 2) */
++    {   int totalCost = 0;
++        const U32 baseCost = 1 << (largestBits - maxNbBits);
++        U32 n = lastNonNull;
++
++        while (huffNode[n].nbBits > maxNbBits) {
++            totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
++            huffNode[n].nbBits = (BYTE)maxNbBits;
++            n --;
++        }   /* n stops at huffNode[n].nbBits <= maxNbBits */
++        while (huffNode[n].nbBits == maxNbBits) n--;   /* n ends at index of smallest symbol using < maxNbBits */
++
++        /* renorm totalCost */
++        totalCost >>= (largestBits - maxNbBits);   /* note : totalCost is necessarily a multiple of baseCost */
++
++        /* repay normalized cost */
++        {   U32 const noSymbol = 0xF0F0F0F0;
++            U32 rankLast[HUF_TABLELOG_MAX+2];
++            int pos;
++
++            /* Get pos of last (smallest) symbol per rank */
++            memset(rankLast, 0xF0, sizeof(rankLast));
++            {   U32 currentNbBits = maxNbBits;
++                for (pos=n ; pos >= 0; pos--) {
++                    if (huffNode[pos].nbBits >= currentNbBits) continue;
++                    currentNbBits = huffNode[pos].nbBits;   /* < maxNbBits */
++                    rankLast[maxNbBits-currentNbBits] = pos;
++            }   }
++
++            while (totalCost > 0) {
++                U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1;
++                for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
++                    U32 highPos = rankLast[nBitsToDecrease];
++                    U32 lowPos = rankLast[nBitsToDecrease-1];
++                    if (highPos ==
noSymbol) continue; ++ if (lowPos == noSymbol) break; ++ { U32 const highTotal = huffNode[highPos].count; ++ U32 const lowTotal = 2 * huffNode[lowPos].count; ++ if (highTotal <= lowTotal) break; ++ } } ++ /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */ ++ while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol)) /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */ ++ nBitsToDecrease ++; ++ totalCost -= 1 << (nBitsToDecrease-1); ++ if (rankLast[nBitsToDecrease-1] == noSymbol) ++ rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */ ++ huffNode[rankLast[nBitsToDecrease]].nbBits ++; ++ if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */ ++ rankLast[nBitsToDecrease] = noSymbol; ++ else { ++ rankLast[nBitsToDecrease]--; ++ if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease) ++ rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */ ++ } } /* while (totalCost > 0) */ ++ ++ while (totalCost < 0) { /* Sometimes, cost correction overshoot */ ++ if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ ++ while (huffNode[n].nbBits == maxNbBits) n--; ++ huffNode[n+1].nbBits--; ++ rankLast[1] = n+1; ++ totalCost++; ++ continue; ++ } ++ huffNode[ rankLast[1] + 1 ].nbBits--; ++ rankLast[1]++; ++ totalCost ++; ++ } } } /* there are several too large elements (at least >= 2) */ ++ ++ return maxNbBits; ++} ++ ++ ++typedef struct { ++ U32 base; ++ U32 current; ++} rankPos; ++ ++static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue) ++{ ++ rankPos rank[32]; ++ U32 n; ++ ++ memset(rank, 0, sizeof(rank)); ++ for (n=0; n<=maxSymbolValue; n++) { ++ U32 r = BIT_highbit32(count[n] + 1); ++ rank[r].base ++; ++ } ++ for (n=30; n>0; n--) rank[n-1].base += rank[n].base; ++ for (n=0; n<32; n++) rank[n].current = rank[n].base; ++ for (n=0; n<=maxSymbolValue; n++) { ++ U32 const c = count[n]; ++ U32 const r = BIT_highbit32(c+1) + 1; ++ U32 pos = rank[r].current++; ++ while ((pos > rank[r].base) && (c > huffNode[pos-1].count)) huffNode[pos]=huffNode[pos-1], pos--; ++ huffNode[pos].count = c; ++ huffNode[pos].byte = (BYTE)n; ++ } ++} ++ ++ ++/** HUF_buildCTable_wksp() : ++ * Same as HUF_buildCTable(), but using externally allocated scratch buffer. ++ * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned. 
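++ *  (Sizing note : 1024 unsigned = 4 KB, i.e. exactly a huffNodeTable of
++ *  2*HUF_SYMBOLVALUE_MAX+2 = 512 nodeElt entries of 8 bytes each.)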
++ */ ++#define STARTNODE (HUF_SYMBOLVALUE_MAX+1) ++typedef nodeElt huffNodeTable[2*HUF_SYMBOLVALUE_MAX+1 +1]; ++size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize) ++{ ++ nodeElt* const huffNode0 = (nodeElt*)workSpace; ++ nodeElt* const huffNode = huffNode0+1; ++ U32 n, nonNullRank; ++ int lowS, lowN; ++ U16 nodeNb = STARTNODE; ++ U32 nodeRoot; ++ ++ /* safety checks */ ++ if (wkspSize < sizeof(huffNodeTable)) return ERROR(GENERIC); /* workSpace is not large enough */ ++ if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT; ++ if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(GENERIC); ++ memset(huffNode0, 0, sizeof(huffNodeTable)); ++ ++ /* sort, decreasing order */ ++ HUF_sort(huffNode, count, maxSymbolValue); ++ ++ /* init for parents */ ++ nonNullRank = maxSymbolValue; ++ while(huffNode[nonNullRank].count == 0) nonNullRank--; ++ lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb; ++ huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count; ++ huffNode[lowS].parent = huffNode[lowS-1].parent = nodeNb; ++ nodeNb++; lowS-=2; ++ for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30); ++ huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */ ++ ++ /* create parents */ ++ while (nodeNb <= nodeRoot) { ++ U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; ++ U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; ++ huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count; ++ huffNode[n1].parent = huffNode[n2].parent = nodeNb; ++ nodeNb++; ++ } ++ ++ /* distribute weights (unlimited tree height) */ ++ huffNode[nodeRoot].nbBits = 0; ++ for (n=nodeRoot-1; n>=STARTNODE; n--) ++ huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; ++ for (n=0; n<=nonNullRank; n++) ++ huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; ++ ++ /* enforce maxTableLog */ ++ maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits); ++ ++ /* fill result into tree (val, nbBits) */ ++ { U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0}; ++ U16 valPerRank[HUF_TABLELOG_MAX+1] = {0}; ++ if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */ ++ for (n=0; n<=nonNullRank; n++) ++ nbPerRank[huffNode[n].nbBits]++; ++ /* determine stating value per rank */ ++ { U16 min = 0; ++ for (n=maxNbBits; n>0; n--) { ++ valPerRank[n] = min; /* get starting value within each rank */ ++ min += nbPerRank[n]; ++ min >>= 1; ++ } } ++ for (n=0; n<=maxSymbolValue; n++) ++ tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */ ++ for (n=0; n<=maxSymbolValue; n++) ++ tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */ ++ } ++ ++ return maxNbBits; ++} ++ ++static size_t HUF_estimateCompressedSize(HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) ++{ ++ size_t nbBits = 0; ++ int s; ++ for (s = 0; s <= (int)maxSymbolValue; ++s) { ++ nbBits += CTable[s].nbBits * count[s]; ++ } ++ return nbBits >> 3; ++} ++ ++static int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) { ++ int bad = 0; ++ int s; ++ for (s = 0; s <= (int)maxSymbolValue; ++s) { ++ bad |= (count[s] != 0) & (CTable[s].nbBits == 0); ++ } ++ return !bad; ++} ++ ++static void HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable) ++{ ++ BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits); ++} ++ 
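++/* Example : canonical value assignment, as performed by the valPerRank scans
++ * above. For code lengths {A:1, B:2, C:3, D:3} (maxNbBits==3),
++ * nbPerRank[1..3] = {1,1,2}. Walking ranks from longest to shortest with
++ * min=0 :
++ *    valPerRank[3] = 0, then min = (0+2)>>1 = 1
++ *    valPerRank[2] = 1, then min = (1+1)>>1 = 1
++ *    valPerRank[1] = 1, then min = (1+1)>>1 = 1
++ * yielding codes C=000, D=001, B=01, A=1 : a complete, prefix-free set. */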
++size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); } ++ ++#define HUF_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s)) ++ ++#define HUF_FLUSHBITS_1(stream) \ ++ if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream) ++ ++#define HUF_FLUSHBITS_2(stream) \ ++ if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream) ++ ++size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) ++{ ++ const BYTE* ip = (const BYTE*) src; ++ BYTE* const ostart = (BYTE*)dst; ++ BYTE* const oend = ostart + dstSize; ++ BYTE* op = ostart; ++ size_t n; ++ const unsigned fast = (dstSize >= HUF_BLOCKBOUND(srcSize)); ++ BIT_CStream_t bitC; ++ ++ /* init */ ++ if (dstSize < 8) return 0; /* not enough space to compress */ ++ { size_t const initErr = BIT_initCStream(&bitC, op, oend-op); ++ if (HUF_isError(initErr)) return 0; } ++ ++ n = srcSize & ~3; /* join to mod 4 */ ++ switch (srcSize & 3) ++ { ++ case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable); ++ HUF_FLUSHBITS_2(&bitC); ++ case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable); ++ HUF_FLUSHBITS_1(&bitC); ++ case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable); ++ HUF_FLUSHBITS(&bitC); ++ case 0 : ++ default: ; ++ } ++ ++ for (; n>0; n-=4) { /* note : n&3==0 at this stage */ ++ HUF_encodeSymbol(&bitC, ip[n- 1], CTable); ++ HUF_FLUSHBITS_1(&bitC); ++ HUF_encodeSymbol(&bitC, ip[n- 2], CTable); ++ HUF_FLUSHBITS_2(&bitC); ++ HUF_encodeSymbol(&bitC, ip[n- 3], CTable); ++ HUF_FLUSHBITS_1(&bitC); ++ HUF_encodeSymbol(&bitC, ip[n- 4], CTable); ++ HUF_FLUSHBITS(&bitC); ++ } ++ ++ return BIT_closeCStream(&bitC); ++} ++ ++ ++size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) ++{ ++ size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */ ++ const BYTE* ip = (const BYTE*) src; ++ const BYTE* const iend = ip + srcSize; ++ BYTE* const ostart = (BYTE*) dst; ++ BYTE* const oend = ostart + dstSize; ++ BYTE* op = ostart; ++ ++ if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */ ++ if (srcSize < 12) return 0; /* no saving possible : too small input */ ++ op += 6; /* jumpTable */ ++ ++ { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); ++ if (cSize==0) return 0; ++ MEM_writeLE16(ostart, (U16)cSize); ++ op += cSize; ++ } ++ ++ ip += segmentSize; ++ { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); ++ if (cSize==0) return 0; ++ MEM_writeLE16(ostart+2, (U16)cSize); ++ op += cSize; ++ } ++ ++ ip += segmentSize; ++ { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); ++ if (cSize==0) return 0; ++ MEM_writeLE16(ostart+4, (U16)cSize); ++ op += cSize; ++ } ++ ++ ip += segmentSize; ++ { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, iend-ip, CTable) ); ++ if (cSize==0) return 0; ++ op += cSize; ++ } ++ ++ return op-ostart; ++} ++ ++ ++static size_t HUF_compressCTable_internal( ++ BYTE* const ostart, BYTE* op, BYTE* const oend, ++ const void* src, size_t srcSize, ++ unsigned singleStream, const HUF_CElt* CTable) ++{ ++ size_t const cSize = singleStream ? 
++ HUF_compress1X_usingCTable(op, oend - op, src, srcSize, CTable) : ++ HUF_compress4X_usingCTable(op, oend - op, src, srcSize, CTable); ++ if (HUF_isError(cSize)) { return cSize; } ++ if (cSize==0) { return 0; } /* uncompressible */ ++ op += cSize; ++ /* check compressibility */ ++ if ((size_t)(op-ostart) >= srcSize-1) { return 0; } ++ return op-ostart; ++} ++ ++ ++/* `workSpace` must a table of at least 1024 unsigned */ ++static size_t HUF_compress_internal ( ++ void* dst, size_t dstSize, ++ const void* src, size_t srcSize, ++ unsigned maxSymbolValue, unsigned huffLog, ++ unsigned singleStream, ++ void* workSpace, size_t wkspSize, ++ HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat) ++{ ++ BYTE* const ostart = (BYTE*)dst; ++ BYTE* const oend = ostart + dstSize; ++ BYTE* op = ostart; ++ ++ U32* count; ++ size_t const countSize = sizeof(U32) * (HUF_SYMBOLVALUE_MAX + 1); ++ HUF_CElt* CTable; ++ size_t const CTableSize = sizeof(HUF_CElt) * (HUF_SYMBOLVALUE_MAX + 1); ++ ++ /* checks & inits */ ++ if (wkspSize < sizeof(huffNodeTable) + countSize + CTableSize) return ERROR(GENERIC); ++ if (!srcSize) return 0; /* Uncompressed (note : 1 means rle, so first byte must be correct) */ ++ if (!dstSize) return 0; /* cannot fit within dst budget */ ++ if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */ ++ if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); ++ if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX; ++ if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT; ++ ++ count = (U32*)workSpace; ++ workSpace = (BYTE*)workSpace + countSize; ++ wkspSize -= countSize; ++ CTable = (HUF_CElt*)workSpace; ++ workSpace = (BYTE*)workSpace + CTableSize; ++ wkspSize -= CTableSize; ++ ++ /* Heuristic : If we don't need to check the validity of the old table use the old table for small inputs */ ++ if (preferRepeat && repeat && *repeat == HUF_repeat_valid) { ++ return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable); ++ } ++ ++ /* Scan input and build symbol stats */ ++ { CHECK_V_F(largest, FSE_count_wksp (count, &maxSymbolValue, (const BYTE*)src, srcSize, (U32*)workSpace) ); ++ if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */ ++ if (largest <= (srcSize >> 7)+1) return 0; /* Fast heuristic : not compressible enough */ ++ } ++ ++ /* Check validity of previous table */ ++ if (repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, count, maxSymbolValue)) { ++ *repeat = HUF_repeat_none; ++ } ++ /* Heuristic : use existing table for small inputs */ ++ if (preferRepeat && repeat && *repeat != HUF_repeat_none) { ++ return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable); ++ } ++ ++ /* Build Huffman Tree */ ++ huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); ++ { CHECK_V_F(maxBits, HUF_buildCTable_wksp (CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize) ); ++ huffLog = (U32)maxBits; ++ /* Zero the unused symbols so we can check it for validity */ ++ memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt)); ++ } ++ ++ /* Write table description header */ ++ { CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, CTable, maxSymbolValue, huffLog) ); ++ /* Check if using the previous table will be beneficial */ ++ if (repeat && *repeat != HUF_repeat_none) { ++ size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue); ++ size_t const newSize = 
HUF_estimateCompressedSize(CTable, count, maxSymbolValue); ++ if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) { ++ return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable); ++ } ++ } ++ /* Use the new table */ ++ if (hSize + 12ul >= srcSize) { return 0; } ++ op += hSize; ++ if (repeat) { *repeat = HUF_repeat_none; } ++ if (oldHufTable) { memcpy(oldHufTable, CTable, CTableSize); } /* Save the new table */ ++ } ++ return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, CTable); ++} ++ ++ ++size_t HUF_compress1X_wksp (void* dst, size_t dstSize, ++ const void* src, size_t srcSize, ++ unsigned maxSymbolValue, unsigned huffLog, ++ void* workSpace, size_t wkspSize) ++{ ++ return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, NULL, NULL, 0); ++} ++ ++size_t HUF_compress1X_repeat (void* dst, size_t dstSize, ++ const void* src, size_t srcSize, ++ unsigned maxSymbolValue, unsigned huffLog, ++ void* workSpace, size_t wkspSize, ++ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat) ++{ ++ return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, hufTable, repeat, preferRepeat); ++} ++ ++size_t HUF_compress4X_wksp (void* dst, size_t dstSize, ++ const void* src, size_t srcSize, ++ unsigned maxSymbolValue, unsigned huffLog, ++ void* workSpace, size_t wkspSize) ++{ ++ return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, NULL, NULL, 0); ++} ++ ++size_t HUF_compress4X_repeat (void* dst, size_t dstSize, ++ const void* src, size_t srcSize, ++ unsigned maxSymbolValue, unsigned huffLog, ++ void* workSpace, size_t wkspSize, ++ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat) ++{ ++ return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, hufTable, repeat, preferRepeat); ++} +diff --git a/lib/zstd/huf_decompress.c b/lib/zstd/huf_decompress.c +new file mode 100644 +index 0000000..f73223c +--- /dev/null ++++ b/lib/zstd/huf_decompress.c +@@ -0,0 +1,835 @@ ++/* ****************************************************************** ++ Huffman decoder, part of New Generation Entropy library ++ Copyright (C) 2013-2016, Yann Collet. ++ ++ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions are ++ met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above ++ copyright notice, this list of conditions and the following disclaimer ++ in the documentation and/or other materials provided with the ++ distribution. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. 
 IN NO EVENT SHALL THE COPYRIGHT
++   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++
++   You can contact the author at :
++   - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
++   - Public forum : https://groups.google.com/forum/#!forum/lz4c
++****************************************************************** */
++
++/* **************************************************************
++*  Compiler specifics
++****************************************************************/
++#define FORCE_INLINE static __always_inline
++
++
++/* **************************************************************
++*  Dependencies
++****************************************************************/
++#include <linux/compiler.h>
++#include <linux/string.h> /* memcpy, memset */
++#include "bitstream.h"  /* BIT_* */
++#include "fse.h"        /* header compression */
++#include "huf.h"
++
++
++/* **************************************************************
++*  Error Management
++****************************************************************/
++#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
++
++
++/*-***************************/
++/*  generic DTableDesc       */
++/*-***************************/
++
++typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;
++
++static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
++{
++    DTableDesc dtd;
++    memcpy(&dtd, table, sizeof(dtd));
++    return dtd;
++}
++
++
++/*-***************************/
++/*  single-symbol decoding   */
++/*-***************************/
++
++typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2;   /* single-symbol decoding */
++
++size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize)
++{
++    BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
++    U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];   /* large enough for values from 0 to 16 */
++    U32 tableLog = 0;
++    U32 nbSymbols = 0;
++    size_t iSize;
++    void* const dtPtr = DTable + 1;
++    HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
++
++    HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
++    /* memset(huffWeight, 0, sizeof(huffWeight)); */   /* is not necessary, even though some analyzer complain ... */
++
++    iSize = HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
++    if (HUF_isError(iSize)) return iSize;
++
++    /* Table header */
++    {   DTableDesc dtd = HUF_getDTableDesc(DTable);
++        if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge);   /* DTable too small, Huffman tree cannot fit in */
++        dtd.tableType = 0;
++        dtd.tableLog = (BYTE)tableLog;
++        memcpy(DTable, &dtd, sizeof(dtd));
++    }
++
++    /* Calculate starting value for each rank */
++    {   U32 n, nextRankStart = 0;
++        for (n=1; n<tableLog+1; n++) {
++            U32 current = nextRankStart;
++            nextRankStart += (rankVal[n] << (n-1));
++            rankVal[n] = current;
++    }   }
++
++    /* fill DTable */
++    {   U32 n;
++        for (n=0; n<nbSymbols; n++) {
++            U32 const w = huffWeight[n];
++            U32 const length = (1 << w) >> 1;
++            U32 u;
++            HUF_DEltX2 D;
++            D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
++            for (u = rankVal[w]; u < rankVal[w] + length; u++)
++                dt[u] = D;
++            rankVal[w] += length;
++    }   }
++
++    return iSize;
++}
++
++
++static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
++{
++    size_t const val = BIT_lookBitsFast(Dstream, dtLog);   /* note : dtLog >= 1 */
++    BYTE const c = dt[val].byte;
++    BIT_skipBits(Dstream, dt[val].nbBits);
++    return c;
++}
++
++#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
++    *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
++
++#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
++    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
++        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
++
++#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
++    if (MEM_64bits()) \
++        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
++
++FORCE_INLINE size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
++{
++    BYTE* const pStart = p;
++
++    /* up to 4 symbols at a time */
++    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4)) {
++        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
++        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
++        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
++        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
++    }
++
++    /* closer to the end */
++    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
++        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
++
++    /* no more data to retrieve from bitstream, hence no need to reload */
++    while (p < pEnd)
++        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
++
++    return pEnd-pStart;
++}
++
++static size_t HUF_decompress1X2_usingDTable_internal(
++          void* dst,  size_t dstSize,
++    const void* cSrc, size_t cSrcSize,
++    const HUF_DTable* DTable)
++{
++    BYTE* op = (BYTE*)dst;
++    BYTE* const oend = op + dstSize;
++    const void* dtPtr = DTable + 1;
++    const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
++    BIT_DStream_t bitD;
++    DTableDesc const dtd = HUF_getDTableDesc(DTable);
++    U32 const dtLog = dtd.tableLog;
++
++    {   size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
++        if (HUF_isError(errorCode)) return errorCode; }
++
++    HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog);
++
++    /* check */
++    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
++
++    return dstSize;
++}
++
++size_t HUF_decompress1X2_usingDTable(
++          void* dst,  size_t dstSize,
++    const void* cSrc, size_t cSrcSize,
++    const HUF_DTable* DTable)
++{
++    DTableDesc dtd = HUF_getDTableDesc(DTable);
++    if (dtd.tableType != 0) return ERROR(GENERIC);
++    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
++}
++
++size_t HUF_decompress1X2_DCtx (HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
++{
++    const BYTE* ip = (const BYTE*) cSrc;
++
++    size_t const hSize = HUF_readDTableX2 (DCtx, cSrc, cSrcSize);
++    if (HUF_isError(hSize)) return hSize;
++    if (hSize >= cSrcSize) return
ERROR(srcSize_wrong); ++ ip += hSize; cSrcSize -= hSize; ++ ++ return HUF_decompress1X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx); ++} ++ ++ ++static size_t HUF_decompress4X2_usingDTable_internal( ++ void* dst, size_t dstSize, ++ const void* cSrc, size_t cSrcSize, ++ const HUF_DTable* DTable) ++{ ++ /* Check */ ++ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ ++ ++ { const BYTE* const istart = (const BYTE*) cSrc; ++ BYTE* const ostart = (BYTE*) dst; ++ BYTE* const oend = ostart + dstSize; ++ const void* const dtPtr = DTable + 1; ++ const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr; ++ ++ /* Init */ ++ BIT_DStream_t bitD1; ++ BIT_DStream_t bitD2; ++ BIT_DStream_t bitD3; ++ BIT_DStream_t bitD4; ++ size_t const length1 = MEM_readLE16(istart); ++ size_t const length2 = MEM_readLE16(istart+2); ++ size_t const length3 = MEM_readLE16(istart+4); ++ size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); ++ const BYTE* const istart1 = istart + 6; /* jumpTable */ ++ const BYTE* const istart2 = istart1 + length1; ++ const BYTE* const istart3 = istart2 + length2; ++ const BYTE* const istart4 = istart3 + length3; ++ const size_t segmentSize = (dstSize+3) / 4; ++ BYTE* const opStart2 = ostart + segmentSize; ++ BYTE* const opStart3 = opStart2 + segmentSize; ++ BYTE* const opStart4 = opStart3 + segmentSize; ++ BYTE* op1 = ostart; ++ BYTE* op2 = opStart2; ++ BYTE* op3 = opStart3; ++ BYTE* op4 = opStart4; ++ U32 endSignal; ++ DTableDesc const dtd = HUF_getDTableDesc(DTable); ++ U32 const dtLog = dtd.tableLog; ++ ++ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ ++ { size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1); ++ if (HUF_isError(errorCode)) return errorCode; } ++ { size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2); ++ if (HUF_isError(errorCode)) return errorCode; } ++ { size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3); ++ if (HUF_isError(errorCode)) return errorCode; } ++ { size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4); ++ if (HUF_isError(errorCode)) return errorCode; } ++ ++ /* 16-32 symbols per loop (4-8 symbols per stream) */ ++ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); ++ for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; ) { ++ HUF_DECODE_SYMBOLX2_2(op1, &bitD1); ++ HUF_DECODE_SYMBOLX2_2(op2, &bitD2); ++ HUF_DECODE_SYMBOLX2_2(op3, &bitD3); ++ HUF_DECODE_SYMBOLX2_2(op4, &bitD4); ++ HUF_DECODE_SYMBOLX2_1(op1, &bitD1); ++ HUF_DECODE_SYMBOLX2_1(op2, &bitD2); ++ HUF_DECODE_SYMBOLX2_1(op3, &bitD3); ++ HUF_DECODE_SYMBOLX2_1(op4, &bitD4); ++ HUF_DECODE_SYMBOLX2_2(op1, &bitD1); ++ HUF_DECODE_SYMBOLX2_2(op2, &bitD2); ++ HUF_DECODE_SYMBOLX2_2(op3, &bitD3); ++ HUF_DECODE_SYMBOLX2_2(op4, &bitD4); ++ HUF_DECODE_SYMBOLX2_0(op1, &bitD1); ++ HUF_DECODE_SYMBOLX2_0(op2, &bitD2); ++ HUF_DECODE_SYMBOLX2_0(op3, &bitD3); ++ HUF_DECODE_SYMBOLX2_0(op4, &bitD4); ++ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); ++ } ++ ++ /* check corruption */ ++ if (op1 > opStart2) return ERROR(corruption_detected); ++ if (op2 > opStart3) return ERROR(corruption_detected); ++ if (op3 > opStart4) return ERROR(corruption_detected); ++ /* note : op4 supposed already verified within main loop */ ++ ++ /* finish bitStreams one by one */ ++ HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, 
 dtLog);
++        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
++        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
++        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
++
++        /* check */
++        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
++        if (!endSignal) return ERROR(corruption_detected);
++
++        /* decoded size */
++        return dstSize;
++    }
++}
++
++
++size_t HUF_decompress4X2_usingDTable(
++          void* dst,  size_t dstSize,
++    const void* cSrc, size_t cSrcSize,
++    const HUF_DTable* DTable)
++{
++    DTableDesc dtd = HUF_getDTableDesc(DTable);
++    if (dtd.tableType != 0) return ERROR(GENERIC);
++    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
++}
++
++
++size_t HUF_decompress4X2_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
++{
++    const BYTE* ip = (const BYTE*) cSrc;
++
++    size_t const hSize = HUF_readDTableX2 (dctx, cSrc, cSrcSize);
++    if (HUF_isError(hSize)) return hSize;
++    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
++    ip += hSize; cSrcSize -= hSize;
++
++    return HUF_decompress4X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, dctx);
++}
++
++/* *************************/
++/* double-symbols decoding */
++/* *************************/
++typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4;  /* double-symbols decoding */
++
++typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
++
++/* HUF_fillDTableX4Level2() :
++ * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
++static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
++                           const U32* rankValOrigin, const int minWeight,
++                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
++                           U32 nbBitsBaseline, U16 baseSeq)
++{
++    HUF_DEltX4 DElt;
++    U32 rankVal[HUF_TABLELOG_MAX + 1];
++
++    /* get pre-calculated rankVal */
++    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
++
++    /* fill skipped values */
++    if (minWeight>1) {
++        U32 i, skipSize = rankVal[minWeight];
++        MEM_writeLE16(&(DElt.sequence), baseSeq);
++        DElt.nbBits   = (BYTE)(consumed);
++        DElt.length   = 1;
++        for (i = 0; i < skipSize; i++)
++            DTable[i] = DElt;
++    }
++
++    /* fill DTable */
++    {   U32 s; for (s=0; s<sortedListSize; s++) {   /* note : sortedSymbols already skipped */
++            const U32 symbol = sortedSymbols[s].symbol;
++            const U32 weight = sortedSymbols[s].weight;
++            const U32 nbBits = nbBitsBaseline - weight;
++            const U32 length = 1 << (sizeLog-nbBits);
++            const U32 start = rankVal[weight];
++            U32 i = start;
++            const U32 end = start + length;
++
++            MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
++            DElt.nbBits = (BYTE)(nbBits + consumed);
++            DElt.length = 2;
++            do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */
++
++            rankVal[weight] += length;
++    }   }
++}
++
++typedef U32 rankVal_t[HUF_TABLELOG_MAX][HUF_TABLELOG_MAX + 1];
++
++static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
++                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
++                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
++                           const U32 nbBitsBaseline)
++{
++    U32 rankVal[HUF_TABLELOG_MAX + 1];
++    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
++    const U32 minBits  = nbBitsBaseline - maxWeight;
++    U32 s;
++
++    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
++
++    /* fill DTable */
++    for (s=0; s<sortedListSize; s++) {
++        const U16 symbol = sortedList[s].symbol;
++        const U32 weight = sortedList[s].weight;
++        const U32 nbBits = nbBitsBaseline - weight;
++        const U32 start = rankVal[weight];
++        const U32 length = 1 << (targetLog-nbBits);
++
++        if (targetLog-nbBits >= minBits) {   /* enough room for a second symbol */
++            U32 sortedRank;
++            int minWeight = nbBits + scaleLog;
++            if (minWeight < 1) minWeight = 1;
++            sortedRank = rankStart[minWeight];
++            HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
++                           rankValOrigin[nbBits], minWeight,
++                           sortedList+sortedRank, sortedListSize-sortedRank,
++                           nbBitsBaseline, symbol);
++        } else {
++            HUF_DEltX4 DElt;
++            MEM_writeLE16(&(DElt.sequence), symbol);
++            DElt.nbBits = (BYTE)(nbBits);
++            DElt.length = 1;
++            {   U32 const end = start + length;
++                U32 u;
++                for (u = start; u < end; u++) DTable[u] = DElt;
++        }   }
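++        /* advance the fill position for this weight class */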
++        rankVal[weight] += length;
++    }
++}
++
++size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize)
++{
++    BYTE weightList[HUF_SYMBOLVALUE_MAX + 1];
++    sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1];
++    U32 rankStats[HUF_TABLELOG_MAX + 1] = { 0 };
++    U32 rankStart0[HUF_TABLELOG_MAX + 2] = { 0 };
++    U32* const rankStart = rankStart0+1;
++    rankVal_t rankVal;
++    U32 tableLog, maxW, sizeOfSort, nbSymbols;
++    DTableDesc dtd = HUF_getDTableDesc(DTable);
++    U32 const maxTableLog = dtd.maxTableLog;
++    size_t iSize;
++    void* dtPtr = DTable+1;   /* force compiler to avoid strict-aliasing */
++    HUF_DEltX4* const dt = (HUF_DEltX4*)dtPtr;
++
++    HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable));   /* if compiler fails here, assertion is wrong */
++    if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
++    /* memset(weightList, 0, sizeof(weightList)); */   /* is not necessary, even though some analyzer complain ... */
++
++    iSize = HUF_readStats(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
++    if (HUF_isError(iSize)) return iSize;
++
++    /* check result */
++    if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */
++
++    /* find maxWeight */
++    for (maxW = tableLog; rankStats[maxW]==0; maxW--) {}  /* necessarily finds a solution before 0 */
++
++    /* Get start index of each weight */
++    {   U32 w, nextRankStart = 0;
++        for (w=1; w<maxW+1; w++) {
++            U32 current = nextRankStart;
++            nextRankStart += rankStats[w];
++            rankStart[w] = current;
++        }
++        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list */
++        sizeOfSort = nextRankStart;
++    }
++
++    /* sort symbols by weight */
++    {   U32 s;
++        for (s=0; s<nbSymbols; s++) {
++            U32 const w = weightList[s];
++            U32 const r = rankStart[w]++;
++            sortedSymbol[r].symbol = (BYTE)s;
++            sortedSymbol[r].weight = (BYTE)w;
++        }
++        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */
++    }
++
++    /* Build rankVal */
++    {   U32* const rankVal0 = rankVal[0];
++        {   int const rescale = (maxTableLog-tableLog) - 1;   /* tableLog <= maxTableLog */
++            U32 nextRankVal = 0;
++            U32 w;
++            for (w=1; w<maxW+1; w++) {
++                U32 current = nextRankVal;
++                nextRankVal += rankStats[w] << (w+rescale);
++                rankVal0[w] = current;
++        }   }
++        {   U32 const minBits = tableLog+1 - maxW;
++            U32 consumed;
++            for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
++                U32* const rankValPtr = rankVal[consumed];
++                U32 w;
++                for (w = 1; w < maxW+1; w++) {
++                    rankValPtr[w] = rankVal0[w] >> consumed;
++    }   }   }   }
++
++    HUF_fillDTableX4(dt, maxTableLog,
++                   sortedSymbol, sizeOfSort,
++                   rankStart0, rankVal, maxW,
++                   tableLog+1);
++
++    dtd.tableLog = (BYTE)maxTableLog;
++    dtd.tableType = 1;
++    memcpy(DTable, &dtd, sizeof(dtd));
++    return iSize;
++}
++
++
++static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
++{
++    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
++    memcpy(op, dt+val, 2);
++    BIT_skipBits(DStream, dt[val].nbBits);
++    return dt[val].length;
++}
++
++static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
++{
++    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
++    memcpy(op, dt+val, 1);
++    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
++    else {
++        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
++            BIT_skipBits(DStream, dt[val].nbBits);
++            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
++                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol.
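++                     (the stream holds only the first symbol of this 2-symbol
++                     cell, so skipping nbBits can overshoot the container;
++                     clamping bitsConsumed lets BIT_endOfDStream() still
++                     report a clean end of stream.)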
Note : can't easily extract nbBits from just this symbol */ ++ } } ++ return 1; ++} ++ ++ ++#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \ ++ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) ++ ++#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \ ++ if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ ++ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) ++ ++#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \ ++ if (MEM_64bits()) \ ++ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) ++ ++FORCE_INLINE size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog) ++{ ++ BYTE* const pStart = p; ++ ++ /* up to 8 symbols at a time */ ++ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) { ++ HUF_DECODE_SYMBOLX4_2(p, bitDPtr); ++ HUF_DECODE_SYMBOLX4_1(p, bitDPtr); ++ HUF_DECODE_SYMBOLX4_2(p, bitDPtr); ++ HUF_DECODE_SYMBOLX4_0(p, bitDPtr); ++ } ++ ++ /* closer to end : up to 2 symbols at a time */ ++ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2)) ++ HUF_DECODE_SYMBOLX4_0(p, bitDPtr); ++ ++ while (p <= pEnd-2) ++ HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ ++ ++ if (p < pEnd) ++ p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog); ++ ++ return p-pStart; ++} ++ ++ ++static size_t HUF_decompress1X4_usingDTable_internal( ++ void* dst, size_t dstSize, ++ const void* cSrc, size_t cSrcSize, ++ const HUF_DTable* DTable) ++{ ++ BIT_DStream_t bitD; ++ ++ /* Init */ ++ { size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); ++ if (HUF_isError(errorCode)) return errorCode; ++ } ++ ++ /* decode */ ++ { BYTE* const ostart = (BYTE*) dst; ++ BYTE* const oend = ostart + dstSize; ++ const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */ ++ const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr; ++ DTableDesc const dtd = HUF_getDTableDesc(DTable); ++ HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog); ++ } ++ ++ /* check */ ++ if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected); ++ ++ /* decoded size */ ++ return dstSize; ++} ++ ++size_t HUF_decompress1X4_usingDTable( ++ void* dst, size_t dstSize, ++ const void* cSrc, size_t cSrcSize, ++ const HUF_DTable* DTable) ++{ ++ DTableDesc dtd = HUF_getDTableDesc(DTable); ++ if (dtd.tableType != 1) return ERROR(GENERIC); ++ return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); ++} ++ ++size_t HUF_decompress1X4_DCtx (HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) ++{ ++ const BYTE* ip = (const BYTE*) cSrc; ++ ++ size_t const hSize = HUF_readDTableX4 (DCtx, cSrc, cSrcSize); ++ if (HUF_isError(hSize)) return hSize; ++ if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ++ ip += hSize; cSrcSize -= hSize; ++ ++ return HUF_decompress1X4_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx); ++} ++ ++static size_t HUF_decompress4X4_usingDTable_internal( ++ void* dst, size_t dstSize, ++ const void* cSrc, size_t cSrcSize, ++ const HUF_DTable* DTable) ++{ ++ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ ++ ++ { const BYTE* const istart = (const BYTE*) cSrc; ++ BYTE* const ostart = (BYTE*) dst; ++ BYTE* const oend = ostart + dstSize; ++ const void* const dtPtr = DTable+1; ++ const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr; ++ ++ /* Init */ ++ BIT_DStream_t bitD1; ++ BIT_DStream_t bitD2; ++ 
BIT_DStream_t bitD3; ++ BIT_DStream_t bitD4; ++ size_t const length1 = MEM_readLE16(istart); ++ size_t const length2 = MEM_readLE16(istart+2); ++ size_t const length3 = MEM_readLE16(istart+4); ++ size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); ++ const BYTE* const istart1 = istart + 6; /* jumpTable */ ++ const BYTE* const istart2 = istart1 + length1; ++ const BYTE* const istart3 = istart2 + length2; ++ const BYTE* const istart4 = istart3 + length3; ++ size_t const segmentSize = (dstSize+3) / 4; ++ BYTE* const opStart2 = ostart + segmentSize; ++ BYTE* const opStart3 = opStart2 + segmentSize; ++ BYTE* const opStart4 = opStart3 + segmentSize; ++ BYTE* op1 = ostart; ++ BYTE* op2 = opStart2; ++ BYTE* op3 = opStart3; ++ BYTE* op4 = opStart4; ++ U32 endSignal; ++ DTableDesc const dtd = HUF_getDTableDesc(DTable); ++ U32 const dtLog = dtd.tableLog; ++ ++ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ ++ { size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1); ++ if (HUF_isError(errorCode)) return errorCode; } ++ { size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2); ++ if (HUF_isError(errorCode)) return errorCode; } ++ { size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3); ++ if (HUF_isError(errorCode)) return errorCode; } ++ { size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4); ++ if (HUF_isError(errorCode)) return errorCode; } ++ ++ /* 16-32 symbols per loop (4-8 symbols per stream) */ ++ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); ++ for ( ; (endSignal==BIT_DStream_unfinished) & (op4<(oend-(sizeof(bitD4.bitContainer)-1))) ; ) { ++ HUF_DECODE_SYMBOLX4_2(op1, &bitD1); ++ HUF_DECODE_SYMBOLX4_2(op2, &bitD2); ++ HUF_DECODE_SYMBOLX4_2(op3, &bitD3); ++ HUF_DECODE_SYMBOLX4_2(op4, &bitD4); ++ HUF_DECODE_SYMBOLX4_1(op1, &bitD1); ++ HUF_DECODE_SYMBOLX4_1(op2, &bitD2); ++ HUF_DECODE_SYMBOLX4_1(op3, &bitD3); ++ HUF_DECODE_SYMBOLX4_1(op4, &bitD4); ++ HUF_DECODE_SYMBOLX4_2(op1, &bitD1); ++ HUF_DECODE_SYMBOLX4_2(op2, &bitD2); ++ HUF_DECODE_SYMBOLX4_2(op3, &bitD3); ++ HUF_DECODE_SYMBOLX4_2(op4, &bitD4); ++ HUF_DECODE_SYMBOLX4_0(op1, &bitD1); ++ HUF_DECODE_SYMBOLX4_0(op2, &bitD2); ++ HUF_DECODE_SYMBOLX4_0(op3, &bitD3); ++ HUF_DECODE_SYMBOLX4_0(op4, &bitD4); ++ ++ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); ++ } ++ ++ /* check corruption */ ++ if (op1 > opStart2) return ERROR(corruption_detected); ++ if (op2 > opStart3) return ERROR(corruption_detected); ++ if (op3 > opStart4) return ERROR(corruption_detected); ++ /* note : op4 already verified within main loop */ ++ ++ /* finish bitStreams one by one */ ++ HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog); ++ HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog); ++ HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog); ++ HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog); ++ ++ /* check */ ++ { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); ++ if (!endCheck) return ERROR(corruption_detected); } ++ ++ /* decoded size */ ++ return dstSize; ++ } ++} ++ ++ ++size_t HUF_decompress4X4_usingDTable( ++ void* dst, size_t dstSize, ++ const void* cSrc, size_t cSrcSize, ++ const HUF_DTable* DTable) ++{ ++ DTableDesc dtd = HUF_getDTableDesc(DTable); ++ if (dtd.tableType != 1) return ERROR(GENERIC); ++ return 
HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); ++} ++ ++ ++size_t HUF_decompress4X4_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) ++{ ++ const BYTE* ip = (const BYTE*) cSrc; ++ ++ size_t hSize = HUF_readDTableX4 (dctx, cSrc, cSrcSize); ++ if (HUF_isError(hSize)) return hSize; ++ if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ++ ip += hSize; cSrcSize -= hSize; ++ ++ return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx); ++} ++ ++ ++/* ********************************/ ++/* Generic decompression selector */ ++/* ********************************/ ++ ++size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, ++ const void* cSrc, size_t cSrcSize, ++ const HUF_DTable* DTable) ++{ ++ DTableDesc const dtd = HUF_getDTableDesc(DTable); ++ return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) : ++ HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable); ++} ++ ++size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, ++ const void* cSrc, size_t cSrcSize, ++ const HUF_DTable* DTable) ++{ ++ DTableDesc const dtd = HUF_getDTableDesc(DTable); ++ return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) : ++ HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable); ++} ++ ++ ++typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; ++static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = ++{ ++ /* single, double, quad */ ++ {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */ ++ {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */ ++ {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */ ++ {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */ ++ {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */ ++ {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */ ++ {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */ ++ {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */ ++ {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */ ++ {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */ ++ {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */ ++ {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */ ++ {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */ ++ {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */ ++ {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */ ++ {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */ ++}; ++ ++/** HUF_selectDecoder() : ++* Tells which decoder is likely to decode faster, ++* based on a set of pre-determined metrics. ++* @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 . 
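++* Decoder time is estimated as tableTime + decode256Time * (dstSize/256),
++* using the algoTime[] entries above, indexed by the ratio cSrcSize/dstSize;
++* the double-symbol estimate is then inflated by 1/8th, giving an edge to the
++* smaller, more cache-friendly single-symbol table.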
++* Assumption : 0 < cSrcSize < dstSize <= 128 KB */ ++U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize) ++{ ++ /* decoder timing evaluation */ ++ U32 const Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */ ++ U32 const D256 = (U32)(dstSize >> 8); ++ U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256); ++ U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256); ++ DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, for cache eviction */ ++ ++ return DTime1 < DTime0; ++} ++ ++ ++typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); ++ ++size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) ++{ ++ /* validation checks */ ++ if (dstSize == 0) return ERROR(dstSize_tooSmall); ++ if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ ++ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ ++ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ ++ ++ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); ++ return algoNb ? HUF_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : ++ HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; ++ } ++} ++ ++size_t HUF_decompress4X_hufOnly (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) ++{ ++ /* validation checks */ ++ if (dstSize == 0) return ERROR(dstSize_tooSmall); ++ if ((cSrcSize >= dstSize) || (cSrcSize <= 1)) return ERROR(corruption_detected); /* invalid */ ++ ++ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); ++ return algoNb ? HUF_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : ++ HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; ++ } ++} ++ ++size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) ++{ ++ /* validation checks */ ++ if (dstSize == 0) return ERROR(dstSize_tooSmall); ++ if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ ++ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ ++ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ ++ ++ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); ++ return algoNb ? HUF_decompress1X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : ++ HUF_decompress1X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; ++ } ++} +diff --git a/lib/zstd/mem.h b/lib/zstd/mem.h +new file mode 100644 +index 0000000..76cae04 +--- /dev/null ++++ b/lib/zstd/mem.h +@@ -0,0 +1,209 @@ ++/** ++ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. ++ * All rights reserved. ++ * ++ * This source code is licensed under the BSD-style license found in the ++ * LICENSE file in the root directory of this source tree. An additional grant ++ * of patent rights can be found in the PATENTS file in the same directory. 
++ */
++
++#ifndef MEM_H_MODULE
++#define MEM_H_MODULE
++
++/*-****************************************
++*  Dependencies
++******************************************/
++#include <asm/unaligned.h>
++#include <linux/types.h>   /* size_t, ptrdiff_t */
++#include <linux/string.h>  /* memcpy */
++
++
++/*-****************************************
++*  Compiler specifics
++******************************************/
++#define MEM_STATIC static __inline __attribute__((unused))
++
++/* code only tested on 32 and 64 bits systems */
++#define MEM_STATIC_ASSERT(c) { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
++MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
++
++
++/*-**************************************************************
++*  Basic Types
++*****************************************************************/
++typedef uint8_t  BYTE;
++typedef uint16_t U16;
++typedef int16_t  S16;
++typedef uint32_t U32;
++typedef int32_t  S32;
++typedef uint64_t U64;
++typedef int64_t  S64;
++typedef ptrdiff_t iPtrDiff;
++typedef uintptr_t uPtrDiff;
++
++
++/*-**************************************************************
++*  Memory I/O
++*****************************************************************/
++MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
++MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }
++
++#if defined(__LITTLE_ENDIAN)
++# define MEM_LITTLE_ENDIAN 1
++#else
++# define MEM_LITTLE_ENDIAN 0
++#endif
++
++MEM_STATIC unsigned MEM_isLittleEndian(void)
++{
++    return MEM_LITTLE_ENDIAN;
++}
++
++MEM_STATIC U16 MEM_read16(const void* memPtr)
++{
++    return get_unaligned((const U16*)memPtr);
++}
++
++MEM_STATIC U32 MEM_read32(const void* memPtr)
++{
++    return get_unaligned((const U32*)memPtr);
++}
++
++MEM_STATIC U64 MEM_read64(const void* memPtr)
++{
++    return get_unaligned((const U64*)memPtr);
++}
++
++MEM_STATIC size_t MEM_readST(const void* memPtr)
++{
++    return get_unaligned((const size_t*)memPtr);
++}
++
++MEM_STATIC void MEM_write16(void* memPtr, U16 value)
++{
++    put_unaligned(value, (U16*)memPtr);
++}
++
++MEM_STATIC void MEM_write32(void* memPtr, U32 value)
++{
++    put_unaligned(value, (U32*)memPtr);
++}
++
++MEM_STATIC void MEM_write64(void* memPtr, U64 value)
++{
++    put_unaligned(value, (U64*)memPtr);
++}
++
++/*=== Little endian r/w ===*/
++
++MEM_STATIC U16 MEM_readLE16(const void* memPtr)
++{
++    return get_unaligned_le16(memPtr);
++}
++
++MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
++{
++    put_unaligned_le16(val, memPtr);
++}
++
++MEM_STATIC U32 MEM_readLE24(const void* memPtr)
++{
++    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
++}
++
++MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
++{
++    MEM_writeLE16(memPtr, (U16)val);
++    ((BYTE*)memPtr)[2] = (BYTE)(val>>16);
++}
++
++MEM_STATIC U32 MEM_readLE32(const void* memPtr)
++{
++    return get_unaligned_le32(memPtr);
++}
++
++MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32)
++{
++    put_unaligned_le32(val32, memPtr);
++}
++
++MEM_STATIC U64 MEM_readLE64(const void* memPtr)
++{
++    return get_unaligned_le64(memPtr);
++}
++
++MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64)
++{
++    put_unaligned_le64(val64, memPtr);
++}
++
++MEM_STATIC size_t MEM_readLEST(const void* memPtr)
++{
++    if (MEM_32bits())
++        return (size_t)MEM_readLE32(memPtr);
++    else
++        return (size_t)MEM_readLE64(memPtr);
++}
++
++MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val)
++{
++    if (MEM_32bits())
++        MEM_writeLE32(memPtr, (U32)val);
++    else
++        MEM_writeLE64(memPtr, (U64)val);
++}
++
++/*=== Big endian r/w ===*/
++
++MEM_STATIC U32 MEM_readBE32(const void* memPtr)
++{
++    return get_unaligned_be32(memPtr);
++}
++
++MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32)
++{
++    put_unaligned_be32(val32, memPtr);
++}
++
++MEM_STATIC U64 MEM_readBE64(const void* memPtr)
++{
++    return get_unaligned_be64(memPtr);
++}
++
++MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64)
++{
++    put_unaligned_be64(val64, memPtr);
++}
++
++MEM_STATIC size_t MEM_readBEST(const void* memPtr)
++{
++    if (MEM_32bits())
++        return (size_t)MEM_readBE32(memPtr);
++    else
++        return (size_t)MEM_readBE64(memPtr);
++}
++
++MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
++{
++    if (MEM_32bits())
++        MEM_writeBE32(memPtr, (U32)val);
++    else
++        MEM_writeBE64(memPtr, (U64)val);
++}
++
++
++/* function safe only for comparisons */
++MEM_STATIC U32 MEM_readMINMATCH(const void* memPtr, U32 length)
++{
++    switch (length)
++    {
++    default :
++    case 4 : return MEM_read32(memPtr);
++    case 3 : if (MEM_isLittleEndian())
++                 return MEM_read32(memPtr)<<8;
++             else
++                 return MEM_read32(memPtr)>>8;
++    }
++}
++
++#endif /* MEM_H_MODULE */
+diff --git a/lib/zstd/zstd_common.c b/lib/zstd/zstd_common.c
+new file mode 100644
+index 0000000..106f540
+--- /dev/null
++++ b/lib/zstd/zstd_common.c
+@@ -0,0 +1,69 @@
++/**
++ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
++ * All rights reserved.
++ *
++ * This source code is licensed under the BSD-style license found in the
++ * LICENSE file in the root directory of this source tree. An additional grant
++ * of patent rights can be found in the PATENTS file in the same directory.
++ */
++
++
++
++/*-*************************************
++*  Dependencies
++***************************************/
++#include "error_private.h"
++#include "zstd_internal.h"  /* declaration of ZSTD_isError, ZSTD_getErrorName, ZSTD_getErrorCode, ZSTD_getErrorString, ZSTD_versionNumber */
++#include <linux/kernel.h>
++
++
++/*=**************************************************************
++*  Custom allocator
++****************************************************************/
++
++#define stack_push(stack, size) ({ \
++    void* const ptr = ZSTD_PTR_ALIGN((stack)->ptr); \
++    (stack)->ptr = (char*)ptr + (size); \
++    (stack)->ptr <= (stack)->end ? ptr : NULL; \
++  })
ptr : NULL; \ ++ }) ++ ++ZSTD_customMem ZSTD_initStack(void* workspace, size_t workspaceSize) { ++ ZSTD_customMem stackMem = { ZSTD_stackAlloc, ZSTD_stackFree, workspace }; ++ ZSTD_stack* stack = (ZSTD_stack*) workspace; ++ /* Verify preconditions */ ++ if (!workspace || workspaceSize < sizeof(ZSTD_stack) || workspace != ZSTD_PTR_ALIGN(workspace)) { ++ ZSTD_customMem error = {NULL, NULL, NULL}; ++ return error; ++ } ++ /* Initialize the stack */ ++ stack->ptr = workspace; ++ stack->end = (char*)workspace + workspaceSize; ++ stack_push(stack, sizeof(ZSTD_stack)); ++ return stackMem; ++} ++ ++void* ZSTD_stackAllocAll(void* opaque, size_t* size) { ++ ZSTD_stack* stack = (ZSTD_stack*)opaque; ++ *size = stack->end - ZSTD_PTR_ALIGN(stack->ptr); ++ return stack_push(stack, *size); ++} ++ ++void* ZSTD_stackAlloc(void* opaque, size_t size) { ++ ZSTD_stack* stack = (ZSTD_stack*)opaque; ++ return stack_push(stack, size); ++} ++void ZSTD_stackFree(void* opaque, void* address) { ++ (void)opaque; ++ (void)address; ++} ++ ++void* ZSTD_malloc(size_t size, ZSTD_customMem customMem) ++{ ++ return customMem.customAlloc(customMem.opaque, size); ++} ++ ++void ZSTD_free(void* ptr, ZSTD_customMem customMem) ++{ ++ if (ptr!=NULL) ++ customMem.customFree(customMem.opaque, ptr); ++} +diff --git a/lib/zstd/zstd_internal.h b/lib/zstd/zstd_internal.h +new file mode 100644 +index 0000000..a61bd27 +--- /dev/null ++++ b/lib/zstd/zstd_internal.h +@@ -0,0 +1,274 @@ ++/** ++ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. ++ * All rights reserved. ++ * ++ * This source code is licensed under the BSD-style license found in the ++ * LICENSE file in the root directory of this source tree. An additional grant ++ * of patent rights can be found in the PATENTS file in the same directory. ++ */ ++ ++#ifndef ZSTD_CCOMMON_H_MODULE ++#define ZSTD_CCOMMON_H_MODULE ++ ++/*-******************************************************* ++* Compiler specifics ++*********************************************************/ ++#define FORCE_INLINE static __always_inline ++#define FORCE_NOINLINE static noinline ++ ++ ++/*-************************************* ++* Dependencies ++***************************************/ ++#include ++#include ++#include ++#include ++#include "mem.h" ++#include "error_private.h" ++ ++ ++/*-************************************* ++* shared macros ++***************************************/ ++#define MIN(a,b) ((a)<(b) ? (a) : (b)) ++#define MAX(a,b) ((a)>(b) ? 
(a) : (b)) ++#define CHECK_F(f) { size_t const errcod = f; if (ERR_isError(errcod)) return errcod; } /* check and Forward error code */ ++#define CHECK_E(f, e) { size_t const errcod = f; if (ERR_isError(errcod)) return ERROR(e); } /* check and send Error code */ ++ ++ ++/*-************************************* ++* Common constants ++***************************************/ ++#define ZSTD_OPT_NUM (1<<12) ++#define ZSTD_DICT_MAGIC 0xEC30A437 /* v0.7+ */ ++ ++#define ZSTD_REP_NUM 3 /* number of repcodes */ ++#define ZSTD_REP_CHECK (ZSTD_REP_NUM) /* number of repcodes to check by the optimal parser */ ++#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1) ++#define ZSTD_REP_MOVE_OPT (ZSTD_REP_NUM) ++static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 }; ++ ++#define KB *(1 <<10) ++#define MB *(1 <<20) ++#define GB *(1U<<30) ++ ++#define BIT7 128 ++#define BIT6 64 ++#define BIT5 32 ++#define BIT4 16 ++#define BIT1 2 ++#define BIT0 1 ++ ++#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10 ++static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 }; ++static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 }; ++ ++#define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */ ++static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE; ++typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e; ++ ++#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ ++#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ ++ ++#define HufLog 12 ++typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e; ++ ++#define LONGNBSEQ 0x7F00 ++ ++#define MINMATCH 3 ++#define EQUAL_READ32 4 ++ ++#define Litbits 8 ++#define MaxLit ((1<= 3) /* GCC Intrinsic */ ++ return 31 - __builtin_clz(val); ++# else /* Software version */ ++ static const int DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; ++ U32 v = val; ++ int r; ++ v |= v >> 1; ++ v |= v >> 2; ++ v |= v >> 4; ++ v |= v >> 8; ++ v |= v >> 16; ++ r = DeBruijnClz[(U32)(v * 0x07C4ACDDU) >> 27]; ++ return r; ++# endif ++} ++ ++ ++/* hidden functions */ ++ ++/* ZSTD_invalidateRepCodes() : ++ * ensures next compression will not use repcodes from previous block. ++ * Note : only works with regular variant; ++ * do not use with extDict variant ! */ ++void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx); ++ ++size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); ++size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx); ++size_t ZSTD_freeCDict(ZSTD_CDict* cdict); ++size_t ZSTD_freeDDict(ZSTD_DDict* cdict); ++size_t ZSTD_freeCStream(ZSTD_CStream* zcs); ++size_t ZSTD_freeDStream(ZSTD_DStream* zds); ++ ++ ++#endif /* ZSTD_CCOMMON_H_MODULE */ +diff --git a/lib/zstd/zstd_opt.h b/lib/zstd/zstd_opt.h +new file mode 100644 +index 0000000..297a715 +--- /dev/null ++++ b/lib/zstd/zstd_opt.h +@@ -0,0 +1,921 @@ ++/** ++ * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc. ++ * All rights reserved. ++ * ++ * This source code is licensed under the BSD-style license found in the ++ * LICENSE file in the root directory of this source tree. An additional grant ++ * of patent rights can be found in the PATENTS file in the same directory. 
++ */ ++ ++ ++/* Note : this file is intended to be included within zstd_compress.c */ ++ ++ ++#ifndef ZSTD_OPT_H_91842398743 ++#define ZSTD_OPT_H_91842398743 ++ ++ ++#define ZSTD_LITFREQ_ADD 2 ++#define ZSTD_FREQ_DIV 4 ++#define ZSTD_MAX_PRICE (1<<30) ++ ++/*-************************************* ++* Price functions for optimal parser ++***************************************/ ++FORCE_INLINE void ZSTD_setLog2Prices(seqStore_t* ssPtr) ++{ ++ ssPtr->log2matchLengthSum = ZSTD_highbit32(ssPtr->matchLengthSum+1); ++ ssPtr->log2litLengthSum = ZSTD_highbit32(ssPtr->litLengthSum+1); ++ ssPtr->log2litSum = ZSTD_highbit32(ssPtr->litSum+1); ++ ssPtr->log2offCodeSum = ZSTD_highbit32(ssPtr->offCodeSum+1); ++ ssPtr->factor = 1 + ((ssPtr->litSum>>5) / ssPtr->litLengthSum) + ((ssPtr->litSum<<1) / (ssPtr->litSum + ssPtr->matchSum)); ++} ++ ++ ++MEM_STATIC void ZSTD_rescaleFreqs(seqStore_t* ssPtr, const BYTE* src, size_t srcSize) ++{ ++ unsigned u; ++ ++ ssPtr->cachedLiterals = NULL; ++ ssPtr->cachedPrice = ssPtr->cachedLitLength = 0; ++ ssPtr->staticPrices = 0; ++ ++ if (ssPtr->litLengthSum == 0) { ++ if (srcSize <= 1024) ssPtr->staticPrices = 1; ++ ++ for (u=0; u<=MaxLit; u++) ++ ssPtr->litFreq[u] = 0; ++ for (u=0; ulitFreq[src[u]]++; ++ ++ ssPtr->litSum = 0; ++ ssPtr->litLengthSum = MaxLL+1; ++ ssPtr->matchLengthSum = MaxML+1; ++ ssPtr->offCodeSum = (MaxOff+1); ++ ssPtr->matchSum = (ZSTD_LITFREQ_ADD<litFreq[u] = 1 + (ssPtr->litFreq[u]>>ZSTD_FREQ_DIV); ++ ssPtr->litSum += ssPtr->litFreq[u]; ++ } ++ for (u=0; u<=MaxLL; u++) ++ ssPtr->litLengthFreq[u] = 1; ++ for (u=0; u<=MaxML; u++) ++ ssPtr->matchLengthFreq[u] = 1; ++ for (u=0; u<=MaxOff; u++) ++ ssPtr->offCodeFreq[u] = 1; ++ } else { ++ ssPtr->matchLengthSum = 0; ++ ssPtr->litLengthSum = 0; ++ ssPtr->offCodeSum = 0; ++ ssPtr->matchSum = 0; ++ ssPtr->litSum = 0; ++ ++ for (u=0; u<=MaxLit; u++) { ++ ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u]>>(ZSTD_FREQ_DIV+1)); ++ ssPtr->litSum += ssPtr->litFreq[u]; ++ } ++ for (u=0; u<=MaxLL; u++) { ++ ssPtr->litLengthFreq[u] = 1 + (ssPtr->litLengthFreq[u]>>(ZSTD_FREQ_DIV+1)); ++ ssPtr->litLengthSum += ssPtr->litLengthFreq[u]; ++ } ++ for (u=0; u<=MaxML; u++) { ++ ssPtr->matchLengthFreq[u] = 1 + (ssPtr->matchLengthFreq[u]>>ZSTD_FREQ_DIV); ++ ssPtr->matchLengthSum += ssPtr->matchLengthFreq[u]; ++ ssPtr->matchSum += ssPtr->matchLengthFreq[u] * (u + 3); ++ } ++ ssPtr->matchSum *= ZSTD_LITFREQ_ADD; ++ for (u=0; u<=MaxOff; u++) { ++ ssPtr->offCodeFreq[u] = 1 + (ssPtr->offCodeFreq[u]>>ZSTD_FREQ_DIV); ++ ssPtr->offCodeSum += ssPtr->offCodeFreq[u]; ++ } ++ } ++ ++ ZSTD_setLog2Prices(ssPtr); ++} ++ ++ ++FORCE_INLINE U32 ZSTD_getLiteralPrice(seqStore_t* ssPtr, U32 litLength, const BYTE* literals) ++{ ++ U32 price, u; ++ ++ if (ssPtr->staticPrices) ++ return ZSTD_highbit32((U32)litLength+1) + (litLength*6); ++ ++ if (litLength == 0) ++ return ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[0]+1); ++ ++ /* literals */ ++ if (ssPtr->cachedLiterals == literals) { ++ U32 const additional = litLength - ssPtr->cachedLitLength; ++ const BYTE* literals2 = ssPtr->cachedLiterals + ssPtr->cachedLitLength; ++ price = ssPtr->cachedPrice + additional * ssPtr->log2litSum; ++ for (u=0; u < additional; u++) ++ price -= ZSTD_highbit32(ssPtr->litFreq[literals2[u]]+1); ++ ssPtr->cachedPrice = price; ++ ssPtr->cachedLitLength = litLength; ++ } else { ++ price = litLength * ssPtr->log2litSum; ++ for (u=0; u < litLength; u++) ++ price -= ZSTD_highbit32(ssPtr->litFreq[literals[u]]+1); ++ ++ if (litLength >= 12) { ++ ssPtr->cachedLiterals = 
literals; ++ ssPtr->cachedPrice = price; ++ ssPtr->cachedLitLength = litLength; ++ } ++ } ++ ++ /* literal Length */ ++ { const BYTE LL_deltaCode = 19; ++ const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; ++ price += LL_bits[llCode] + ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[llCode]+1); ++ } ++ ++ return price; ++} ++ ++ ++FORCE_INLINE U32 ZSTD_getPrice(seqStore_t* seqStorePtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength, const int ultra) ++{ ++ /* offset */ ++ U32 price; ++ BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1); ++ ++ if (seqStorePtr->staticPrices) ++ return ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + ZSTD_highbit32((U32)matchLength+1) + 16 + offCode; ++ ++ price = offCode + seqStorePtr->log2offCodeSum - ZSTD_highbit32(seqStorePtr->offCodeFreq[offCode]+1); ++ if (!ultra && offCode >= 20) price += (offCode-19)*2; ++ ++ /* match Length */ ++ { const BYTE ML_deltaCode = 36; ++ const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength]; ++ price += ML_bits[mlCode] + seqStorePtr->log2matchLengthSum - ZSTD_highbit32(seqStorePtr->matchLengthFreq[mlCode]+1); ++ } ++ ++ return price + ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + seqStorePtr->factor; ++} ++ ++ ++MEM_STATIC void ZSTD_updatePrice(seqStore_t* seqStorePtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength) ++{ ++ U32 u; ++ ++ /* literals */ ++ seqStorePtr->litSum += litLength*ZSTD_LITFREQ_ADD; ++ for (u=0; u < litLength; u++) ++ seqStorePtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD; ++ ++ /* literal Length */ ++ { const BYTE LL_deltaCode = 19; ++ const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; ++ seqStorePtr->litLengthFreq[llCode]++; ++ seqStorePtr->litLengthSum++; ++ } ++ ++ /* match offset */ ++ { BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1); ++ seqStorePtr->offCodeSum++; ++ seqStorePtr->offCodeFreq[offCode]++; ++ } ++ ++ /* match Length */ ++ { const BYTE ML_deltaCode = 36; ++ const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength]; ++ seqStorePtr->matchLengthFreq[mlCode]++; ++ seqStorePtr->matchLengthSum++; ++ } ++ ++ ZSTD_setLog2Prices(seqStorePtr); ++} ++ ++ ++#define SET_PRICE(pos, mlen_, offset_, litlen_, price_) \ ++ { \ ++ while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } \ ++ opt[pos].mlen = mlen_; \ ++ opt[pos].off = offset_; \ ++ opt[pos].litlen = litlen_; \ ++ opt[pos].price = price_; \ ++ } ++ ++ ++ ++/* Update hashTable3 up to ip (excluded) ++ Assumption : always within prefix (i.e. 
not within extDict) */ ++FORCE_INLINE ++U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_CCtx* zc, const BYTE* ip) ++{ ++ U32* const hashTable3 = zc->hashTable3; ++ U32 const hashLog3 = zc->hashLog3; ++ const BYTE* const base = zc->base; ++ U32 idx = zc->nextToUpdate3; ++ const U32 target = zc->nextToUpdate3 = (U32)(ip - base); ++ const size_t hash3 = ZSTD_hash3Ptr(ip, hashLog3); ++ ++ while(idx < target) { ++ hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx; ++ idx++; ++ } ++ ++ return hashTable3[hash3]; ++} ++ ++ ++/*-************************************* ++* Binary Tree search ++***************************************/ ++static U32 ZSTD_insertBtAndGetAllMatches ( ++ ZSTD_CCtx* zc, ++ const BYTE* const ip, const BYTE* const iLimit, ++ U32 nbCompares, const U32 mls, ++ U32 extDict, ZSTD_match_t* matches, const U32 minMatchLen) ++{ ++ const BYTE* const base = zc->base; ++ const U32 current = (U32)(ip-base); ++ const U32 hashLog = zc->params.cParams.hashLog; ++ const size_t h = ZSTD_hashPtr(ip, hashLog, mls); ++ U32* const hashTable = zc->hashTable; ++ U32 matchIndex = hashTable[h]; ++ U32* const bt = zc->chainTable; ++ const U32 btLog = zc->params.cParams.chainLog - 1; ++ const U32 btMask= (1U << btLog) - 1; ++ size_t commonLengthSmaller=0, commonLengthLarger=0; ++ const BYTE* const dictBase = zc->dictBase; ++ const U32 dictLimit = zc->dictLimit; ++ const BYTE* const dictEnd = dictBase + dictLimit; ++ const BYTE* const prefixStart = base + dictLimit; ++ const U32 btLow = btMask >= current ? 0 : current - btMask; ++ const U32 windowLow = zc->lowLimit; ++ U32* smallerPtr = bt + 2*(current&btMask); ++ U32* largerPtr = bt + 2*(current&btMask) + 1; ++ U32 matchEndIdx = current+8; ++ U32 dummy32; /* to be nullified at the end */ ++ U32 mnum = 0; ++ ++ const U32 minMatch = (mls == 3) ? 
3 : 4; ++ size_t bestLength = minMatchLen-1; ++ ++ if (minMatch == 3) { /* HC3 match finder */ ++ U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3 (zc, ip); ++ if (matchIndex3>windowLow && (current - matchIndex3 < (1<<18))) { ++ const BYTE* match; ++ size_t currentMl=0; ++ if ((!extDict) || matchIndex3 >= dictLimit) { ++ match = base + matchIndex3; ++ if (match[bestLength] == ip[bestLength]) currentMl = ZSTD_count(ip, match, iLimit); ++ } else { ++ match = dictBase + matchIndex3; ++ if (MEM_readMINMATCH(match, MINMATCH) == MEM_readMINMATCH(ip, MINMATCH)) /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */ ++ currentMl = ZSTD_count_2segments(ip+MINMATCH, match+MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH; ++ } ++ ++ /* save best solution */ ++ if (currentMl > bestLength) { ++ bestLength = currentMl; ++ matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex3; ++ matches[mnum].len = (U32)currentMl; ++ mnum++; ++ if (currentMl > ZSTD_OPT_NUM) goto update; ++ if (ip+currentMl == iLimit) goto update; /* best possible, and avoid read overflow*/ ++ } ++ } ++ } ++ ++ hashTable[h] = current; /* Update Hash Table */ ++ ++ while (nbCompares-- && (matchIndex > windowLow)) { ++ U32* nextPtr = bt + 2*(matchIndex & btMask); ++ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ ++ const BYTE* match; ++ ++ if ((!extDict) || (matchIndex+matchLength >= dictLimit)) { ++ match = base + matchIndex; ++ if (match[matchLength] == ip[matchLength]) { ++ matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iLimit) +1; ++ } ++ } else { ++ match = dictBase + matchIndex; ++ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart); ++ if (matchIndex+matchLength >= dictLimit) ++ match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ ++ } ++ ++ if (matchLength > bestLength) { ++ if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; ++ bestLength = matchLength; ++ matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex; ++ matches[mnum].len = (U32)matchLength; ++ mnum++; ++ if (matchLength > ZSTD_OPT_NUM) break; ++ if (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */ ++ break; /* drop, to guarantee consistency (miss a little bit of compression) */ ++ } ++ ++ if (match[matchLength] < ip[matchLength]) { ++ /* match is smaller than current */ ++ *smallerPtr = matchIndex; /* update smaller idx */ ++ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ ++ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ ++ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ ++ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ ++ } else { ++ /* match is larger than current */ ++ *largerPtr = matchIndex; ++ commonLengthLarger = matchLength; ++ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ ++ largerPtr = nextPtr; ++ matchIndex = nextPtr[0]; ++ } } ++ ++ *smallerPtr = *largerPtr = 0; ++ ++update: ++ zc->nextToUpdate = (matchEndIdx > current + 8) ? 
matchEndIdx - 8 : current+1; ++ return mnum; ++} ++ ++ ++/** Tree updater, providing best match */ ++static U32 ZSTD_BtGetAllMatches ( ++ ZSTD_CCtx* zc, ++ const BYTE* const ip, const BYTE* const iLimit, ++ const U32 maxNbAttempts, const U32 mls, ZSTD_match_t* matches, const U32 minMatchLen) ++{ ++ if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */ ++ ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls); ++ return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 0, matches, minMatchLen); ++} ++ ++ ++static U32 ZSTD_BtGetAllMatches_selectMLS ( ++ ZSTD_CCtx* zc, /* Index table will be updated */ ++ const BYTE* ip, const BYTE* const iHighLimit, ++ const U32 maxNbAttempts, const U32 matchLengthSearch, ZSTD_match_t* matches, const U32 minMatchLen) ++{ ++ switch(matchLengthSearch) ++ { ++ case 3 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen); ++ default : ++ case 4 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen); ++ case 5 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen); ++ case 7 : ++ case 6 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen); ++ } ++} ++ ++/** Tree updater, providing best match */ ++static U32 ZSTD_BtGetAllMatches_extDict ( ++ ZSTD_CCtx* zc, ++ const BYTE* const ip, const BYTE* const iLimit, ++ const U32 maxNbAttempts, const U32 mls, ZSTD_match_t* matches, const U32 minMatchLen) ++{ ++ if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */ ++ ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls); ++ return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 1, matches, minMatchLen); ++} ++ ++ ++static U32 ZSTD_BtGetAllMatches_selectMLS_extDict ( ++ ZSTD_CCtx* zc, /* Index table will be updated */ ++ const BYTE* ip, const BYTE* const iHighLimit, ++ const U32 maxNbAttempts, const U32 matchLengthSearch, ZSTD_match_t* matches, const U32 minMatchLen) ++{ ++ switch(matchLengthSearch) ++ { ++ case 3 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen); ++ default : ++ case 4 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen); ++ case 5 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen); ++ case 7 : ++ case 6 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen); ++ } ++} ++ ++ ++/*-******************************* ++* Optimal parser ++*********************************/ ++FORCE_INLINE ++void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx, ++ const void* src, size_t srcSize, const int ultra) ++{ ++ seqStore_t* seqStorePtr = &(ctx->seqStore); ++ const BYTE* const istart = (const BYTE*)src; ++ const BYTE* ip = istart; ++ const BYTE* anchor = istart; ++ const BYTE* const iend = istart + srcSize; ++ const BYTE* const ilimit = iend - 8; ++ const BYTE* const base = ctx->base; ++ const BYTE* const prefixStart = base + ctx->dictLimit; ++ ++ const U32 maxSearches = 1U << ctx->params.cParams.searchLog; ++ const U32 sufficient_len = ctx->params.cParams.targetLength; ++ const U32 mls = ctx->params.cParams.searchLength; ++ const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 
3 : 4; ++ ++ ZSTD_optimal_t* opt = seqStorePtr->priceTable; ++ ZSTD_match_t* matches = seqStorePtr->matchTable; ++ const BYTE* inr; ++ U32 offset, rep[ZSTD_REP_NUM]; ++ ++ /* init */ ++ ctx->nextToUpdate3 = ctx->nextToUpdate; ++ ZSTD_rescaleFreqs(seqStorePtr, (const BYTE*)src, srcSize); ++ ip += (ip==prefixStart); ++ { U32 i; for (i=0; irep[i]; } ++ ++ /* Match Loop */ ++ while (ip < ilimit) { ++ U32 cur, match_num, last_pos, litlen, price; ++ U32 u, mlen, best_mlen, best_off, litLength; ++ memset(opt, 0, sizeof(ZSTD_optimal_t)); ++ last_pos = 0; ++ litlen = (U32)(ip - anchor); ++ ++ /* check repCode */ ++ { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor); ++ for (i=(ip == anchor); i 0) && (repCur < (S32)(ip-prefixStart)) ++ && (MEM_readMINMATCH(ip, minMatch) == MEM_readMINMATCH(ip - repCur, minMatch))) { ++ mlen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repCur, iend) + minMatch; ++ if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) { ++ best_mlen = mlen; best_off = i; cur = 0; last_pos = 1; ++ goto _storeSequence; ++ } ++ best_off = i - (ip == anchor); ++ do { ++ price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); ++ if (mlen > last_pos || price < opt[mlen].price) ++ SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */ ++ mlen--; ++ } while (mlen >= minMatch); ++ } } } ++ ++ match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, ip, iend, maxSearches, mls, matches, minMatch); ++ ++ if (!last_pos && !match_num) { ip++; continue; } ++ ++ if (match_num && (matches[match_num-1].len > sufficient_len || matches[match_num-1].len >= ZSTD_OPT_NUM)) { ++ best_mlen = matches[match_num-1].len; ++ best_off = matches[match_num-1].off; ++ cur = 0; ++ last_pos = 1; ++ goto _storeSequence; ++ } ++ ++ /* set prices using matches at position = 0 */ ++ best_mlen = (last_pos) ? last_pos : minMatch; ++ for (u = 0; u < match_num; u++) { ++ mlen = (u>0) ? matches[u-1].len+1 : best_mlen; ++ best_mlen = matches[u].len; ++ while (mlen <= best_mlen) { ++ price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra); ++ if (mlen > last_pos || price < opt[mlen].price) ++ SET_PRICE(mlen, mlen, matches[u].off, litlen, price); /* note : macro modifies last_pos */ ++ mlen++; ++ } } ++ ++ if (last_pos < minMatch) { ip++; continue; } ++ ++ /* initialize opt[0] */ ++ { U32 i ; for (i=0; i litlen) { ++ price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-litlen); ++ } else ++ price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor); ++ } else { ++ litlen = 1; ++ price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-1); ++ } ++ ++ if (cur > last_pos || price <= opt[cur].price) ++ SET_PRICE(cur, 1, 0, litlen, price); ++ ++ if (cur == last_pos) break; ++ ++ if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */ ++ continue; ++ ++ mlen = opt[cur].mlen; ++ if (opt[cur].off > ZSTD_REP_MOVE_OPT) { ++ opt[cur].rep[2] = opt[cur-mlen].rep[1]; ++ opt[cur].rep[1] = opt[cur-mlen].rep[0]; ++ opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT; ++ } else { ++ opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur-mlen].rep[1] : opt[cur-mlen].rep[2]; ++ opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur-mlen].rep[0] : opt[cur-mlen].rep[1]; ++ opt[cur].rep[0] = ((opt[cur].off==ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? 
(opt[cur-mlen].rep[0] - 1) : (opt[cur-mlen].rep[opt[cur].off]); ++ } ++ ++ best_mlen = minMatch; ++ { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1); ++ for (i=(opt[cur].mlen != 1); i 0) && (repCur < (S32)(inr-prefixStart)) ++ && (MEM_readMINMATCH(inr, minMatch) == MEM_readMINMATCH(inr - repCur, minMatch))) { ++ mlen = (U32)ZSTD_count(inr+minMatch, inr+minMatch - repCur, iend) + minMatch; ++ ++ if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) { ++ best_mlen = mlen; best_off = i; last_pos = cur + 1; ++ goto _storeSequence; ++ } ++ ++ best_off = i - (opt[cur].mlen != 1); ++ if (mlen > best_mlen) best_mlen = mlen; ++ ++ do { ++ if (opt[cur].mlen == 1) { ++ litlen = opt[cur].litlen; ++ if (cur > litlen) { ++ price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra); ++ } else ++ price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); ++ } else { ++ litlen = 0; ++ price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra); ++ } ++ ++ if (cur + mlen > last_pos || price <= opt[cur + mlen].price) ++ SET_PRICE(cur + mlen, mlen, i, litlen, price); ++ mlen--; ++ } while (mlen >= minMatch); ++ } } } ++ ++ match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, inr, iend, maxSearches, mls, matches, best_mlen); ++ ++ if (match_num > 0 && (matches[match_num-1].len > sufficient_len || cur + matches[match_num-1].len >= ZSTD_OPT_NUM)) { ++ best_mlen = matches[match_num-1].len; ++ best_off = matches[match_num-1].off; ++ last_pos = cur + 1; ++ goto _storeSequence; ++ } ++ ++ /* set prices using matches at position = cur */ ++ for (u = 0; u < match_num; u++) { ++ mlen = (u>0) ? matches[u-1].len+1 : best_mlen; ++ best_mlen = matches[u].len; ++ ++ while (mlen <= best_mlen) { ++ if (opt[cur].mlen == 1) { ++ litlen = opt[cur].litlen; ++ if (cur > litlen) ++ price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra); ++ else ++ price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra); ++ } else { ++ litlen = 0; ++ price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra); ++ } ++ ++ if (cur + mlen > last_pos || (price < opt[cur + mlen].price)) ++ SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price); ++ ++ mlen++; ++ } } } ++ ++ best_mlen = opt[last_pos].mlen; ++ best_off = opt[last_pos].off; ++ cur = last_pos - best_mlen; ++ ++ /* store sequence */ ++_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */ ++ opt[0].mlen = 1; ++ ++ while (1) { ++ mlen = opt[cur].mlen; ++ offset = opt[cur].off; ++ opt[cur].mlen = best_mlen; ++ opt[cur].off = best_off; ++ best_mlen = mlen; ++ best_off = offset; ++ if (mlen > cur) break; ++ cur -= mlen; ++ } ++ ++ for (u = 0; u <= last_pos;) { ++ u += opt[u].mlen; ++ } ++ ++ for (cur=0; cur < last_pos; ) { ++ mlen = opt[cur].mlen; ++ if (mlen == 1) { ip++; cur++; continue; } ++ offset = opt[cur].off; ++ cur += mlen; ++ litLength = (U32)(ip - anchor); ++ ++ if (offset > ZSTD_REP_MOVE_OPT) { ++ rep[2] = rep[1]; ++ rep[1] = rep[0]; ++ rep[0] = offset - ZSTD_REP_MOVE_OPT; ++ offset--; ++ } else { ++ if (offset != 0) { ++ best_off = (offset==ZSTD_REP_MOVE_OPT) ? 
(rep[0] - 1) : (rep[offset]); ++ if (offset != 1) rep[2] = rep[1]; ++ rep[1] = rep[0]; ++ rep[0] = best_off; ++ } ++ if (litLength==0) offset--; ++ } ++ ++ ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH); ++ ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH); ++ anchor = ip = ip + mlen; ++ } } /* for (cur=0; cur < last_pos; ) */ ++ ++ /* Save reps for next block */ ++ { int i; for (i=0; irepToConfirm[i] = rep[i]; } ++ ++ /* Last Literals */ ++ { size_t const lastLLSize = iend - anchor; ++ memcpy(seqStorePtr->lit, anchor, lastLLSize); ++ seqStorePtr->lit += lastLLSize; ++ } ++} ++ ++ ++FORCE_INLINE ++void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx, ++ const void* src, size_t srcSize, const int ultra) ++{ ++ seqStore_t* seqStorePtr = &(ctx->seqStore); ++ const BYTE* const istart = (const BYTE*)src; ++ const BYTE* ip = istart; ++ const BYTE* anchor = istart; ++ const BYTE* const iend = istart + srcSize; ++ const BYTE* const ilimit = iend - 8; ++ const BYTE* const base = ctx->base; ++ const U32 lowestIndex = ctx->lowLimit; ++ const U32 dictLimit = ctx->dictLimit; ++ const BYTE* const prefixStart = base + dictLimit; ++ const BYTE* const dictBase = ctx->dictBase; ++ const BYTE* const dictEnd = dictBase + dictLimit; ++ ++ const U32 maxSearches = 1U << ctx->params.cParams.searchLog; ++ const U32 sufficient_len = ctx->params.cParams.targetLength; ++ const U32 mls = ctx->params.cParams.searchLength; ++ const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4; ++ ++ ZSTD_optimal_t* opt = seqStorePtr->priceTable; ++ ZSTD_match_t* matches = seqStorePtr->matchTable; ++ const BYTE* inr; ++ ++ /* init */ ++ U32 offset, rep[ZSTD_REP_NUM]; ++ { U32 i; for (i=0; irep[i]; } ++ ++ ctx->nextToUpdate3 = ctx->nextToUpdate; ++ ZSTD_rescaleFreqs(seqStorePtr, (const BYTE*)src, srcSize); ++ ip += (ip==prefixStart); ++ ++ /* Match Loop */ ++ while (ip < ilimit) { ++ U32 cur, match_num, last_pos, litlen, price; ++ U32 u, mlen, best_mlen, best_off, litLength; ++ U32 current = (U32)(ip-base); ++ memset(opt, 0, sizeof(ZSTD_optimal_t)); ++ last_pos = 0; ++ opt[0].litlen = (U32)(ip - anchor); ++ ++ /* check repCode */ ++ { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor); ++ for (i = (ip==anchor); i 0 && repCur <= (S32)current) ++ && (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */ ++ && (MEM_readMINMATCH(ip, minMatch) == MEM_readMINMATCH(repMatch, minMatch)) ) { ++ /* repcode detected we should take it */ ++ const BYTE* const repEnd = repIndex < dictLimit ? 
dictEnd : iend; ++ mlen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iend, repEnd, prefixStart) + minMatch; ++ ++ if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) { ++ best_mlen = mlen; best_off = i; cur = 0; last_pos = 1; ++ goto _storeSequence; ++ } ++ ++ best_off = i - (ip==anchor); ++ litlen = opt[0].litlen; ++ do { ++ price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); ++ if (mlen > last_pos || price < opt[mlen].price) ++ SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */ ++ mlen--; ++ } while (mlen >= minMatch); ++ } } } ++ ++ match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, ip, iend, maxSearches, mls, matches, minMatch); /* first search (depth 0) */ ++ ++ if (!last_pos && !match_num) { ip++; continue; } ++ ++ { U32 i; for (i=0; i sufficient_len || matches[match_num-1].len >= ZSTD_OPT_NUM)) { ++ best_mlen = matches[match_num-1].len; ++ best_off = matches[match_num-1].off; ++ cur = 0; ++ last_pos = 1; ++ goto _storeSequence; ++ } ++ ++ best_mlen = (last_pos) ? last_pos : minMatch; ++ ++ /* set prices using matches at position = 0 */ ++ for (u = 0; u < match_num; u++) { ++ mlen = (u>0) ? matches[u-1].len+1 : best_mlen; ++ best_mlen = matches[u].len; ++ litlen = opt[0].litlen; ++ while (mlen <= best_mlen) { ++ price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra); ++ if (mlen > last_pos || price < opt[mlen].price) ++ SET_PRICE(mlen, mlen, matches[u].off, litlen, price); ++ mlen++; ++ } } ++ ++ if (last_pos < minMatch) { ++ ip++; continue; ++ } ++ ++ /* check further positions */ ++ for (cur = 1; cur <= last_pos; cur++) { ++ inr = ip + cur; ++ ++ if (opt[cur-1].mlen == 1) { ++ litlen = opt[cur-1].litlen + 1; ++ if (cur > litlen) { ++ price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-litlen); ++ } else ++ price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor); ++ } else { ++ litlen = 1; ++ price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-1); ++ } ++ ++ if (cur > last_pos || price <= opt[cur].price) ++ SET_PRICE(cur, 1, 0, litlen, price); ++ ++ if (cur == last_pos) break; ++ ++ if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */ ++ continue; ++ ++ mlen = opt[cur].mlen; ++ if (opt[cur].off > ZSTD_REP_MOVE_OPT) { ++ opt[cur].rep[2] = opt[cur-mlen].rep[1]; ++ opt[cur].rep[1] = opt[cur-mlen].rep[0]; ++ opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT; ++ } else { ++ opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur-mlen].rep[1] : opt[cur-mlen].rep[2]; ++ opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur-mlen].rep[0] : opt[cur-mlen].rep[1]; ++ opt[cur].rep[0] = ((opt[cur].off==ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur-mlen].rep[0] - 1) : (opt[cur-mlen].rep[opt[cur].off]); ++ } ++ ++ best_mlen = minMatch; ++ { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1); ++ for (i = (mlen != 1); i 0 && repCur <= (S32)(current+cur)) ++ && (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */ ++ && (MEM_readMINMATCH(inr, minMatch) == MEM_readMINMATCH(repMatch, minMatch)) ) { ++ /* repcode detected */ ++ const BYTE* const repEnd = repIndex < dictLimit ? 
dictEnd : iend; ++ mlen = (U32)ZSTD_count_2segments(inr+minMatch, repMatch+minMatch, iend, repEnd, prefixStart) + minMatch; ++ ++ if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) { ++ best_mlen = mlen; best_off = i; last_pos = cur + 1; ++ goto _storeSequence; ++ } ++ ++ best_off = i - (opt[cur].mlen != 1); ++ if (mlen > best_mlen) best_mlen = mlen; ++ ++ do { ++ if (opt[cur].mlen == 1) { ++ litlen = opt[cur].litlen; ++ if (cur > litlen) { ++ price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra); ++ } else ++ price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); ++ } else { ++ litlen = 0; ++ price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra); ++ } ++ ++ if (cur + mlen > last_pos || price <= opt[cur + mlen].price) ++ SET_PRICE(cur + mlen, mlen, i, litlen, price); ++ mlen--; ++ } while (mlen >= minMatch); ++ } } } ++ ++ match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch); ++ ++ if (match_num > 0 && (matches[match_num-1].len > sufficient_len || cur + matches[match_num-1].len >= ZSTD_OPT_NUM)) { ++ best_mlen = matches[match_num-1].len; ++ best_off = matches[match_num-1].off; ++ last_pos = cur + 1; ++ goto _storeSequence; ++ } ++ ++ /* set prices using matches at position = cur */ ++ for (u = 0; u < match_num; u++) { ++ mlen = (u>0) ? matches[u-1].len+1 : best_mlen; ++ best_mlen = matches[u].len; ++ ++ while (mlen <= best_mlen) { ++ if (opt[cur].mlen == 1) { ++ litlen = opt[cur].litlen; ++ if (cur > litlen) ++ price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra); ++ else ++ price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra); ++ } else { ++ litlen = 0; ++ price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra); ++ } ++ ++ if (cur + mlen > last_pos || (price < opt[cur + mlen].price)) ++ SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price); ++ ++ mlen++; ++ } } } /* for (cur = 1; cur <= last_pos; cur++) */ ++ ++ best_mlen = opt[last_pos].mlen; ++ best_off = opt[last_pos].off; ++ cur = last_pos - best_mlen; ++ ++ /* store sequence */ ++_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */ ++ opt[0].mlen = 1; ++ ++ while (1) { ++ mlen = opt[cur].mlen; ++ offset = opt[cur].off; ++ opt[cur].mlen = best_mlen; ++ opt[cur].off = best_off; ++ best_mlen = mlen; ++ best_off = offset; ++ if (mlen > cur) break; ++ cur -= mlen; ++ } ++ ++ for (u = 0; u <= last_pos; ) { ++ u += opt[u].mlen; ++ } ++ ++ for (cur=0; cur < last_pos; ) { ++ mlen = opt[cur].mlen; ++ if (mlen == 1) { ip++; cur++; continue; } ++ offset = opt[cur].off; ++ cur += mlen; ++ litLength = (U32)(ip - anchor); ++ ++ if (offset > ZSTD_REP_MOVE_OPT) { ++ rep[2] = rep[1]; ++ rep[1] = rep[0]; ++ rep[0] = offset - ZSTD_REP_MOVE_OPT; ++ offset--; ++ } else { ++ if (offset != 0) { ++ best_off = (offset==ZSTD_REP_MOVE_OPT) ? 
(rep[0] - 1) : (rep[offset]);
++                    if (offset != 1) rep[2] = rep[1];
++                    rep[1] = rep[0];
++                    rep[0] = best_off;
++                }
++
++                if (litLength==0) offset--;
++            }
++
++            ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
++            ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
++            anchor = ip = ip + mlen;
++    }   }   /* for (cur=0; cur < last_pos; ) */
++
++    /* Save reps for next block */
++    { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; }
++
++    /* Last Literals */
++    {   size_t lastLLSize = iend - anchor;
++        memcpy(seqStorePtr->lit, anchor, lastLLSize);
++        seqStorePtr->lit += lastLLSize;
++    }
++}
++
++#endif  /* ZSTD_OPT_H_91842398743 */
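
A note on the custom allocator used throughout this patch: since the kernel modules cannot rely on a general-purpose `malloc`, `zstd_common.c` (above) layers a bump allocator over a caller-provided workspace. `ZSTD_initStack()` reserves a `ZSTD_stack` header at the front of the workspace, `ZSTD_stackAlloc()` carves aligned chunks off the front via the `stack_push` macro, and `ZSTD_stackFree()` is deliberately a no-op, so the entire workspace is reclaimed at once when the caller discards it. Below is a minimal userland sketch of the same mechanism; the `Stack` struct, the pointer-size `PTR_ALIGN` macro, and the demo sizes are illustrative stand-ins for the patch's `ZSTD_stack` and `ZSTD_PTR_ALIGN`, not the kernel build itself:

```c
#include <stdio.h>
#include <stddef.h>

/* Round a pointer up to the next sizeof(size_t) boundary
 * (assumed alignment, standing in for ZSTD_PTR_ALIGN). */
#define ALIGN_MASK (sizeof(size_t) - 1)
#define PTR_ALIGN(p) ((void*)(((size_t)(p) + ALIGN_MASK) & ~ALIGN_MASK))

typedef struct {
    char* ptr;  /* next free byte in the workspace */
    char* end;  /* one past the last usable byte   */
} Stack;

/* Carve `size` aligned bytes off the front of the workspace, or return
 * NULL once it is exhausted. As in the patch's stack_push macro, ptr is
 * advanced before the bounds check, so a failed push leaves the stack
 * full; there is no way to free individual allocations. */
static void* stack_push(Stack* stack, size_t size)
{
    void* const ptr = PTR_ALIGN(stack->ptr);
    stack->ptr = (char*)ptr + size;
    return stack->ptr <= stack->end ? ptr : NULL;
}

int main(void)
{
    char workspace[64];
    Stack stack = { workspace, workspace + sizeof(workspace) };
    void* a = stack_push(&stack, 16);   /* fits: returns an aligned pointer */
    void* b = stack_push(&stack, 100);  /* exceeds the workspace: NULL      */
    printf("a=%p b=%p\n", a, b);
    return 0;
}
```

Because frees are no-ops, a caller must size the workspace for the peak memory use of a whole compression or decompression context up front; that is the contract `ZSTD_initStack()` expects, and it is why the stack variant rejects workspaces smaller than `sizeof(ZSTD_stack)` or misaligned ones before handing out any memory.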