/* ******************************************************************
Huffman encoder, part of New Generation Entropy library
Copyright (C) 2013-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
/* **************************************************************
* Compiler specifics
****************************************************************/
#ifdef _MSC_VER /* Visual Studio */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#endif
/* **************************************************************
* Includes
****************************************************************/
#include <string.h> /* memcpy, memset */
#include <stdio.h> /* printf (debug) */
#include "bitstream.h"
#define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */
#include "fse.h" /* header compression */
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#include "error_private.h"
/* **************************************************************
* Error Management
****************************************************************/
#define HUF_isError ERR_isError
#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
#define CHECK_F(f) { CHECK_V_F(_var_err__, f); }
/* **************************************************************
* Utils
****************************************************************/
/*! HUF_optimalTableLog() :
 *  Pick a recommended table log for the given input size / alphabet,
 *  delegating to FSE's heuristic with huffman mode enabled (last arg = 1). */
unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
{
    unsigned const chosenLog = FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
    return chosenLog;
}
/* *******************************************************
* HUF : Huffman block compression
*********************************************************/
/* HUF_compressWeights() :
* Same as FSE_compress(), but dedicated to huff0's weights compression.
* The use case needs much less stack memory.
* Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
*/
#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize)
{
    BYTE* const ostart = (BYTE*) dst;
    BYTE* op = ostart;
    BYTE* const oend = ostart + dstSize;

    U32 maxSymbolValue = HUF_TABLELOG_MAX;   /* weights are <= HUF_TABLELOG_MAX by contract (see note above) */
    U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;

    FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
    BYTE scratchBuffer[1<<MAX_FSE_TABLELOG_FOR_HUFF_HEADER];

    U32 count[HUF_TABLELOG_MAX+1];
    S16 norm[HUF_TABLELOG_MAX+1];

    /* init conditions */
    if (wtSize <= 1) return 0;   /* Not compressible */

    /* Scan input and build symbol stats */
    { CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize) );
      if (maxCount == wtSize) return 1;   /* only a single symbol in src : rle */
      if (maxCount == 1) return 0;        /* each symbol present maximum once => not compressible */
    }

    tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
    CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) );

    /* Write table description header */
    { CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
      op += hSize;
    }

    /* Compress */
    CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) );
    { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) );
      if (cSize == 0) return 0;   /* not enough space for compressed data */
      op += cSize;
    }

    /* returns total size written : header + compressed weights */
    return op-ostart;
}
/* One Huffman code table entry per symbol.
 * nbBits==0 is used to mark a symbol with no assigned code (see HUF_validateCTable below). */
struct HUF_CElt_s {
    U16  val;     /* code value (bit pattern), right-aligned */
    BYTE nbBits;  /* code length in bits */
};   /* typedef'd to HUF_CElt within "huf.h" */
/*! HUF_writeCTable() :
`CTable` : Huffman tree to save, using huf representation.
@return : size of saved CTable */
size_t HUF_writeCTable (void* dst, size_t maxDstSize,
                        const HUF_CElt* CTable, U32 maxSymbolValue, U32 huffLog)
{
    BYTE bitsToWeight[HUF_TABLELOG_MAX + 1];   /* precomputed conversion table */
    BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
    BYTE* op = (BYTE*)dst;
    U32 n;

    /* check conditions */
    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
    if (maxDstSize < 1) return ERROR(dstSize_tooSmall);   /* fix : `maxDstSize-1` below would otherwise underflow to SIZE_MAX */

    /* convert to weight : weight = huffLog + 1 - nbBits (0 stays 0 for absent symbols) */
    bitsToWeight[0] = 0;
    for (n=1; n<huffLog+1; n++)
        bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
    for (n=0; n<maxSymbolValue; n++)
        huffWeight[n] = bitsToWeight[CTable[n].nbBits];

    /* attempt weights compression by FSE */
    { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) );
      if ((hSize>1) & (hSize < maxSymbolValue/2)) {   /* FSE compressed */
          op[0] = (BYTE)hSize;   /* header byte : size of FSE-compressed weights */
          return hSize+1;
    } }

    /* fallback : write raw values as 4-bits (max : 15) */
    if (maxSymbolValue > (256-128)) return ERROR(GENERIC);   /* should not happen : likely means source cannot be compressed */
    if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall);   /* not enough space within dst buffer */
    op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
    huffWeight[maxSymbolValue] = 0;   /* to be sure it doesn't cause msan issue in final combination */
    for (n=0; n<maxSymbolValue; n+=2)
        op[(n/2)+1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n+1]);
    return ((maxSymbolValue+1)/2) + 1;
}
/*! HUF_readCTable() :
 *  Rebuild a compression CTable from serialized huffman weights.
 *  On success, updates *maxSymbolValuePtr to the actual max symbol read.
 *  @return : number of bytes read from `src`, or an error code. */
size_t HUF_readCTable (HUF_CElt* CTable, U32* maxSymbolValuePtr, const void* src, size_t srcSize)
{
    BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];   /* init not required, even though some static analyzer may complain */
    U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];  /* large enough for values from 0 to 16 */
    U32 tableLog = 0;
    U32 nbSymbols = 0;

    /* get symbol weights */
    CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));

    /* check result */
    if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
    if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);

    /* Prepare base value per rank */
    { U32 n, nextRankStart = 0;
      for (n=1; n<=tableLog; n++) {
          U32 current = nextRankStart;
          nextRankStart += (rankVal[n] << (n-1));
          rankVal[n] = current;
    } }

    /* fill nbBits : weight w <-> code length tableLog+1-w */
    { U32 n; for (n=0; n<nbSymbols; n++) {
          const U32 w = huffWeight[n];
          CTable[n].nbBits = (BYTE)(tableLog + 1 - w);
    } }

    /* fill val */
    { U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0};   /* support w=0=>n=tableLog+1 */
      U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
      { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; }
      /* determine starting value per rank */
      valPerRank[tableLog+1] = 0;   /* for w==0 */
      { U16 min = 0;
        U32 n; for (n=tableLog; n>0; n--) {   /* start at n=tablelog <-> w=1 */
            valPerRank[n] = min;     /* get starting value within each rank */
            min += nbPerRank[n];
            min >>= 1;
      } }
      /* assign value within rank, symbol order */
      { U32 n; for (n=0; n<nbSymbols; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; }
    }

    *maxSymbolValuePtr = nbSymbols - 1;
    return readSize;
}
/* Node of the Huffman construction tree : leaves and internal nodes share this layout. */
typedef struct nodeElt_s {
    U32  count;    /* symbol frequency (leaf) or sum of children (internal node) */
    U16  parent;   /* index of parent node in the node table */
    BYTE byte;     /* symbol value — meaningful for leaves */
    BYTE nbBits;   /* assigned code length, set once tree depth is resolved */
} nodeElt;
/*! HUF_setMaxHeight() :
 *  Enforce `maxNbBits` as the maximum code length, redistributing the
 *  "cost" of the shortened codes onto other symbols so that the code
 *  remains complete (Kraft sum preserved).
 *  `huffNode` : nodes sorted by decreasing count; `lastNonNull` : last used index.
 *  @return : effective max number of bits (<= maxNbBits). */
static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
{
    const U32 largestBits = huffNode[lastNonNull].nbBits;
    if (largestBits <= maxNbBits) return largestBits;   /* early exit : no elt > maxNbBits */

    /* there are several too large elements (at least >= 2) */
    { int totalCost = 0;
      const U32 baseCost = 1 << (largestBits - maxNbBits);
      U32 n = lastNonNull;

      /* truncate every over-long code to maxNbBits, accumulating the debt */
      while (huffNode[n].nbBits > maxNbBits) {
          totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
          huffNode[n].nbBits = (BYTE)maxNbBits;
          n --;
      }  /* n stops at huffNode[n].nbBits <= maxNbBits */
      while (huffNode[n].nbBits == maxNbBits) n--;   /* n end at index of smallest symbol using < maxNbBits */

      /* renorm totalCost */
      totalCost >>= (largestBits - maxNbBits);  /* note : totalCost is necessarily a multiple of baseCost */

      /* repay normalized cost */
      { U32 const noSymbol = 0xF0F0F0F0;   /* sentinel : rank has no symbol (matches memset 0xF0 below) */
        U32 rankLast[HUF_TABLELOG_MAX+2];
        int pos;

        /* Get pos of last (smallest) symbol per rank */
        memset(rankLast, 0xF0, sizeof(rankLast));
        { U32 currentNbBits = maxNbBits;
          for (pos=n ; pos >= 0; pos--) {
              if (huffNode[pos].nbBits >= currentNbBits) continue;
              currentNbBits = huffNode[pos].nbBits;   /* < maxNbBits */
              rankLast[maxNbBits-currentNbBits] = pos;
        } }

        while (totalCost > 0) {
            U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1;
            /* prefer lengthening a symbol whose frequency makes it the cheapest choice */
            for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
                U32 highPos = rankLast[nBitsToDecrease];
                U32 lowPos = rankLast[nBitsToDecrease-1];
                if (highPos == noSymbol) continue;
                if (lowPos == noSymbol) break;
                { U32 const highTotal = huffNode[highPos].count;
                  U32 const lowTotal = 2 * huffNode[lowPos].count;
                  if (highTotal <= lowTotal) break;
            }   }
            /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
            /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
            while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
                nBitsToDecrease ++;
            totalCost -= 1 << (nBitsToDecrease-1);
            if (rankLast[nBitsToDecrease-1] == noSymbol)
                rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease];   /* this rank is no longer empty */
            huffNode[rankLast[nBitsToDecrease]].nbBits ++;
            if (rankLast[nBitsToDecrease] == 0)    /* special case, reached largest symbol */
                rankLast[nBitsToDecrease] = noSymbol;
            else {
                rankLast[nBitsToDecrease]--;
                if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
                    rankLast[nBitsToDecrease] = noSymbol;   /* this rank is now empty */
        }   }   /* while (totalCost > 0) */

        while (totalCost < 0) {  /* Sometimes, cost correction overshoot */
            if (rankLast[1] == noSymbol) {  /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
                while (huffNode[n].nbBits == maxNbBits) n--;
                huffNode[n+1].nbBits--;
                rankLast[1] = n+1;
                totalCost++;
                continue;
            }
            huffNode[ rankLast[1] + 1 ].nbBits--;
            rankLast[1]++;
            totalCost ++;
    }   }   }   /* there are several too large elements (at least >= 2) */

    return maxNbBits;
}
/* Bucket descriptor used by HUF_sort() : symbols are distributed into
 * log2(count)-based buckets before a local insertion sort. */
typedef struct {
    U32 base;     /* first position of this rank within huffNode */
    U32 current;  /* next free position while filling the bucket */
} rankPos;
/* HUF_sort() :
 * Fill huffNode[0..maxSymbolValue] with (count, symbol) pairs sorted by
 * decreasing count. Symbols are first bucketed by log2(count+1), then each
 * bucket is ordered with a bounded insertion sort. */
static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue)
{
    rankPos rankTable[32];
    U32 sym;

    memset(rankTable, 0, sizeof(rankTable));

    /* histogram : count symbols per log2 bucket */
    for (sym = 0; sym <= maxSymbolValue; sym++)
        rankTable[BIT_highbit32(count[sym] + 1)].base++;

    /* suffix sums turn bucket sizes into start offsets (largest counts first) */
    for (sym = 30; sym > 0; sym--)
        rankTable[sym - 1].base += rankTable[sym].base;
    for (sym = 0; sym < 32; sym++)
        rankTable[sym].current = rankTable[sym].base;

    /* place each symbol into its bucket, keeping the bucket sorted by count */
    for (sym = 0; sym <= maxSymbolValue; sym++) {
        U32 const freq = count[sym];
        U32 const bucket = BIT_highbit32(freq + 1) + 1;
        U32 insertAt = rankTable[bucket].current++;
        while ((insertAt > rankTable[bucket].base) && (freq > huffNode[insertAt - 1].count)) {
            huffNode[insertAt] = huffNode[insertAt - 1];
            insertAt--;
        }
        huffNode[insertAt].count = freq;
        huffNode[insertAt].byte = (BYTE)sym;
    }
}
/** HUF_buildCTable_wksp() :
* Same as HUF_buildCTable(), but using externally allocated scratch buffer.
* `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned.
*/
#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
typedef nodeElt huffNodeTable[2*HUF_SYMBOLVALUE_MAX+1 +1];
size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
{
    nodeElt* const huffNode0 = (nodeElt*)workSpace;
    nodeElt* const huffNode = huffNode0+1;   /* huffNode0[0] reserved as sentinel (see below) */
    U32 n, nonNullRank;
    int lowS, lowN;
    U16 nodeNb = STARTNODE;
    U32 nodeRoot;

    /* safety checks */
    if (wkspSize < sizeof(huffNodeTable)) return ERROR(GENERIC);   /* workSpace is not large enough */
    if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(GENERIC);
    memset(huffNode0, 0, sizeof(huffNodeTable));

    /* sort, decreasing order */
    HUF_sort(huffNode, count, maxSymbolValue);

    /* init for parents : merge the two rarest leaves into the first internal node */
    nonNullRank = maxSymbolValue;
    while(huffNode[nonNullRank].count == 0) nonNullRank--;
    lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
    huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
    huffNode[lowS].parent = huffNode[lowS-1].parent = nodeNb;
    nodeNb++; lowS-=2;
    for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
    huffNode0[0].count = (U32)(1U<<31);  /* fake entry, strong barrier */

    /* create parents */
    while (nodeNb <= nodeRoot) {
        U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
        U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
        huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
        huffNode[n1].parent = huffNode[n2].parent = nodeNb;
        nodeNb++;
    }

    /* distribute weights (unlimited tree height) */
    huffNode[nodeRoot].nbBits = 0;
    for (n=nodeRoot-1; n>=STARTNODE; n--)
        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
    for (n=0; n<=nonNullRank; n++)
        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;

    /* enforce maxTableLog */
    maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits);

    /* fill result into tree (val, nbBits) */
    { U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
      U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
      if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC);   /* check fit into table */
      for (n=0; n<=nonNullRank; n++)
          nbPerRank[huffNode[n].nbBits]++;
      /* determine starting value per rank */
      { U16 min = 0;
        for (n=maxNbBits; n>0; n--) {
            valPerRank[n] = min;      /* get starting value within each rank */
            min += nbPerRank[n];
            min >>= 1;
      } }
      for (n=0; n<=maxSymbolValue; n++)
          tree[huffNode[n].byte].nbBits = huffNode[n].nbBits;   /* push nbBits per symbol, symbol order */
      for (n=0; n<=maxSymbolValue; n++)
          tree[n].val = valPerRank[tree[n].nbBits]++;   /* assign value within rank, symbol order */
    }

    return maxNbBits;
}
/** HUF_buildCTable() :
 * @return : maxNbBits
 * Note : count is used before tree is written, so they can safely overlap
 */
size_t HUF_buildCTable (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits)
{
    /* convenience wrapper : allocates the node-table workspace on the stack */
    huffNodeTable nodeTable;
    return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, nodeTable, sizeof(nodeTable));
}
static size_t HUF_estimateCompressedSize(HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
{
size_t nbBits = 0;
int s;
for (s = 0; s <= (int)maxSymbolValue; ++s) {
nbBits += CTable[s].nbBits * count[s];
}
return nbBits >> 3;
}
/* HUF_validateCTable() :
 * @return : 1 if `CTable` can encode every symbol present in `count`
 * (i.e. no symbol with a non-zero count lacks a code), 0 otherwise.
 * Kept branch-free on purpose : this runs on the hot path. */
static int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
    int ok = 1;
    int sym;
    for (sym = 0; sym <= (int)maxSymbolValue; ++sym) {
        ok &= !((count[sym] != 0) & (CTable[sym].nbBits == 0));
    }
    return ok;
}
/* Append `symbol`'s Huffman code (val/nbBits from CTable) to the bit stream.
 * Uses the Fast variant — presumably unchecked; callers interleave HUF_FLUSHBITS* to stay safe. */
static void HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable)
{
    BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
}
/* Worst-case compressed size for a `size`-byte input (macro defined in huf.h). */
size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
/* Flush helpers : the conditional variants only flush when the bit container
 * is too small to safely accumulate 2 (resp. 4) max-length codes between flushes.
 * The `sizeof` condition is compile-time constant, so the branch folds away. */
#define HUF_FLUSHBITS(s)  BIT_flushBits(s)

#define HUF_FLUSHBITS_1(stream) \
    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream)

#define HUF_FLUSHBITS_2(stream) \
    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream)
/*! HUF_compress1X_usingCTable() :
 *  Encode `src` as a single Huffman bit stream using a prebuilt `CTable`.
 *  Symbols are emitted back-to-front in groups of 4 (bitstream convention).
 *  @return : compressed size, or 0 if it failed (not enough room, or not compressible). */
size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
{
    const BYTE* ip = (const BYTE*) src;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstSize;
    BYTE* op = ostart;
    size_t n;
    BIT_CStream_t bitC;

    /* init */
    if (dstSize < 8) return 0;   /* not enough space to compress */
    { size_t const initErr = BIT_initCStream(&bitC, op, oend-op);
      if (HUF_isError(initErr)) return 0; }

    n = srcSize & ~3;  /* join to mod 4 */
    /* handle the 0-3 trailing symbols first (encoded first = decoded last) */
    switch (srcSize & 3)
    {
        case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable);
                 HUF_FLUSHBITS_2(&bitC);
                 /* fall-through */
        case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable);
                 HUF_FLUSHBITS_1(&bitC);
                 /* fall-through */
        case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable);
                 HUF_FLUSHBITS(&bitC);
                 /* fall-through */
        case 0 : /* fall-through */
        default: break;
    }

    /* main loop : 4 symbols per iteration, walking backwards through src */
    for (; n>0; n-=4) {  /* note : n&3==0 at this stage */
        HUF_encodeSymbol(&bitC, ip[n- 1], CTable);
        HUF_FLUSHBITS_1(&bitC);
        HUF_encodeSymbol(&bitC, ip[n- 2], CTable);
        HUF_FLUSHBITS_2(&bitC);
        HUF_encodeSymbol(&bitC, ip[n- 3], CTable);
        HUF_FLUSHBITS_1(&bitC);
        HUF_encodeSymbol(&bitC, ip[n- 4], CTable);
        HUF_FLUSHBITS(&bitC);
    }

    return BIT_closeCStream(&bitC);
}
/*! HUF_compress4X_usingCTable() :
 *  4-streams variant : src is split into 4 segments, each compressed
 *  independently with HUF_compress1X_usingCTable(). A 6-byte jump table
 *  (3 little-endian U16 sizes) precedes the streams; the 4th size is implied.
 *  @return : compressed size, or 0 if it failed (not compressible / too small). */
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
{
    size_t const segmentSize = (srcSize+3)/4;   /* first 3 segments */
    const BYTE* ip = (const BYTE*) src;
    const BYTE* const iend = ip + srcSize;
    BYTE* const ostart = (BYTE*) dst;
    BYTE* const oend = ostart + dstSize;
    BYTE* op = ostart;

    if (dstSize < 6 + 1 + 1 + 1 + 8) return 0;   /* minimum space to compress successfully */
    if (srcSize < 12) return 0;   /* no saving possible : too small input */
    op += 6;   /* jumpTable */

    /* stream 1 */
    { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
      if (cSize==0) return 0;
      MEM_writeLE16(ostart, (U16)cSize);
      op += cSize;
    }

    /* stream 2 */
    ip += segmentSize;
    { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
      if (cSize==0) return 0;
      MEM_writeLE16(ostart+2, (U16)cSize);
      op += cSize;
    }

    /* stream 3 */
    ip += segmentSize;
    { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
      if (cSize==0) return 0;
      MEM_writeLE16(ostart+4, (U16)cSize);
      op += cSize;
    }

    /* stream 4 : takes whatever remains; its size is deduced by the decoder */
    ip += segmentSize;
    { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, iend-ip, CTable) );
      if (cSize==0) return 0;
      op += cSize;
    }

    return op-ostart;
}
/* HUF_compressCTable_internal() :
 * Run 1-stream or 4-streams compression with the given CTable, then verify
 * the result actually saves space (must be <= srcSize-2 bytes overall).
 * @return : total output size from ostart, 0 if not compressible, or an error code. */
static size_t HUF_compressCTable_internal(
                BYTE* const ostart, BYTE* op, BYTE* const oend,
                const void* src, size_t srcSize,
                unsigned singleStream, const HUF_CElt* CTable)
{
    size_t cSize;

    if (singleStream) {
        cSize = HUF_compress1X_usingCTable(op, oend - op, src, srcSize, CTable);
    } else {
        cSize = HUF_compress4X_usingCTable(op, oend - op, src, srcSize, CTable);
    }
    if (HUF_isError(cSize)) return cSize;
    if (cSize == 0) return 0;   /* uncompressible */

    op += cSize;

    /* check compressibility */
    if ((size_t)(op - ostart) >= srcSize - 1) return 0;
    return op - ostart;
}
/* `workSpace` must be a table of at least 1024 unsigned */
static size_t HUF_compress_internal (
void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
unsigned singleStream,
void* workSpace, size_t wkspSize,
HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat)
{
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart;
2017-03-02 16:38:07 -08:00
U32* count;
size_t const countSize = sizeof(U32) * (HUF_SYMBOLVALUE_MAX + 1);
HUF_CElt* CTable;
size_t const CTableSize = sizeof(HUF_CElt) * (HUF_SYMBOLVALUE_MAX + 1);
/* checks & inits */
2017-03-02 16:38:07 -08:00
if (wkspSize < sizeof(huffNodeTable) + countSize + CTableSize) return ERROR(GENERIC);
2016-05-20 05:36:36 -07:00
if (!srcSize) return 0; /* Uncompressed (note : 1 means rle, so first byte must be correct) */
if (!dstSize) return 0; /* cannot fit within dst budget */
if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
2017-03-02 16:38:07 -08:00
count = (U32*)workSpace;
workSpace = (BYTE*)workSpace + countSize;
wkspSize -= countSize;
CTable = (HUF_CElt*)workSpace;
workSpace = (BYTE*)workSpace + CTableSize;
wkspSize -= CTableSize;
Allow compressor to repeat Huffman tables * Compressor saves most recently used Huffman table and reuses it if it produces better results. * I attempted to preserve CPU usage profile. I intentionally left all of the existing heuristics in place. There is only a speed difference on the second block and later. When compressing large enough blocks (say >= 4 KiB) there is no significant difference in compression speed. Dictionary compression of one block is the same speed for blocks with literals <= 1 KiB, and after that the difference is not very significant. * In the synthetic data, with blocks 10 KB or smaller, most blocks can't use repeated tables because the previous block did not contain a symbol that the current block contains. Once blocks are about 12 KB or more, most previous blocks have valid Huffman tables for the current block, and the compression ratio and decompression speed jumped. * In silesia blocks as small as 4KB can frequently reuse the previous Huffman table (85%), but it isn't as profitable, and the previous Huffman table only gets used about 3% of the time. * Microbenchmarks show that `HUF_validateCTable()` takes ~55 ns and `HUF_estimateCompressedSize()` takes ~35 ns. They are decently well optimized, the first versions took 90 ns and 120 ns respectively. `HUF_validateCTable()` could be twice as fast, if we cast the `HUF_CElt*` to a `U32*` and compare to 0. However, `U32` has an alignment of 4 instead of 2, so I think that might be undefined behavior. * I've ran `zstreamtest` compiled normally, with UASAN and with MSAN for 4 hours each. The worst case for the speed difference is a bunch of small blocks in the same frame. I modified `bench.c` to compress the input in a single frame but with blocks of the given block size, set by `-B`. 
Benchmarks on level 1: | Program | Block size | Corpus | Ratio | Compression MB/s | Decompression MB/s | |-----------|------------|-----------|-------|------------------|--------------------| | zstd.base | 256 | synthetic | 2.364 | 110.0 | 297.0 | | zstd | 256 | synthetic | 2.367 | 108.9 | 297.0 | | zstd.base | 256 | silesia | 2.204 | 93.8 | 415.7 | | zstd | 256 | silesia | 2.204 | 93.4 | 415.7 | | zstd.base | 512 | synthetic | 2.594 | 144.2 | 420.0 | | zstd | 512 | synthetic | 2.599 | 141.5 | 425.7 | | zstd.base | 512 | silesia | 2.358 | 118.4 | 432.6 | | zstd | 512 | silesia | 2.358 | 119.8 | 432.6 | | zstd.base | 1024 | synthetic | 2.790 | 192.3 | 594.1 | | zstd | 1024 | synthetic | 2.794 | 192.3 | 600.0 | | zstd.base | 1024 | silesia | 2.524 | 148.2 | 464.2 | | zstd | 1024 | silesia | 2.525 | 148.2 | 467.6 | | zstd.base | 4096 | synthetic | 3.023 | 300.0 | 1000.0 | | zstd | 4096 | synthetic | 3.024 | 300.0 | 1010.1 | | zstd.base | 4096 | silesia | 2.779 | 223.1 | 623.5 | | zstd | 4096 | silesia | 2.779 | 223.1 | 636.0 | | zstd.base | 16384 | synthetic | 3.131 | 350.0 | 1150.1 | | zstd | 16384 | synthetic | 3.152 | 350.0 | 1630.3 | | zstd.base | 16384 | silesia | 2.871 | 296.5 | 883.3 | | zstd | 16384 | silesia | 2.872 | 294.4 | 898.3 |
2017-03-01 17:51:56 -08:00
/* Heuristic : If we don't need to check the validity of the old table use the old table for small inputs */
if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
Allow compressor to repeat Huffman tables * Compressor saves most recently used Huffman table and reuses it if it produces better results. * I attempted to preserve CPU usage profile. I intentionally left all of the existing heuristics in place. There is only a speed difference on the second block and later. When compressing large enough blocks (say >= 4 KiB) there is no significant difference in compression speed. Dictionary compression of one block is the same speed for blocks with literals <= 1 KiB, and after that the difference is not very significant. * In the synthetic data, with blocks 10 KB or smaller, most blocks can't use repeated tables because the previous block did not contain a symbol that the current block contains. Once blocks are about 12 KB or more, most previous blocks have valid Huffman tables for the current block, and the compression ratio and decompression speed jumped. * In silesia blocks as small as 4KB can frequently reuse the previous Huffman table (85%), but it isn't as profitable, and the previous Huffman table only gets used about 3% of the time. * Microbenchmarks show that `HUF_validateCTable()` takes ~55 ns and `HUF_estimateCompressedSize()` takes ~35 ns. They are decently well optimized, the first versions took 90 ns and 120 ns respectively. `HUF_validateCTable()` could be twice as fast, if we cast the `HUF_CElt*` to a `U32*` and compare to 0. However, `U32` has an alignment of 4 instead of 2, so I think that might be undefined behavior. * I've ran `zstreamtest` compiled normally, with UASAN and with MSAN for 4 hours each. The worst case for the speed difference is a bunch of small blocks in the same frame. I modified `bench.c` to compress the input in a single frame but with blocks of the given block size, set by `-B`. 
Benchmarks on level 1: | Program | Block size | Corpus | Ratio | Compression MB/s | Decompression MB/s | |-----------|------------|-----------|-------|------------------|--------------------| | zstd.base | 256 | synthetic | 2.364 | 110.0 | 297.0 | | zstd | 256 | synthetic | 2.367 | 108.9 | 297.0 | | zstd.base | 256 | silesia | 2.204 | 93.8 | 415.7 | | zstd | 256 | silesia | 2.204 | 93.4 | 415.7 | | zstd.base | 512 | synthetic | 2.594 | 144.2 | 420.0 | | zstd | 512 | synthetic | 2.599 | 141.5 | 425.7 | | zstd.base | 512 | silesia | 2.358 | 118.4 | 432.6 | | zstd | 512 | silesia | 2.358 | 119.8 | 432.6 | | zstd.base | 1024 | synthetic | 2.790 | 192.3 | 594.1 | | zstd | 1024 | synthetic | 2.794 | 192.3 | 600.0 | | zstd.base | 1024 | silesia | 2.524 | 148.2 | 464.2 | | zstd | 1024 | silesia | 2.525 | 148.2 | 467.6 | | zstd.base | 4096 | synthetic | 3.023 | 300.0 | 1000.0 | | zstd | 4096 | synthetic | 3.024 | 300.0 | 1010.1 | | zstd.base | 4096 | silesia | 2.779 | 223.1 | 623.5 | | zstd | 4096 | silesia | 2.779 | 223.1 | 636.0 | | zstd.base | 16384 | synthetic | 3.131 | 350.0 | 1150.1 | | zstd | 16384 | synthetic | 3.152 | 350.0 | 1630.3 | | zstd.base | 16384 | silesia | 2.871 | 296.5 | 883.3 | | zstd | 16384 | silesia | 2.872 | 294.4 | 898.3 |
2017-03-01 17:51:56 -08:00
return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
}
/* Scan input and build symbol stats */
Allow compressor to repeat Huffman tables * Compressor saves most recently used Huffman table and reuses it if it produces better results. * I attempted to preserve CPU usage profile. I intentionally left all of the existing heuristics in place. There is only a speed difference on the second block and later. When compressing large enough blocks (say >= 4 KiB) there is no significant difference in compression speed. Dictionary compression of one block is the same speed for blocks with literals <= 1 KiB, and after that the difference is not very significant. * In the synthetic data, with blocks 10 KB or smaller, most blocks can't use repeated tables because the previous block did not contain a symbol that the current block contains. Once blocks are about 12 KB or more, most previous blocks have valid Huffman tables for the current block, and the compression ratio and decompression speed jumped. * In silesia blocks as small as 4KB can frequently reuse the previous Huffman table (85%), but it isn't as profitable, and the previous Huffman table only gets used about 3% of the time. * Microbenchmarks show that `HUF_validateCTable()` takes ~55 ns and `HUF_estimateCompressedSize()` takes ~35 ns. They are decently well optimized, the first versions took 90 ns and 120 ns respectively. `HUF_validateCTable()` could be twice as fast, if we cast the `HUF_CElt*` to a `U32*` and compare to 0. However, `U32` has an alignment of 4 instead of 2, so I think that might be undefined behavior. * I've ran `zstreamtest` compiled normally, with UASAN and with MSAN for 4 hours each. The worst case for the speed difference is a bunch of small blocks in the same frame. I modified `bench.c` to compress the input in a single frame but with blocks of the given block size, set by `-B`. 
Benchmarks on level 1: | Program | Block size | Corpus | Ratio | Compression MB/s | Decompression MB/s | |-----------|------------|-----------|-------|------------------|--------------------| | zstd.base | 256 | synthetic | 2.364 | 110.0 | 297.0 | | zstd | 256 | synthetic | 2.367 | 108.9 | 297.0 | | zstd.base | 256 | silesia | 2.204 | 93.8 | 415.7 | | zstd | 256 | silesia | 2.204 | 93.4 | 415.7 | | zstd.base | 512 | synthetic | 2.594 | 144.2 | 420.0 | | zstd | 512 | synthetic | 2.599 | 141.5 | 425.7 | | zstd.base | 512 | silesia | 2.358 | 118.4 | 432.6 | | zstd | 512 | silesia | 2.358 | 119.8 | 432.6 | | zstd.base | 1024 | synthetic | 2.790 | 192.3 | 594.1 | | zstd | 1024 | synthetic | 2.794 | 192.3 | 600.0 | | zstd.base | 1024 | silesia | 2.524 | 148.2 | 464.2 | | zstd | 1024 | silesia | 2.525 | 148.2 | 467.6 | | zstd.base | 4096 | synthetic | 3.023 | 300.0 | 1000.0 | | zstd | 4096 | synthetic | 3.024 | 300.0 | 1010.1 | | zstd.base | 4096 | silesia | 2.779 | 223.1 | 623.5 | | zstd | 4096 | silesia | 2.779 | 223.1 | 636.0 | | zstd.base | 16384 | synthetic | 3.131 | 350.0 | 1150.1 | | zstd | 16384 | synthetic | 3.152 | 350.0 | 1630.3 | | zstd.base | 16384 | silesia | 2.871 | 296.5 | 883.3 | | zstd | 16384 | silesia | 2.872 | 294.4 | 898.3 |
2017-03-01 17:51:56 -08:00
{ CHECK_V_F(largest, FSE_count_wksp (count, &maxSymbolValue, (const BYTE*)src, srcSize, (U32*)workSpace) );
if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
2016-05-20 05:36:36 -07:00
if (largest <= (srcSize >> 7)+1) return 0; /* Fast heuristic : not compressible enough */
}
Allow compressor to repeat Huffman tables * Compressor saves most recently used Huffman table and reuses it if it produces better results. * I attempted to preserve CPU usage profile. I intentionally left all of the existing heuristics in place. There is only a speed difference on the second block and later. When compressing large enough blocks (say >= 4 KiB) there is no significant difference in compression speed. Dictionary compression of one block is the same speed for blocks with literals <= 1 KiB, and after that the difference is not very significant. * In the synthetic data, with blocks 10 KB or smaller, most blocks can't use repeated tables because the previous block did not contain a symbol that the current block contains. Once blocks are about 12 KB or more, most previous blocks have valid Huffman tables for the current block, and the compression ratio and decompression speed jumped. * In silesia blocks as small as 4KB can frequently reuse the previous Huffman table (85%), but it isn't as profitable, and the previous Huffman table only gets used about 3% of the time. * Microbenchmarks show that `HUF_validateCTable()` takes ~55 ns and `HUF_estimateCompressedSize()` takes ~35 ns. They are decently well optimized, the first versions took 90 ns and 120 ns respectively. `HUF_validateCTable()` could be twice as fast, if we cast the `HUF_CElt*` to a `U32*` and compare to 0. However, `U32` has an alignment of 4 instead of 2, so I think that might be undefined behavior. * I've ran `zstreamtest` compiled normally, with UASAN and with MSAN for 4 hours each. The worst case for the speed difference is a bunch of small blocks in the same frame. I modified `bench.c` to compress the input in a single frame but with blocks of the given block size, set by `-B`. 
Benchmarks on level 1: | Program | Block size | Corpus | Ratio | Compression MB/s | Decompression MB/s | |-----------|------------|-----------|-------|------------------|--------------------| | zstd.base | 256 | synthetic | 2.364 | 110.0 | 297.0 | | zstd | 256 | synthetic | 2.367 | 108.9 | 297.0 | | zstd.base | 256 | silesia | 2.204 | 93.8 | 415.7 | | zstd | 256 | silesia | 2.204 | 93.4 | 415.7 | | zstd.base | 512 | synthetic | 2.594 | 144.2 | 420.0 | | zstd | 512 | synthetic | 2.599 | 141.5 | 425.7 | | zstd.base | 512 | silesia | 2.358 | 118.4 | 432.6 | | zstd | 512 | silesia | 2.358 | 119.8 | 432.6 | | zstd.base | 1024 | synthetic | 2.790 | 192.3 | 594.1 | | zstd | 1024 | synthetic | 2.794 | 192.3 | 600.0 | | zstd.base | 1024 | silesia | 2.524 | 148.2 | 464.2 | | zstd | 1024 | silesia | 2.525 | 148.2 | 467.6 | | zstd.base | 4096 | synthetic | 3.023 | 300.0 | 1000.0 | | zstd | 4096 | synthetic | 3.024 | 300.0 | 1010.1 | | zstd.base | 4096 | silesia | 2.779 | 223.1 | 623.5 | | zstd | 4096 | silesia | 2.779 | 223.1 | 636.0 | | zstd.base | 16384 | synthetic | 3.131 | 350.0 | 1150.1 | | zstd | 16384 | synthetic | 3.152 | 350.0 | 1630.3 | | zstd.base | 16384 | silesia | 2.871 | 296.5 | 883.3 | | zstd | 16384 | silesia | 2.872 | 294.4 | 898.3 |
2017-03-01 17:51:56 -08:00
/* Check validity of previous table */
if (repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, count, maxSymbolValue)) {
*repeat = HUF_repeat_none;
}
/* Heuristic : use existing table for small inputs */
if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
Allow compressor to repeat Huffman tables * Compressor saves most recently used Huffman table and reuses it if it produces better results. * I attempted to preserve CPU usage profile. I intentionally left all of the existing heuristics in place. There is only a speed difference on the second block and later. When compressing large enough blocks (say >= 4 KiB) there is no significant difference in compression speed. Dictionary compression of one block is the same speed for blocks with literals <= 1 KiB, and after that the difference is not very significant. * In the synthetic data, with blocks 10 KB or smaller, most blocks can't use repeated tables because the previous block did not contain a symbol that the current block contains. Once blocks are about 12 KB or more, most previous blocks have valid Huffman tables for the current block, and the compression ratio and decompression speed jumped. * In silesia blocks as small as 4KB can frequently reuse the previous Huffman table (85%), but it isn't as profitable, and the previous Huffman table only gets used about 3% of the time. * Microbenchmarks show that `HUF_validateCTable()` takes ~55 ns and `HUF_estimateCompressedSize()` takes ~35 ns. They are decently well optimized, the first versions took 90 ns and 120 ns respectively. `HUF_validateCTable()` could be twice as fast, if we cast the `HUF_CElt*` to a `U32*` and compare to 0. However, `U32` has an alignment of 4 instead of 2, so I think that might be undefined behavior. * I've ran `zstreamtest` compiled normally, with UASAN and with MSAN for 4 hours each. The worst case for the speed difference is a bunch of small blocks in the same frame. I modified `bench.c` to compress the input in a single frame but with blocks of the given block size, set by `-B`. 
Benchmarks on level 1: | Program | Block size | Corpus | Ratio | Compression MB/s | Decompression MB/s | |-----------|------------|-----------|-------|------------------|--------------------| | zstd.base | 256 | synthetic | 2.364 | 110.0 | 297.0 | | zstd | 256 | synthetic | 2.367 | 108.9 | 297.0 | | zstd.base | 256 | silesia | 2.204 | 93.8 | 415.7 | | zstd | 256 | silesia | 2.204 | 93.4 | 415.7 | | zstd.base | 512 | synthetic | 2.594 | 144.2 | 420.0 | | zstd | 512 | synthetic | 2.599 | 141.5 | 425.7 | | zstd.base | 512 | silesia | 2.358 | 118.4 | 432.6 | | zstd | 512 | silesia | 2.358 | 119.8 | 432.6 | | zstd.base | 1024 | synthetic | 2.790 | 192.3 | 594.1 | | zstd | 1024 | synthetic | 2.794 | 192.3 | 600.0 | | zstd.base | 1024 | silesia | 2.524 | 148.2 | 464.2 | | zstd | 1024 | silesia | 2.525 | 148.2 | 467.6 | | zstd.base | 4096 | synthetic | 3.023 | 300.0 | 1000.0 | | zstd | 4096 | synthetic | 3.024 | 300.0 | 1010.1 | | zstd.base | 4096 | silesia | 2.779 | 223.1 | 623.5 | | zstd | 4096 | silesia | 2.779 | 223.1 | 636.0 | | zstd.base | 16384 | synthetic | 3.131 | 350.0 | 1150.1 | | zstd | 16384 | synthetic | 3.152 | 350.0 | 1630.3 | | zstd.base | 16384 | silesia | 2.871 | 296.5 | 883.3 | | zstd | 16384 | silesia | 2.872 | 294.4 | 898.3 |
2017-03-01 17:51:56 -08:00
return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
}
/* Build Huffman Tree */
2016-05-20 05:36:36 -07:00
huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
Allow compressor to repeat Huffman tables * Compressor saves most recently used Huffman table and reuses it if it produces better results. * I attempted to preserve CPU usage profile. I intentionally left all of the existing heuristics in place. There is only a speed difference on the second block and later. When compressing large enough blocks (say >= 4 KiB) there is no significant difference in compression speed. Dictionary compression of one block is the same speed for blocks with literals <= 1 KiB, and after that the difference is not very significant. * In the synthetic data, with blocks 10 KB or smaller, most blocks can't use repeated tables because the previous block did not contain a symbol that the current block contains. Once blocks are about 12 KB or more, most previous blocks have valid Huffman tables for the current block, and the compression ratio and decompression speed jumped. * In silesia blocks as small as 4KB can frequently reuse the previous Huffman table (85%), but it isn't as profitable, and the previous Huffman table only gets used about 3% of the time. * Microbenchmarks show that `HUF_validateCTable()` takes ~55 ns and `HUF_estimateCompressedSize()` takes ~35 ns. They are decently well optimized, the first versions took 90 ns and 120 ns respectively. `HUF_validateCTable()` could be twice as fast, if we cast the `HUF_CElt*` to a `U32*` and compare to 0. However, `U32` has an alignment of 4 instead of 2, so I think that might be undefined behavior. * I've ran `zstreamtest` compiled normally, with UASAN and with MSAN for 4 hours each. The worst case for the speed difference is a bunch of small blocks in the same frame. I modified `bench.c` to compress the input in a single frame but with blocks of the given block size, set by `-B`. 
Benchmarks on level 1: | Program | Block size | Corpus | Ratio | Compression MB/s | Decompression MB/s | |-----------|------------|-----------|-------|------------------|--------------------| | zstd.base | 256 | synthetic | 2.364 | 110.0 | 297.0 | | zstd | 256 | synthetic | 2.367 | 108.9 | 297.0 | | zstd.base | 256 | silesia | 2.204 | 93.8 | 415.7 | | zstd | 256 | silesia | 2.204 | 93.4 | 415.7 | | zstd.base | 512 | synthetic | 2.594 | 144.2 | 420.0 | | zstd | 512 | synthetic | 2.599 | 141.5 | 425.7 | | zstd.base | 512 | silesia | 2.358 | 118.4 | 432.6 | | zstd | 512 | silesia | 2.358 | 119.8 | 432.6 | | zstd.base | 1024 | synthetic | 2.790 | 192.3 | 594.1 | | zstd | 1024 | synthetic | 2.794 | 192.3 | 600.0 | | zstd.base | 1024 | silesia | 2.524 | 148.2 | 464.2 | | zstd | 1024 | silesia | 2.525 | 148.2 | 467.6 | | zstd.base | 4096 | synthetic | 3.023 | 300.0 | 1000.0 | | zstd | 4096 | synthetic | 3.024 | 300.0 | 1010.1 | | zstd.base | 4096 | silesia | 2.779 | 223.1 | 623.5 | | zstd | 4096 | silesia | 2.779 | 223.1 | 636.0 | | zstd.base | 16384 | synthetic | 3.131 | 350.0 | 1150.1 | | zstd | 16384 | synthetic | 3.152 | 350.0 | 1630.3 | | zstd.base | 16384 | silesia | 2.871 | 296.5 | 883.3 | | zstd | 16384 | silesia | 2.872 | 294.4 | 898.3 |
2017-03-01 17:51:56 -08:00
{ CHECK_V_F(maxBits, HUF_buildCTable_wksp (CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize) );
2016-05-20 05:36:36 -07:00
huffLog = (U32)maxBits;
Allow compressor to repeat Huffman tables * Compressor saves most recently used Huffman table and reuses it if it produces better results. * I attempted to preserve CPU usage profile. I intentionally left all of the existing heuristics in place. There is only a speed difference on the second block and later. When compressing large enough blocks (say >= 4 KiB) there is no significant difference in compression speed. Dictionary compression of one block is the same speed for blocks with literals <= 1 KiB, and after that the difference is not very significant. * In the synthetic data, with blocks 10 KB or smaller, most blocks can't use repeated tables because the previous block did not contain a symbol that the current block contains. Once blocks are about 12 KB or more, most previous blocks have valid Huffman tables for the current block, and the compression ratio and decompression speed jumped. * In silesia blocks as small as 4KB can frequently reuse the previous Huffman table (85%), but it isn't as profitable, and the previous Huffman table only gets used about 3% of the time. * Microbenchmarks show that `HUF_validateCTable()` takes ~55 ns and `HUF_estimateCompressedSize()` takes ~35 ns. They are decently well optimized, the first versions took 90 ns and 120 ns respectively. `HUF_validateCTable()` could be twice as fast, if we cast the `HUF_CElt*` to a `U32*` and compare to 0. However, `U32` has an alignment of 4 instead of 2, so I think that might be undefined behavior. * I've ran `zstreamtest` compiled normally, with UASAN and with MSAN for 4 hours each. The worst case for the speed difference is a bunch of small blocks in the same frame. I modified `bench.c` to compress the input in a single frame but with blocks of the given block size, set by `-B`. 
Benchmarks on level 1: | Program | Block size | Corpus | Ratio | Compression MB/s | Decompression MB/s | |-----------|------------|-----------|-------|------------------|--------------------| | zstd.base | 256 | synthetic | 2.364 | 110.0 | 297.0 | | zstd | 256 | synthetic | 2.367 | 108.9 | 297.0 | | zstd.base | 256 | silesia | 2.204 | 93.8 | 415.7 | | zstd | 256 | silesia | 2.204 | 93.4 | 415.7 | | zstd.base | 512 | synthetic | 2.594 | 144.2 | 420.0 | | zstd | 512 | synthetic | 2.599 | 141.5 | 425.7 | | zstd.base | 512 | silesia | 2.358 | 118.4 | 432.6 | | zstd | 512 | silesia | 2.358 | 119.8 | 432.6 | | zstd.base | 1024 | synthetic | 2.790 | 192.3 | 594.1 | | zstd | 1024 | synthetic | 2.794 | 192.3 | 600.0 | | zstd.base | 1024 | silesia | 2.524 | 148.2 | 464.2 | | zstd | 1024 | silesia | 2.525 | 148.2 | 467.6 | | zstd.base | 4096 | synthetic | 3.023 | 300.0 | 1000.0 | | zstd | 4096 | synthetic | 3.024 | 300.0 | 1010.1 | | zstd.base | 4096 | silesia | 2.779 | 223.1 | 623.5 | | zstd | 4096 | silesia | 2.779 | 223.1 | 636.0 | | zstd.base | 16384 | synthetic | 3.131 | 350.0 | 1150.1 | | zstd | 16384 | synthetic | 3.152 | 350.0 | 1630.3 | | zstd.base | 16384 | silesia | 2.871 | 296.5 | 883.3 | | zstd | 16384 | silesia | 2.872 | 294.4 | 898.3 |
2017-03-01 17:51:56 -08:00
/* Zero the unused symbols so we can check it for validity */
2017-03-02 16:38:07 -08:00
memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt));
2016-05-20 05:36:36 -07:00
}
/* Write table description header */
Allow compressor to repeat Huffman tables * Compressor saves most recently used Huffman table and reuses it if it produces better results. * I attempted to preserve CPU usage profile. I intentionally left all of the existing heuristics in place. There is only a speed difference on the second block and later. When compressing large enough blocks (say >= 4 KiB) there is no significant difference in compression speed. Dictionary compression of one block is the same speed for blocks with literals <= 1 KiB, and after that the difference is not very significant. * In the synthetic data, with blocks 10 KB or smaller, most blocks can't use repeated tables because the previous block did not contain a symbol that the current block contains. Once blocks are about 12 KB or more, most previous blocks have valid Huffman tables for the current block, and the compression ratio and decompression speed jumped. * In silesia blocks as small as 4KB can frequently reuse the previous Huffman table (85%), but it isn't as profitable, and the previous Huffman table only gets used about 3% of the time. * Microbenchmarks show that `HUF_validateCTable()` takes ~55 ns and `HUF_estimateCompressedSize()` takes ~35 ns. They are decently well optimized, the first versions took 90 ns and 120 ns respectively. `HUF_validateCTable()` could be twice as fast, if we cast the `HUF_CElt*` to a `U32*` and compare to 0. However, `U32` has an alignment of 4 instead of 2, so I think that might be undefined behavior. * I've ran `zstreamtest` compiled normally, with UASAN and with MSAN for 4 hours each. The worst case for the speed difference is a bunch of small blocks in the same frame. I modified `bench.c` to compress the input in a single frame but with blocks of the given block size, set by `-B`. 
Benchmarks on level 1: | Program | Block size | Corpus | Ratio | Compression MB/s | Decompression MB/s | |-----------|------------|-----------|-------|------------------|--------------------| | zstd.base | 256 | synthetic | 2.364 | 110.0 | 297.0 | | zstd | 256 | synthetic | 2.367 | 108.9 | 297.0 | | zstd.base | 256 | silesia | 2.204 | 93.8 | 415.7 | | zstd | 256 | silesia | 2.204 | 93.4 | 415.7 | | zstd.base | 512 | synthetic | 2.594 | 144.2 | 420.0 | | zstd | 512 | synthetic | 2.599 | 141.5 | 425.7 | | zstd.base | 512 | silesia | 2.358 | 118.4 | 432.6 | | zstd | 512 | silesia | 2.358 | 119.8 | 432.6 | | zstd.base | 1024 | synthetic | 2.790 | 192.3 | 594.1 | | zstd | 1024 | synthetic | 2.794 | 192.3 | 600.0 | | zstd.base | 1024 | silesia | 2.524 | 148.2 | 464.2 | | zstd | 1024 | silesia | 2.525 | 148.2 | 467.6 | | zstd.base | 4096 | synthetic | 3.023 | 300.0 | 1000.0 | | zstd | 4096 | synthetic | 3.024 | 300.0 | 1010.1 | | zstd.base | 4096 | silesia | 2.779 | 223.1 | 623.5 | | zstd | 4096 | silesia | 2.779 | 223.1 | 636.0 | | zstd.base | 16384 | synthetic | 3.131 | 350.0 | 1150.1 | | zstd | 16384 | synthetic | 3.152 | 350.0 | 1630.3 | | zstd.base | 16384 | silesia | 2.871 | 296.5 | 883.3 | | zstd | 16384 | silesia | 2.872 | 294.4 | 898.3 |
2017-03-01 17:51:56 -08:00
{ CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, CTable, maxSymbolValue, huffLog) );
/* Check if using the previous table will be beneficial */
if (repeat && *repeat != HUF_repeat_none) {
size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue);
size_t const newSize = HUF_estimateCompressedSize(CTable, count, maxSymbolValue);
if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
}
}
/* Use the new table */
if (hSize + 12ul >= srcSize) { return 0; }
2016-05-20 05:36:36 -07:00
op += hSize;
Allow compressor to repeat Huffman tables * Compressor saves most recently used Huffman table and reuses it if it produces better results. * I attempted to preserve CPU usage profile. I intentionally left all of the existing heuristics in place. There is only a speed difference on the second block and later. When compressing large enough blocks (say >= 4 KiB) there is no significant difference in compression speed. Dictionary compression of one block is the same speed for blocks with literals <= 1 KiB, and after that the difference is not very significant. * In the synthetic data, with blocks 10 KB or smaller, most blocks can't use repeated tables because the previous block did not contain a symbol that the current block contains. Once blocks are about 12 KB or more, most previous blocks have valid Huffman tables for the current block, and the compression ratio and decompression speed jumped. * In silesia blocks as small as 4KB can frequently reuse the previous Huffman table (85%), but it isn't as profitable, and the previous Huffman table only gets used about 3% of the time. * Microbenchmarks show that `HUF_validateCTable()` takes ~55 ns and `HUF_estimateCompressedSize()` takes ~35 ns. They are decently well optimized, the first versions took 90 ns and 120 ns respectively. `HUF_validateCTable()` could be twice as fast, if we cast the `HUF_CElt*` to a `U32*` and compare to 0. However, `U32` has an alignment of 4 instead of 2, so I think that might be undefined behavior. * I've ran `zstreamtest` compiled normally, with UASAN and with MSAN for 4 hours each. The worst case for the speed difference is a bunch of small blocks in the same frame. I modified `bench.c` to compress the input in a single frame but with blocks of the given block size, set by `-B`. 
Benchmarks on level 1: | Program | Block size | Corpus | Ratio | Compression MB/s | Decompression MB/s | |-----------|------------|-----------|-------|------------------|--------------------| | zstd.base | 256 | synthetic | 2.364 | 110.0 | 297.0 | | zstd | 256 | synthetic | 2.367 | 108.9 | 297.0 | | zstd.base | 256 | silesia | 2.204 | 93.8 | 415.7 | | zstd | 256 | silesia | 2.204 | 93.4 | 415.7 | | zstd.base | 512 | synthetic | 2.594 | 144.2 | 420.0 | | zstd | 512 | synthetic | 2.599 | 141.5 | 425.7 | | zstd.base | 512 | silesia | 2.358 | 118.4 | 432.6 | | zstd | 512 | silesia | 2.358 | 119.8 | 432.6 | | zstd.base | 1024 | synthetic | 2.790 | 192.3 | 594.1 | | zstd | 1024 | synthetic | 2.794 | 192.3 | 600.0 | | zstd.base | 1024 | silesia | 2.524 | 148.2 | 464.2 | | zstd | 1024 | silesia | 2.525 | 148.2 | 467.6 | | zstd.base | 4096 | synthetic | 3.023 | 300.0 | 1000.0 | | zstd | 4096 | synthetic | 3.024 | 300.0 | 1010.1 | | zstd.base | 4096 | silesia | 2.779 | 223.1 | 623.5 | | zstd | 4096 | silesia | 2.779 | 223.1 | 636.0 | | zstd.base | 16384 | synthetic | 3.131 | 350.0 | 1150.1 | | zstd | 16384 | synthetic | 3.152 | 350.0 | 1630.3 | | zstd.base | 16384 | silesia | 2.871 | 296.5 | 883.3 | | zstd | 16384 | silesia | 2.872 | 294.4 | 898.3 |
2017-03-01 17:51:56 -08:00
if (repeat) { *repeat = HUF_repeat_none; }
2017-03-02 16:38:07 -08:00
if (oldHufTable) { memcpy(oldHufTable, CTable, CTableSize); } /* Save the new table */
2016-05-20 05:36:36 -07:00
}
Allow compressor to repeat Huffman tables * Compressor saves most recently used Huffman table and reuses it if it produces better results. * I attempted to preserve CPU usage profile. I intentionally left all of the existing heuristics in place. There is only a speed difference on the second block and later. When compressing large enough blocks (say >= 4 KiB) there is no significant difference in compression speed. Dictionary compression of one block is the same speed for blocks with literals <= 1 KiB, and after that the difference is not very significant. * In the synthetic data, with blocks 10 KB or smaller, most blocks can't use repeated tables because the previous block did not contain a symbol that the current block contains. Once blocks are about 12 KB or more, most previous blocks have valid Huffman tables for the current block, and the compression ratio and decompression speed jumped. * In silesia blocks as small as 4KB can frequently reuse the previous Huffman table (85%), but it isn't as profitable, and the previous Huffman table only gets used about 3% of the time. * Microbenchmarks show that `HUF_validateCTable()` takes ~55 ns and `HUF_estimateCompressedSize()` takes ~35 ns. They are decently well optimized, the first versions took 90 ns and 120 ns respectively. `HUF_validateCTable()` could be twice as fast, if we cast the `HUF_CElt*` to a `U32*` and compare to 0. However, `U32` has an alignment of 4 instead of 2, so I think that might be undefined behavior. * I've ran `zstreamtest` compiled normally, with UASAN and with MSAN for 4 hours each. The worst case for the speed difference is a bunch of small blocks in the same frame. I modified `bench.c` to compress the input in a single frame but with blocks of the given block size, set by `-B`. 
Benchmarks on level 1: | Program | Block size | Corpus | Ratio | Compression MB/s | Decompression MB/s | |-----------|------------|-----------|-------|------------------|--------------------| | zstd.base | 256 | synthetic | 2.364 | 110.0 | 297.0 | | zstd | 256 | synthetic | 2.367 | 108.9 | 297.0 | | zstd.base | 256 | silesia | 2.204 | 93.8 | 415.7 | | zstd | 256 | silesia | 2.204 | 93.4 | 415.7 | | zstd.base | 512 | synthetic | 2.594 | 144.2 | 420.0 | | zstd | 512 | synthetic | 2.599 | 141.5 | 425.7 | | zstd.base | 512 | silesia | 2.358 | 118.4 | 432.6 | | zstd | 512 | silesia | 2.358 | 119.8 | 432.6 | | zstd.base | 1024 | synthetic | 2.790 | 192.3 | 594.1 | | zstd | 1024 | synthetic | 2.794 | 192.3 | 600.0 | | zstd.base | 1024 | silesia | 2.524 | 148.2 | 464.2 | | zstd | 1024 | silesia | 2.525 | 148.2 | 467.6 | | zstd.base | 4096 | synthetic | 3.023 | 300.0 | 1000.0 | | zstd | 4096 | synthetic | 3.024 | 300.0 | 1010.1 | | zstd.base | 4096 | silesia | 2.779 | 223.1 | 623.5 | | zstd | 4096 | silesia | 2.779 | 223.1 | 636.0 | | zstd.base | 16384 | synthetic | 3.131 | 350.0 | 1150.1 | | zstd | 16384 | synthetic | 3.152 | 350.0 | 1630.3 | | zstd.base | 16384 | silesia | 2.871 | 296.5 | 883.3 | | zstd | 16384 | silesia | 2.872 | 294.4 | 898.3 |
2017-03-01 17:51:56 -08:00
return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, CTable);
}
/*! HUF_compress1X_wksp() :
 *  Compress `src` into `dst` as a single Huffman stream,
 *  using caller-provided `workSpace` instead of allocating internally.
 *  No table repetition: a fresh CTable is always built (NULL table/repeat).
 * @return : compressed size (<= dstSize), 0 if not compressible, or an error code. */
size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
                      const void* src, size_t srcSize,
                      unsigned maxSymbolValue, unsigned huffLog,
                      void* workSpace, size_t wkspSize)
{
    return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, NULL, NULL, 0);
}
/*! HUF_compress1X_repeat() :
 *  Same as HUF_compress1X_wksp(), but may reuse the previous Huffman table
 *  provided in `hufTable` when `*repeat` says it is valid.
 *  If `preferRepeat` is set, the existing table is used without re-estimation.
 *  `*repeat` is updated to reflect whether the table was (re)used.
 * @return : compressed size (<= dstSize), 0 if not compressible, or an error code. */
size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
                      const void* src, size_t srcSize,
                      unsigned maxSymbolValue, unsigned huffLog,
                      void* workSpace, size_t wkspSize,
                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat)
{
    return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, hufTable, repeat, preferRepeat);
}
/* HUF_compress1X() :
 * convenience wrapper around HUF_compress1X_wksp(),
 * supplying an on-stack scratch buffer so callers need not manage one. */
size_t HUF_compress1X (void* dst, size_t dstSize,
                 const void* src, size_t srcSize,
                 unsigned maxSymbolValue, unsigned huffLog)
{
    unsigned scratchBuffer[1024];   /* transient workspace, large enough for table construction */
    size_t const cSize = HUF_compress1X_wksp(dst, dstSize, src, srcSize,
                                             maxSymbolValue, huffLog,
                                             scratchBuffer, sizeof(scratchBuffer));
    return cSize;
}
/*! HUF_compress4X_wksp() :
 *  Compress `src` into `dst` as four interleaved Huffman streams
 *  (better decompression parallelism than the 1X variant),
 *  using caller-provided `workSpace` instead of allocating internally.
 *  No table repetition: a fresh CTable is always built (NULL table/repeat).
 * @return : compressed size (<= dstSize), 0 if not compressible, or an error code. */
size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
                      const void* src, size_t srcSize,
                      unsigned maxSymbolValue, unsigned huffLog,
                      void* workSpace, size_t wkspSize)
{
    return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, NULL, NULL, 0);
}
/*! HUF_compress4X_repeat() :
 *  Same as HUF_compress4X_wksp(), but may reuse the previous Huffman table
 *  provided in `hufTable` when `*repeat` says it is valid.
 *  If `preferRepeat` is set, the existing table is used without re-estimation.
 *  `*repeat` is updated to reflect whether the table was (re)used.
 * @return : compressed size (<= dstSize), 0 if not compressible, or an error code. */
size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
                      const void* src, size_t srcSize,
                      unsigned maxSymbolValue, unsigned huffLog,
                      void* workSpace, size_t wkspSize,
                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat)
{
    return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, hufTable, repeat, preferRepeat);
}
/* HUF_compress2() :
 * convenience wrapper around HUF_compress4X_wksp(),
 * supplying an on-stack scratch buffer so callers need not manage one. */
size_t HUF_compress2 (void* dst, size_t dstSize,
                 const void* src, size_t srcSize,
                 unsigned maxSymbolValue, unsigned huffLog)
{
    unsigned scratchBuffer[1024];   /* transient workspace, large enough for table construction */
    size_t const cSize = HUF_compress4X_wksp(dst, dstSize, src, srcSize,
                                             maxSymbolValue, huffLog,
                                             scratchBuffer, sizeof(scratchBuffer));
    return cSize;
}
size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
2016-05-20 05:36:36 -07:00
return HUF_compress2(dst, maxDstSize, src, (U32)srcSize, 255, HUF_TABLELOG_DEFAULT);
}