commit 24bc2e062d
@ -23,6 +23,7 @@ zstdmt
# Test artefacts
tmp*
dictionary.
dictionary
NUL

# Build artefacts
Makefile
|
@ -69,6 +69,7 @@ test: MOREFLAGS += -g -DDEBUGLEVEL=$(DEBUGLEVEL) -Werror
|
|||
test:
|
||||
MOREFLAGS="$(MOREFLAGS)" $(MAKE) -j -C $(PRGDIR) allVariants
|
||||
$(MAKE) -C $(TESTDIR) $@
|
||||
ZSTD=../../programs/zstd $(MAKE) -C doc/educational_decoder test
|
||||
|
||||
## shortest: same as `make check`
|
||||
.PHONY: shortest
|
||||
|
@ -99,8 +100,8 @@ man:
|
|||
contrib: lib
|
||||
$(MAKE) -C contrib/pzstd all
|
||||
$(MAKE) -C contrib/seekable_format/examples all
|
||||
$(MAKE) -C contrib/adaptive-compression all
|
||||
$(MAKE) -C contrib/largeNbDicts all
|
||||
cd contrib/single_file_decoder/ ; ./build_test.sh
|
||||
|
||||
.PHONY: cleanTabs
|
||||
cleanTabs:
|
||||
|
@ -116,7 +117,6 @@ clean:
|
|||
@$(MAKE) -C contrib/gen_html $@ > $(VOID)
|
||||
@$(MAKE) -C contrib/pzstd $@ > $(VOID)
|
||||
@$(MAKE) -C contrib/seekable_format/examples $@ > $(VOID)
|
||||
@$(MAKE) -C contrib/adaptive-compression $@ > $(VOID)
|
||||
@$(MAKE) -C contrib/largeNbDicts $@ > $(VOID)
|
||||
@$(RM) zstd$(EXT) zstdmt$(EXT) tmp*
|
||||
@$(RM) -r lz4
|
||||
|
|
|
@ -510,6 +510,10 @@
|
|||
RelativePath="..\..\..\lib\compress\zstd_compress_sequences.h"
|
||||
>
|
||||
</File>
|
||||
<File
|
||||
RelativePath="..\..\..\lib\compress\zstd_cwksp.h"
|
||||
>
|
||||
</File>
|
||||
<File
|
||||
RelativePath="..\..\..\lib\compress\zstd_fast.h"
|
||||
>
|
||||
|
|
|
@ -546,6 +546,10 @@
|
|||
RelativePath="..\..\..\lib\compress\zstd_compress_sequences.h"
|
||||
>
|
||||
</File>
|
||||
<File
|
||||
RelativePath="..\..\..\lib\compress\zstd_cwksp.h"
|
||||
>
|
||||
</File>
|
||||
<File
|
||||
RelativePath="..\..\..\lib\compress\zstd_fast.h"
|
||||
>
|
||||
|
|
|
@ -626,6 +626,10 @@
|
|||
RelativePath="..\..\..\lib\compress\zstd_compress_sequences.h"
|
||||
>
|
||||
</File>
|
||||
<File
|
||||
RelativePath="..\..\..\lib\compress\zstd_cwksp.h"
|
||||
>
|
||||
</File>
|
||||
<File
|
||||
RelativePath="..\..\..\lib\compress\zstd_fast.h"
|
||||
>
|
||||
|
|
|
@ -558,6 +558,10 @@
|
|||
RelativePath="..\..\..\lib\compress\zstd_compress_sequences.h"
|
||||
>
|
||||
</File>
|
||||
<File
|
||||
RelativePath="..\..\..\lib\compress\zstd_cwksp.h"
|
||||
>
|
||||
</File>
|
||||
<File
|
||||
RelativePath="..\..\..\lib\compress\zstd_fast.h"
|
||||
>
|
||||
|
|
|
@ -197,6 +197,7 @@
|
|||
<ClInclude Include="..\..\..\lib\compress\zstd_compress.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_compress_literals.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_compress_sequences.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_cwksp.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_fast.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_double_fast.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_lazy.h" />
|
||||
|
|
|
@ -200,6 +200,7 @@
|
|||
<ClInclude Include="..\..\..\lib\compress\zstd_compress.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_compress_literals.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_compress_sequences.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_cwksp.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_fast.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_double_fast.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_lazy.h" />
|
||||
|
|
|
@ -82,6 +82,7 @@
|
|||
<ClInclude Include="..\..\..\lib\compress\zstd_compress.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_compress_literals.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_compress_sequences.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_cwksp.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_fast.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_double_fast.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_lazy.h" />
|
||||
|
|
|
@ -82,6 +82,7 @@
|
|||
<ClInclude Include="..\..\..\lib\compress\zstd_compress.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_compress_literals.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_compress_sequences.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_cwksp.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_fast.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_double_fast.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_lazy.h" />
|
||||
|
|
|
@ -79,6 +79,7 @@
|
|||
<ClInclude Include="..\..\..\lib\compress\zstd_compress.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_compress_literals.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_compress_sequences.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_cwksp.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_fast.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_double_fast.h" />
|
||||
<ClInclude Include="..\..\..\lib\compress\zstd_lazy.h" />
|
||||
|
|
|
@ -1,3 +0,0 @@
|
|||
# binaries generated
|
||||
adapt
|
||||
datagen
|
|
@ -1,76 +0,0 @@
|
|||
|
||||
ZSTDDIR = ../../lib
|
||||
PRGDIR = ../../programs
|
||||
ZSTDCOMMON_FILES := $(ZSTDDIR)/common/*.c
|
||||
ZSTDCOMP_FILES := $(ZSTDDIR)/compress/*.c
|
||||
ZSTDDECOMP_FILES := $(ZSTDDIR)/decompress/*.c
|
||||
ZSTD_FILES := $(ZSTDDECOMP_FILES) $(ZSTDCOMMON_FILES) $(ZSTDCOMP_FILES)
|
||||
|
||||
MULTITHREAD_LDFLAGS = -pthread
|
||||
DEBUGFLAGS= -g -DZSTD_DEBUG=1
|
||||
CPPFLAGS += -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(ZSTDDIR)/compress \
|
||||
-I$(ZSTDDIR)/dictBuilder -I$(ZSTDDIR)/deprecated -I$(PRGDIR)
|
||||
CFLAGS ?= -O3
|
||||
CFLAGS += -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
|
||||
-Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
|
||||
-Wstrict-prototypes -Wundef \
|
||||
-Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \
|
||||
-Wredundant-decls
|
||||
CFLAGS += $(DEBUGFLAGS)
|
||||
CFLAGS += $(MOREFLAGS)
|
||||
FLAGS = $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) $(MULTITHREAD_LDFLAGS)
|
||||
|
||||
all: adapt datagen
|
||||
|
||||
adapt: $(ZSTD_FILES) $(PRGDIR)/util.c $(PRGDIR)/timefn.c adapt.c
|
||||
$(CC) $(FLAGS) $^ -o $@
|
||||
|
||||
adapt-debug: $(ZSTD_FILES) $(PRGDIR)/util.c $(PRGDIR)/timefn.c adapt.c
|
||||
$(CC) $(FLAGS) -DDEBUG_MODE=2 $^ -o adapt
|
||||
|
||||
datagen : $(PRGDIR)/datagen.c datagencli.c
|
||||
$(CC) $(FLAGS) $^ -o $@
|
||||
|
||||
test-adapt-correctness: datagen adapt
|
||||
@./test-correctness.sh
|
||||
@echo "test correctness complete"
|
||||
|
||||
test-adapt-performance: datagen adapt
|
||||
@./test-performance.sh
|
||||
@echo "test performance complete"
|
||||
|
||||
clean:
|
||||
@$(RM) -f adapt datagen
|
||||
@$(RM) -rf *.dSYM
|
||||
@$(RM) -f tmp*
|
||||
@$(RM) -f tests/*.zst
|
||||
@$(RM) -f tests/tmp*
|
||||
@echo "finished cleaning"
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# make install is validated only for Linux, macOS, BSD, Hurd and Solaris targets
|
||||
#-----------------------------------------------------------------------------
|
||||
ifneq (,$(filter $(shell uname),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS))
|
||||
|
||||
ifneq (,$(filter $(shell uname),SunOS))
|
||||
INSTALL ?= ginstall
|
||||
else
|
||||
INSTALL ?= install
|
||||
endif
|
||||
|
||||
PREFIX ?= /usr/local
|
||||
DESTDIR ?=
|
||||
BINDIR ?= $(PREFIX)/bin
|
||||
|
||||
INSTALL_PROGRAM ?= $(INSTALL) -m 755
|
||||
|
||||
install: adapt
|
||||
@echo Installing binaries
|
||||
@$(INSTALL) -d -m 755 $(DESTDIR)$(BINDIR)/
|
||||
@$(INSTALL_PROGRAM) adapt $(DESTDIR)$(BINDIR)/zstd-adaptive
|
||||
@echo zstd-adaptive installation completed
|
||||
|
||||
uninstall:
|
||||
@$(RM) $(DESTDIR)$(BINDIR)/zstd-adaptive
|
||||
@echo zstd-adaptive programs successfully uninstalled
|
||||
endif
|
|
@ -1,91 +0,0 @@
|
|||
### Summary
|
||||
|
||||
`adapt` is a new compression tool targeted at optimizing performance across network connections and pipelines. It senses network or pipe speed and adapts the compression level accordingly.
|
||||
In situations where the compression level does not match the network/pipe speed, compression may bottleneck the entire pipeline, or the files may not be compressed as much as they could be, losing efficiency. It also becomes quite impractical to manually measure and set an optimal compression level (which could change over time).
|
||||
|
||||
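To make the idea above concrete, here is a toy sketch of the feedback rule the summary describes; it is not the implementation in `adapt.c` (whose diff is not shown here), and every name in it is hypothetical:

```c
/* Toy illustration only -- not the logic from adapt.c.
 * Nudge the level toward whichever end of the pipeline is idle. */
int adjustLevel(int level, double compressMs, double writeMs,
                int lowerBound, int upperBound)
{
    if (writeMs > compressMs && level < upperBound) level++;   /* writing is the bottleneck: spend more CPU compressing */
    if (compressMs > writeMs && level > lowerBound) level--;   /* compressing is the bottleneck: back off */
    return level;
}
```

The real tool measures these rates continuously while streaming, which is what lets it track network conditions that change over time.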
### Using `adapt`
|
||||
|
||||
To build and use the tool, run `make adapt` in the `adaptive-compression` directory under `contrib`; this generates an executable ready for use. Alternatively, running `make install` builds and installs the binary as the command `zstd-adaptive`.
|
||||
|
||||
Similar to many other compression utilities, `zstd-adaptive` can be invoked by using the following format:
|
||||
|
||||
`zstd-adaptive [options] [file(s)]`
|
||||
|
||||
Supported options for the above format are described below.
|
||||
|
||||
`zstd-adaptive` also supports reading from `stdin` and writing to `stdout`, which is potentially more useful. By default, if no files are given, `zstd-adaptive` reads from and writes to standard I/O. Therefore, you can simply insert it within a pipeline like so:
|
||||
|
||||
`cat FILE | zstd-adaptive | ssh "cat - > tmp.zst"`
|
||||
|
||||
If a file is provided, it is also possible to force writing to stdout using the `-c` flag like so:
|
||||
|
||||
`zstd-adaptive -c FILE | ssh "cat - > tmp.zst"`
|
||||
|
||||
Several options described below can be used to control the behavior of `zstd-adaptive`. In particular, the `-l#` and `-u#` flags set lower and upper bounds so that the compression level always stays within that range, and the `-i#` flag changes the initial compression level. If an initial compression level is not provided, it is chosen within the appropriate range (it becomes equal to the lower bound); a small sketch of this rule follows.
|
||||
|
||||
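As a minimal sketch of the bounds rule just described (hypothetical names, not code from the tool; the real program may reject an out-of-range `-i#` rather than clamp it):

```c
/* Sketch of the documented -l/-u/-i interaction. */
int pickInitialLevel(int requested, int lower, int upper, int provided)
{
    if (!provided) return lower;         /* no -i# given: start at the lower bound */
    if (requested < lower) return lower; /* keep the level inside [lower, upper] */
    if (requested > upper) return upper;
    return requested;
}
```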
### Options
|
||||
`-oFILE` : write output to `FILE`
|
||||
|
||||
`-i#` : provide initial compression level (must be within the appropriate bounds)
|
||||
|
||||
`-h` : display help/information
|
||||
|
||||
`-f` : force the compression level to stay constant
|
||||
|
||||
`-c` : force write to `stdout`
|
||||
|
||||
`-p` : hide progress bar
|
||||
|
||||
`-q` : quiet mode -- do not show progress bar or other information
|
||||
|
||||
`-l#` : set a lower bound on the compression level (default is 1)
|
||||
|
||||
`-u#` : set an upper bound on the compression level (default is 22)
|
||||
### Benchmarking / Test results
|
||||
#### Artificial Tests
|
||||
These artificial tests were run using the `pv` command-line utility to limit pipe speeds (25 MB/s read and 5 MB/s write limits were chosen to mimic severe throughput constraints). A 40 GB backup file was sent through a pipeline, compressed, and written out to a file. Compression time, size, and ratio were computed. Data for `zstd -15` was excluded from these tests because that test run takes too long.
|
||||
|
||||
<table>
|
||||
<tr><th> 25 MB/s read limit </th></tr>
|
||||
<tr><td>
|
||||
|
||||
| Compressor Name | Ratio | Compressed Size | Compression Time |
|
||||
|:----------------|------:|----------------:|-----------------:|
|
||||
| zstd -3 | 2.108 | 20.718 GB | 29m 48.530s |
|
||||
| zstd-adaptive | 2.230 | 19.581 GB | 29m 48.798s |
|
||||
|
||||
</td></tr>
|
||||
</table>
|
||||
|
||||
<table>
|
||||
<tr><th> 5 MB/s write limit </th></tr>
|
||||
<tr><td>
|
||||
|
||||
| Compressor Name | Ratio | Compressed Size | Compression Time |
|
||||
|:----------------|------:|----------------:|-----------------:|
|
||||
| zstd -3 | 2.108 | 20.718 GB | 1h 10m 43.076s |
|
||||
| zstd-adaptive | 2.249 | 19.412 GB | 1h 06m 15.577s |
|
||||
|
||||
</td></tr>
|
||||
</table>
|
||||
|
||||
The commands used for this test generally followed the form:
|
||||
|
||||
`cat FILE | pv -L 25m -q | COMPRESSION | pv -q > tmp.zst # impose 25 MB/s read limit`
|
||||
|
||||
`cat FILE | pv -q | COMPRESSION | pv -L 5m -q > tmp.zst # impose 5 MB/s write limit`
|
||||
|
||||
#### SSH Tests
|
||||
|
||||
The following tests were performed by piping a relatively large backup file (approximately 80 GB) through compression and over SSH to be stored on a server. The test data includes statistics for time and compressed size on `zstd` at several compression levels, as well as `zstd-adaptive`. The data highlights the potential advantages that `zstd-adaptive` has over using a low static compression level and the negative impacts that using an excessively high static compression level can have on
|
||||
pipe throughput.
|
||||
|
||||
| Compressor Name | Ratio | Compressed Size | Compression Time |
|
||||
|:----------------|------:|----------------:|-----------------:|
|
||||
| zstd -3 | 2.212 | 32.426 GB | 1h 17m 59.756s |
|
||||
| zstd -15 | 2.374 | 30.213 GB | 2h 56m 59.441s |
|
||||
| zstd-adaptive | 2.315 | 30.993 GB | 1h 18m 52.860s |
|
||||
|
||||
The commands used for this test generally followed the form:
|
||||
|
||||
`cat FILE | COMPRESSION | ssh dev "cat - > tmp.zst"`
|
File diff suppressed because it is too large
|
@ -1,129 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under both the BSD-style license (found in the
|
||||
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
|
||||
* in the COPYING file in the root directory of this source tree).
|
||||
*/
|
||||
|
||||
|
||||
/*-************************************
|
||||
* Dependencies
|
||||
**************************************/
|
||||
#include "util.h" /* Compiler options */
|
||||
#include <stdio.h> /* fprintf, stderr */
|
||||
#include "datagen.h" /* RDG_generate */
|
||||
|
||||
|
||||
/*-************************************
|
||||
* Constants
|
||||
**************************************/
|
||||
#define KB *(1 <<10)
|
||||
#define MB *(1 <<20)
|
||||
#define GB *(1U<<30)
|
||||
|
||||
#define SIZE_DEFAULT ((64 KB) + 1)
|
||||
#define SEED_DEFAULT 0
|
||||
#define COMPRESSIBILITY_DEFAULT 50
|
||||
|
||||
|
||||
/*-************************************
|
||||
* Macros
|
||||
**************************************/
|
||||
#define DISPLAY(...) fprintf(stderr, __VA_ARGS__)
|
||||
#define DISPLAYLEVEL(l, ...) if (displayLevel>=l) { DISPLAY(__VA_ARGS__); }
|
||||
static unsigned displayLevel = 2;
|
||||
|
||||
|
||||
/*-*******************************************************
|
||||
* Command line
|
||||
*********************************************************/
|
||||
static int usage(const char* programName)
|
||||
{
|
||||
DISPLAY( "Compressible data generator\n");
|
||||
DISPLAY( "Usage :\n");
|
||||
DISPLAY( " %s [args]\n", programName);
|
||||
DISPLAY( "\n");
|
||||
DISPLAY( "Arguments :\n");
|
||||
DISPLAY( " -g# : generate # data (default:%i)\n", SIZE_DEFAULT);
|
||||
DISPLAY( " -s# : Select seed (default:%i)\n", SEED_DEFAULT);
|
||||
DISPLAY( " -P# : Select compressibility in %% (default:%i%%)\n",
|
||||
COMPRESSIBILITY_DEFAULT);
|
||||
DISPLAY( " -h : display help and exit\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int main(int argc, const char** argv)
|
||||
{
|
||||
unsigned probaU32 = COMPRESSIBILITY_DEFAULT;
|
||||
double litProba = 0.0;
|
||||
U64 size = SIZE_DEFAULT;
|
||||
U32 seed = SEED_DEFAULT;
|
||||
const char* const programName = argv[0];
|
||||
|
||||
int argNb;
|
||||
for(argNb=1; argNb<argc; argNb++) {
|
||||
const char* argument = argv[argNb];
|
||||
|
||||
if(!argument) continue; /* Protection if argument empty */
|
||||
|
||||
/* Handle commands. Aggregated commands are allowed */
|
||||
if (*argument=='-') {
|
||||
argument++;
|
||||
while (*argument!=0) {
|
||||
switch(*argument)
|
||||
{
|
||||
case 'h':
|
||||
return usage(programName);
|
||||
case 'g':
|
||||
argument++;
|
||||
size=0;
|
||||
while ((*argument>='0') && (*argument<='9'))
|
||||
size *= 10, size += *argument++ - '0';
|
||||
if (*argument=='K') { size <<= 10; argument++; }
|
||||
if (*argument=='M') { size <<= 20; argument++; }
|
||||
if (*argument=='G') { size <<= 30; argument++; }
|
||||
if (*argument=='B') { argument++; }
|
||||
break;
|
||||
case 's':
|
||||
argument++;
|
||||
seed=0;
|
||||
while ((*argument>='0') && (*argument<='9'))
|
||||
seed *= 10, seed += *argument++ - '0';
|
||||
break;
|
||||
case 'P':
|
||||
argument++;
|
||||
probaU32 = 0;
|
||||
while ((*argument>='0') && (*argument<='9'))
|
||||
probaU32 *= 10, probaU32 += *argument++ - '0';
|
||||
if (probaU32>100) probaU32 = 100;
|
||||
break;
|
||||
case 'L': /* hidden argument : Literal distribution probability */
|
||||
argument++;
|
||||
litProba=0.;
|
||||
while ((*argument>='0') && (*argument<='9'))
|
||||
litProba *= 10, litProba += *argument++ - '0';
|
||||
if (litProba>100.) litProba=100.;
|
||||
litProba /= 100.;
|
||||
break;
|
||||
case 'v':
|
||||
displayLevel = 4;
|
||||
argument++;
|
||||
break;
|
||||
default:
|
||||
return usage(programName);
|
||||
}
|
||||
} } } /* for(argNb=1; argNb<argc; argNb++) */
|
||||
|
||||
DISPLAYLEVEL(4, "Compressible data Generator \n");
|
||||
if (probaU32!=COMPRESSIBILITY_DEFAULT)
|
||||
DISPLAYLEVEL(3, "Compressibility : %i%%\n", probaU32);
|
||||
DISPLAYLEVEL(3, "Seed = %u \n", (unsigned)seed);
|
||||
|
||||
RDG_genStdout(size, (double)probaU32/100, litProba, seed);
|
||||
DISPLAYLEVEL(1, "\n");
|
||||
|
||||
return 0;
|
||||
}
|
|
@ -1,252 +0,0 @@
|
|||
echo "correctness tests -- general"
|
||||
./datagen -s1 -g1GB > tmp
|
||||
./adapt -otmp.zst tmp
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s2 -g500MB > tmp
|
||||
./adapt -otmp.zst tmp
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s3 -g250MB > tmp
|
||||
./adapt -otmp.zst tmp
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s4 -g125MB > tmp
|
||||
./adapt -otmp.zst tmp
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s5 -g50MB > tmp
|
||||
./adapt -otmp.zst tmp
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s6 -g25MB > tmp
|
||||
./adapt -otmp.zst tmp
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s7 -g10MB > tmp
|
||||
./adapt -otmp.zst tmp
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s8 -g5MB > tmp
|
||||
./adapt -otmp.zst tmp
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s9 -g500KB > tmp
|
||||
./adapt -otmp.zst tmp
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
echo -e "\ncorrectness tests -- streaming"
|
||||
./datagen -s10 -g1GB > tmp
|
||||
cat tmp | ./adapt > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s11 -g100MB > tmp
|
||||
cat tmp | ./adapt > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s12 -g10MB > tmp
|
||||
cat tmp | ./adapt > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s13 -g1MB > tmp
|
||||
cat tmp | ./adapt > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s14 -g100KB > tmp
|
||||
cat tmp | ./adapt > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s15 -g10KB > tmp
|
||||
cat tmp | ./adapt > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
echo -e "\ncorrectness tests -- read limit"
|
||||
./datagen -s16 -g1GB > tmp
|
||||
pv -L 50m -q tmp | ./adapt > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s17 -g100MB > tmp
|
||||
pv -L 50m -q tmp | ./adapt > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s18 -g10MB > tmp
|
||||
pv -L 50m -q tmp | ./adapt > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s19 -g1MB > tmp
|
||||
pv -L 50m -q tmp | ./adapt > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s20 -g100KB > tmp
|
||||
pv -L 50m -q tmp | ./adapt > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s21 -g10KB > tmp
|
||||
pv -L 50m -q tmp | ./adapt > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
echo -e "\ncorrectness tests -- write limit"
|
||||
./datagen -s22 -g1GB > tmp
|
||||
pv -q tmp | ./adapt | pv -L 5m -q > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s23 -g100MB > tmp
|
||||
pv -q tmp | ./adapt | pv -L 5m -q > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s24 -g10MB > tmp
|
||||
pv -q tmp | ./adapt | pv -L 5m -q > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s25 -g1MB > tmp
|
||||
pv -q tmp | ./adapt | pv -L 5m -q > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s26 -g100KB > tmp
|
||||
pv -q tmp | ./adapt | pv -L 5m -q > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s27 -g10KB > tmp
|
||||
pv -q tmp | ./adapt | pv -L 5m -q > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
echo -e "\ncorrectness tests -- read and write limits"
|
||||
./datagen -s28 -g1GB > tmp
|
||||
pv -L 50m -q tmp | ./adapt | pv -L 5m -q > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s29 -g100MB > tmp
|
||||
pv -L 50m -q tmp | ./adapt | pv -L 5m -q > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s30 -g10MB > tmp
|
||||
pv -L 50m -q tmp | ./adapt | pv -L 5m -q > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s31 -g1MB > tmp
|
||||
pv -L 50m -q tmp | ./adapt | pv -L 5m -q > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s32 -g100KB > tmp
|
||||
pv -L 50m -q tmp | ./adapt | pv -L 5m -q > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s33 -g10KB > tmp
|
||||
pv -L 50m -q tmp | ./adapt | pv -L 5m -q > tmp.zst
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
echo -e "\ncorrectness tests -- forced compression level"
|
||||
./datagen -s34 -g1GB > tmp
|
||||
./adapt tmp -otmp.zst -i11 -f
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s35 -g100MB > tmp
|
||||
./adapt tmp -otmp.zst -i11 -f
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s36 -g10MB > tmp
|
||||
./adapt tmp -otmp.zst -i11 -f
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s37 -g1MB > tmp
|
||||
./adapt tmp -otmp.zst -i11 -f
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s38 -g100KB > tmp
|
||||
./adapt tmp -otmp.zst -i11 -f
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
./datagen -s39 -g10KB > tmp
|
||||
./adapt tmp -otmp.zst -i11 -f
|
||||
zstd -d tmp.zst -o tmp2
|
||||
diff -s -q tmp tmp2
|
||||
rm tmp*
|
||||
|
||||
echo -e "\ncorrectness tests -- window size test"
|
||||
./datagen -s39 -g1GB | pv -L 25m -q | ./adapt -i1 | pv -q > tmp.zst
|
||||
zstd -d tmp.zst
|
||||
rm tmp*
|
||||
|
||||
echo -e "\ncorrectness tests -- testing bounds"
|
||||
./datagen -s40 -g1GB | pv -L 25m -q | ./adapt -i1 -u4 | pv -q > tmp.zst
|
||||
rm tmp*
|
||||
|
||||
./datagen -s41 -g1GB | ./adapt -i14 -l4 > tmp.zst
|
||||
rm tmp*
|
||||
make clean
|
|
@ -1,59 +0,0 @@
|
|||
echo "testing time -- no limits set"
|
||||
./datagen -s1 -g1GB > tmp
|
||||
time ./adapt -otmp1.zst tmp
|
||||
time zstd -1 -o tmp2.zst tmp
|
||||
rm tmp*
|
||||
|
||||
./datagen -s2 -g2GB > tmp
|
||||
time ./adapt -otmp1.zst tmp
|
||||
time zstd -1 -o tmp2.zst tmp
|
||||
rm tmp*
|
||||
|
||||
./datagen -s3 -g4GB > tmp
|
||||
time ./adapt -otmp1.zst tmp
|
||||
time zstd -1 -o tmp2.zst tmp
|
||||
rm tmp*
|
||||
|
||||
echo -e "\ntesting compression ratio -- no limits set"
|
||||
./datagen -s4 -g1GB > tmp
|
||||
time ./adapt -otmp1.zst tmp
|
||||
time zstd -1 -o tmp2.zst tmp
|
||||
ls -l tmp1.zst tmp2.zst
|
||||
rm tmp*
|
||||
|
||||
./datagen -s5 -g2GB > tmp
|
||||
time ./adapt -otmp1.zst tmp
|
||||
time zstd -1 -o tmp2.zst tmp
|
||||
ls -l tmp1.zst tmp2.zst
|
||||
rm tmp*
|
||||
|
||||
./datagen -s6 -g4GB > tmp
|
||||
time ./adapt -otmp1.zst tmp
|
||||
time zstd -1 -o tmp2.zst tmp
|
||||
ls -l tmp1.zst tmp2.zst
|
||||
rm tmp*
|
||||
|
||||
echo -e "\ntesting performance at various compression levels -- no limits set"
|
||||
./datagen -s7 -g1GB > tmp
|
||||
echo "adapt"
|
||||
time ./adapt -i5 -f tmp -otmp1.zst
|
||||
echo "zstdcli"
|
||||
time zstd -5 tmp -o tmp2.zst
|
||||
ls -l tmp1.zst tmp2.zst
|
||||
rm tmp*
|
||||
|
||||
./datagen -s8 -g1GB > tmp
|
||||
echo "adapt"
|
||||
time ./adapt -i10 -f tmp -otmp1.zst
|
||||
echo "zstdcli"
|
||||
time zstd -10 tmp -o tmp2.zst
|
||||
ls -l tmp1.zst tmp2.zst
|
||||
rm tmp*
|
||||
|
||||
./datagen -s9 -g1GB > tmp
|
||||
echo "adapt"
|
||||
time ./adapt -i15 -f tmp -otmp1.zst
|
||||
echo "zstdcli"
|
||||
time zstd -15 tmp -o tmp2.zst
|
||||
ls -l tmp1.zst tmp2.zst
|
||||
rm tmp*
|
|
@ -232,7 +232,7 @@ If there is an error, the function will return an error code, which can be teste
*******************************************/
/* FSE buffer bounds */
#define FSE_NCOUNTBOUND 512
-#define FSE_BLOCKBOUND(size) (size + (size >> 7))
+#define FSE_BLOCKBOUND(size) (size + (size >> 7) + 4 /* constant for initial fse states */ )
#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */

/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
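As a hedged aside (not part of this patch): the point of `FSE_COMPRESSBOUND` is worst-case static sizing, and the extra `+ 4` in the updated `FSE_BLOCKBOUND` is included in that bound. A minimal sketch, assuming `fse.h` is on the include path and the FSE sources are compiled into the build:

```c
#include <stdio.h>
#include <string.h>
#include "fse.h"   /* FSE_COMPRESSBOUND, FSE_compress, FSE_isError, FSE_getErrorName */

#define SRC_SIZE 1024

int main(void)
{
    static char src[SRC_SIZE];
    static char dst[FSE_COMPRESSBOUND(SRC_SIZE)];  /* big enough for any input, per the macro */
    int i;
    for (i = 0; i < SRC_SIZE; i++) src[i] = (char)(i % 8);   /* compressible test data */
    {   size_t const r = FSE_compress(dst, sizeof(dst), src, SRC_SIZE);
        if (FSE_isError(r)) { printf("error: %s \n", FSE_getErrorName(r)); return 1; }
        printf("compressed %u bytes into %u bytes \n", (unsigned)SRC_SIZE, (unsigned)r);
    }
    return 0;
}
```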
@ -0,0 +1,2 @@
|
|||
zstddeclib.c
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
# Single File Zstandard Decompression Library
|
||||
|
||||
The script `combine.sh` creates an _amalgamated_ source file that can be used with or without `zstd.h`. This isn't a _header-only_ file but it does offer a similar level of simplicity when integrating into a project.
|
||||
|
||||
Create `zstddeclib.c` from the Zstd source using:
|
||||
```
|
||||
cd zstd/contrib/declib
|
||||
./combine.sh -r ../../lib -r ../../lib/common -r ../../lib/decompress -o zstddeclib.c zstddeclib-in.c
|
||||
```
|
||||
Then add the resulting file to your project (see the [example files](examples)).
|
||||
|
||||
`create_single_file_decoder.sh` will run the above script, creating file `zstddeclib.c`.
|
||||
`build_test.sh` will create the decoder, then compile and test the library.
|
||||
|
||||
Why?
|
||||
----
|
||||
|
||||
Because all it now takes to support decompressing Zstd is the addition of a single file (two if using the header), with no configuration or further build steps. The library is small, adding, for example, 26kB to an Emscripten-compiled WebAssembly project. Native implementations add a little more, 40-70kB depending on the compiler and platform.
|
|
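To complement the README above, a minimal sketch of consuming the amalgamated decoder; `ZSTD_getFrameContentSize`, `ZSTD_decompress` and `ZSTD_isError` are the regular public API, while the wrapper function and its names are illustrative only:

```c
/* Sketch: decode one zstd frame with the single-file decoder.
 * Either include zstddeclib.c directly (as the bundled examples do),
 * or include zstd.h here and compile the amalgamated file separately. */
#include <stdlib.h>
#include "zstddeclib.c"

/* Returns a malloc'd buffer holding the decoded bytes, or NULL on failure. */
void* decodeFrame(const void* src, size_t srcSize, size_t* decodedSize)
{
    unsigned long long const contentSize = ZSTD_getFrameContentSize(src, srcSize);
    if (contentSize == ZSTD_CONTENTSIZE_ERROR || contentSize == ZSTD_CONTENTSIZE_UNKNOWN)
        return NULL;                                /* needs a single frame with a stored size */
    void* const dst = malloc((size_t)contentSize);
    if (dst == NULL) return NULL;
    size_t const dSize = ZSTD_decompress(dst, (size_t)contentSize, src, srcSize);
    if (ZSTD_isError(dSize)) { free(dst); return NULL; }
    *decodedSize = dSize;
    return dst;
}
```

In spirit this is what `examples/simple.c` exercises when it decompresses a known payload and verifies the result.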
@ -0,0 +1,58 @@
|
|||
#!/bin/sh
|
||||
|
||||
# Where to find the sources
|
||||
ZSTD_SRC_ROOT="../../lib"
|
||||
|
||||
# Temporary compiled binary
|
||||
OUT_FILE="tempbin"
|
||||
|
||||
# Optional temporary compiled WebAssembly
|
||||
OUT_WASM="temp.wasm"
|
||||
|
||||
# Amalgamate the sources
|
||||
./create_single_file_decoder.sh
|
||||
# Did combining work?
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Single file decoder creation script: FAILED"
|
||||
exit 1
|
||||
fi
|
||||
echo "Single file decoder creation script: PASSED"
|
||||
|
||||
# Compile the generated output
|
||||
cc -Wall -Wextra -Werror -Os -g0 -o $OUT_FILE examples/simple.c
|
||||
# Did compilation work?
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Compiling simple.c: FAILED"
|
||||
exit 1
|
||||
fi
|
||||
echo "Compiling simple.c: PASSED"
|
||||
|
||||
# Run then delete the compiled output
|
||||
./$OUT_FILE
|
||||
retVal=$?
|
||||
rm -f $OUT_FILE
|
||||
# Did the test work?
|
||||
if [ $retVal -ne 0 ]; then
|
||||
echo "Running simple.c: FAILED"
|
||||
exit 1
|
||||
fi
|
||||
echo "Running simple.c: PASSED"
|
||||
|
||||
# Is Emscripten available?
|
||||
which emcc > /dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "(Skipping Emscripten test)"
|
||||
else
|
||||
# Compile the Emscripten example
|
||||
CC_FLAGS="-Wall -Wextra -Werror -Os -g0 -flto --llvm-lto 3 -lGL -DNDEBUG=1"
|
||||
emcc $CC_FLAGS -s WASM=1 -o $OUT_WASM examples/emscripten.c
|
||||
# Did compilation work?
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Compiling emscripten.c: FAILED"
|
||||
exit 1
|
||||
fi
|
||||
echo "Compiling emscripten.c: PASSED"
|
||||
rm -f $OUT_WASM
|
||||
fi
|
||||
|
||||
exit 0
|
|
@ -0,0 +1,124 @@
|
|||
#!/bin/sh -e
|
||||
|
||||
# Tool to bundle multiple C/C++ source files, inlining any includes.
|
||||
#
|
||||
# Note: this POSIX-compliant script is many times slower than the original bash
|
||||
# implementation (due to the grep calls) but it runs and works everywhere.
|
||||
#
|
||||
# TODO: ROOTS and FOUND as arrays (since they fail on paths with spaces)
|
||||
#
|
||||
# Author: Carl Woffenden, Numfum GmbH (released under a CC0 license)
|
||||
|
||||
# Common file roots
|
||||
ROOTS="./"
|
||||
|
||||
# Files previously visited
|
||||
FOUND=""
|
||||
|
||||
# Optional destination file (empty string to write to stdout)
|
||||
DESTN=""
|
||||
|
||||
# Prints the script usage then exits
|
||||
usage() {
|
||||
echo "Usage: $0 [-r <path>] [-o <outfile>] infile"
|
||||
echo " -r file root search paths"
|
||||
echo " -o output file (otherwise stdout)"
|
||||
echo "Example: $0 -r ../my/path - r ../other/path -o out.c in.c"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Tests that the grep implementation works as expected (older OSX grep fails)
|
||||
test_grep() {
|
||||
if ! echo '#include "foo"' | grep -Eq '^\s*#\s*include\s*".+"'; then
|
||||
echo "Aborting: the grep implementation fails to parse include lines"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Tests if list $1 has item $2 (returning zero on a match)
|
||||
list_has_item() {
|
||||
if echo "$1" | grep -Eq "(^|\s*)$2(\$|\s*)"; then
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Adds a new line with the supplied arguments to $DESTN (or stdout)
|
||||
write_line() {
|
||||
if [ -n "$DESTN" ]; then
|
||||
printf '%s\n' "$@" >> "$DESTN"
|
||||
else
|
||||
printf '%s\n' "$@"
|
||||
fi
|
||||
}
|
||||
|
||||
# Adds the contents of $1 with any of its includes inlined
|
||||
add_file() {
|
||||
# Match the path
|
||||
local file=
|
||||
if [ -f "$1" ]; then
|
||||
file="$1"
|
||||
else
|
||||
for root in $ROOTS; do
|
||||
if test -f "$root/$1"; then
|
||||
file="$root/$1"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
if [ -n "$file" ]; then
|
||||
# Read the file
|
||||
local line=
|
||||
while IFS= read -r line; do
|
||||
if echo "$line" | grep -Eq '^\s*#\s*include\s*".+"'; then
|
||||
# We have an include directive so strip the (first) file
|
||||
local inc=$(echo "$line" | grep -Eo '".*"' | grep -Eo '\w*(\.?\w+)+' | head -1)
|
||||
if ! list_has_item "$FOUND" "$inc"; then
|
||||
# And we've not previously encountered it
|
||||
FOUND="$FOUND $inc"
|
||||
write_line "/**** start inlining $inc ****/"
|
||||
add_file "$inc"
|
||||
write_line "/**** ended inlining $inc ****/"
|
||||
else
|
||||
write_line "/**** skipping file: $inc ****/"
|
||||
fi
|
||||
else
|
||||
# Otherwise write the source line
|
||||
write_line "$line"
|
||||
fi
|
||||
done < "$file"
|
||||
else
|
||||
write_line "#error Unable to find \"$1\""
|
||||
fi
|
||||
}
|
||||
|
||||
while getopts ":r:o:" opts; do
|
||||
case $opts in
|
||||
r)
|
||||
ROOTS="$OPTARG $ROOTS"
|
||||
;;
|
||||
o)
|
||||
DESTN="$OPTARG"
|
||||
;;
|
||||
*)
|
||||
usage
|
||||
;;
|
||||
esac
|
||||
done
|
||||
shift $((OPTIND-1))
|
||||
|
||||
if [ -n "$1" ]; then
|
||||
if [ -f "$1" ]; then
|
||||
if [ -n "$DESTN" ]; then
|
||||
printf "" > "$DESTN"
|
||||
fi
|
||||
test_grep
|
||||
add_file $1
|
||||
else
|
||||
echo "Input file not found: \"$1\""
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
usage
|
||||
fi
|
||||
exit 0
|
|
@ -0,0 +1,15 @@
|
|||
#!/bin/sh
|
||||
|
||||
# Where to find the sources
|
||||
ZSTD_SRC_ROOT="../../lib"
|
||||
|
||||
|
||||
# Amalgamate the sources
|
||||
echo "Amalgamating files... this can take a while"
|
||||
./combine.sh -r "$ZSTD_SRC_ROOT" -r "$ZSTD_SRC_ROOT/common" -r "$ZSTD_SRC_ROOT/decompress" -o zstddeclib.c zstddeclib-in.c
|
||||
# Did combining work?
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Combine script: FAILED"
|
||||
exit 1
|
||||
fi
|
||||
echo "Combine script: PASSED"
|
|
@ -0,0 +1,9 @@
|
|||
# Single File Zstandard Examples
|
||||
|
||||
The examples `#include` the generated `zstddeclib.c` directly but work equally well when including `zstd.h` and compiling the amalgamated source separately.
|
||||
|
||||
`simple.c` is the most basic example of decompressing content and verifying the result.
|
||||
|
||||
`emscripten.c` is a bare-bones [Emscripten](https://github.com/emscripten-core/emscripten) compiled WebGL demo using Zstd to further compress a DXT1 texture (see the [original PNG image](testcard.png)). The 256x256 texture would normally be 32kB, but even when bundled with the Zstd decompressor the resulting WebAssembly weighs in at 41kB (`shell.html` is a support file to run the Wasm).
|
||||
|
||||
The example files in this directory are released under a [Creative Commons Zero license](https://creativecommons.org/publicdomain/zero/1.0/).
|
|
@ -0,0 +1,600 @@
|
|||
/**
|
||||
* \file emscripten.c
|
||||
* Emscripten example of using the single-file \c zstddeclib. Draws a rotating
|
||||
* textured quad with data from the in-line Zstd compressed DXT1 texture (DXT1
|
||||
* being hardware compression, further compressed with Zstd).
|
||||
* \n
|
||||
* Compile using:
|
||||
* \code
|
||||
* export CC_FLAGS="-Wall -Wextra -Werror -Os -g0 -flto --llvm-lto 3 -lGL -DNDEBUG=1"
|
||||
* export EM_FLAGS="-s WASM=1 -s ENVIRONMENT=web --shell-file shell.html --closure 1"
|
||||
* emcc $CC_FLAGS $EM_FLAGS -o out.html emscripten.c
|
||||
* \endcode
|
||||
*
|
||||
* \author Carl Woffenden, Numfum GmbH (released under a CC0 license)
|
||||
*/
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
#include <emscripten/emscripten.h>
|
||||
#include <emscripten/html5.h>
|
||||
|
||||
#include <GLES2/gl2.h>
|
||||
#include <GLES2/gl2ext.h>
|
||||
|
||||
#include "../zstddeclib.c"
|
||||
|
||||
//************************* Test Data (DXT texture) **************************/
|
||||
|
||||
/**
|
||||
* Zstd compressed DXT1 256x256 texture source.
|
||||
* \n
|
||||
* See \c testcard.png for the original.
|
||||
*/
|
||||
static uint8_t const srcZstd[] = {
|
||||
0x28, 0xb5, 0x2f, 0xfd, 0x60, 0x00, 0x7f, 0x6d, 0x61, 0x00, 0x0a, 0x66,
|
||||
0xec, 0x17, 0x48, 0x60, 0x1c, 0x5a, 0xc9, 0x5d, 0x1a, 0x38, 0x07, 0xe8,
|
||||
0xc5, 0x82, 0x99, 0x68, 0xe6, 0x95, 0x45, 0x58, 0x0d, 0x0c, 0xf3, 0x36,
|
||||
0xc8, 0xd9, 0x0f, 0x46, 0x2d, 0x68, 0x11, 0xf8, 0x31, 0x10, 0xa1, 0x1a,
|
||||
0x2f, 0x99, 0x5c, 0x84, 0xfd, 0x92, 0x02, 0xe6, 0x3b, 0x44, 0x9b, 0x01,
|
||||
0x5d, 0x92, 0xff, 0x38, 0x26, 0x00, 0x6a, 0x6b, 0xc3, 0x53, 0xb2, 0x0c,
|
||||
0x25, 0xf3, 0xd8, 0x59, 0x68, 0x9b, 0x14, 0x8a, 0x89, 0x75, 0x18, 0x03,
|
||||
0x1d, 0xc9, 0x0f, 0x63, 0x01, 0x73, 0x01, 0x72, 0x01, 0x4f, 0x66, 0x31,
|
||||
0x58, 0x0f, 0x97, 0x4b, 0x0c, 0x4c, 0x06, 0xac, 0x07, 0x0b, 0x68, 0xd4,
|
||||
0xad, 0x80, 0x64, 0x13, 0x74, 0xa1, 0x12, 0x16, 0x58, 0xcf, 0x1a, 0x95,
|
||||
0x5f, 0x0d, 0x26, 0x55, 0xd0, 0x9c, 0xf4, 0x52, 0x35, 0x2e, 0x20, 0xc1,
|
||||
0x06, 0x69, 0x03, 0x0a, 0x93, 0x83, 0x5e, 0x27, 0x9b, 0x4c, 0x6d, 0xee,
|
||||
0x87, 0x03, 0x30, 0x6c, 0x46, 0xd7, 0x50, 0x5c, 0xca, 0xe6, 0xa6, 0x4d,
|
||||
0xa8, 0xf6, 0xab, 0xd7, 0x0e, 0x27, 0x27, 0x90, 0xc4, 0xb2, 0xd1, 0x10,
|
||||
0xfa, 0x43, 0x82, 0xc8, 0xf2, 0xe5, 0xff, 0xff, 0xd5, 0x52, 0x62, 0x43,
|
||||
0x87, 0x26, 0x2a, 0x05, 0x70, 0x0e, 0xb0, 0x2f, 0xc4, 0x56, 0xef, 0xb5,
|
||||
0xca, 0xb8, 0x53, 0xb7, 0x96, 0x0e, 0xe7, 0x00, 0x2c, 0xa8, 0xda, 0x3b,
|
||||
0x07, 0x70, 0xa7, 0x78, 0x38, 0x60, 0x87, 0x7a, 0x01, 0x3b, 0x75, 0xec,
|
||||
0xfa, 0x77, 0xe2, 0x46, 0x94, 0x61, 0x8e, 0x0d, 0x0c, 0xfb, 0xe7, 0x8b,
|
||||
0x13, 0x50, 0x31, 0xa9, 0x27, 0xcd, 0x27, 0xef, 0x6b, 0xa6, 0xab, 0x9c,
|
||||
0x4d, 0x95, 0x6c, 0x3a, 0xbb, 0x8e, 0x96, 0x92, 0x18, 0x5a, 0x7c, 0x4f,
|
||||
0xff, 0x7b, 0x38, 0xf2, 0xdb, 0x86, 0xde, 0xff, 0x1f, 0x2f, 0x21, 0x86,
|
||||
0x7d, 0xbf, 0x45, 0xd0, 0x6e, 0x77, 0x0a, 0xee, 0x0a, 0xee, 0x14, 0x9a,
|
||||
0xb8, 0x84, 0xf3, 0xac, 0xbe, 0xc8, 0x7f, 0x8d, 0xff, 0xff, 0xcf, 0x2a,
|
||||
0xfb, 0x69, 0xfc, 0xfb, 0xfd, 0x7a, 0x10, 0x22, 0x36, 0xfc, 0xff, 0x3f,
|
||||
0xcf, 0xd0, 0xf1, 0x7f, 0xfe, 0xff, 0x3d, 0x24, 0xdf, 0x78, 0x4a, 0xff,
|
||||
0xda, 0x9c, 0x39, 0xcf, 0xef, 0xe7, 0xfd, 0x52, 0x98, 0xb5, 0x40, 0x92,
|
||||
0xee, 0xdd, 0x99, 0xf5, 0x53, 0x5b, 0x65, 0x6b, 0xb5, 0xd8, 0x7b, 0xae,
|
||||
0xfa, 0xc1, 0x0f, 0x0c, 0x7f, 0x4f, 0x55, 0xa3, 0xad, 0x2c, 0xa0, 0xbd,
|
||||
0xf7, 0x2a, 0x0e, 0xe8, 0xbd, 0xc7, 0x5e, 0xf5, 0xd8, 0x54, 0x9e, 0x56,
|
||||
0xa3, 0xd6, 0x59, 0xd5, 0xfe, 0x1f, 0xc0, 0x30, 0x8c, 0xfc, 0x46, 0x04,
|
||||
0xae, 0x60, 0xbc, 0xe8, 0xcf, 0xec, 0x3d, 0xde, 0xf9, 0xf0, 0xfe, 0xef,
|
||||
0x7d, 0xcc, 0xf7, 0x2b, 0xe5, 0x1b, 0x70, 0xff, 0xff, 0x7e, 0x3f, 0x6e,
|
||||
0xe4, 0x02, 0x07, 0xfc, 0x1b, 0x7a, 0xff, 0xe7, 0x58, 0xfc, 0x7e, 0x3a,
|
||||
0xdc, 0x97, 0xfd, 0x57, 0xef, 0xa3, 0xfc, 0x2a, 0xc7, 0x4d, 0xf3, 0xcb,
|
||||
0x9d, 0xce, 0xac, 0xfe, 0xeb, 0x2e, 0x86, 0xb9, 0x69, 0x54, 0xef, 0xf9,
|
||||
0x55, 0xcf, 0xff, 0x48, 0x24, 0x72, 0x3a, 0x9d, 0x72, 0x2f, 0x2f, 0x2f,
|
||||
0xff, 0x3f, 0xe7, 0x01, 0x6c, 0x4d, 0x6c, 0xcd, 0x2d, 0x5b, 0x53, 0xb7,
|
||||
0x59, 0x22, 0x08, 0x0b, 0xa7, 0x92, 0x15, 0x75, 0x93, 0xb0, 0x5d, 0xaf,
|
||||
0x2a, 0x63, 0x95, 0x1d, 0x25, 0xd2, 0xd2, 0xa8, 0x1c, 0x84, 0xc9, 0xdc,
|
||||
0x72, 0xba, 0xd7, 0xfc, 0x69, 0xf5, 0xc7, 0x19, 0xa9, 0xbe, 0xfa, 0x26,
|
||||
0x55, 0x25, 0x75, 0xb7, 0x60, 0xa3, 0xd8, 0x68, 0x54, 0xb7, 0x1b, 0xa5,
|
||||
0x54, 0x62, 0xb1, 0x49, 0xde, 0xe2, 0xac, 0xa2, 0xe8, 0x7b, 0xff, 0x5f,
|
||||
0x75, 0x4e, 0xb8, 0xa2, 0xdd, 0x6a, 0xb7, 0xda, 0x6e, 0x2e, 0x04, 0xcd,
|
||||
0x08, 0x2f, 0xec, 0x8e, 0x49, 0xaf, 0x49, 0x6f, 0x8b, 0x4f, 0x2e, 0x1a,
|
||||
0xc5, 0x62, 0x7b, 0x6b, 0x3e, 0x32, 0x3e, 0x32, 0xbe, 0x08, 0x35, 0x4d,
|
||||
0x63, 0x93, 0xa6, 0xc8, 0x42, 0xe6, 0x21, 0xcc, 0x59, 0xc8, 0x4c, 0xe5,
|
||||
0x86, 0xe1, 0x03, 0x06, 0xa4, 0xec, 0xff, 0xb7, 0x78, 0x7e, 0x62, 0x43,
|
||||
0xc7, 0x2c, 0x50, 0x30, 0x4a, 0xc8, 0x9b, 0xf3, 0xbf, 0xe6, 0x62, 0xa0,
|
||||
0x50, 0xa6, 0x9c, 0xe3, 0x6e, 0x5b, 0xaf, 0x77, 0x8b, 0xbb, 0xe1, 0x70,
|
||||
0xaa, 0xaa, 0xaa, 0x92, 0xb4, 0x52, 0xad, 0x14, 0x87, 0x93, 0x0b, 0xe6,
|
||||
0x82, 0x39, 0x11, 0xb9, 0x20, 0x9a, 0x16, 0x34, 0x22, 0x68, 0xb2, 0x68,
|
||||
0xb2, 0x76, 0xd3, 0xe8, 0x6e, 0xda, 0x6b, 0x62, 0x34, 0x2e, 0x8d, 0xbd,
|
||||
0x2d, 0x3d, 0x6b, 0x4c, 0x26, 0x33, 0xda, 0x33, 0xf3, 0x91, 0x51, 0x46,
|
||||
0x79, 0xbd, 0x3e, 0x39, 0x9f, 0xcf, 0xd2, 0xb8, 0x5c, 0xfa, 0x22, 0xf8,
|
||||
0x8e, 0xb6, 0xbe, 0x08, 0x40, 0x14, 0x49, 0x40, 0x14, 0xf2, 0x0c, 0x2d,
|
||||
0x30, 0x85, 0x3c, 0x63, 0x29, 0xd3, 0x98, 0x85, 0x6c, 0xb5, 0xdb, 0xad,
|
||||
0x5c, 0x63, 0x9e, 0x72, 0xcf, 0x43, 0xe6, 0xaf, 0x77, 0xa6, 0xe2, 0x21,
|
||||
0x0c, 0x4d, 0xd3, 0x49, 0x1e, 0xc2, 0x14, 0x6f, 0xee, 0xdb, 0x7b, 0x7b,
|
||||
0x08, 0xb3, 0xa4, 0x60, 0x3b, 0x9d, 0x6e, 0x8b, 0x37, 0x4b, 0x0a, 0x74,
|
||||
0x35, 0x33, 0xbc, 0xf9, 0x64, 0x85, 0x63, 0x32, 0x29, 0x20, 0x59, 0x0c,
|
||||
0x3c, 0x96, 0x67, 0x62, 0xb7, 0x8a, 0x92, 0x4d, 0xa0, 0xd3, 0xf3, 0xd1,
|
||||
0x85, 0x80, 0x38, 0xcb, 0x64, 0x60, 0xc9, 0xb5, 0xaf, 0x97, 0x8d, 0x20,
|
||||
0x45, 0x28, 0xb8, 0xab, 0xe8, 0xc9, 0x0a, 0x88, 0x1f, 0xd6, 0x47, 0x54,
|
||||
0xf1, 0xd3, 0xfb, 0x62, 0xa7, 0xfd, 0xf2, 0x8b, 0xfd, 0xb6, 0xe4, 0x2e,
|
||||
0xb6, 0x91, 0x73, 0x1c, 0xd0, 0x7b, 0xba, 0x83, 0xc9, 0xac, 0x51, 0x39,
|
||||
0x92, 0xc5, 0x4f, 0x30, 0x1e, 0x2e, 0xd5, 0xf1, 0xa8, 0xa6, 0xa5, 0x80,
|
||||
0x70, 0xb9, 0xbc, 0xb7, 0xc2, 0x52, 0x32, 0x6c, 0xe3, 0x3d, 0xed, 0x41,
|
||||
0xa4, 0x4b, 0x31, 0x2a, 0xe6, 0x62, 0x11, 0x19, 0x95, 0x73, 0x1d, 0xbf,
|
||||
0xe1, 0x6c, 0xfc, 0x47, 0x75, 0x6c, 0x37, 0x63, 0x02, 0xf8, 0x34, 0x40,
|
||||
0x9a, 0x00, 0x1d, 0xf7, 0x32, 0x56, 0x77, 0xda, 0x5b, 0x9f, 0x9f, 0x0f,
|
||||
0xbb, 0x91, 0x5b, 0xbd, 0xe7, 0x58, 0x82, 0x4a, 0x20, 0xcd, 0x4f, 0x47,
|
||||
0x15, 0xf3, 0x51, 0xf1, 0x43, 0x51, 0x10, 0x96, 0xae, 0xba, 0xf7, 0x21,
|
||||
0x50, 0xef, 0x55, 0x27, 0x0c, 0x1f, 0xe0, 0x54, 0xf8, 0xc9, 0x69, 0xef,
|
||||
0xb9, 0x53, 0xf7, 0x83, 0x73, 0x9d, 0xce, 0x86, 0x07, 0x83, 0x44, 0x61,
|
||||
0x37, 0x35, 0x35, 0x33, 0x4e, 0x33, 0x33, 0x3e, 0x9f, 0x50, 0x48, 0x24,
|
||||
0xe6, 0xd0, 0x79, 0x49, 0xc3, 0x2d, 0xa0, 0x46, 0x31, 0x9a, 0x72, 0xc3,
|
||||
0x84, 0xff, 0x7a, 0x95, 0xbb, 0x00, 0x22, 0xcc, 0x14, 0x00, 0x04, 0xac,
|
||||
0x60, 0x64, 0x86, 0xe4, 0x6f, 0xb1, 0x58, 0xf4, 0xdc, 0xb0, 0x05, 0x00,
|
||||
0x72, 0x38, 0xf8, 0xce, 0xce, 0x8e, 0xcf, 0x37, 0x33, 0x43, 0x24, 0x0a,
|
||||
0x85, 0x33, 0x35, 0x35, 0x37, 0xc2, 0xa8, 0x28, 0x27, 0x1b, 0x9d, 0xce,
|
||||
0x29, 0x18, 0xe4, 0xfc, 0x09, 0x53, 0xa5, 0x51, 0xad, 0x74, 0x79, 0x7b,
|
||||
0xb5, 0x4a, 0x65, 0x94, 0x36, 0x89, 0xf5, 0x62, 0x9b, 0xd8, 0x9a, 0xe8,
|
||||
0x2b, 0xff, 0xa1, 0x73, 0xfe, 0x2e, 0x2c, 0x41, 0x45, 0x37, 0xef, 0x6e,
|
||||
0x7b, 0x38, 0xca, 0xa5, 0xd2, 0xb8, 0x1c, 0x3b, 0x96, 0x21, 0xbb, 0x5d,
|
||||
0xad, 0xd1, 0xdb, 0x1c, 0xf6, 0x5e, 0x4b, 0xd9, 0x59, 0x1b, 0x67, 0xf5,
|
||||
0x7a, 0x7c, 0x9a, 0x91, 0x3e, 0x8e, 0xe3, 0xee, 0x7b, 0xb9, 0xa4, 0xb9,
|
||||
0xf5, 0x70, 0xee, 0x1d, 0x4e, 0x4f, 0xcc, 0xd6, 0x7b, 0x07, 0x71, 0x48,
|
||||
0xf2, 0x06, 0xd0, 0x10, 0x82, 0x21, 0xe4, 0x55, 0x2e, 0xa5, 0x1d, 0xbe,
|
||||
0x48, 0x8c, 0x69, 0xbb, 0x24, 0x40, 0x68, 0x9b, 0xba, 0x5a, 0x5a, 0xa7,
|
||||
0xe2, 0xd1, 0xac, 0xc2, 0xd8, 0x87, 0x9c, 0xe3, 0x78, 0xee, 0xa6, 0xcd,
|
||||
0xdd, 0x68, 0x6e, 0xdd, 0xdb, 0x2e, 0xc6, 0xbb, 0x8b, 0xe9, 0xc1, 0xd9,
|
||||
0xf6, 0x62, 0x7e, 0x72, 0x7e, 0x72, 0x34, 0xe4, 0x68, 0xc8, 0x11, 0x32,
|
||||
0x10, 0x32, 0x20, 0x32, 0xf0, 0x12, 0x19, 0x90, 0xe0, 0x45, 0x91, 0xe0,
|
||||
0x25, 0x83, 0xba, 0xc9, 0x20, 0x26, 0x87, 0xa5, 0x72, 0xa9, 0x64, 0x72,
|
||||
0x80, 0x21, 0x54, 0x13, 0xeb, 0x29, 0x18, 0x42, 0x34, 0x84, 0x94, 0x34,
|
||||
0x84, 0xa4, 0x1d, 0xa4, 0x1d, 0x82, 0x1c, 0xef, 0xaf, 0x0e, 0x63, 0x47,
|
||||
0x6d, 0x10, 0xb9, 0xec, 0xd8, 0x2d, 0x3b, 0x9e, 0x21, 0xb1, 0x67, 0x48,
|
||||
0x34, 0x24, 0x1a, 0x52, 0xdb, 0xa4, 0x6d, 0x62, 0xd3, 0xfa, 0xff, 0xfa,
|
||||
0x03, 0x34, 0xef, 0x9d, 0xc9, 0xe1, 0x5d, 0xec, 0xf5, 0xf1, 0x79, 0x8c,
|
||||
0x97, 0xd2, 0x24, 0xc1, 0x2d, 0xe0, 0x39, 0x16, 0x1e, 0xa9, 0x41, 0xc3,
|
||||
0xbf, 0x4a, 0xd9, 0x3c, 0xea, 0x77, 0x96, 0x55, 0xe6, 0x95, 0xc3, 0xf1,
|
||||
0x8e, 0x7b, 0x4f, 0xad, 0x61, 0xf8, 0xe7, 0x01, 0xad, 0x46, 0xf5, 0x2c,
|
||||
0xac, 0x55, 0x2c, 0x94, 0xaa, 0x46, 0xfb, 0x5e, 0xcd, 0xaa, 0x1f, 0x78,
|
||||
0x4f, 0x2f, 0xd1, 0xc9, 0x02, 0xd6, 0x2c, 0x67, 0xef, 0x3f, 0x54, 0xab,
|
||||
0xda, 0x03, 0x79, 0x1f, 0xab, 0xfd, 0x0c, 0x38, 0x3c, 0xbc, 0xe1, 0xd5,
|
||||
0x01, 0xf6, 0xfb, 0xfb, 0xf1, 0x70, 0xee, 0xfd, 0x90, 0x13, 0x97, 0xc4,
|
||||
0xbc, 0x08, 0xe7, 0x4b, 0x88, 0x34, 0xf7, 0x56, 0x1e, 0x0c, 0xdb, 0xe4,
|
||||
0x9c, 0x78, 0xf1, 0xf4, 0x62, 0x4c, 0xb5, 0xf7, 0xdd, 0xd9, 0x4c, 0x5a,
|
||||
0x69, 0xa6, 0x36, 0x27, 0x03, 0xbe, 0x86, 0xc2, 0x72, 0xa2, 0x60, 0x73,
|
||||
0xf1, 0xe0, 0x17, 0x50, 0xb5, 0x93, 0x81, 0xac, 0xf1, 0xc9, 0xd4, 0x66,
|
||||
0x73, 0x71, 0xce, 0x63, 0xa8, 0x60, 0x98, 0xda, 0x86, 0x46, 0x72, 0xec,
|
||||
0x54, 0xc1, 0xe6, 0x8a, 0x10, 0x23, 0x2d, 0x0c, 0xdd, 0x44, 0x0b, 0xa0,
|
||||
0x44, 0xa4, 0x9d, 0x0e, 0x64, 0x31, 0x30, 0x45, 0xb1, 0x97, 0x4c, 0x6d,
|
||||
0x9f, 0x43, 0x99, 0x71, 0xa8, 0x9a, 0xe6, 0xbd, 0x3a, 0xe1, 0xff, 0x7f,
|
||||
0x33, 0x61, 0xf1, 0x48, 0xda, 0x48, 0x28, 0x10, 0x23, 0x9b, 0x24, 0xeb,
|
||||
0xee, 0xbd, 0x35, 0x0e, 0x6e, 0x75, 0x23, 0x20, 0xd6, 0x95, 0x8c, 0xa5,
|
||||
0x24, 0xec, 0x44, 0x67, 0x52, 0x2e, 0x78, 0x2a, 0xba, 0x3e, 0x3a, 0x4e,
|
||||
0xc9, 0x5c, 0x23, 0xb0, 0xd5, 0xfb, 0x29, 0xaf, 0x9a, 0x0b, 0x90, 0x89,
|
||||
0xd8, 0xec, 0xa9, 0xa8, 0x13, 0xfc, 0x22, 0xfa, 0xf2, 0x74, 0x2f, 0x4e,
|
||||
0x35, 0xb0, 0x6d, 0x6c, 0xfd, 0xc4, 0xfe, 0xd0, 0x98, 0x3d, 0xe5, 0x43,
|
||||
0x0a, 0xd0, 0x33, 0x26, 0x3b, 0x12, 0x7d, 0x65, 0xa1, 0xff, 0xff, 0x6f,
|
||||
0x53, 0x0e, 0x28, 0x84, 0xa7, 0xa2, 0x2e, 0xf8, 0x4a, 0xb6, 0xa1, 0x47,
|
||||
0xf5, 0xd0, 0x13, 0x9d, 0xd9, 0x50, 0xef, 0x9f, 0x31, 0xb4, 0x13, 0x67,
|
||||
0xf9, 0x0a, 0x99, 0xb5, 0x80, 0xec, 0x4a, 0x1a, 0x59, 0x21, 0x3b, 0xce,
|
||||
0xc5, 0x7e, 0x96, 0x85, 0x92, 0xc5, 0xb0, 0x75, 0xaa, 0x41, 0x16, 0xa9,
|
||||
0xd3, 0xde, 0x13, 0x7d, 0xd9, 0x61, 0x30, 0x1c, 0x73, 0x61, 0x54, 0x90,
|
||||
0xcf, 0xd4, 0xe8, 0xfe, 0xbf, 0xbf, 0x36, 0x57, 0x26, 0x38, 0xab, 0x06,
|
||||
0xc4, 0x7e, 0x3c, 0x5b, 0x35, 0xd2, 0x7c, 0x2c, 0x0a, 0x82, 0xf2, 0xa8,
|
||||
0xf2, 0x8b, 0x48, 0xa9, 0x90, 0x00, 0x01, 0x10, 0x10, 0x14, 0x04, 0x00,
|
||||
0x32, 0xa2, 0x06, 0x82, 0x28, 0x90, 0xc2, 0xb4, 0x85, 0xda, 0x01, 0x52,
|
||||
0xe9, 0x18, 0x85, 0x60, 0x00, 0x00, 0x20, 0x0c, 0x00, 0x14, 0x41, 0x00,
|
||||
0x40, 0xa0, 0x04, 0x40, 0x00, 0x80, 0x50, 0x03, 0x01, 0x10, 0x18, 0x01,
|
||||
0x80, 0xd4, 0x14, 0x99, 0x01, 0xfd, 0x07, 0xf8, 0x16, 0x0e, 0xd9, 0x5d,
|
||||
0xa3, 0x70, 0xfe, 0xda, 0x17, 0xfa, 0xce, 0x46, 0x9a, 0x99, 0x81, 0x1a,
|
||||
0x39, 0xba, 0x63, 0xb1, 0x18, 0x11, 0x58, 0xd7, 0xc7, 0xba, 0x03, 0x3e,
|
||||
0x01, 0xf2, 0xf9, 0x4e, 0x12, 0xa3, 0x50, 0x7b, 0xaf, 0x7b, 0x60, 0x5c,
|
||||
0x83, 0x23, 0xd2, 0x60, 0x27, 0x84, 0xad, 0xb8, 0x02, 0xed, 0xfe, 0xb4,
|
||||
0x9c, 0x08, 0x9f, 0x49, 0xae, 0x55, 0x02, 0x94, 0xc0, 0x1b, 0x90, 0x75,
|
||||
0x0b, 0x90, 0xc4, 0xc7, 0x43, 0x9e, 0x67, 0x25, 0x70, 0x61, 0x0d, 0xb8,
|
||||
0x7a, 0x97, 0x43, 0xfc, 0xd1, 0x7e, 0x68, 0xed, 0x03, 0xb7, 0x1e, 0x75,
|
||||
0xe9, 0x4d, 0x7a, 0x23, 0x18, 0x37, 0x63, 0x6f, 0xab, 0x5f, 0x7c, 0x5b,
|
||||
0x1c, 0x05, 0xdf, 0x3f, 0x00, 0x86, 0x37, 0xa0, 0xfa, 0x0c, 0xe0, 0xed,
|
||||
0x35, 0x35, 0x2f, 0xd8, 0xd1, 0x75, 0xba, 0x37, 0x34, 0x7e, 0xb0, 0x84,
|
||||
0x2a, 0x01, 0x0c, 0x98, 0xed, 0x47, 0xf9, 0x86, 0x81, 0x74, 0x00, 0x5d,
|
||||
0x8b, 0x4c, 0x18, 0x8a, 0x31, 0xcd, 0xae, 0x07, 0x44, 0xb5, 0xd5, 0x07,
|
||||
0xa0, 0xdf, 0xf4, 0xfa, 0xa6, 0x42, 0xd0, 0x4f, 0x17, 0xd8, 0xdf, 0xb6,
|
||||
0x34, 0x44, 0xe3, 0x01, 0xc4, 0xb6, 0x2d, 0xb5, 0x56, 0xc6, 0x2a, 0x1f,
|
||||
0x05, 0x6c, 0x35, 0xe0, 0x09, 0x31, 0xef, 0x60, 0xfe, 0xaf, 0x07, 0x80,
|
||||
0x32, 0xa0, 0xe9, 0xd3, 0x96, 0x45, 0xa7, 0xaa, 0xb6, 0xfb, 0x03, 0x10,
|
||||
0xe3, 0x97, 0x96, 0x8d, 0x3a, 0x01, 0xdd, 0x58, 0x58, 0x78, 0x00, 0xab,
|
||||
0xff, 0x06, 0xa0, 0xd6, 0x01, 0x58, 0x08, 0xb7, 0xdc, 0x2d, 0xa7, 0xfb,
|
||||
0x22, 0xa8, 0x67, 0x00, 0xe3, 0xcf, 0x82, 0x43, 0xfc, 0x96, 0x1b, 0x40,
|
||||
0x63, 0xcf, 0x9d, 0x42, 0x5d, 0x66, 0x40, 0xaa, 0xaf, 0x28, 0x94, 0xd3,
|
||||
0x2a, 0xd4, 0x02, 0x13, 0xd2, 0xdf, 0x03, 0x9c, 0x60, 0x6b, 0x16, 0x94,
|
||||
0xb4, 0xbe, 0x62, 0xc2, 0x35, 0x60, 0x45, 0x09, 0x23, 0x5a, 0xe0, 0x85,
|
||||
0xb3, 0x03, 0x50, 0x68, 0x0c, 0x20, 0xa5, 0xf9, 0x94, 0xd2, 0x35, 0x80,
|
||||
0xad, 0x4c, 0x4e, 0x40, 0x41, 0x97, 0x92, 0x75, 0xbe, 0x0c, 0x03, 0x50,
|
||||
0x85, 0x08, 0xaf, 0x36, 0x00, 0x68, 0xaf, 0x09, 0xb3, 0x0c, 0x20, 0x4f,
|
||||
0x81, 0x6a, 0x6a, 0xf5, 0x0d, 0x70, 0x69, 0x00, 0x4c, 0xb4, 0x0f, 0x59,
|
||||
0xe7, 0x31, 0x0a, 0x45, 0x9f, 0xde, 0x90, 0xd6, 0x38, 0x80, 0x6b, 0x2c,
|
||||
0xb9, 0x2f, 0xd4, 0x01, 0x40, 0x14, 0xd5, 0xed, 0x8e, 0x01, 0x53, 0xbf,
|
||||
0x03, 0x18, 0x1e, 0xb0, 0xc1, 0x85, 0x32, 0xec, 0x78, 0x2b, 0xf0, 0xbb,
|
||||
0xbb, 0x6c, 0xf3, 0x4d, 0xdc, 0x73, 0x40, 0xfd, 0x10, 0x09, 0x9e, 0x20,
|
||||
0xe2, 0x12, 0x8c, 0xe0, 0xd2, 0xed, 0x80, 0x6b, 0xcc, 0x78, 0x20, 0x03,
|
||||
0xd0, 0x5e, 0x06, 0xf4, 0xb0, 0xc4, 0x0e, 0x15, 0x1d, 0x80, 0xb4, 0x76,
|
||||
0xdf, 0x49, 0x03, 0x50, 0x82, 0xad, 0xda, 0x8b, 0x5a, 0x61, 0xc2, 0x5e,
|
||||
0xb5, 0x1e, 0x46, 0xc0, 0xde, 0xaa, 0x0e, 0x15, 0x06, 0xd2, 0xf4, 0xb2,
|
||||
0xd1, 0xed, 0x38, 0x0a, 0x03, 0x18, 0x33, 0x1a, 0x80, 0x61, 0x3e, 0xec,
|
||||
0x7c, 0x74, 0xa8, 0x1d, 0x80, 0x1a, 0xce, 0x25, 0x1d, 0x41, 0xd1, 0xc1,
|
||||
0x03, 0x28, 0xb5, 0xaf, 0x72, 0x9c, 0x59, 0x7a, 0xe1, 0x7d, 0xc0, 0xa5,
|
||||
0x08, 0x1e, 0x18, 0x24, 0xfa, 0xbd, 0x99, 0x4a, 0x31, 0xa0, 0xea, 0xee,
|
||||
0xf8, 0x36, 0x60, 0x98, 0xc9, 0x10, 0xd1, 0xa7, 0x35, 0x00, 0x8d, 0x40,
|
||||
0x8e, 0x5a, 0x35, 0x0f, 0x80, 0xb1, 0xd4, 0x32, 0x79, 0x40, 0x34, 0x05,
|
||||
0x7e, 0x98, 0xc6, 0x80, 0x3e, 0x90, 0x01, 0x65, 0xf4, 0x80, 0x73, 0x08,
|
||||
0x64, 0xd7, 0x36, 0xc1, 0x7c, 0xc0, 0x5c, 0x75, 0x00, 0xc5, 0x09, 0x58,
|
||||
0x9c, 0x13, 0x01, 0x72, 0x37, 0x9b, 0x79, 0xe4, 0x05, 0xd1, 0x01, 0x04,
|
||||
0x98, 0x08, 0x74, 0xfd, 0xfc, 0x3f, 0x1c, 0x00, 0x73, 0x01, 0xfc, 0x1c,
|
||||
0xcc, 0x16, 0x43, 0x19, 0x1d, 0xac, 0x61, 0x4b, 0x11, 0xc2, 0xa0, 0xf2,
|
||||
0x01, 0x0b, 0x7b, 0x3b, 0xf4, 0xfc, 0x58, 0x5d, 0x2d, 0x5c, 0x01, 0x8c,
|
||||
0x62, 0x17, 0x78, 0xbe, 0x60, 0x8c, 0x01, 0x6f, 0x91, 0x49, 0x65, 0x54,
|
||||
0x92, 0xe9, 0x01, 0x1e, 0x10, 0x77, 0x35, 0x00, 0xa8, 0xd4, 0xc7, 0x71,
|
||||
0x07, 0xd8, 0xcd, 0xa3, 0x7d, 0x69, 0x20, 0xac, 0x07, 0x00, 0x35, 0xc7,
|
||||
0x62, 0xee, 0x8c, 0x7d, 0x0c, 0xb8, 0x43, 0x0e, 0x00, 0x08, 0xfb, 0xe7,
|
||||
0xec, 0x33, 0x37, 0x04, 0x80, 0x2d, 0x1d, 0xa6, 0x13, 0x34, 0x1b, 0x1d,
|
||||
0xc0, 0xca, 0x00, 0x92, 0xed, 0x2e, 0x56, 0xbe, 0x91, 0x80, 0x0c, 0x88,
|
||||
0xa6, 0x01, 0xdf, 0x7f, 0x90, 0x49, 0xed, 0x0c, 0xe0, 0x08, 0x73, 0x28,
|
||||
0x74, 0xc7, 0xe1, 0xb1, 0x03, 0x5d, 0xc5, 0xab, 0x61, 0x42, 0xdf, 0x03,
|
||||
0x43, 0xf3, 0x35, 0x04, 0xcf, 0xc6, 0x1d, 0x79, 0x07, 0x40, 0x22, 0xe4,
|
||||
0x68, 0x0d, 0x01, 0x95, 0xad, 0x72, 0x69, 0x00, 0x39, 0x9d, 0x53, 0x8f,
|
||||
0x13, 0x0d, 0xb0, 0x29, 0x79, 0x1a, 0x39, 0x20, 0x12, 0x28, 0x9b, 0x02,
|
||||
0x8f, 0x74, 0x90, 0x4c, 0xe8, 0xd1, 0x57, 0xf4, 0x01, 0x44, 0x04, 0xe0,
|
||||
0x0c, 0x82, 0x91, 0xc5, 0x4f, 0x8f, 0xc6, 0x00, 0x43, 0x85, 0x65, 0xc8,
|
||||
0xe6, 0x34, 0x1d, 0x80, 0xc0, 0xca, 0xdb, 0x57, 0x6c, 0x00, 0x72, 0x42,
|
||||
0x5f, 0xd0, 0x49, 0x57, 0x47, 0xd4, 0x97, 0x18, 0x18, 0x80, 0x68, 0x8e,
|
||||
0x0a, 0xf1, 0x6b, 0x34, 0xf1, 0x60, 0x2c, 0x41, 0x29, 0xd3, 0x3d, 0x55,
|
||||
0x95, 0xb1, 0x3c, 0xd4, 0x95, 0x42, 0xef, 0xe7, 0xca, 0x00, 0x2e, 0xce,
|
||||
0x25, 0xc2, 0xca, 0xf5, 0x00, 0x17, 0x3b, 0x8c, 0x42, 0x88, 0x03, 0xde,
|
||||
0x97, 0xe1, 0x3a, 0x74, 0xb0, 0x33, 0xe0, 0x8f, 0x47, 0xeb, 0x2a, 0x5f,
|
||||
0x36, 0x3e, 0x5a, 0xff, 0xc5, 0x80, 0xb9, 0x13, 0xa9, 0x1f, 0xf8, 0x86,
|
||||
0xc9, 0x51, 0xf8, 0x4c, 0xaa, 0xe1, 0x65, 0x80, 0xb0, 0x8b, 0x91, 0xec,
|
||||
0xcc, 0xbf, 0x70, 0x19, 0x98, 0x03, 0x10, 0xf0, 0x38, 0x40, 0xc4, 0x65,
|
||||
0xbe, 0x41, 0xb2, 0x58, 0x3f, 0xe0, 0xcc, 0x0e, 0x08, 0x2b, 0x73, 0xf4,
|
||||
0xdd, 0x86, 0x06, 0xa0, 0xc6, 0x8f, 0x1a, 0x32, 0x66, 0x50, 0x8e, 0xe1,
|
||||
0x59, 0x67, 0x00, 0xed, 0x66, 0x1d, 0xdd, 0xfa, 0x7b, 0xe2, 0x56, 0x89,
|
||||
0xd9, 0xa0, 0x4f, 0x41, 0x94, 0x28, 0xb8, 0xc6, 0xc7, 0x64, 0xde, 0x9b,
|
||||
0x64, 0x44, 0x33, 0x39, 0xb5, 0x6c, 0xb9, 0x42, 0xe7, 0x7e, 0x16, 0xd2,
|
||||
0x01, 0x12, 0x03, 0xb3, 0x48, 0x47, 0x6b, 0x75, 0x26, 0x19, 0x8c, 0xac,
|
||||
0x6f, 0xb1, 0x6f, 0xdc, 0x04, 0x27, 0x3a, 0x00, 0xd6, 0xae, 0xfa, 0xe1,
|
||||
0xf7, 0x30, 0xa4, 0xdb, 0xd5, 0x86, 0x5a, 0x07, 0x11, 0xde, 0xea, 0xf4,
|
||||
0xb0, 0x83, 0x16, 0xbb, 0xc6, 0x00, 0x6e, 0xf2, 0x6b, 0x40, 0x81, 0x01,
|
||||
0x67, 0x0e, 0xa9, 0x82, 0x23, 0x04, 0x34, 0xed, 0x02, 0xf5, 0xe4, 0x0e,
|
||||
0x58, 0xe8, 0x8a, 0x58, 0x57, 0xb0, 0x56, 0x65, 0x3d, 0x40, 0x64, 0x03,
|
||||
0x6e, 0x7b, 0x07, 0x20, 0x99, 0x90, 0x36, 0x95, 0x9f, 0xdf, 0x3d, 0xe8,
|
||||
0x00, 0xa0, 0x57, 0x8f, 0x6d, 0xa4, 0xb3, 0x1d, 0x7a, 0x06, 0xa8, 0x26,
|
||||
0x41, 0xb0, 0x8c, 0x9c, 0x10, 0x85, 0x6c, 0xb4, 0x31, 0xa6, 0x5b, 0x7a,
|
||||
0x10, 0x51, 0x15, 0x3c, 0xa2, 0x42, 0xd3, 0x23, 0x02, 0xc0, 0x17, 0x7e,
|
||||
0x03, 0x28, 0xba, 0xce, 0x9b, 0xae, 0xdd, 0x1a, 0x19, 0xd0, 0x15, 0xac,
|
||||
0xeb, 0x20, 0x3c, 0x3a, 0x00, 0xc6, 0xbb, 0x8a, 0xd5, 0x64, 0xc2, 0x21,
|
||||
0x1d, 0x6c, 0x15, 0x5a, 0xd3, 0x44, 0x98, 0x14, 0x95, 0xb3, 0xb7, 0xdd,
|
||||
0xa6, 0xea, 0x06, 0x54, 0x78, 0xc3, 0xe8, 0x79, 0x9b, 0x86, 0x29, 0x76,
|
||||
0x8b, 0x6b, 0xaa, 0x0d, 0xa8, 0x2f, 0x22, 0x2a, 0xeb, 0x68, 0x81, 0x6c,
|
||||
0x56, 0xfd, 0x79, 0xac, 0x79, 0x4b, 0xa0, 0x01, 0x3f, 0x17, 0x43, 0x82,
|
||||
0xb4, 0xd5, 0x00, 0x14, 0xb7, 0xf5, 0x00, 0xf4, 0x15, 0xa8, 0xd7, 0x4b,
|
||||
0xb1, 0xbc, 0xa8, 0x36, 0x98, 0xf0, 0x8c, 0xe7, 0xf4, 0x7b, 0x35, 0xd8,
|
||||
0xad, 0x0d, 0x5f, 0x9d, 0x96, 0xab, 0xed, 0x48, 0xe2, 0xdc, 0x1c, 0xbe,
|
||||
0x12, 0xfa, 0x41, 0x6f, 0xf5, 0x1e, 0xb6, 0x9f, 0xee, 0xac, 0x21, 0xf4,
|
||||
0xf6, 0x00, 0x38, 0xb1, 0x1f, 0xfd, 0xd0, 0x0e, 0xc7, 0xdd, 0xa0, 0x39,
|
||||
0x07, 0x8c, 0x35, 0x1f, 0x7e, 0xcc, 0xbf, 0xf6, 0xe0, 0x06, 0x66, 0x7d,
|
||||
0x10, 0x3f, 0xc5, 0x3e, 0xde, 0x42, 0xf9, 0x3d, 0x00, 0x54, 0x81, 0x67,
|
||||
0x8a, 0xe6, 0x63, 0x0d, 0x01, 0xd0, 0x31, 0xe0, 0x6e, 0xd0, 0xe1, 0x59,
|
||||
0xf6, 0x1b, 0xf7, 0x0d, 0x52, 0x06, 0x80, 0x61, 0x4f, 0xe8, 0x77, 0xdd,
|
||||
0x6f, 0x48, 0x20, 0x1d, 0xbb, 0x2a, 0x16, 0x8b, 0x54, 0x87, 0x92, 0x83,
|
||||
0xe6, 0x8f, 0x55, 0x59, 0x06, 0x00, 0xe9, 0xc5, 0xce, 0x21, 0x63, 0x87,
|
||||
0xaf, 0x86, 0xcc, 0xba, 0xd6, 0xe7, 0x00, 0xf6, 0x91, 0x92, 0x92, 0xea,
|
||||
0xe8, 0x42, 0x06, 0x69, 0x13, 0xf5, 0x00, 0xd0, 0xb0, 0xa7, 0xcb, 0x4c,
|
||||
0xb0, 0xd2, 0x2d, 0x28, 0x63, 0xf0, 0x6a, 0xc7, 0x80, 0x6a, 0x19, 0xb2,
|
||||
0x66, 0x51, 0xf3, 0xb1, 0x21, 0xa0, 0x48, 0xad, 0x1e, 0x80, 0x62, 0xaf,
|
||||
0x00, 0xf4, 0xa5, 0x4e, 0x83, 0x75, 0x1b, 0xfe, 0x00, 0xc4, 0xcf, 0x55,
|
||||
0xb2, 0x50, 0xa6, 0xeb, 0x38, 0xed, 0x8f, 0xd3, 0x1d, 0x00, 0xf6, 0xe3,
|
||||
0x90, 0x1c, 0x60, 0x9e, 0x8e, 0xeb, 0x0b, 0xba, 0x44, 0x06, 0x68, 0xbb,
|
||||
0xd3, 0x10, 0x63, 0x35, 0xe1, 0x86, 0x5c, 0x5c, 0x2b, 0x85, 0xa6, 0xe7,
|
||||
0x38, 0x2c, 0x18, 0x83, 0x1f, 0x8f, 0x9b, 0x8e, 0x4d, 0x26, 0xcd, 0x34,
|
||||
0x0c, 0x66, 0x1d, 0x70, 0xb7, 0x01, 0xe6, 0x02, 0xa8, 0x51, 0x63, 0xcf,
|
||||
0xbb, 0x03, 0xca, 0x85, 0xc6, 0x9c, 0xf6, 0xf1, 0x51, 0xe0, 0x60, 0x07,
|
||||
0x40, 0x86, 0xf0, 0x1e, 0x6e, 0xef, 0x61, 0x10, 0xd9, 0x36, 0xcc, 0xfc,
|
||||
0x58, 0xe2, 0x37, 0x0d, 0x58, 0xb7, 0xbe, 0xca, 0xc9, 0xd8, 0xcd, 0xaa,
|
||||
0xd5, 0x5b, 0x77, 0x83, 0xcb, 0x0e, 0x30, 0xce, 0xc8, 0xb8, 0xcd, 0xbf,
|
||||
0x1e, 0x63, 0x04, 0xad, 0xb7, 0xcd, 0x43, 0x62, 0x4c, 0xe0, 0x1a, 0xd4,
|
||||
0x21, 0xe2, 0xdd, 0x33, 0xdf, 0xb1, 0xdd, 0xdc, 0x01, 0x22, 0x18, 0xce,
|
||||
0xa1, 0xd8, 0xcb, 0x67, 0xd5, 0x38, 0x4a, 0xbc, 0xd5, 0x81, 0x3d, 0x03,
|
||||
0x98, 0x35, 0x60, 0x41, 0x85, 0x0c, 0x1d, 0xe7, 0x76, 0xf8, 0x11, 0x52,
|
||||
0x76, 0xf6, 0x06, 0x16, 0x02, 0x45, 0xc8, 0xd8, 0x2f, 0x5e, 0x57, 0xbc,
|
||||
0x3b, 0x89, 0x97, 0x09, 0x3e, 0x03, 0x34, 0x1a, 0x9d, 0x37, 0x87, 0x48,
|
||||
0x0a, 0xe0, 0xa7, 0x4f, 0x8c, 0x3a, 0xa2, 0xaf, 0xfd, 0x7b, 0x80, 0xcf,
|
||||
0xe5, 0x18, 0x61, 0x68, 0xba, 0x61, 0x8b, 0x09, 0xaa, 0xa3, 0x0c, 0x47,
|
||||
0x3c, 0x43, 0x03, 0xac, 0xa3, 0x2e, 0x5e, 0x72, 0x0c, 0x80, 0x19, 0x61,
|
||||
0xe6, 0x6e, 0x0e, 0xd9, 0xe8, 0xe8, 0xaf, 0x11, 0x9b, 0x4a, 0x73, 0x7a,
|
||||
0x61, 0x66, 0xf0, 0x54, 0x1d, 0x18, 0xc8, 0x23, 0x36, 0xbf, 0xb5, 0xf4,
|
||||
0x86, 0x54, 0xed, 0xb5, 0x91, 0xee, 0xb8, 0xbc, 0xde, 0xc3, 0x87, 0x9b,
|
||||
0x2f, 0x81, 0xf2, 0xee, 0xa3, 0xec, 0x02
|
||||
};
|
||||
|
||||
/**
|
||||
* Uncompressed size of \c #srcZstd.
|
||||
*/
|
||||
#define DXT1_256x256 32768
|
||||
|
||||
/**
|
||||
* Destination for decoding \c #srcZstd.
|
||||
*/
|
||||
static uint8_t dstDxt1[DXT1_256x256] = {};
|
||||
|
||||
#ifndef ZSTD_VERSION_MAJOR
|
||||
/**
|
||||
* For the case where the decompression library hasn't been included we add a
|
||||
* dummy function to fake the process and stop the buffers being optimised out.
|
||||
*/
|
||||
size_t ZSTD_decompress(void* dst, size_t dstLen, const void* src, size_t srcLen) {
|
||||
return (memcmp(dst, src, (srcLen < dstLen) ? srcLen : dstLen)) ? dstLen : 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
//*************************** Program and Shaders ***************************/
|
||||
|
||||
/**
|
||||
* Program object ID.
|
||||
*/
|
||||
static GLuint progId = 0;
|
||||
|
||||
/**
|
||||
* Vertex shader ID.
|
||||
*/
|
||||
static GLuint vertId = 0;
|
||||
|
||||
/**
|
||||
* Fragment shader ID.
|
||||
*/
|
||||
static GLuint fragId = 0;
|
||||
|
||||
//********************************* Uniforms *********************************/
|
||||
|
||||
/**
|
||||
* Quad rotation angle ID.
|
||||
*/
|
||||
static GLint uRotId = -1;
|
||||
|
||||
/**
|
||||
* Draw colour ID.
|
||||
*/
|
||||
static GLint uTx0Id = -1;
|
||||
|
||||
//******************************* Shader Source ******************************/
|
||||
|
||||
/**
|
||||
* Vertex shader to draw texture mapped polys with an applied rotation.
|
||||
*/
|
||||
static GLchar const vertShader2D[] =
|
||||
#if GL_ES_VERSION_2_0
|
||||
"#version 100\n"
|
||||
"precision mediump float;\n"
|
||||
#else
|
||||
"#version 120\n"
|
||||
#endif
|
||||
"uniform float uRot;" // rotation
|
||||
"attribute vec2 aPos;" // vertex position coords
|
||||
"attribute vec2 aUV0;" // vertex texture UV0
|
||||
"varying vec2 vUV0;" // (passed to fragment shader)
|
||||
"void main() {"
|
||||
" float cosA = cos(radians(uRot));"
|
||||
" float sinA = sin(radians(uRot));"
|
||||
" mat3 rot = mat3(cosA, -sinA, 0.0,"
|
||||
" sinA, cosA, 0.0,"
|
||||
" 0.0, 0.0, 1.0);"
|
||||
" gl_Position = vec4(rot * vec3(aPos, 1.0), 1.0);"
|
||||
" vUV0 = aUV0;"
|
||||
"}";
|
||||
|
||||
/**
|
||||
* Fragment shader for the above polys.
|
||||
*/
|
||||
static GLchar const fragShader2D[] =
|
||||
#if GL_ES_VERSION_2_0
|
||||
"#version 100\n"
|
||||
"precision mediump float;\n"
|
||||
#else
|
||||
"#version 120\n"
|
||||
#endif
|
||||
"uniform sampler2D uTx0;"
|
||||
"varying vec2 vUV0;" // (passed from fragment shader)
|
||||
"void main() {"
|
||||
" gl_FragColor = texture2D(uTx0, vUV0);"
|
||||
"}";
|
||||
|
||||
/**
|
||||
* Helper to compile a shader.
|
||||
*
|
||||
* \param type shader type
|
||||
* \param text shader source
|
||||
* \return the shader ID (or zero if compilation failed)
|
||||
*/
|
||||
static GLuint compileShader(GLenum const type, const GLchar* text) {
|
||||
GLuint shader = glCreateShader(type);
|
||||
if (shader) {
|
||||
glShaderSource (shader, 1, &text, NULL);
|
||||
glCompileShader(shader);
|
||||
GLint compiled;
|
||||
glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
|
||||
if (compiled) {
|
||||
return shader;
|
||||
} else {
|
||||
GLint logLen;
|
||||
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &logLen);
|
||||
if (logLen > 1) {
|
||||
GLchar* logStr = malloc(logLen);
|
||||
glGetShaderInfoLog(shader, logLen, NULL, logStr);
|
||||
#ifndef NDEBUG
|
||||
printf("Shader compilation error: %s\n", logStr);
|
||||
#endif
|
||||
free(logStr);
|
||||
}
|
||||
glDeleteShader(shader);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
//********************************** Helpers *********************************/
|
||||
|
||||
/**
|
||||
* Vertex position index.
|
||||
*/
|
||||
#define GL_VERT_POSXY_ID 0
|
||||
|
||||
/**
|
||||
* Vertex UV0 index.
|
||||
*/
|
||||
#define GL_VERT_TXUV0_ID 1
|
||||
|
||||
/**
|
||||
* \c GL vec2 storage type.
|
||||
*/
|
||||
struct vec2 {
|
||||
float x;
|
||||
float y;
|
||||
};
|
||||
|
||||
/**
|
||||
* Combined 2D vertex and 2D texture coordinates.
|
||||
*/
|
||||
struct posTex2d {
|
||||
struct vec2 pos;
|
||||
struct vec2 uv0;
|
||||
};
|
||||
|
||||
//****************************************************************************/
|
||||
|
||||
/**
|
||||
* Current quad rotation angle (in degrees, updated per frame).
|
||||
*/
|
||||
static float rotDeg = 0.0f;
|
||||
|
||||
/**
|
||||
* Emscripten (single) GL context.
|
||||
*/
|
||||
static EMSCRIPTEN_WEBGL_CONTEXT_HANDLE glCtx = 0;
|
||||
|
||||
/**
|
||||
* Emscripten resize handler.
|
||||
*/
|
||||
static EM_BOOL resize(int type, const EmscriptenUiEvent* e, void* data) {
|
||||
double surfaceW;
|
||||
double surfaceH;
|
||||
if (emscripten_get_element_css_size ("#canvas", &surfaceW, &surfaceH) == EMSCRIPTEN_RESULT_SUCCESS) {
|
||||
emscripten_set_canvas_element_size("#canvas", surfaceW, surfaceH);
|
||||
if (glCtx) {
|
||||
glViewport(0, 0, (int) surfaceW, (int) surfaceH);
|
||||
}
|
||||
}
|
||||
(void) type;
|
||||
(void) data;
|
||||
(void) e;
|
||||
return EM_FALSE;
|
||||
}
|
||||
|
||||
/**
|
||||
* Boilerplate to create a WebGL context.
|
||||
*/
|
||||
static EM_BOOL initContext() {
|
||||
// Default attributes
|
||||
EmscriptenWebGLContextAttributes attr;
|
||||
emscripten_webgl_init_context_attributes(&attr);
|
||||
if ((glCtx = emscripten_webgl_create_context("#canvas", &attr))) {
|
||||
// Bind the context and fire a resize to get the initial size
|
||||
emscripten_webgl_make_context_current(glCtx);
|
||||
emscripten_set_resize_callback(EMSCRIPTEN_EVENT_TARGET_DOCUMENT, NULL, EM_FALSE, resize);
|
||||
resize(0, NULL, NULL);
|
||||
return EM_TRUE;
|
||||
}
|
||||
return EM_FALSE;
|
||||
}
|
||||
|
||||
/**
|
||||
* Called once per frame (clears the screen and draws the rotating quad).
|
||||
*/
|
||||
static void tick() {
|
||||
glClearColor(1.0f, 0.0f, 1.0f, 1.0f);
|
||||
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
|
||||
|
||||
if (uRotId >= 0) {
|
||||
glUniform1f(uRotId, rotDeg);
|
||||
rotDeg += 0.1f;
|
||||
if (rotDeg >= 360.0f) {
|
||||
rotDeg -= 360.0f;
|
||||
}
|
||||
}
|
||||
|
||||
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, 0);
|
||||
glFlush();
|
||||
}
|
||||
|
||||
/**
 * Creates the GL context, shaders and quad data, decompresses the Zstd data
 * and 'uploads' the resulting texture.
 *
 * As a (naive) comparison, removing Zstd and building with "-Os -g0 -s WASM=1
 * -lGL emscripten.c" results in a 15kB WebAssembly file; re-adding Zstd
 * increases the Wasm by 26kB.
 */
|
||||
int main() {
|
||||
if (initContext()) {
|
||||
// Compile shaders and set the initial GL state
|
||||
if ((progId = glCreateProgram())) {
|
||||
vertId = compileShader(GL_VERTEX_SHADER, vertShader2D);
|
||||
fragId = compileShader(GL_FRAGMENT_SHADER, fragShader2D);
|
||||
|
||||
glBindAttribLocation(progId, GL_VERT_POSXY_ID, "aPos");
|
||||
glBindAttribLocation(progId, GL_VERT_TXUV0_ID, "aUV0");
|
||||
|
||||
glAttachShader(progId, vertId);
|
||||
glAttachShader(progId, fragId);
|
||||
glLinkProgram (progId);
|
||||
glUseProgram (progId);
|
||||
uRotId = glGetUniformLocation(progId, "uRot");
|
||||
uTx0Id = glGetUniformLocation(progId, "uTx0");
|
||||
if (uTx0Id >= 0) {
|
||||
glUniform1i(uTx0Id, 0);
|
||||
}
|
||||
|
||||
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
|
||||
glEnable(GL_BLEND);
|
||||
glDisable(GL_DITHER);
|
||||
|
||||
glCullFace(GL_BACK);
|
||||
glEnable(GL_CULL_FACE);
|
||||
}
|
||||
|
||||
GLuint vertsBuf = 0;
|
||||
GLuint indexBuf = 0;
|
||||
GLuint txName = 0;
|
||||
// Create the textured quad (vert positions then UVs)
|
||||
struct posTex2d verts2d[] = {
|
||||
{{-0.85f, -0.85f}, {0.0f, 0.0f}}, // BL
|
||||
{{ 0.85f, -0.85f}, {1.0f, 0.0f}}, // BR
|
||||
{{-0.85f, 0.85f}, {0.0f, 1.0f}}, // TL
|
||||
{{ 0.85f, 0.85f}, {1.0f, 1.0f}}, // TR
|
||||
};
|
||||
uint16_t index2d[] = {
|
||||
0, 1, 2,
|
||||
2, 1, 3,
|
||||
};
|
||||
glGenBuffers(1, &vertsBuf);
|
||||
glBindBuffer(GL_ARRAY_BUFFER, vertsBuf);
|
||||
glBufferData(GL_ARRAY_BUFFER,
|
||||
sizeof(verts2d), verts2d, GL_STATIC_DRAW);
|
||||
glVertexAttribPointer(GL_VERT_POSXY_ID, 2,
|
||||
GL_FLOAT, GL_FALSE, sizeof(struct posTex2d), 0);
|
||||
glVertexAttribPointer(GL_VERT_TXUV0_ID, 2,
|
||||
GL_FLOAT, GL_FALSE, sizeof(struct posTex2d),
|
||||
(void*) offsetof(struct posTex2d, uv0));
|
||||
glGenBuffers(1, &indexBuf);
|
||||
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBuf);
|
||||
glBufferData(GL_ELEMENT_ARRAY_BUFFER,
|
||||
sizeof(index2d), index2d, GL_STATIC_DRAW);
|
||||
glEnableVertexAttribArray(GL_VERT_POSXY_ID);
|
||||
glEnableVertexAttribArray(GL_VERT_TXUV0_ID);
|
||||
|
||||
// Decode the Zstd data and create the texture
|
||||
if (ZSTD_decompress(dstDxt1, DXT1_256x256, srcZstd, sizeof srcZstd) == DXT1_256x256) {
|
||||
glGenTextures(1, &txName);
|
||||
glBindTexture(GL_TEXTURE_2D, txName);
|
||||
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
|
||||
glCompressedTexImage2D(GL_TEXTURE_2D, 0,
|
||||
GL_COMPRESSED_RGB_S3TC_DXT1_EXT,
|
||||
256, 256, 0, DXT1_256x256, dstDxt1);
|
||||
} else {
|
||||
printf("Failed to decode Zstd data\n");
|
||||
}
|
||||
emscripten_set_main_loop(tick, 0, EM_FALSE);
|
||||
emscripten_exit_with_live_runtime();
|
||||
}
|
||||
return EXIT_FAILURE;
|
||||
}
|
|
@ -0,0 +1,31 @@
|
|||
<!doctype html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, maximum-scale=1.0, user-scalable=no, viewport-fit=cover" />
|
||||
<meta name="apple-mobile-web-app-capable" content="yes" />
|
||||
<meta name="apple-mobile-web-app-status-bar-style" content="black-translucent" />
|
||||
<title>Emscripten Shell</title>
|
||||
<style>
|
||||
body {background:#333; font-family:"Verdana","Helvetica Neue","Helvetica","Arial",sans-serif; margin:1em 0;}
|
||||
#canvas{position:absolute; top:0px; left:0px; border:none; margin:0; width: 100%; height: 100%; overflow: hidden; display: block;}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<canvas id="canvas" oncontextmenu="event.preventDefault()"></canvas>
|
||||
<script>
|
||||
function trace(msg) {
|
||||
console.log(msg);
|
||||
}
|
||||
|
||||
var Module = {
|
||||
print: trace, printErr: trace, setStatus: trace, monitorRunDependencies: trace,
|
||||
canvas: (function() {
|
||||
return document.getElementById("canvas");
|
||||
})(),
|
||||
preRun: [], postRun: [],
|
||||
};
|
||||
</script>
|
||||
{{{ SCRIPT }}}
|
||||
</body>
|
||||
</html>
|
(one large file diff suppressed and not shown; a 12 KiB binary file was also added)
|
@ -0,0 +1,72 @@
|
|||
/**
|
||||
* \file zstddeclib.c
|
||||
* Single-file Zstandard decompressor.
|
||||
*
|
||||
* Generate using:
|
||||
* \code
|
||||
* combine.sh -r ../../lib -r ../../lib/common -r ../../lib/decompress -o zstddeclib.c zstddeclib-in.c
|
||||
* \endcode
|
||||
*/
|
||||
/*
|
||||
* BSD License
|
||||
*
|
||||
* For Zstandard software
|
||||
*
|
||||
* Copyright (c) 2016-present, Facebook, Inc. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
*
|
||||
* * Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* * Neither the name Facebook nor the names of its contributors may be used
|
||||
* to endorse or promote products derived from this software without
|
||||
* specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
/*
|
||||
* Settings to bake for the standalone decompressor.
|
||||
*
|
||||
* Note: It's important that none of these affects 'zstd.h' (only the
|
||||
* implementation files we're amalgamating).
|
||||
*
|
||||
* Note: MEM_MODULE stops xxhash redefining BYTE, U16, etc., which are also
|
||||
* defined in mem.h (breaking C99 compatibility).
|
||||
*/
|
||||
#define DEBUGLEVEL 0
|
||||
#define MEM_MODULE
|
||||
#define XXH_NAMESPACE ZSTD_
|
||||
#define XXH_PRIVATE_API
|
||||
#define XXH_INLINE_ALL
|
||||
#define ZSTD_LEGACY_SUPPORT 0
|
||||
#define ZSTD_LIB_COMPRESSION 0
|
||||
#define ZSTD_LIB_DEPRECATED 0
|
||||
#define ZSTD_NOBENCH
|
||||
#define ZSTD_STRIP_ERROR_STRINGS
|
||||
|
||||
#include "debug.c"
|
||||
#include "entropy_common.c"
|
||||
#include "error_private.c"
|
||||
#include "fse_decompress.c"
|
||||
#include "xxhash.c"
|
||||
#include "zstd_common.c"
|
||||
#include "huf_decompress.c"
|
||||
#include "zstd_ddict.c"
|
||||
#include "zstd_decompress.c"
|
||||
#include "zstd_decompress_block.c"
|
|
@ -1,15 +1,26 @@
|
|||
# ################################################################
|
||||
# Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under both the BSD-style license (found in the
|
||||
# LICENSE file in the root directory of this source tree) and the GPLv2 (found
|
||||
# in the COPYING file in the root directory of this source tree).
|
||||
# ################################################################
|
||||
|
||||
ZSTD ?= zstd # requires zstd installation on local system
|
||||
DIFF ?= diff
|
||||
HARNESS_FILES=*.c
|
||||
|
||||
MULTITHREAD_LDFLAGS = -pthread
|
||||
DEBUGFLAGS= -g -DZSTD_DEBUG=1
|
||||
CPPFLAGS += -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(ZSTDDIR)/compress \
|
||||
-I$(ZSTDDIR)/dictBuilder -I$(ZSTDDIR)/deprecated -I$(PRGDIR)
|
||||
CFLAGS ?= -O3
|
||||
CFLAGS ?= -O2
|
||||
CFLAGS += -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
|
||||
-Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
|
||||
-Wstrict-prototypes -Wundef \
|
||||
-Wstrict-aliasing=1 -Wswitch-enum \
|
||||
-Wredundant-decls -Wstrict-prototypes -Wundef \
|
||||
-Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \
|
||||
-Wredundant-decls
|
||||
-std=c99
|
||||
CFLAGS += $(DEBUGFLAGS)
|
||||
CFLAGS += $(MOREFLAGS)
|
||||
FLAGS = $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) $(MULTITHREAD_LDFLAGS)
|
||||
|
@ -22,13 +33,22 @@ clean:
|
|||
@$(RM) -rf harness.dSYM
|
||||
|
||||
test: harness
|
||||
@zstd README.md -o tmp.zst
|
||||
#
|
||||
# Testing single-file decompression with educational decoder
|
||||
#
|
||||
@$(ZSTD) README.md -o tmp.zst
|
||||
@./harness tmp.zst tmp
|
||||
@diff -s tmp README.md
|
||||
@$(DIFF) -s tmp README.md
|
||||
@$(RM) -f tmp*
|
||||
@zstd --train harness.c zstd_decompress.c zstd_decompress.h README.md
|
||||
@zstd -D dictionary README.md -o tmp.zst
|
||||
#
|
||||
# Testing dictionary decompression with educational decoder
|
||||
#
|
||||
# note : files are presented multiple times for training, to reach minimum threshold
|
||||
@$(ZSTD) --train harness.c zstd_decompress.c zstd_decompress.h README.md \
|
||||
harness.c zstd_decompress.c zstd_decompress.h README.md \
|
||||
harness.c zstd_decompress.c zstd_decompress.h README.md
|
||||
@$(ZSTD) -D dictionary README.md -o tmp.zst
|
||||
@./harness tmp.zst tmp dictionary
|
||||
@diff -s tmp README.md
|
||||
@$(DIFF) -s tmp README.md
|
||||
@$(RM) -f tmp* dictionary
|
||||
@make clean
|
||||
@$(MAKE) clean
|
||||
|
|
|
@ -33,7 +33,7 @@ size_t read_file(const char *path, u8 **ptr) {
|
|||
}
|
||||
|
||||
fseek(f, 0L, SEEK_END);
|
||||
size_t size = ftell(f);
|
||||
size_t size = (size_t)ftell(f);
|
||||
rewind(f);
|
||||
|
||||
*ptr = malloc(size);
|
||||
|
|
|
@ -395,7 +395,7 @@ size_t ZSTD_decompress_with_dict(void *const dst, const size_t dst_len,
|
|||
/* this decoder assumes decompression of a single frame */
|
||||
decode_frame(&out, &in, parsed_dict);
|
||||
|
||||
return out.ptr - (u8 *)dst;
|
||||
return (size_t)(out.ptr - (u8 *)dst);
|
||||
}
|
||||
|
||||
/******* FRAME DECODING ******************************************************/
|
||||
|
@ -416,7 +416,7 @@ static void decompress_data(frame_context_t *const ctx, ostream_t *const out,
|
|||
|
||||
static void decode_frame(ostream_t *const out, istream_t *const in,
|
||||
const dictionary_t *const dict) {
|
||||
const u32 magic_number = IO_read_bits(in, 32);
|
||||
const u32 magic_number = (u32)IO_read_bits(in, 32);
|
||||
// Zstandard frame
|
||||
//
|
||||
// "Magic_Number
|
||||
|
@ -497,7 +497,7 @@ static void parse_frame_header(frame_header_t *const header,
|
|||
// 3 Reserved_bit
|
||||
// 2 Content_Checksum_flag
|
||||
// 1-0 Dictionary_ID_flag"
|
||||
const u8 descriptor = IO_read_bits(in, 8);
|
||||
const u8 descriptor = (u8)IO_read_bits(in, 8);
|
||||
|
||||
// decode frame header descriptor into flags
|
||||
const u8 frame_content_size_flag = descriptor >> 6;
|
||||
|
@ -521,7 +521,7 @@ static void parse_frame_header(frame_header_t *const header,
|
|||
//
|
||||
// Bit numbers 7-3 2-0
|
||||
// Field name Exponent Mantissa"
|
||||
u8 window_descriptor = IO_read_bits(in, 8);
|
||||
u8 window_descriptor = (u8)IO_read_bits(in, 8);
|
||||
u8 exponent = window_descriptor >> 3;
|
||||
u8 mantissa = window_descriptor & 7;
|
||||
|
||||
|
@ -541,7 +541,7 @@ static void parse_frame_header(frame_header_t *const header,
|
|||
const int bytes_array[] = {0, 1, 2, 4};
|
||||
const int bytes = bytes_array[dictionary_id_flag];
|
||||
|
||||
header->dictionary_id = IO_read_bits(in, bytes * 8);
|
||||
header->dictionary_id = (u32)IO_read_bits(in, bytes * 8);
|
||||
} else {
|
||||
header->dictionary_id = 0;
|
||||
}
|
||||
|
@ -633,8 +633,8 @@ static void decompress_data(frame_context_t *const ctx, ostream_t *const out,
|
|||
//
|
||||
// The next 2 bits represent the Block_Type, while the remaining 21 bits
|
||||
// represent the Block_Size. Format is little-endian."
|
||||
last_block = IO_read_bits(in, 1);
|
||||
const int block_type = IO_read_bits(in, 2);
|
||||
last_block = (int)IO_read_bits(in, 1);
|
||||
const int block_type = (int)IO_read_bits(in, 2);
|
||||
const size_t block_len = IO_read_bits(in, 21);
|
||||
|
||||
switch (block_type) {
|
||||
|
@ -748,8 +748,8 @@ static size_t decode_literals(frame_context_t *const ctx, istream_t *const in,
|
|||
// types"
|
||||
//
|
||||
// size_format takes between 1 and 2 bits
|
||||
int block_type = IO_read_bits(in, 2);
|
||||
int size_format = IO_read_bits(in, 2);
|
||||
int block_type = (int)IO_read_bits(in, 2);
|
||||
int size_format = (int)IO_read_bits(in, 2);
|
||||
|
||||
if (block_type <= 1) {
|
||||
// Raw or RLE literals block
|
||||
|
@ -833,6 +833,7 @@ static size_t decode_literals_compressed(frame_context_t *const ctx,
|
|||
// bits (0-1023)."
|
||||
num_streams = 1;
|
||||
// Fall through as it has the same size format
|
||||
/* fallthrough */
|
||||
case 1:
|
||||
// "4 streams. Both Compressed_Size and Regenerated_Size use 10 bits
|
||||
// (0-1023)."
|
||||
|
@ -1005,7 +1006,7 @@ static const i16 SEQ_MATCH_LENGTH_DEFAULT_DIST[53] = {
|
|||
static const u32 SEQ_LITERAL_LENGTH_BASELINES[36] = {
|
||||
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
|
||||
12, 13, 14, 15, 16, 18, 20, 22, 24, 28, 32, 40,
|
||||
48, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65538};
|
||||
48, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536};
|
||||
static const u8 SEQ_LITERAL_LENGTH_EXTRA_BITS[36] = {
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,
|
||||
1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
|
||||
|
@ -1021,7 +1022,7 @@ static const u8 SEQ_MATCH_LENGTH_EXTRA_BITS[53] = {
|
|||
2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
|
||||
|
||||
/// Offset decoding is simpler so we just need a maximum code value
|
||||
static const u8 SEQ_MAX_CODES[3] = {35, -1, 52};
|
||||
static const u8 SEQ_MAX_CODES[3] = {35, (u8)-1, 52};
|
||||
|
||||
static void decompress_sequences(frame_context_t *const ctx,
|
||||
istream_t *const in,
|
||||
|
@ -1132,7 +1133,7 @@ static void decompress_sequences(frame_context_t *const ctx, istream_t *in,
|
|||
// a single 1-bit and then fills the byte with 0-7 0 bits of padding."
|
||||
const int padding = 8 - highest_set_bit(src[len - 1]);
|
||||
// The offset starts at the end because FSE streams are read backwards
|
||||
i64 bit_offset = len * 8 - padding;
|
||||
i64 bit_offset = (i64)(len * 8 - (size_t)padding);
|
||||
|
||||
// "The bitstream starts with initial state values, each using the required
|
||||
// number of bits in their respective accuracy, decoded previously from
|
||||
|
@ -1409,7 +1410,7 @@ size_t ZSTD_get_decompressed_size(const void *src, const size_t src_len) {
|
|||
|
||||
// get decompressed size from ZSTD frame header
|
||||
{
|
||||
const u32 magic_number = IO_read_bits(&in, 32);
|
||||
const u32 magic_number = (u32)IO_read_bits(&in, 32);
|
||||
|
||||
if (magic_number == 0xFD2FB528U) {
|
||||
// ZSTD frame
|
||||
|
@ -1418,7 +1419,7 @@ size_t ZSTD_get_decompressed_size(const void *src, const size_t src_len) {
|
|||
|
||||
if (header.frame_content_size == 0 && !header.single_segment_flag) {
|
||||
// Content size not provided, we can't tell
|
||||
return -1;
|
||||
return (size_t)-1;
|
||||
}
|
||||
|
||||
return header.frame_content_size;
|
||||
|
|
|
@ -7,6 +7,8 @@
|
|||
* in the COPYING file in the root directory of this source tree).
|
||||
*/
|
||||
|
||||
#include <stddef.h> /* size_t */
|
||||
|
||||
/******* EXPOSED TYPES ********************************************************/
|
||||
/*
|
||||
* Contains the parsed contents of a dictionary
|
||||
|
@ -39,7 +41,7 @@ size_t ZSTD_get_decompressed_size(const void *const src, const size_t src_len);
|
|||
* Return a valid dictionary_t pointer for use with dictionary initialization
|
||||
* or decompression
|
||||
*/
|
||||
dictionary_t* create_dictionary();
|
||||
dictionary_t* create_dictionary(void);
|
||||
|
||||
/*
|
||||
* Parse a provided dictionary blob for use in decompression
|
||||
|
|
|
@ -16,7 +16,7 @@ Distribution of this document is unlimited.
|
|||
|
||||
### Version
|
||||
|
||||
0.3.2 (17/07/19)
|
||||
0.3.3 (16/08/19)
|
||||
|
||||
|
||||
Introduction
|
||||
|
@ -358,6 +358,7 @@ It may be followed by an optional `Content_Checksum`
|
|||
__`Block_Type`__
|
||||
|
||||
The next 2 bits represent the `Block_Type`.
|
||||
`Block_Type` influences the meaning of `Block_Size`.
|
||||
There are 4 block types :
|
||||
|
||||
| Value | 0 | 1 | 2 | 3 |
|
||||
|
@ -384,9 +385,12 @@ There are 4 block types :
|
|||
__`Block_Size`__
|
||||
|
||||
The upper 21 bits of `Block_Header` represent the `Block_Size`.
|
||||
`Block_Size` is the size of the block excluding the header.
|
||||
A block can contain any number of bytes (even zero), up to
|
||||
`Block_Maximum_Decompressed_Size`, which is the smallest of:
|
||||
When `Block_Type` is `Compressed_Block` or `Raw_Block`,
|
||||
`Block_Size` is the size of `Block_Content`, hence excluding `Block_Header`.
|
||||
When `Block_Type` is `RLE_Block`, `Block_Content`’s size is always 1,
|
||||
and `Block_Size` represents the number of times this byte must be repeated.
|
||||
A block can contain and decompress into any number of bytes (even zero),
|
||||
up to `Block_Maximum_Decompressed_Size`, which is the smallest of:
|
||||
- Window_Size
|
||||
- 128 KB
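As a concrete illustration of the RLE distinction described above, a decoder splits the 3-byte little-endian `Block_Header` into its three fields roughly as follows (a minimal sketch; the struct and function names are not part of the specification):

#include <stdint.h>

typedef struct {
    int      last_block;   /* 1 if this is the frame's last block */
    int      block_type;   /* 0 Raw, 1 RLE, 2 Compressed, 3 Reserved */
    uint32_t block_size;   /* Block_Content size, or RLE repeat count */
} block_header_t;

static block_header_t parse_block_header(const uint8_t hdr[3]) {
    /* Block_Header is stored little-endian: bit 0 = Last_Block,
     * bits 1-2 = Block_Type, bits 3-23 = Block_Size. */
    uint32_t const raw = (uint32_t)hdr[0]
                       | ((uint32_t)hdr[1] << 8)
                       | ((uint32_t)hdr[2] << 16);
    block_header_t bh;
    bh.last_block = (int)(raw & 1);
    bh.block_type = (int)((raw >> 1) & 3);
    bh.block_size = raw >> 3;
    return bh;
}

For an `RLE_Block`, `block_size` is then the repeat count while `Block_Content` is a single byte.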
|
||||
|
||||
|
@ -1653,6 +1657,7 @@ or at least provide a meaningful error code explaining for which reason it canno
|
|||
|
||||
Version changes
|
||||
---------------
|
||||
- 0.3.3 : clarifications for field Block_Size
|
||||
- 0.3.2 : remove additional block size restriction on compressed blocks
|
||||
- 0.3.1 : minor clarification regarding offset history update rules
|
||||
- 0.3.0 : minor edits to match RFC8478
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
<html>
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
|
||||
<title>zstd 1.4.3 Manual</title>
|
||||
<title>zstd 1.4.4 Manual</title>
|
||||
</head>
|
||||
<body>
|
||||
<h1>zstd 1.4.3 Manual</h1>
|
||||
<h1>zstd 1.4.4 Manual</h1>
|
||||
<hr>
|
||||
<a name="Contents"></a><h2>Contents</h2>
|
||||
<ol>
|
||||
|
@ -324,6 +324,7 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
|
|||
* ZSTD_c_forceAttachDict
|
||||
* ZSTD_c_literalCompressionMode
|
||||
* ZSTD_c_targetCBlockSize
|
||||
* ZSTD_c_srcSizeHint
|
||||
* Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
|
||||
* note : never ever use experimentalParam? names directly;
|
||||
* also, the enums values themselves are unstable and can still change.
|
||||
|
@ -334,6 +335,7 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
|
|||
ZSTD_c_experimentalParam4=1001,
|
||||
ZSTD_c_experimentalParam5=1002,
|
||||
ZSTD_c_experimentalParam6=1003,
|
||||
ZSTD_c_experimentalParam7=1004,
|
||||
} ZSTD_cParameter;
|
||||
</b></pre><BR>
|
||||
<pre><b>typedef struct {
|
||||
|
@ -1005,14 +1007,23 @@ size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
|
|||
size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
|
||||
size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
|
||||
size_t ZSTD_estimateDCtxSize(void);
|
||||
</b><p> These functions make it possible to estimate memory usage
|
||||
of a future {D,C}Ctx, before its creation.
|
||||
ZSTD_estimateCCtxSize() will provide a budget large enough for any compression level up to selected one.
|
||||
It will also consider src size to be arbitrarily "large", which is worst case.
|
||||
If srcSize is known to always be small, ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
|
||||
ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
|
||||
ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
|
||||
Note : CCtx size estimation is only correct for single-threaded compression.
|
||||
</b><p> These functions make it possible to estimate memory usage of a future
|
||||
{D,C}Ctx, before its creation.
|
||||
|
||||
ZSTD_estimateCCtxSize() will provide a budget large enough for any
|
||||
compression level up to selected one. Unlike ZSTD_estimateCStreamSize*(),
|
||||
this estimate does not include space for a window buffer, so this estimate
|
||||
is guaranteed to be enough for single-shot compressions, but not streaming
|
||||
compressions. It will however assume the input may be arbitrarily large,
|
||||
which is the worst case. If srcSize is known to always be small,
|
||||
ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
|
||||
ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with
|
||||
ZSTD_getCParams() to create cParams from compressionLevel.
|
||||
ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with
|
||||
ZSTD_CCtxParams_setParameter().
|
||||
|
||||
Note: only single-threaded compression is supported. This function will
|
||||
return an error code if ZSTD_c_nbWorkers is >= 1.
|
||||
</p></pre><BR>
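A hedged sketch of the tandem described above, using the static-linking-only declarations from zstd.h (the wrapper function itself is hypothetical):

#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_getCParams() and *_usingCParams() are not in the stable API */
#include "zstd.h"

/* Budget a single-shot CCtx for inputs known to be at most `knownSrcSize` bytes. */
static size_t cctx_budget_for(int compressionLevel, unsigned long long knownSrcSize)
{
    ZSTD_compressionParameters const cParams =
        ZSTD_getCParams(compressionLevel, knownSrcSize, /* dictSize */ 0);
    return ZSTD_estimateCCtxSize_usingCParams(cParams);
    /* ZSTD_estimateCCtxSize(compressionLevel) would give the looser,
     * source-size-agnostic bound described above. */
}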
|
||||
|
||||
<pre><b>size_t ZSTD_estimateCStreamSize(int compressionLevel);
|
||||
|
@ -1318,7 +1329,10 @@ size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t di
|
|||
</b>/**! ZSTD_initCStream_advanced() :<b>
|
||||
* This function is deprecated, and is approximately equivalent to:
|
||||
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
|
||||
* ZSTD_CCtx_setZstdParams(zcs, params); // Set the zstd params and leave the rest as-is
|
||||
* // Pseudocode: Set each zstd parameter and leave the rest as-is.
|
||||
* for ((param, value) : params) {
|
||||
* ZSTD_CCtx_setParameter(zcs, param, value);
|
||||
* }
|
||||
* ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
|
||||
* ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
|
||||
*
|
||||
|
@ -1338,7 +1352,10 @@ size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
|
|||
</b>/**! ZSTD_initCStream_usingCDict_advanced() :<b>
|
||||
* This function is deprecated, and is approximately equivalent to:
|
||||
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
|
||||
* ZSTD_CCtx_setZstdFrameParams(zcs, fParams); // Set the zstd frame params and leave the rest as-is
|
||||
* // Pseudocode: Set each zstd frame parameter and leave the rest as-is.
|
||||
* for ((fParam, value) : fParams) {
|
||||
* ZSTD_CCtx_setParameter(zcs, fParam, value);
|
||||
* }
|
||||
* ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
|
||||
* ZSTD_CCtx_refCDict(zcs, cdict);
|
||||
*
|
||||
|
|
|
@ -44,8 +44,8 @@ static void compressFile_orDie(const char* fname, const char* outName, int cLeve
|
|||
* and writes all output produced to the output file.
|
||||
*/
|
||||
size_t const toRead = buffInSize;
|
||||
size_t read;
|
||||
while ((read = fread_orDie(buffIn, toRead, fin))) {
|
||||
for (;;) {
|
||||
size_t read = fread_orDie(buffIn, toRead, fin);
|
||||
/* Select the flush mode.
|
||||
* If the read may not be finished (read == toRead) we use
|
||||
* ZSTD_e_continue. If this is the last chunk, we use ZSTD_e_end.
|
||||
|
@ -76,6 +76,10 @@ static void compressFile_orDie(const char* fname, const char* outName, int cLeve
|
|||
} while (!finished);
|
||||
CHECK(input.pos == input.size,
|
||||
"Impossible: zstd only returns 0 when the input is completely consumed!");
|
||||
|
||||
if (lastChunk) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
ZSTD_freeCCtx(cctx);
|
||||
|
|
|
@ -34,7 +34,10 @@ static void decompressFile_orDie(const char* fname)
|
|||
*/
|
||||
size_t const toRead = buffInSize;
|
||||
size_t read;
|
||||
size_t lastRet = 0;
|
||||
int isEmpty = 1;
|
||||
while ( (read = fread_orDie(buffIn, toRead, fin)) ) {
|
||||
isEmpty = 0;
|
||||
ZSTD_inBuffer input = { buffIn, read, 0 };
|
||||
/* Given a valid frame, zstd won't consume the last byte of the frame
|
||||
* until it has flushed all of the decompressed data of the frame.
|
||||
|
@ -53,9 +56,24 @@ static void decompressFile_orDie(const char* fname)
|
|||
size_t const ret = ZSTD_decompressStream(dctx, &output , &input);
|
||||
CHECK_ZSTD(ret);
|
||||
fwrite_orDie(buffOut, output.pos, fout);
|
||||
lastRet = ret;
|
||||
}
|
||||
}
|
||||
|
||||
if (isEmpty) {
|
||||
fprintf(stderr, "input is empty\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (lastRet != 0) {
|
||||
/* The last return value from ZSTD_decompressStream did not end on a
|
||||
* frame, but we reached the end of the file! We assume this is an
|
||||
* error, and the input was truncated.
|
||||
*/
|
||||
fprintf(stderr, "EOF before end of stream: %zu\n", lastRet);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
ZSTD_freeDCtx(dctx);
|
||||
fclose_orDie(fin);
|
||||
fclose_orDie(fout);
|
||||
|
|
|
@ -164,7 +164,7 @@ MEM_STATIC unsigned BIT_highbit32 (U32 val)
|
|||
_BitScanReverse ( &r, val );
|
||||
return (unsigned) r;
|
||||
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
|
||||
return 31 - __builtin_clz (val);
|
||||
return __builtin_clz (val) ^ 31;
|
||||
# elif defined(__ICCARM__) /* IAR Intrinsic */
|
||||
return 31 - __CLZ(val);
|
||||
# else /* Software version */
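The switch from `31 - __builtin_clz(val)` to `__builtin_clz(val) ^ 31` is behaviour-preserving: for a nonzero 32-bit input, clz returns a value in [0, 31], and subtracting such a value from 31 (binary 11111) never borrows, so it is the same as flipping its low five bits. A throwaway check, not part of the library:

#include <assert.h>

int main(void) {
    int r;
    for (r = 0; r <= 31; r++) {
        assert((31 - r) == (r ^ 31));   /* identical over the whole clz range */
    }
    return 0;
}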
|
||||
|
@ -244,9 +244,9 @@ MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
|
|||
{
|
||||
size_t const nbBytes = bitC->bitPos >> 3;
|
||||
assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
|
||||
assert(bitC->ptr <= bitC->endPtr);
|
||||
MEM_writeLEST(bitC->ptr, bitC->bitContainer);
|
||||
bitC->ptr += nbBytes;
|
||||
assert(bitC->ptr <= bitC->endPtr);
|
||||
bitC->bitPos &= 7;
|
||||
bitC->bitContainer >>= nbBytes*8;
|
||||
}
|
||||
|
@ -260,6 +260,7 @@ MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
|
|||
{
|
||||
size_t const nbBytes = bitC->bitPos >> 3;
|
||||
assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
|
||||
assert(bitC->ptr <= bitC->endPtr);
|
||||
MEM_writeLEST(bitC->ptr, bitC->bitContainer);
|
||||
bitC->ptr += nbBytes;
|
||||
if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
|
||||
|
|
|
@ -61,6 +61,13 @@
|
|||
# define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
|
||||
#endif
|
||||
|
||||
/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
|
||||
#if defined(__GNUC__)
|
||||
# define UNUSED_ATTR __attribute__((unused))
|
||||
#else
|
||||
# define UNUSED_ATTR
|
||||
#endif
|
||||
|
||||
/* force no inlining */
|
||||
#ifdef _MSC_VER
|
||||
# define FORCE_NOINLINE static __declspec(noinline)
|
||||
|
@ -127,9 +134,14 @@
|
|||
} \
|
||||
}
|
||||
|
||||
/* vectorization */
|
||||
/* vectorization
|
||||
* older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax */
|
||||
#if !defined(__clang__) && defined(__GNUC__)
|
||||
# define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
|
||||
# if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)
|
||||
# define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
|
||||
# else
|
||||
# define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")")
|
||||
# endif
|
||||
#else
|
||||
# define DONT_VECTORIZE
|
||||
#endif
|
||||
|
|
|
@ -308,7 +308,7 @@ If there is an error, the function will return an error code, which can be teste
|
|||
*******************************************/
|
||||
/* FSE buffer bounds */
|
||||
#define FSE_NCOUNTBOUND 512
|
||||
#define FSE_BLOCKBOUND(size) (size + (size>>7))
|
||||
#define FSE_BLOCKBOUND(size) (size + (size>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */)
|
||||
#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
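A worked example of the tightened bound, assuming a 64-bit target where sizeof(size_t) == 8 (figures are illustrative only):

/*
 *   FSE_BLOCKBOUND(4096)    = 4096 + (4096 >> 7) + 4 + 8 = 4140
 *   FSE_COMPRESSBOUND(4096) = FSE_NCOUNTBOUND + 4140     = 4652
 * i.e. the revised macro reserves 4 extra bytes for the final FSE states and
 * sizeof(size_t) bytes for one last bitContainer flush, which the previous
 * definition did not account for.
 */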
|
||||
|
||||
/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
|
||||
|
|
|
@ -52,7 +52,9 @@
|
|||
#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
|
||||
|
||||
/* check and forward error code */
|
||||
#ifndef CHECK_F
|
||||
#define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; }
|
||||
#endif
|
||||
|
||||
|
||||
/* **************************************************************
|
||||
|
|
|
@ -47,6 +47,39 @@ extern "C" {
|
|||
#define MEM_STATIC_ASSERT(c) { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
|
||||
MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
|
||||
|
||||
/* detects whether we are being compiled under msan */
|
||||
#if defined (__has_feature)
|
||||
# if __has_feature(memory_sanitizer)
|
||||
# define MEMORY_SANITIZER 1
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#if defined (MEMORY_SANITIZER)
|
||||
/* Not all platforms that support msan provide sanitizers/msan_interface.h.
|
||||
* We therefore declare the functions we need ourselves, rather than trying to
|
||||
* include the header file... */
|
||||
|
||||
#include <stdint.h> /* intptr_t */
|
||||
|
||||
/* Make memory region fully initialized (without changing its contents). */
|
||||
void __msan_unpoison(const volatile void *a, size_t size);
|
||||
|
||||
/* Make memory region fully uninitialized (without changing its contents).
|
||||
This is a legacy interface that does not update origin information. Use
|
||||
__msan_allocated_memory() instead. */
|
||||
void __msan_poison(const volatile void *a, size_t size);
|
||||
|
||||
/* Returns the offset of the first (at least partially) poisoned byte in the
|
||||
memory range, or -1 if the whole range is good. */
|
||||
intptr_t __msan_test_shadow(const volatile void *x, size_t size);
|
||||
#endif
|
||||
|
||||
#if defined (MEMORY_SANITIZER)
|
||||
# define MEM_SKIP_MSAN __attribute__((no_sanitize("memory")))
|
||||
#else
|
||||
# define MEM_SKIP_MSAN
|
||||
#endif
|
||||
|
||||
|
||||
/*-**************************************************************
|
||||
* Basic Types
|
||||
|
|
|
@ -197,8 +197,8 @@ static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
|
|||
static void ZSTD_copy16(void* dst, const void* src) { memcpy(dst, src, 16); }
|
||||
#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }
|
||||
|
||||
#define WILDCOPY_OVERLENGTH 8
|
||||
#define VECLEN 16
|
||||
#define WILDCOPY_OVERLENGTH 32
|
||||
#define WILDCOPY_VECLEN 16
|
||||
|
||||
typedef enum {
|
||||
ZSTD_no_overlap,
|
||||
|
@ -207,67 +207,58 @@ typedef enum {
|
|||
} ZSTD_overlap_e;
|
||||
|
||||
/*! ZSTD_wildcopy() :
|
||||
* custom version of memcpy(), can overwrite up to WILDCOPY_OVERLENGTH bytes (if length==0) */
|
||||
* Custom version of memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0)
|
||||
* @param ovtype controls the overlap detection
|
||||
* - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
|
||||
* - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
|
||||
* The src buffer must be before the dst buffer.
|
||||
*/
|
||||
MEM_STATIC FORCE_INLINE_ATTR DONT_VECTORIZE
|
||||
void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e ovtype)
|
||||
void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
|
||||
{
|
||||
ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
|
||||
const BYTE* ip = (const BYTE*)src;
|
||||
BYTE* op = (BYTE*)dst;
|
||||
BYTE* const oend = op + length;
|
||||
|
||||
assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff < -8));
|
||||
if (length < VECLEN || (ovtype == ZSTD_overlap_src_before_dst && diff < VECLEN)) {
|
||||
do
|
||||
COPY8(op, ip)
|
||||
while (op < oend);
|
||||
}
|
||||
else {
|
||||
if ((length & 8) == 0)
|
||||
COPY8(op, ip);
|
||||
do {
|
||||
assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff <= -WILDCOPY_VECLEN));
|
||||
|
||||
if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
|
||||
/* Handle short offset copies. */
|
||||
do {
|
||||
COPY8(op, ip)
|
||||
} while (op < oend);
|
||||
} else {
|
||||
assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
|
||||
/* Separate out the first two COPY16() calls because the copy length is
|
||||
* almost certain to be short, so the branches have different
|
||||
* probabilities.
|
||||
* On gcc-9 unrolling once is +1.6%, twice is +2%, thrice is +1.8%.
|
||||
* On clang-8 unrolling once is +1.4%, twice is +3.3%, thrice is +3%.
|
||||
*/
|
||||
COPY16(op, ip);
|
||||
}
|
||||
while (op < oend);
|
||||
COPY16(op, ip);
|
||||
if (op >= oend) return;
|
||||
do {
|
||||
COPY16(op, ip);
|
||||
COPY16(op, ip);
|
||||
}
|
||||
while (op < oend);
|
||||
}
|
||||
}
|
||||
|
||||
/*! ZSTD_wildcopy_16min() :
|
||||
* same semantics as ZSTD_wildcopy() except guaranteed to be able to copy 16 bytes at the start */
|
||||
MEM_STATIC FORCE_INLINE_ATTR DONT_VECTORIZE
|
||||
void ZSTD_wildcopy_16min(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e ovtype)
|
||||
/*! ZSTD_wildcopy8() :
|
||||
* The same as ZSTD_wildcopy(), but it can only overwrite 8 bytes, and works for
|
||||
* overlapping buffers that are at least 8 bytes apart.
|
||||
*/
|
||||
MEM_STATIC void ZSTD_wildcopy8(void* dst, const void* src, ptrdiff_t length)
|
||||
{
|
||||
ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
|
||||
const BYTE* ip = (const BYTE*)src;
|
||||
BYTE* op = (BYTE*)dst;
|
||||
BYTE* const oend = op + length;
|
||||
|
||||
assert(length >= 8);
|
||||
assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff < -8));
|
||||
|
||||
if (ovtype == ZSTD_overlap_src_before_dst && diff < VECLEN) {
|
||||
do
|
||||
COPY8(op, ip)
|
||||
while (op < oend);
|
||||
}
|
||||
else {
|
||||
if ((length & 8) == 0)
|
||||
BYTE* const oend = (BYTE*)op + length;
|
||||
do {
|
||||
COPY8(op, ip);
|
||||
do {
|
||||
COPY16(op, ip);
|
||||
}
|
||||
while (op < oend);
|
||||
}
|
||||
}
|
||||
|
||||
MEM_STATIC void ZSTD_wildcopy_e(void* dst, const void* src, void* dstEnd) /* should be faster for decoding, but strangely, not verified on all platform */
|
||||
{
|
||||
const BYTE* ip = (const BYTE*)src;
|
||||
BYTE* op = (BYTE*)dst;
|
||||
BYTE* const oend = (BYTE*)dstEnd;
|
||||
do
|
||||
COPY8(op, ip)
|
||||
while (op < oend);
|
||||
} while (op < oend);
|
||||
}
|
||||
|
||||
|
||||
|
@ -323,7 +314,7 @@ MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus
|
|||
_BitScanReverse(&r, val);
|
||||
return (unsigned)r;
|
||||
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */
|
||||
return 31 - __builtin_clz(val);
|
||||
return __builtin_clz (val) ^ 31;
|
||||
# elif defined(__ICCARM__) /* IAR Intrinsic */
|
||||
return 31 - __CLZ(val);
|
||||
# else /* Software version */
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
* Dependencies
|
||||
***************************************/
|
||||
#include "zstd_internal.h"
|
||||
#include "zstd_cwksp.h"
|
||||
#ifdef ZSTD_MULTITHREAD
|
||||
# include "zstdmt_compress.h"
|
||||
#endif
|
||||
|
@ -203,6 +204,9 @@ struct ZSTD_CCtx_params_s {
|
|||
size_t targetCBlockSize; /* Tries to fit compressed block size to be around targetCBlockSize.
|
||||
* No target when targetCBlockSize == 0.
|
||||
* There is no guarantee on compressed block size */
|
||||
int srcSizeHint; /* User's best guess of source size.
|
||||
* Hint is not valid when srcSizeHint == 0.
|
||||
* There is no guarantee that hint is close to actual source size */
|
||||
|
||||
ZSTD_dictAttachPref_e attachDictPref;
|
||||
ZSTD_literalCompressionMode_e literalCompressionMode;
|
||||
|
@ -228,9 +232,7 @@ struct ZSTD_CCtx_s {
|
|||
ZSTD_CCtx_params appliedParams;
|
||||
U32 dictID;
|
||||
|
||||
int workSpaceOversizedDuration;
|
||||
void* workSpace;
|
||||
size_t workSpaceSize;
|
||||
ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */
|
||||
size_t blockSize;
|
||||
unsigned long long pledgedSrcSizePlusOne; /* this way, 0 (default) == unknown */
|
||||
unsigned long long consumedSrcSize;
|
||||
|
@ -338,26 +340,57 @@ MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
|
|||
return (srcSize >> minlog) + 2;
|
||||
}
|
||||
|
||||
/*! ZSTD_safecopyLiterals() :
|
||||
* memcpy() function that won't read beyond more than WILDCOPY_OVERLENGTH bytes past ilimit_w.
|
||||
* Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single
|
||||
* large copies.
|
||||
*/
|
||||
static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) {
|
||||
assert(iend > ilimit_w);
|
||||
if (ip <= ilimit_w) {
|
||||
ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap);
|
||||
op += ilimit_w - ip;
|
||||
ip = ilimit_w;
|
||||
}
|
||||
while (ip < iend) *op++ = *ip++;
|
||||
}
|
||||
|
||||
/*! ZSTD_storeSeq() :
|
||||
* Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
|
||||
* `offsetCode` : distance to match + 3 (values 1-3 are repCodes).
|
||||
* Store a sequence (litlen, litPtr, offCode and mlBase) into seqStore_t.
|
||||
* `offCode` : distance to match + ZSTD_REP_MOVE (values <= ZSTD_REP_MOVE are repCodes).
|
||||
* `mlBase` : matchLength - MINMATCH
|
||||
* Allowed to overread literals up to litLimit.
|
||||
*/
|
||||
MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t mlBase)
|
||||
HINT_INLINE UNUSED_ATTR
|
||||
void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, U32 offCode, size_t mlBase)
|
||||
{
|
||||
BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
|
||||
BYTE const* const litEnd = literals + litLength;
|
||||
#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6)
|
||||
static const BYTE* g_start = NULL;
|
||||
if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */
|
||||
{ U32 const pos = (U32)((const BYTE*)literals - g_start);
|
||||
DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
|
||||
pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offsetCode);
|
||||
pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offCode);
|
||||
}
|
||||
#endif
|
||||
assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
|
||||
/* copy Literals */
|
||||
assert(seqStorePtr->maxNbLit <= 128 KB);
|
||||
assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
|
||||
ZSTD_wildcopy(seqStorePtr->lit, literals, (ptrdiff_t)litLength, ZSTD_no_overlap);
|
||||
assert(literals + litLength <= litLimit);
|
||||
if (litEnd <= litLimit_w) {
|
||||
/* Common case we can use wildcopy.
|
||||
* First copy 16 bytes, because literals are likely short.
|
||||
*/
|
||||
assert(WILDCOPY_OVERLENGTH >= 16);
|
||||
ZSTD_copy16(seqStorePtr->lit, literals);
|
||||
if (litLength > 16) {
|
||||
ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap);
|
||||
}
|
||||
} else {
|
||||
ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w);
|
||||
}
|
||||
seqStorePtr->lit += litLength;
|
||||
|
||||
/* literal Length */
|
||||
|
@ -369,7 +402,7 @@ MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const v
|
|||
seqStorePtr->sequences[0].litLength = (U16)litLength;
|
||||
|
||||
/* match offset */
|
||||
seqStorePtr->sequences[0].offset = offsetCode + 1;
|
||||
seqStorePtr->sequences[0].offset = offCode + 1;
|
||||
|
||||
/* match Length */
|
||||
if (mlBase>0xFFFF) {
|
||||
|
@ -911,7 +944,7 @@ ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
|
|||
size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
|
||||
const void* dict, size_t dictSize,
|
||||
const ZSTD_CDict* cdict,
|
||||
ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);
|
||||
const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize);
|
||||
|
||||
void ZSTD_resetSeqStore(seqStore_t* ssPtr);
|
||||
|
||||
|
@ -926,7 +959,7 @@ size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
|
|||
ZSTD_dictContentType_e dictContentType,
|
||||
ZSTD_dictTableLoadMethod_e dtlm,
|
||||
const ZSTD_CDict* cdict,
|
||||
ZSTD_CCtx_params params,
|
||||
const ZSTD_CCtx_params* params,
|
||||
unsigned long long pledgedSrcSize);
|
||||
|
||||
/* ZSTD_compress_advanced_internal() :
|
||||
|
@ -935,7 +968,7 @@ size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
|
|||
void* dst, size_t dstCapacity,
|
||||
const void* src, size_t srcSize,
|
||||
const void* dict,size_t dictSize,
|
||||
ZSTD_CCtx_params params);
|
||||
const ZSTD_CCtx_params* params);
|
||||
|
||||
|
||||
/* ZSTD_writeLastEmptyBlock() :
|
||||
|
|
|
@ -70,7 +70,7 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
|
|||
ZSTD_strategy strategy, int disableLiteralCompression,
|
||||
void* dst, size_t dstCapacity,
|
||||
const void* src, size_t srcSize,
|
||||
void* workspace, size_t wkspSize,
|
||||
void* entropyWorkspace, size_t entropyWorkspaceSize,
|
||||
const int bmi2)
|
||||
{
|
||||
size_t const minGain = ZSTD_minGain(srcSize, strategy);
|
||||
|
@ -99,10 +99,15 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
|
|||
{ HUF_repeat repeat = prevHuf->repeatMode;
|
||||
int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
|
||||
if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
|
||||
cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
|
||||
workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2)
|
||||
: HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
|
||||
workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
|
||||
cLitSize = singleStream ?
|
||||
HUF_compress1X_repeat(
|
||||
ostart+lhSize, dstCapacity-lhSize, src, srcSize,
|
||||
255, 11, entropyWorkspace, entropyWorkspaceSize,
|
||||
(HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) :
|
||||
HUF_compress4X_repeat(
|
||||
ostart+lhSize, dstCapacity-lhSize, src, srcSize,
|
||||
255, 11, entropyWorkspace, entropyWorkspaceSize,
|
||||
(HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
|
||||
if (repeat != HUF_repeat_none) {
|
||||
/* reused the existing table */
|
||||
hType = set_repeat;
|
||||
|
|
|
@ -23,7 +23,7 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
|
|||
ZSTD_strategy strategy, int disableLiteralCompression,
|
||||
void* dst, size_t dstCapacity,
|
||||
const void* src, size_t srcSize,
|
||||
void* workspace, size_t wkspSize,
|
||||
void* entropyWorkspace, size_t entropyWorkspaceSize,
|
||||
const int bmi2);
|
||||
|
||||
#endif /* ZSTD_COMPRESS_LITERALS_H */
|
||||
|
|
|
@ -222,7 +222,7 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
|
|||
const BYTE* codeTable, size_t nbSeq,
|
||||
const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
|
||||
const FSE_CTable* prevCTable, size_t prevCTableSize,
|
||||
void* workspace, size_t workspaceSize)
|
||||
void* entropyWorkspace, size_t entropyWorkspaceSize)
|
||||
{
|
||||
BYTE* op = (BYTE*)dst;
|
||||
const BYTE* const oend = op + dstCapacity;
|
||||
|
@ -238,7 +238,7 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
|
|||
memcpy(nextCTable, prevCTable, prevCTableSize);
|
||||
return 0;
|
||||
case set_basic:
|
||||
FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize)); /* note : could be pre-calculated */
|
||||
FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize)); /* note : could be pre-calculated */
|
||||
return 0;
|
||||
case set_compressed: {
|
||||
S16 norm[MaxSeq + 1];
|
||||
|
@ -252,7 +252,7 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
|
|||
FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
|
||||
{ size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
|
||||
FORWARD_IF_ERROR(NCountSize);
|
||||
FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize));
|
||||
FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, entropyWorkspace, entropyWorkspaceSize));
|
||||
return NCountSize;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -35,7 +35,7 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
|
|||
const BYTE* codeTable, size_t nbSeq,
|
||||
const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
|
||||
const FSE_CTable* prevCTable, size_t prevCTableSize,
|
||||
void* workspace, size_t workspaceSize);
|
||||
void* entropyWorkspace, size_t entropyWorkspaceSize);
|
||||
|
||||
size_t ZSTD_encodeSequences(
|
||||
void* dst, size_t dstCapacity,
|
||||
|
|
|
@ -0,0 +1,449 @@
|
|||
/*
|
||||
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under both the BSD-style license (found in the
|
||||
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
|
||||
* in the COPYING file in the root directory of this source tree).
|
||||
* You may select, at your option, one of the above-listed licenses.
|
||||
*/
|
||||
|
||||
#ifndef ZSTD_CWKSP_H
|
||||
#define ZSTD_CWKSP_H
|
||||
|
||||
/*-*************************************
|
||||
* Dependencies
|
||||
***************************************/
|
||||
#include "zstd_internal.h"
|
||||
|
||||
#if defined (__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/*-*************************************
|
||||
* Constants
|
||||
***************************************/
|
||||
|
||||
/* define "workspace is too large" as this number of times larger than needed */
|
||||
#define ZSTD_WORKSPACETOOLARGE_FACTOR 3
|
||||
|
||||
/* when workspace is continuously too large
|
||||
* during at least this number of times,
|
||||
* context's memory usage is considered wasteful,
|
||||
* because it's sized to handle a worst case scenario which rarely happens.
|
||||
* In which case, resize it down to free some memory */
|
||||
#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128
|
||||
|
||||
/*-*************************************
|
||||
* Structures
|
||||
***************************************/
|
||||
typedef enum {
|
||||
ZSTD_cwksp_alloc_objects,
|
||||
ZSTD_cwksp_alloc_buffers,
|
||||
ZSTD_cwksp_alloc_aligned
|
||||
} ZSTD_cwksp_alloc_phase_e;
|
||||
|
||||
/**
|
||||
* Zstd fits all its internal datastructures into a single continuous buffer,
|
||||
* so that it only needs to perform a single OS allocation (or so that a buffer
|
||||
* can be provided to it and it can perform no allocations at all). This buffer
|
||||
* is called the workspace.
|
||||
*
|
||||
* Several optimizations complicate that process of allocating memory ranges
|
||||
 * from this workspace for each internal datastructure:
 *
 * - These different internal datastructures have different setup requirements:
 *
 *   - The static objects need to be cleared once and can then be trivially
 *     reused for each compression.
 *
 *   - Various buffers don't need to be initialized at all--they are always
 *     written into before they're read.
 *
 *   - The matchstate tables have a unique requirement that they don't need
 *     their memory to be totally cleared, but they do need the memory to have
 *     some bound, i.e., a guarantee that all values in the memory they've been
 *     allocated are less than some maximum value (which is the starting value
 *     for the indices that they will then use for compression). When this
 *     guarantee is provided to them, they can use the memory without any setup
 *     work. When it can't, they have to clear the area.
 *
 * - These buffers also have different alignment requirements.
 *
 * - We would like to reuse the objects in the workspace for multiple
 *   compressions without having to perform any expensive reallocation or
 *   reinitialization work.
 *
 * - We would like to be able to efficiently reuse the workspace across
 *   multiple compressions **even when the compression parameters change** and
 *   we need to resize some of the objects (where possible).
 *
 * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
 * abstraction was created. It works as follows:
 *
 * Workspace Layout:
 *
 * [                        ... workspace ...                        ]
 * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
 *
 * The various objects that live in the workspace are divided into the
 * following categories, and are allocated separately:
 *
 * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
 *   so that literally everything fits in a single buffer. Note: if present,
 *   this must be the first object in the workspace, since ZSTD_free{CCtx,
 *   CDict}() rely on a pointer comparison to see whether one or two frees are
 *   required.
 *
 * - Fixed size objects: these are fixed-size, fixed-count objects that are
 *   nonetheless "dynamically" allocated in the workspace so that we can
 *   control how they're initialized separately from the broader ZSTD_CCtx.
 *   Examples:
 *   - Entropy Workspace
 *   - 2 x ZSTD_compressedBlockState_t
 *   - CDict dictionary contents
 *
 * - Tables: these are any of several different datastructures (hash tables,
 *   chain tables, binary trees) that all respect a common format: they are
 *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
 *   Their sizes depend on the cparams.
 *
 * - Aligned: these buffers are used for various purposes that require 4 byte
 *   alignment, but don't require any initialization before they're used.
 *
 * - Buffers: these buffers are used for various purposes that don't require
 *   any alignment or initialization before they're used. This means they can
 *   be moved around at no cost for a new compression.
 *
 * Allocating Memory:
 *
 * The various types of objects must be allocated in order, so they can be
 * correctly packed into the workspace buffer. That order is:
 *
 * 1. Objects
 * 2. Buffers
 * 3. Aligned
 * 4. Tables
 *
 * Attempts to reserve objects of different types out of order will fail.
 */
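A minimal usage sketch of that allocation order, assuming a caller that already knows its rounded sizes (wkspBuffer, wkspSize, objSize, litBufSize, seqBufSize and hashTblSize are illustrative names, not symbols from this patch); the reserve functions it calls are the ones defined later in this header:

    {   ZSTD_cwksp ws;
        void* obj;  BYTE* litBuf;  void* seqBuf;  void* hashTbl;
        ZSTD_cwksp_init(&ws, wkspBuffer, wkspSize);             /* caller-owned buffer, pointer-aligned    */
        obj     = ZSTD_cwksp_reserve_object(&ws, objSize);      /* 1. Objects  (multiple of sizeof(void*)) */
        litBuf  = ZSTD_cwksp_reserve_buffer(&ws, litBufSize);   /* 2. Buffers  (no alignment requirement)  */
        seqBuf  = ZSTD_cwksp_reserve_aligned(&ws, seqBufSize);  /* 3. Aligned  (multiple of sizeof(U32))   */
        hashTbl = ZSTD_cwksp_reserve_table(&ws, hashTblSize);   /* 4. Tables   (multiple of sizeof(U32))   */
        if (ZSTD_cwksp_reserve_failed(&ws)) {
            /* workspace too small : grow it and start over */
        }
    }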
typedef struct {
    void* workspace;
    void* workspaceEnd;

    void* objectEnd;
    void* tableEnd;
    void* tableValidEnd;
    void* allocStart;

    int allocFailed;
    int workspaceOversizedDuration;
    ZSTD_cwksp_alloc_phase_e phase;
} ZSTD_cwksp;

/*-*************************************
* Functions
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);

MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
    (void)ws;
    assert(ws->workspace <= ws->objectEnd);
    assert(ws->objectEnd <= ws->tableEnd);
    assert(ws->objectEnd <= ws->tableValidEnd);
    assert(ws->tableEnd <= ws->allocStart);
    assert(ws->tableValidEnd <= ws->allocStart);
    assert(ws->allocStart <= ws->workspaceEnd);
}

/**
 * Align must be a power of 2.
 */
MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
    size_t const mask = align - 1;
    assert((align & mask) == 0);
    return (size + mask) & ~mask;
}

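For instance, the rounding above gives:

    /* ZSTD_cwksp_align(13, 8) == 16,  ZSTD_cwksp_align(16, 8) == 16,  ZSTD_cwksp_align(1, 4) == 4 */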
MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
        ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
    assert(phase >= ws->phase);
    if (phase > ws->phase) {
        if (ws->phase < ZSTD_cwksp_alloc_buffers &&
                phase >= ZSTD_cwksp_alloc_buffers) {
            ws->tableValidEnd = ws->objectEnd;
        }
        if (ws->phase < ZSTD_cwksp_alloc_aligned &&
                phase >= ZSTD_cwksp_alloc_aligned) {
            /* If unaligned allocations down from a too-large top have left us
             * unaligned, we need to realign our alloc ptr. Technically, this
             * can consume space that is unaccounted for in the neededSpace
             * calculation. However, I believe this can only happen when the
             * workspace is too large, and specifically when it is too large
             * by a larger margin than the space that will be consumed. */
            /* TODO: cleaner, compiler warning friendly way to do this??? */
            ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
            if (ws->allocStart < ws->tableValidEnd) {
                ws->tableValidEnd = ws->allocStart;
            }
        }
        ws->phase = phase;
    }
}

/**
 * Internal function. Do not use directly.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_internal(
        ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
    void* alloc;
    void* bottom = ws->tableEnd;
    ZSTD_cwksp_internal_advance_phase(ws, phase);
    alloc = (BYTE *)ws->allocStart - bytes;
    DEBUGLOG(5, "cwksp: reserving %zd bytes, %zd bytes remaining",
        bytes, ZSTD_cwksp_available_space(ws) - bytes);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(alloc >= bottom);
    if (alloc < bottom) {
        DEBUGLOG(4, "cwksp: alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    if (alloc < ws->tableValidEnd) {
        ws->tableValidEnd = alloc;
    }
    ws->allocStart = alloc;
    return alloc;
}

/**
 * Reserves and returns unaligned memory.
 */
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}

/**
 * Reserves and returns memory sized on and aligned on sizeof(unsigned).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
    assert((bytes & (sizeof(U32)-1)) == 0);
    return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
}

/**
 * Aligned on sizeof(unsigned). These buffers have the special property that
 * their values remain constrained, allowing us to re-use them without
 * memset()-ing them.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
    void* alloc = ws->tableEnd;
    void* end = (BYTE *)alloc + bytes;
    void* top = ws->allocStart;
    DEBUGLOG(5, "cwksp: reserving table %zd bytes, %zd bytes remaining",
        bytes, ZSTD_cwksp_available_space(ws) - bytes);
    assert((bytes & (sizeof(U32)-1)) == 0);
    ZSTD_cwksp_internal_advance_phase(ws, phase);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(end <= top);
    if (end > top) {
        DEBUGLOG(4, "cwksp: table alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->tableEnd = end;
    return alloc;
}

/**
 * Aligned on sizeof(void*).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
    size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
    void* start = ws->objectEnd;
    void* end = (BYTE*)start + roundedBytes;
    DEBUGLOG(5,
        "cwksp: reserving object %zd bytes (rounded to %zd), %zd bytes remaining",
        bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
    assert(((size_t)start & (sizeof(void*)-1)) == 0);
    assert((bytes & (sizeof(void*)-1)) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    /* we must be in the first phase, no advance is possible */
    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
        DEBUGLOG(4, "cwksp: object alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->objectEnd = end;
    ws->tableEnd = end;
    ws->tableValidEnd = end;
    return start;
}

MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the table re-use logic is sound, and that we don't
     * access table space that we haven't cleaned, we re-"poison" the table
     * space every time we mark it dirty. */
    {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        assert(__msan_test_shadow(ws->objectEnd, size) == -1);
        __msan_poison(ws->objectEnd, size);
    }
#endif

    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    ws->tableValidEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ws->tableValidEnd = ws->tableEnd;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Zero the part of the allocated tables not already marked clean.
 */
MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
    }
    ZSTD_cwksp_mark_tables_clean(ws);
}

/**
 * Invalidates table allocations.
 * All other allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing tables!");
    ws->tableEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

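A sketch of how these markers are intended to pair up across compressions (the surrounding call sites are an assumption, not part of this excerpt):

    ZSTD_cwksp_mark_tables_dirty(&ws);  /* tables may now contain arbitrary values                 */
    /* ... later, before a compression that relies on the bounded-value guarantee ... */
    ZSTD_cwksp_clean_tables(&ws);       /* zeroes only [tableValidEnd, tableEnd), then marks clean */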
/**
 * Invalidates all buffer, aligned, and table allocations.
 * Object allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing!");

#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the context re-use logic is sound, and that we don't
     * access stuff that this compression hasn't initialized, we re-"poison"
     * the workspace (or at least the non-static, non-table parts of it)
     * every time we start a new compression. */
    {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd;
        __msan_poison(ws->tableValidEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ws->allocStart = ws->workspaceEnd;
    ws->allocFailed = 0;
    if (ws->phase > ZSTD_cwksp_alloc_buffers) {
        ws->phase = ZSTD_cwksp_alloc_buffers;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * The provided workspace takes ownership of the buffer [start, start+size).
 * Any existing values in the workspace are ignored (the previously managed
 * buffer, if present, must be separately freed).
 */
MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
    ws->workspace = start;
    ws->workspaceEnd = (BYTE*)start + size;
    ws->objectEnd = ws->workspace;
    ws->tableValidEnd = ws->objectEnd;
    ws->phase = ZSTD_cwksp_alloc_objects;
    ZSTD_cwksp_clear(ws);
    ws->workspaceOversizedDuration = 0;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
    void* workspace = ZSTD_malloc(size, customMem);
    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
    RETURN_ERROR_IF(workspace == NULL, memory_allocation);
    ZSTD_cwksp_init(ws, workspace, size);
    return 0;
}

MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
    DEBUGLOG(4, "cwksp: freeing workspace");
    ZSTD_free(ws->workspace, customMem);
    memset(ws, 0, sizeof(ZSTD_cwksp));
}

/**
 * Moves the management of a workspace from one cwksp to another. The src cwksp
 * is left in an invalid state (src must be re-init()'ed before it's used again).
 */
MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
    *dst = *src;
    memset(src, 0, sizeof(ZSTD_cwksp));
}

MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
    return (BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace;
}

MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
    return ws->allocFailed;
}

/*-*************************************
* Functions Checking Free Space
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}

MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
}

MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_available(
        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
}

MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}

MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
        ws->workspaceOversizedDuration++;
    } else {
        ws->workspaceOversizedDuration = 0;
    }
}

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_CWKSP_H */

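Taken together, the free-space checks above support a "shrink only when persistently oversized" policy. A sketch of that policy, assuming a caller-side neededSpace and customMem (the policy itself is an assumption; only the ZSTD_cwksp_* calls come from this header, and error checking of ZSTD_cwksp_create() is omitted):

    ZSTD_cwksp_bump_oversized_duration(&ws, neededSpace);
    if ( !ZSTD_cwksp_check_available(&ws, neededSpace)     /* too small : must grow                    */
      || ZSTD_cwksp_check_wasteful(&ws, neededSpace) ) {   /* > FACTOR x too large, for > MAXDURATION  */
        ZSTD_cwksp_free(&ws, customMem);
        ZSTD_cwksp_create(&ws, neededSpace, customMem);    /* fresh, right-sized workspace             */
    } else {
        ZSTD_cwksp_clear(&ws);                             /* reuse the existing buffer in place       */
    }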
@ -148,7 +148,7 @@ size_t ZSTD_compressBlock_doubleFast_generic(
|
|||
const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
|
||||
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
|
||||
ip++;
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
|
||||
goto _match_stored;
|
||||
}
|
||||
|
||||
|
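Every ZSTD_storeSeq() call in the hunks below gains an `iend` argument in fourth position. The prototype implied by those call sites is roughly the following (parameter names are inferred guesses; the actual declaration lives in zstd_compress_internal.h, which is not part of this excerpt). Passing the end of the input lets the literal copy inside ZSTD_storeSeq() stay within bounds when a sequence ends close to `iend`:

    void ZSTD_storeSeq(seqStore_t* seqStorePtr,
                       size_t litLength, const BYTE* literals, const BYTE* litLimit,  /* litLimit == iend : new */
                       U32 offCode, size_t matchLengthMinusMINMATCH);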
@ -157,7 +157,7 @@ size_t ZSTD_compressBlock_doubleFast_generic(
|
|||
&& ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
|
||||
mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
|
||||
ip++;
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
|
||||
goto _match_stored;
|
||||
}
|
||||
|
||||
|
@ -247,7 +247,7 @@ _match_found:
|
|||
offset_2 = offset_1;
|
||||
offset_1 = offset;
|
||||
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
|
||||
|
||||
_match_stored:
|
||||
/* match found */
|
||||
|
@ -278,7 +278,7 @@ _match_stored:
|
|||
const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
|
||||
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
|
||||
U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
|
||||
ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
|
||||
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
|
||||
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
|
||||
ip += repLength2;
|
||||
|
@ -297,7 +297,7 @@ _match_stored:
|
|||
U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
|
||||
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
|
||||
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
|
||||
ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH);
|
||||
ip += rLength;
|
||||
anchor = ip;
|
||||
continue; /* faster when present ... (?) */
|
||||
|
@ -411,7 +411,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
|
|||
const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
|
||||
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
|
||||
ip++;
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
|
||||
} else {
|
||||
if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
|
||||
const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
|
||||
|
@ -422,7 +422,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
|
|||
while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
|
||||
offset_2 = offset_1;
|
||||
offset_1 = offset;
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
|
||||
|
||||
} else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
|
||||
size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
|
||||
|
@ -447,7 +447,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
|
|||
}
|
||||
offset_2 = offset_1;
|
||||
offset_1 = offset;
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
|
||||
|
||||
} else {
|
||||
ip += ((ip-anchor) >> kSearchStrength) + 1;
|
||||
|
@ -479,7 +479,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
|
|||
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
|
||||
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
|
||||
U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
|
||||
ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
|
||||
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
|
||||
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
|
||||
ip += repLength2;
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
* You may select, at your option, one of the above-listed licenses.
|
||||
*/
|
||||
|
||||
#include "zstd_compress_internal.h"
|
||||
#include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
|
||||
#include "zstd_fast.h"
|
||||
|
||||
|
||||
|
@ -43,8 +43,8 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
|
|||
}
|
||||
|
||||
|
||||
FORCE_INLINE_TEMPLATE
|
||||
size_t ZSTD_compressBlock_fast_generic(
|
||||
FORCE_INLINE_TEMPLATE size_t
|
||||
ZSTD_compressBlock_fast_generic(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
void const* src, size_t srcSize,
|
||||
U32 const mls)
|
||||
|
@ -74,8 +74,7 @@ size_t ZSTD_compressBlock_fast_generic(
|
|||
DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
|
||||
ip0 += (ip0 == prefixStart);
|
||||
ip1 = ip0 + 1;
|
||||
{
|
||||
U32 const maxRep = (U32)(ip0 - prefixStart);
|
||||
{ U32 const maxRep = (U32)(ip0 - prefixStart);
|
||||
if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
|
||||
if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
|
||||
}
|
||||
|
@ -118,8 +117,7 @@ size_t ZSTD_compressBlock_fast_generic(
|
|||
match0 = match1;
|
||||
goto _offset;
|
||||
}
|
||||
{
|
||||
size_t const step = ((ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
|
||||
{ size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
|
||||
assert(step >= 2);
|
||||
ip0 += step;
|
||||
ip1 += step;
|
||||
|
@ -138,7 +136,7 @@ _offset: /* Requires: ip0, match0 */
|
|||
_match: /* Requires: ip0, match0, offcode */
|
||||
/* Count the forward length */
|
||||
mLength += ZSTD_count(ip0+mLength+4, match0+mLength+4, iend) + 4;
|
||||
ZSTD_storeSeq(seqStore, ip0-anchor, anchor, offcode, mLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH);
|
||||
/* match found */
|
||||
ip0 += mLength;
|
||||
anchor = ip0;
|
||||
|
@ -150,16 +148,15 @@ _match: /* Requires: ip0, match0, offcode */
|
|||
hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */
|
||||
hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
|
||||
|
||||
while ( (ip0 <= ilimit)
|
||||
&& ( (offset_2>0)
|
||||
& (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) )) {
|
||||
while ( ((ip0 <= ilimit) & (offset_2>0)) /* offset_2==0 means offset_2 is invalidated */
|
||||
&& (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) {
|
||||
/* store sequence */
|
||||
size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
|
||||
U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
|
||||
{ U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
|
||||
hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
|
||||
ip0 += rLength;
|
||||
ip1 = ip0 + 1;
|
||||
ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH);
|
||||
anchor = ip0;
|
||||
continue; /* faster when present (confirmed on gcc-8) ... (?) */
|
||||
}
|
||||
|
@ -179,8 +176,7 @@ size_t ZSTD_compressBlock_fast(
|
|||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
void const* src, size_t srcSize)
|
||||
{
|
||||
ZSTD_compressionParameters const* cParams = &ms->cParams;
|
||||
U32 const mls = cParams->minMatch;
|
||||
U32 const mls = ms->cParams.minMatch;
|
||||
assert(ms->dictMatchState == NULL);
|
||||
switch(mls)
|
||||
{
|
||||
|
@ -265,7 +261,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
|
|||
const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
|
||||
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
|
||||
ip++;
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
|
||||
} else if ( (matchIndex <= prefixStartIndex) ) {
|
||||
size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
|
||||
U32 const dictMatchIndex = dictHashTable[dictHash];
|
||||
|
@ -285,7 +281,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
|
|||
} /* catch up */
|
||||
offset_2 = offset_1;
|
||||
offset_1 = offset;
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
|
||||
}
|
||||
} else if (MEM_read32(match) != MEM_read32(ip)) {
|
||||
/* it's not a match, and we're not going to check the dictionary */
|
||||
|
@ -300,7 +296,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
|
|||
&& (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
|
||||
offset_2 = offset_1;
|
||||
offset_1 = offset;
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
|
||||
}
|
||||
|
||||
/* match found */
|
||||
|
@ -325,7 +321,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
|
|||
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
|
||||
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
|
||||
U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
|
||||
ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
|
||||
hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
|
||||
ip += repLength2;
|
||||
anchor = ip;
|
||||
|
@ -348,8 +344,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState(
|
|||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
void const* src, size_t srcSize)
|
||||
{
|
||||
ZSTD_compressionParameters const* cParams = &ms->cParams;
|
||||
U32 const mls = cParams->minMatch;
|
||||
U32 const mls = ms->cParams.minMatch;
|
||||
assert(ms->dictMatchState != NULL);
|
||||
switch(mls)
|
||||
{
|
||||
|
@ -408,16 +403,17 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
|
|||
const U32 repIndex = current + 1 - offset_1;
|
||||
const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
|
||||
const BYTE* const repMatch = repBase + repIndex;
|
||||
size_t mLength;
|
||||
hashTable[h] = current; /* update hash table */
|
||||
assert(offset_1 <= current +1); /* check repIndex */
|
||||
|
||||
if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
|
||||
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
|
||||
const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
|
||||
mLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
|
||||
size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
|
||||
ip++;
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH);
|
||||
ip += rLength;
|
||||
anchor = ip;
|
||||
} else {
|
||||
if ( (matchIndex < dictStartIndex) ||
|
||||
(MEM_read32(match) != MEM_read32(ip)) ) {
|
||||
|
@ -427,19 +423,15 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
|
|||
}
|
||||
{ const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
|
||||
const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
|
||||
U32 offset;
|
||||
mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
|
||||
U32 const offset = current - matchIndex;
|
||||
size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
|
||||
while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
|
||||
offset = current - matchIndex;
|
||||
offset_2 = offset_1;
|
||||
offset_1 = offset;
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
|
||||
offset_2 = offset_1; offset_1 = offset; /* update offset history */
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
|
||||
ip += mLength;
|
||||
anchor = ip;
|
||||
} }
|
||||
|
||||
/* found a match : store it */
|
||||
ip += mLength;
|
||||
anchor = ip;
|
||||
|
||||
if (ip <= ilimit) {
|
||||
/* Fill Table */
|
||||
hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;
|
||||
|
@ -448,13 +440,13 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
|
|||
while (ip <= ilimit) {
|
||||
U32 const current2 = (U32)(ip-base);
|
||||
U32 const repIndex2 = current2 - offset_2;
|
||||
const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
|
||||
const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
|
||||
if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex)) /* intentional overflow */
|
||||
&& (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
|
||||
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
|
||||
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
|
||||
U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
|
||||
ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
|
||||
{ U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */
|
||||
ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH);
|
||||
hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
|
||||
ip += repLength2;
|
||||
anchor = ip;
|
||||
|
@ -476,8 +468,7 @@ size_t ZSTD_compressBlock_fast_extDict(
|
|||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
void const* src, size_t srcSize)
|
||||
{
|
||||
ZSTD_compressionParameters const* cParams = &ms->cParams;
|
||||
U32 const mls = cParams->minMatch;
|
||||
U32 const mls = ms->cParams.minMatch;
|
||||
switch(mls)
|
||||
{
|
||||
default: /* includes case 3 */
|
||||
|
|
|
@ -810,7 +810,7 @@ ZSTD_compressBlock_lazy_generic(
|
|||
/* store sequence */
|
||||
_storeSequence:
|
||||
{ size_t const litLength = start - anchor;
|
||||
ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
|
||||
anchor = ip = start + matchLength;
|
||||
}
|
||||
|
||||
|
@ -828,7 +828,7 @@ _storeSequence:
|
|||
const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
|
||||
matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
|
||||
offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset_2 <=> offset_1 */
|
||||
ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
|
||||
ip += matchLength;
|
||||
anchor = ip;
|
||||
continue;
|
||||
|
@ -843,7 +843,7 @@ _storeSequence:
|
|||
/* store sequence */
|
||||
matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
|
||||
offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
|
||||
ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
|
||||
ip += matchLength;
|
||||
anchor = ip;
|
||||
continue; /* faster when present ... (?) */
|
||||
|
@ -1051,7 +1051,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
|
|||
/* store sequence */
|
||||
_storeSequence:
|
||||
{ size_t const litLength = start - anchor;
|
||||
ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
|
||||
anchor = ip = start + matchLength;
|
||||
}
|
||||
|
||||
|
@ -1066,7 +1066,7 @@ _storeSequence:
|
|||
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
|
||||
matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
|
||||
offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset history */
|
||||
ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
|
||||
ip += matchLength;
|
||||
anchor = ip;
|
||||
continue; /* faster when present ... (?) */
|
||||
|
|
|
@ -583,7 +583,7 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
|
|||
rep[i] = rep[i-1];
|
||||
rep[0] = sequence.offset;
|
||||
/* Store the sequence */
|
||||
ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength,
|
||||
ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
|
||||
sequence.offset + ZSTD_REP_MOVE,
|
||||
sequence.matchLength - MINMATCH);
|
||||
ip += sequence.matchLength;
|
||||
|
|
|
@ -1098,7 +1098,7 @@ _shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
|
|||
|
||||
assert(anchor + llen <= iend);
|
||||
ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
|
||||
ZSTD_storeSeq(seqStore, llen, anchor, offCode, mlen-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen-MINMATCH);
|
||||
anchor += advance;
|
||||
ip = anchor;
|
||||
} }
|
||||
|
|
|
@ -668,7 +668,7 @@ static void ZSTDMT_compressionJob(void* jobDescription)
|
|||
|
||||
/* init */
|
||||
if (job->cdict) {
|
||||
size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, jobParams, job->fullFrameSize);
|
||||
size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize);
|
||||
assert(job->firstJob); /* only allowed for first job */
|
||||
if (ZSTD_isError(initError)) JOB_ERROR(initError);
|
||||
} else { /* srcStart points at reloaded section */
|
||||
|
@ -680,7 +680,7 @@ static void ZSTDMT_compressionJob(void* jobDescription)
|
|||
job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */
|
||||
ZSTD_dtlm_fast,
|
||||
NULL, /*cdict*/
|
||||
jobParams, pledgedSrcSize);
|
||||
&jobParams, pledgedSrcSize);
|
||||
if (ZSTD_isError(initError)) JOB_ERROR(initError);
|
||||
} }
|
||||
|
||||
|
@ -1028,9 +1028,9 @@ size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter,
|
|||
|
||||
/* Sets parameters relevant to the compression job,
|
||||
* initializing others to default values. */
|
||||
static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
|
||||
static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(const ZSTD_CCtx_params* params)
|
||||
{
|
||||
ZSTD_CCtx_params jobParams = params;
|
||||
ZSTD_CCtx_params jobParams = *params;
|
||||
/* Clear parameters related to multithreading */
|
||||
jobParams.forceWindow = 0;
|
||||
jobParams.nbWorkers = 0;
|
||||
|
@ -1151,16 +1151,16 @@ size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx)
|
|||
/* ===== Multi-threaded compression ===== */
|
||||
/* ------------------------------------------ */
|
||||
|
||||
static unsigned ZSTDMT_computeTargetJobLog(ZSTD_CCtx_params const params)
|
||||
static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params)
|
||||
{
|
||||
unsigned jobLog;
|
||||
if (params.ldmParams.enableLdm) {
|
||||
if (params->ldmParams.enableLdm) {
|
||||
/* In Long Range Mode, the windowLog is typically oversized.
|
||||
* In which case, it's preferable to determine the jobSize
|
||||
* based on chainLog instead. */
|
||||
jobLog = MAX(21, params.cParams.chainLog + 4);
|
||||
jobLog = MAX(21, params->cParams.chainLog + 4);
|
||||
} else {
|
||||
jobLog = MAX(20, params.cParams.windowLog + 2);
|
||||
jobLog = MAX(20, params->cParams.windowLog + 2);
|
||||
}
|
||||
return MIN(jobLog, (unsigned)ZSTDMT_JOBLOG_MAX);
|
||||
}
|
||||
|
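For a sense of scale of the job-size formula below (illustrative values):

    non-LDM, windowLog = 24 :  jobLog = MAX(20, 24 + 2) = 26  ->  64 MiB target job size
    LDM,     chainLog  = 24 :  jobLog = MAX(21, 24 + 4) = 28  -> 256 MiB, then clamped to ZSTDMT_JOBLOG_MAX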
@ -1193,27 +1193,27 @@ static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat)
|
|||
return ovlog;
|
||||
}
|
||||
|
||||
static size_t ZSTDMT_computeOverlapSize(ZSTD_CCtx_params const params)
|
||||
static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params)
|
||||
{
|
||||
int const overlapRLog = 9 - ZSTDMT_overlapLog(params.overlapLog, params.cParams.strategy);
|
||||
int ovLog = (overlapRLog >= 8) ? 0 : (params.cParams.windowLog - overlapRLog);
|
||||
int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy);
|
||||
int ovLog = (overlapRLog >= 8) ? 0 : (params->cParams.windowLog - overlapRLog);
|
||||
assert(0 <= overlapRLog && overlapRLog <= 8);
|
||||
if (params.ldmParams.enableLdm) {
|
||||
if (params->ldmParams.enableLdm) {
|
||||
/* In Long Range Mode, the windowLog is typically oversized.
|
||||
* In which case, it's preferable to determine the jobSize
|
||||
* based on chainLog instead.
|
||||
* Then, ovLog becomes a fraction of the jobSize, rather than windowSize */
|
||||
ovLog = MIN(params.cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)
|
||||
ovLog = MIN(params->cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)
|
||||
- overlapRLog;
|
||||
}
|
||||
assert(0 <= ovLog && ovLog <= ZSTD_WINDOWLOG_MAX);
|
||||
DEBUGLOG(4, "overlapLog : %i", params.overlapLog);
|
||||
DEBUGLOG(4, "overlapLog : %i", params->overlapLog);
|
||||
DEBUGLOG(4, "overlap size : %i", 1 << ovLog);
|
||||
return (ovLog==0) ? 0 : (size_t)1 << ovLog;
|
||||
}
|
||||
|
||||
static unsigned
|
||||
ZSTDMT_computeNbJobs(ZSTD_CCtx_params params, size_t srcSize, unsigned nbWorkers)
|
||||
ZSTDMT_computeNbJobs(const ZSTD_CCtx_params* params, size_t srcSize, unsigned nbWorkers)
|
||||
{
|
||||
assert(nbWorkers>0);
|
||||
{ size_t const jobSizeTarget = (size_t)1 << ZSTDMT_computeTargetJobLog(params);
|
||||
|
@ -1236,9 +1236,9 @@ static size_t ZSTDMT_compress_advanced_internal(
|
|||
const ZSTD_CDict* cdict,
|
||||
ZSTD_CCtx_params params)
|
||||
{
|
||||
ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(params);
|
||||
size_t const overlapSize = ZSTDMT_computeOverlapSize(params);
|
||||
unsigned const nbJobs = ZSTDMT_computeNbJobs(params, srcSize, params.nbWorkers);
|
||||
ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(¶ms);
|
||||
size_t const overlapSize = ZSTDMT_computeOverlapSize(¶ms);
|
||||
unsigned const nbJobs = ZSTDMT_computeNbJobs(¶ms, srcSize, params.nbWorkers);
|
||||
size_t const proposedJobSize = (srcSize + (nbJobs-1)) / nbJobs;
|
||||
size_t const avgJobSize = (((proposedJobSize-1) & 0x1FFFF) < 0x7FFF) ? proposedJobSize + 0xFFFF : proposedJobSize; /* avoid too small last block */
|
||||
const char* const srcStart = (const char*)src;
|
||||
|
@ -1256,7 +1256,7 @@ static size_t ZSTDMT_compress_advanced_internal(
|
|||
ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0];
|
||||
DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: fallback to single-thread mode");
|
||||
if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, jobParams.fParams);
|
||||
return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, jobParams);
|
||||
return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, &jobParams);
|
||||
}
|
||||
|
||||
assert(avgJobSize >= 256 KB); /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), required to compress directly into Dst (no additional buffer) */
|
||||
|
@ -1404,12 +1404,12 @@ size_t ZSTDMT_initCStream_internal(
|
|||
|
||||
mtctx->singleBlockingThread = (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN); /* do not trigger multi-threading when srcSize is too small */
|
||||
if (mtctx->singleBlockingThread) {
|
||||
ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(params);
|
||||
ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(¶ms);
|
||||
DEBUGLOG(5, "ZSTDMT_initCStream_internal: switch to single blocking thread mode");
|
||||
assert(singleThreadParams.nbWorkers == 0);
|
||||
return ZSTD_initCStream_internal(mtctx->cctxPool->cctx[0],
|
||||
dict, dictSize, cdict,
|
||||
singleThreadParams, pledgedSrcSize);
|
||||
&singleThreadParams, pledgedSrcSize);
|
||||
}
|
||||
|
||||
DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers);
|
||||
|
@ -1435,11 +1435,11 @@ size_t ZSTDMT_initCStream_internal(
|
|||
mtctx->cdict = cdict;
|
||||
}
|
||||
|
||||
mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(params);
|
||||
mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(¶ms);
|
||||
DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10));
|
||||
mtctx->targetSectionSize = params.jobSize;
|
||||
if (mtctx->targetSectionSize == 0) {
|
||||
mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(params);
|
||||
mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(¶ms);
|
||||
}
|
||||
assert(mtctx->targetSectionSize <= (size_t)ZSTDMT_JOBSIZE_MAX);
|
||||
|
||||
|
|
|
@ -61,7 +61,9 @@
|
|||
* Error Management
|
||||
****************************************************************/
|
||||
#define HUF_isError ERR_isError
|
||||
#ifndef CHECK_F
|
||||
#define CHECK_F(f) { size_t const err_ = (f); if (HUF_isError(err_)) return err_; }
|
||||
#endif
|
||||
|
||||
|
||||
/* **************************************************************
|
||||
|
|
|
@ -573,38 +573,118 @@ typedef struct {
|
|||
size_t pos;
|
||||
} seqState_t;
|
||||
|
||||
/*! ZSTD_overlapCopy8() :
 *  Copies 8 bytes from ip to op and updates op and ip where ip <= op.
 *  If the offset is < 8 then the offset is spread to at least 8 bytes.
 *
 *  Precondition: *ip <= *op
 *  Postcondition: *op - *ip >= 8
 */
static void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) {
    assert(*ip <= *op);
    if (offset < 8) {
        /* close range match, overlap */
        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
        int const sub2 = dec64table[offset];
        (*op)[0] = (*ip)[0];
        (*op)[1] = (*ip)[1];
        (*op)[2] = (*ip)[2];
        (*op)[3] = (*ip)[3];
        *ip += dec32table[offset];
        ZSTD_copy4(*op+4, *ip);
        *ip -= sub2;
    } else {
        ZSTD_copy8(*op, *ip);
    }
    *ip += 8;
    *op += 8;
    assert(*op - *ip >= 8);
}

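A worked pass through the short-offset branch above, with offset == 2 and the two bytes A,B sitting just before *op (values illustrative):

    /* start        : ... A B | op                           with *op - *ip == 2
     * 4 byte copies: op[0..3] become A B A B                (op[2..3] read bytes just written)
     * *ip += dec32table[2] == 2, then ZSTD_copy4(op+4, ip) writes A B A B into op[4..7]
     * *ip -= dec64table[2] == 8, then both pointers advance by 8
     * result       : the period-2 pattern is extended by 8 bytes and *op - *ip == 8,
     *                so the caller can continue with plain 8-byte copies. */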
|
||||
/* ZSTD_execSequenceLast7():
|
||||
* exceptional case : decompress a match starting within last 7 bytes of output buffer.
|
||||
* requires more careful checks, to ensure there is no overflow.
|
||||
* performance does not matter though.
|
||||
* note : this case is supposed to be never generated "naturally" by reference encoder,
|
||||
* since in most cases it needs at least 8 bytes to look for a match.
|
||||
* but it's allowed by the specification. */
|
||||
/*! ZSTD_safecopy() :
|
||||
* Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer
|
||||
* and write up to 16 bytes past oend_w (op >= oend_w is allowed).
|
||||
* This function is only called in the uncommon case where the sequence is near the end of the block. It
|
||||
* should be fast for a single long sequence, but can be slow for several short sequences.
|
||||
*
|
||||
* @param ovtype controls the overlap detection
|
||||
* - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
|
||||
* - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart.
|
||||
* The src buffer must be before the dst buffer.
|
||||
*/
|
||||
static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) {
|
||||
ptrdiff_t const diff = op - ip;
|
||||
BYTE* const oend = op + length;
|
||||
|
||||
assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8)) ||
|
||||
(ovtype == ZSTD_overlap_src_before_dst && diff >= 0));
|
||||
|
||||
if (length < 8) {
|
||||
/* Handle short lengths. */
|
||||
while (op < oend) *op++ = *ip++;
|
||||
return;
|
||||
}
|
||||
if (ovtype == ZSTD_overlap_src_before_dst) {
|
||||
/* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */
|
||||
assert(length >= 8);
|
||||
ZSTD_overlapCopy8(&op, &ip, diff);
|
||||
assert(op - ip >= 8);
|
||||
assert(op <= oend);
|
||||
}
|
||||
|
||||
if (oend <= oend_w) {
|
||||
/* No risk of overwrite. */
|
||||
ZSTD_wildcopy(op, ip, length, ovtype);
|
||||
return;
|
||||
}
|
||||
if (op <= oend_w) {
|
||||
/* Wildcopy until we get close to the end. */
|
||||
assert(oend > oend_w);
|
||||
ZSTD_wildcopy(op, ip, oend_w - op, ovtype);
|
||||
ip += oend_w - op;
|
||||
op = oend_w;
|
||||
}
|
||||
/* Handle the leftovers. */
|
||||
while (op < oend) *op++ = *ip++;
|
||||
}
|
||||
|
||||
/* ZSTD_execSequenceEnd():
|
||||
* This version handles cases that are near the end of the output buffer. It requires
|
||||
* more careful checks to make sure there is no overflow. By separating out these hard
|
||||
* and unlikely cases, we can speed up the common cases.
|
||||
*
|
||||
* NOTE: This function needs to be fast for a single long sequence, but doesn't need
|
||||
* to be optimized for many small sequences, since those fall into ZSTD_execSequence().
|
||||
*/
|
||||
FORCE_NOINLINE
|
||||
size_t ZSTD_execSequenceLast7(BYTE* op,
|
||||
BYTE* const oend, seq_t sequence,
|
||||
const BYTE** litPtr, const BYTE* const litLimit,
|
||||
const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
|
||||
size_t ZSTD_execSequenceEnd(BYTE* op,
|
||||
BYTE* const oend, seq_t sequence,
|
||||
const BYTE** litPtr, const BYTE* const litLimit,
|
||||
const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
|
||||
{
|
||||
BYTE* const oLitEnd = op + sequence.litLength;
|
||||
size_t const sequenceLength = sequence.litLength + sequence.matchLength;
|
||||
BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
|
||||
const BYTE* const iLitEnd = *litPtr + sequence.litLength;
|
||||
const BYTE* match = oLitEnd - sequence.offset;
|
||||
BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
|
||||
|
||||
/* check */
|
||||
RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must fit within dstBuffer");
|
||||
/* bounds checks */
|
||||
assert(oLitEnd < oMatchEnd);
|
||||
RETURN_ERROR_IF(oMatchEnd > oend, dstSize_tooSmall, "last match must fit within dstBuffer");
|
||||
RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "try to read beyond literal buffer");
|
||||
|
||||
/* copy literals */
|
||||
while (op < oLitEnd) *op++ = *(*litPtr)++;
|
||||
ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap);
|
||||
op = oLitEnd;
|
||||
*litPtr = iLitEnd;
|
||||
|
||||
/* copy Match */
|
||||
if (sequence.offset > (size_t)(oLitEnd - base)) {
|
||||
if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
|
||||
/* offset beyond prefix */
|
||||
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - vBase),corruption_detected);
|
||||
match = dictEnd - (base-match);
|
||||
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected);
|
||||
match = dictEnd - (prefixStart-match);
|
||||
if (match + sequence.matchLength <= dictEnd) {
|
||||
memmove(oLitEnd, match, sequence.matchLength);
|
||||
return sequenceLength;
|
||||
|
@ -614,13 +694,12 @@ size_t ZSTD_execSequenceLast7(BYTE* op,
|
|||
memmove(oLitEnd, match, length1);
|
||||
op = oLitEnd + length1;
|
||||
sequence.matchLength -= length1;
|
||||
match = base;
|
||||
match = prefixStart;
|
||||
} }
|
||||
while (op < oMatchEnd) *op++ = *match++;
|
||||
ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
|
||||
return sequenceLength;
|
||||
}
|
||||
|
||||
|
||||
HINT_INLINE
|
||||
size_t ZSTD_execSequence(BYTE* op,
|
||||
BYTE* const oend, seq_t sequence,
|
||||
|
@ -634,20 +713,29 @@ size_t ZSTD_execSequence(BYTE* op,
|
|||
const BYTE* const iLitEnd = *litPtr + sequence.litLength;
|
||||
const BYTE* match = oLitEnd - sequence.offset;
|
||||
|
||||
/* check */
|
||||
RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
|
||||
RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
|
||||
if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
|
||||
/* Errors and uncommon cases handled here. */
|
||||
assert(oLitEnd < oMatchEnd);
|
||||
if (iLitEnd > litLimit || oMatchEnd > oend_w)
|
||||
return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
|
||||
|
||||
/* copy Literals */
|
||||
if (sequence.litLength > 8)
|
||||
ZSTD_wildcopy_16min(op, (*litPtr), sequence.litLength, ZSTD_no_overlap); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
|
||||
else
|
||||
ZSTD_copy8(op, *litPtr);
|
||||
/* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */
|
||||
assert(iLitEnd <= litLimit /* Literal length is in bounds */);
|
||||
assert(oLitEnd <= oend_w /* Can wildcopy literals */);
|
||||
assert(oMatchEnd <= oend_w /* Can wildcopy matches */);
|
||||
|
||||
/* Copy Literals:
|
||||
* Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9.
|
||||
* We likely don't need the full 32-byte wildcopy.
|
||||
*/
|
||||
assert(WILDCOPY_OVERLENGTH >= 16);
|
||||
ZSTD_copy16(op, (*litPtr));
|
||||
if (sequence.litLength > 16) {
|
||||
ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap);
|
||||
}
|
||||
op = oLitEnd;
|
||||
*litPtr = iLitEnd; /* update for next sequence */
|
||||
|
||||
/* copy Match */
|
||||
/* Copy Match */
|
||||
if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
|
||||
/* offset beyond prefix -> go into extDict */
|
||||
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected);
|
||||
|
@ -662,123 +750,33 @@ size_t ZSTD_execSequence(BYTE* op,
|
|||
op = oLitEnd + length1;
|
||||
sequence.matchLength -= length1;
|
||||
match = prefixStart;
|
||||
if (op > oend_w || sequence.matchLength < MINMATCH) {
|
||||
U32 i;
|
||||
for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
|
||||
return sequenceLength;
|
||||
}
|
||||
} }
|
||||
/* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
|
||||
/* Match within prefix of 1 or more bytes */
|
||||
assert(op <= oMatchEnd);
|
||||
assert(oMatchEnd <= oend_w);
|
||||
assert(match >= prefixStart);
|
||||
assert(sequence.matchLength >= 1);
|
||||
|
||||
/* match within prefix */
|
||||
if (sequence.offset < 8) {
|
||||
/* close range match, overlap */
|
||||
static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
|
||||
static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
|
||||
int const sub2 = dec64table[sequence.offset];
|
||||
op[0] = match[0];
|
||||
op[1] = match[1];
|
||||
op[2] = match[2];
|
||||
op[3] = match[3];
|
||||
match += dec32table[sequence.offset];
|
||||
ZSTD_copy4(op+4, match);
|
||||
match -= sub2;
|
||||
} else {
|
||||
ZSTD_copy8(op, match);
|
||||
/* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy
|
||||
* without overlap checking.
|
||||
*/
|
||||
if (sequence.offset >= WILDCOPY_VECLEN) {
|
||||
/* We bet on a full wildcopy for matches, since we expect matches to be
|
||||
* longer than literals (in general). In silesia, ~10% of matches are longer
|
||||
* than 16 bytes.
|
||||
*/
|
||||
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);
|
||||
return sequenceLength;
|
||||
}
|
||||
op += 8; match += 8;
|
||||
assert(sequence.offset < WILDCOPY_VECLEN);
|
||||
|
||||
if (oMatchEnd > oend-(16-MINMATCH)) {
|
||||
if (op < oend_w) {
|
||||
ZSTD_wildcopy(op, match, oend_w - op, ZSTD_overlap_src_before_dst);
|
||||
match += oend_w - op;
|
||||
op = oend_w;
|
||||
}
|
||||
while (op < oMatchEnd) *op++ = *match++;
|
||||
} else {
|
||||
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst); /* works even if matchLength < 8 */
|
||||
}
|
||||
return sequenceLength;
|
||||
}
|
||||
/* Copy 8 bytes and spread the offset to be >= 8. */
|
||||
ZSTD_overlapCopy8(&op, &match, sequence.offset);
|
||||
|
||||
|
||||
HINT_INLINE
|
||||
size_t ZSTD_execSequenceLong(BYTE* op,
|
||||
BYTE* const oend, seq_t sequence,
|
||||
const BYTE** litPtr, const BYTE* const litLimit,
|
||||
const BYTE* const prefixStart, const BYTE* const dictStart, const BYTE* const dictEnd)
|
||||
{
|
||||
BYTE* const oLitEnd = op + sequence.litLength;
|
||||
size_t const sequenceLength = sequence.litLength + sequence.matchLength;
|
||||
BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
|
||||
BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
|
||||
const BYTE* const iLitEnd = *litPtr + sequence.litLength;
|
||||
const BYTE* match = sequence.match;
|
||||
|
||||
/* check */
|
||||
RETURN_ERROR_IF(oMatchEnd > oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
|
||||
RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
|
||||
if (oLitEnd > oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, dictStart, dictEnd);
|
||||
|
||||
/* copy Literals */
|
||||
if (sequence.litLength > 8)
|
||||
ZSTD_wildcopy_16min(op, *litPtr, sequence.litLength, ZSTD_no_overlap); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
|
||||
else
|
||||
ZSTD_copy8(op, *litPtr); /* note : op <= oLitEnd <= oend_w == oend - 8 */
|
||||
|
||||
op = oLitEnd;
|
||||
*litPtr = iLitEnd; /* update for next sequence */
|
||||
|
||||
/* copy Match */
|
||||
if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
|
||||
/* offset beyond prefix */
|
||||
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - dictStart), corruption_detected);
|
||||
if (match + sequence.matchLength <= dictEnd) {
|
||||
memmove(oLitEnd, match, sequence.matchLength);
|
||||
return sequenceLength;
|
||||
}
|
||||
/* span extDict & currentPrefixSegment */
|
||||
{ size_t const length1 = dictEnd - match;
|
||||
memmove(oLitEnd, match, length1);
|
||||
op = oLitEnd + length1;
|
||||
sequence.matchLength -= length1;
|
||||
match = prefixStart;
|
||||
if (op > oend_w || sequence.matchLength < MINMATCH) {
|
||||
U32 i;
|
||||
for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
|
||||
return sequenceLength;
|
||||
}
|
||||
} }
|
||||
assert(op <= oend_w);
|
||||
assert(sequence.matchLength >= MINMATCH);
|
||||
|
||||
/* match within prefix */
|
||||
if (sequence.offset < 8) {
|
||||
/* close range match, overlap */
|
||||
static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
|
||||
static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
|
||||
int const sub2 = dec64table[sequence.offset];
|
||||
op[0] = match[0];
|
||||
op[1] = match[1];
|
||||
op[2] = match[2];
|
||||
op[3] = match[3];
|
||||
match += dec32table[sequence.offset];
|
||||
ZSTD_copy4(op+4, match);
|
||||
match -= sub2;
|
||||
} else {
|
||||
ZSTD_copy8(op, match);
|
||||
}
|
||||
op += 8; match += 8;
|
||||
|
||||
if (oMatchEnd > oend-(16-MINMATCH)) {
|
||||
if (op < oend_w) {
|
||||
ZSTD_wildcopy(op, match, oend_w - op, ZSTD_overlap_src_before_dst);
|
||||
match += oend_w - op;
|
||||
op = oend_w;
|
||||
}
|
||||
while (op < oMatchEnd) *op++ = *match++;
|
||||
} else {
|
||||
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst); /* works even if matchLength < 8 */
|
||||
/* If the match length is > 8 bytes, then continue with the wildcopy. */
|
||||
if (sequence.matchLength > 8) {
|
||||
assert(op < oMatchEnd);
|
||||
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);
|
||||
}
|
||||
return sequenceLength;
|
||||
}
|
||||
|
@ -1098,7 +1096,7 @@ ZSTD_decompressSequencesLong_body(
|
|||
/* decode and decompress */
|
||||
for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
|
||||
seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
|
||||
size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
|
||||
size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
|
||||
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
|
||||
PREFETCH_L1(sequence.match); PREFETCH_L1(sequence.match + sequence.matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
|
||||
sequences[seqNb & STORED_SEQS_MASK] = sequence;
|
||||
|
@ -1109,7 +1107,7 @@ ZSTD_decompressSequencesLong_body(
|
|||
/* finish queue */
|
||||
seqNb -= seqAdvance;
|
||||
for ( ; seqNb<nbSeq ; seqNb++) {
|
||||
size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
|
||||
size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
|
||||
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
|
||||
op += oneSeqSize;
|
||||
}
|
||||
|
|
|
@ -638,8 +638,8 @@ void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLeve
|
|||
"compared to the source size %u! "
|
||||
"size(source)/size(dictionary) = %f, but it should be >= "
|
||||
"10! This may lead to a subpar dictionary! We recommend "
|
||||
"training on sources at least 10x, and up to 100x the "
|
||||
"size of the dictionary!\n", (U32)maxDictSize,
|
||||
"training on sources at least 10x, and preferably 100x "
|
||||
"the size of the dictionary! \n", (U32)maxDictSize,
|
||||
(U32)nbDmers, ratio);
|
||||
}
|
||||
|
||||
|
@ -919,13 +919,12 @@ void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters,
|
|||
}
|
||||
}
|
||||
/* Save the dictionary, parameters, and size */
|
||||
if (!dict) {
|
||||
return;
|
||||
if (dict) {
|
||||
memcpy(best->dict, dict, dictSize);
|
||||
best->dictSize = dictSize;
|
||||
best->parameters = parameters;
|
||||
best->compressedSize = compressedSize;
|
||||
}
|
||||
memcpy(best->dict, dict, dictSize);
|
||||
best->dictSize = dictSize;
|
||||
best->parameters = parameters;
|
||||
best->compressedSize = compressedSize;
|
||||
}
|
||||
if (liveJobs == 0) {
|
||||
ZSTD_pthread_cond_broadcast(&best->cond);
|
||||
|
|
|
@ -571,7 +571,7 @@ static void ZDICT_fillNoise(void* buffer, size_t length)
unsigned const prime1 = 2654435761U;
unsigned const prime2 = 2246822519U;
unsigned acc = prime1;
size_t p=0;;
size_t p=0;
for (p=0; p<length; p++) {
acc *= prime2;
((unsigned char*)buffer)[p] = (unsigned char)(acc >> 21);

@ -346,7 +346,7 @@ FORCE_INLINE unsigned FSE_highbit32 (U32 val)
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (GCC_VERSION >= 304) /* GCC Intrinsic */
return 31 - __builtin_clz (val);
return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
|
@ -353,7 +353,7 @@ MEM_STATIC unsigned BIT_highbit32 (U32 val)
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return 31 - __builtin_clz (val);
return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;

@ -2889,6 +2889,7 @@ static size_t ZSTD_decodeLiteralsBlock(void* ctx,
const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
if (litSize > srcSize-11) /* risk of reading too far with wildcopy */
{
if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
if (litSize > srcSize-3) return ERROR(corruption_detected);
memcpy(dctx->litBuffer, istart, litSize);
dctx->litPtr = dctx->litBuffer;

@ -356,7 +356,7 @@ MEM_STATIC unsigned BIT_highbit32 (U32 val)
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return 31 - __builtin_clz (val);
return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;

@ -627,7 +627,7 @@ MEM_STATIC unsigned BIT_highbit32 (U32 val)
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return 31 - __builtin_clz (val);
return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
@ -2655,6 +2655,7 @@ static size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
if (litSize > srcSize-11) /* risk of reading too far with wildcopy */
{
if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
if (litSize > srcSize-3) return ERROR(corruption_detected);
memcpy(dctx->litBuffer, istart, litSize);
dctx->litPtr = dctx->litBuffer;

@ -3034,9 +3035,12 @@ static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
{
/* blockType == blockCompressed */
const BYTE* ip = (const BYTE*)src;
size_t litCSize;

if (srcSize > BLOCKSIZE) return ERROR(corruption_detected);

/* Decode literals sub-block */
size_t litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
if (ZSTD_isError(litCSize)) return litCSize;
ip += litCSize;
srcSize -= litCSize;
@ -756,7 +756,7 @@ MEM_STATIC unsigned BITv05_highbit32 (U32 val)
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return 31 - __builtin_clz (val);
return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;

@ -860,7 +860,7 @@ MEM_STATIC unsigned BITv06_highbit32 ( U32 val)
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return 31 - __builtin_clz (val);
return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;

@ -530,7 +530,7 @@ MEM_STATIC unsigned BITv07_highbit32 (U32 val)
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return 31 - __builtin_clz (val);
return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
48
lib/zstd.h

@ -15,6 +15,7 @@ extern "C" {
#define ZSTD_H_235446

/* ======   Dependency   ======*/
#include <limits.h>   /* INT_MAX */
#include <stddef.h>   /* size_t */

@ -71,7 +72,7 @@ extern "C" {
/*------  Version  ------*/
#define ZSTD_VERSION_MAJOR    1
#define ZSTD_VERSION_MINOR    4
#define ZSTD_VERSION_RELEASE  3
#define ZSTD_VERSION_RELEASE  4

#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
ZSTDLIB_API unsigned ZSTD_versionNumber(void);   /**< to check runtime library version */
@ -386,6 +387,7 @@ typedef enum {
* ZSTD_c_forceAttachDict
* ZSTD_c_literalCompressionMode
* ZSTD_c_targetCBlockSize
* ZSTD_c_srcSizeHint
* Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
* note : never ever use experimentalParam? names directly;
* also, the enums values themselves are unstable and can still change.

@ -396,6 +398,7 @@ typedef enum {
ZSTD_c_experimentalParam4=1001,
ZSTD_c_experimentalParam5=1002,
ZSTD_c_experimentalParam6=1003,
ZSTD_c_experimentalParam7=1004,
} ZSTD_cParameter;

typedef struct {

@ -1063,6 +1066,8 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
/* Advanced parameter bounds */
#define ZSTD_TARGETCBLOCKSIZE_MIN   64
#define ZSTD_TARGETCBLOCKSIZE_MAX   ZSTD_BLOCKSIZE_MAX
#define ZSTD_SRCSIZEHINT_MIN        0
#define ZSTD_SRCSIZEHINT_MAX        INT_MAX

/* internal */
#define ZSTD_HASHLOG3_MAX           17
@ -1216,14 +1221,23 @@ ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
***************************************/

/*! ZSTD_estimate*() :
* These functions make it possible to estimate memory usage
* of a future {D,C}Ctx, before its creation.
* ZSTD_estimateCCtxSize() will provide a budget large enough for any compression level up to selected one.
* It will also consider src size to be arbitrarily "large", which is worst case.
* If srcSize is known to always be small, ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
* ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
* ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
* Note : CCtx size estimation is only correct for single-threaded compression. */
* These functions make it possible to estimate memory usage of a future
* {D,C}Ctx, before its creation.
*
* ZSTD_estimateCCtxSize() will provide a budget large enough for any
* compression level up to selected one. Unlike ZSTD_estimateCStreamSize*(),
* this estimate does not include space for a window buffer, so this estimate
* is guaranteed to be enough for single-shot compressions, but not streaming
* compressions. It will however assume the input may be arbitrarily large,
* which is the worst case. If srcSize is known to always be small,
* ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
* ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with
* ZSTD_getCParams() to create cParams from compressionLevel.
* ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with
* ZSTD_CCtxParams_setParameter().
*
* Note: only single-threaded compression is supported. This function will
* return an error code if ZSTD_c_nbWorkers is >= 1. */
ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
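For illustration, a minimal sketch of how this estimation API can be used; the compression level and the 4 KB source size below are assumed for the example, not taken from this commit. The `ZSTD_estimate*()` and `ZSTD_getCParams()` entry points are part of the experimental API, so `ZSTD_STATIC_LINKING_ONLY` must be defined.

```c
/* Sketch: compare the generic worst-case CCtx budget against a tighter
 * estimate for inputs known to be small (error handling omitted). */
#define ZSTD_STATIC_LINKING_ONLY
#include <stdio.h>
#include <zstd.h>

int main(void)
{
    int const level = 3;                                   /* assumed level */
    size_t const worstCase = ZSTD_estimateCCtxSize(level); /* src size assumed "large" */

    /* Tighter budget when each source is known to be at most ~4 KB. */
    ZSTD_compressionParameters const cParams = ZSTD_getCParams(level, 4096, 0);
    size_t const tight = ZSTD_estimateCCtxSize_usingCParams(cParams);

    printf("worst-case CCtx budget : %zu bytes\n", worstCase);
    printf("budget for 4 KB inputs : %zu bytes\n", tight);
    return 0;
}
```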
@ -1441,6 +1455,12 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* pre
* There is no guarantee on compressed block size (default:0) */
#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6

/* User's best guess of source size.
* Hint is not valid when srcSizeHint == 0.
* There is no guarantee that hint is close to actual source size,
* but compression ratio may regress significantly if guess considerably underestimates */
#define ZSTD_c_srcSizeHint ZSTD_c_experimentalParam7

/*! ZSTD_CCtx_getParameter() :
* Get the requested compression parameter value, selected by enum ZSTD_cParameter,
* and store it into int* value.
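As a hedged sketch of how the new parameter might be used (the 64 KB hint and compression level below are illustrative assumptions): `ZSTD_c_srcSizeHint` is experimental, so it also requires `ZSTD_STATIC_LINKING_ONLY`.

```c
/* Sketch: compress with a source-size guess when the real size is unknown,
 * e.g. data arriving from a pipe. Error handling kept minimal on purpose. */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

size_t compress_with_hint(void* dst, size_t dstCapacity,
                          const void* src, size_t srcSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t cSize;
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
    /* Expect roughly 64 KB per input; 0 would mean "no hint". */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_srcSizeHint, 64 * 1024);
    cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
    ZSTD_freeCCtx(cctx);
    return cSize;   /* may be an error code; check with ZSTD_isError() */
}
```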
@ -1630,7 +1650,10 @@ ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dic
/**! ZSTD_initCStream_advanced() :
* This function is deprecated, and is approximately equivalent to:
*     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
*     ZSTD_CCtx_setZstdParams(zcs, params); // Set the zstd params and leave the rest as-is
*     // Pseudocode: Set each zstd parameter and leave the rest as-is.
*     for ((param, value) : params) {
*         ZSTD_CCtx_setParameter(zcs, param, value);
*     }
*     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
*     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
*

@ -1650,7 +1673,10 @@ ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDi
/**! ZSTD_initCStream_usingCDict_advanced() :
* This function is deprecated, and is approximately equivalent to:
*     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
*     ZSTD_CCtx_setZstdFrameParams(zcs, fParams); // Set the zstd frame params and leave the rest as-is
*     // Pseudocode: Set each zstd frame parameter and leave the rest as-is.
*     for ((fParam, value) : fParams) {
*         ZSTD_CCtx_setParameter(zcs, fParam, value);
*     }
*     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
*     ZSTD_CCtx_refCDict(zcs, cdict);
*
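A possible concrete form of the migration the pseudocode above describes; the specific parameters set here (level, checksum) are placeholder assumptions, not values mandated by the commit.

```c
/* Sketch: replacement for ZSTD_initCStream_advanced() using the advanced
 * parameter API (ZSTD_CStream is an alias of ZSTD_CCtx). */
#include <zstd.h>

size_t init_stream_new_api(ZSTD_CStream* zcs,
                           const void* dict, size_t dictSize,
                           unsigned long long pledgedSrcSize)
{
    size_t err = ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
    if (ZSTD_isError(err)) return err;
    /* set each parameter individually instead of passing a params struct */
    ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, 5);
    ZSTD_CCtx_setParameter(zcs, ZSTD_c_checksumFlag, 1);
    err = ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
    if (ZSTD_isError(err)) return err;
    return ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
}
```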
@ -201,7 +201,7 @@ static void DiB_fillNoise(void* buffer, size_t length)
unsigned const prime1 = 2654435761U;
unsigned const prime2 = 2246822519U;
unsigned acc = prime1;
size_t p=0;;
size_t p=0;

for (p=0; p<length; p++) {
acc *= prime2;

@ -30,6 +30,7 @@
#include <string.h>      /* strcmp, strlen */
#include <assert.h>
#include <errno.h>       /* errno */
#include <limits.h>      /* INT_MAX */
#include <signal.h>
#include "timefn.h"      /* UTIL_getTime, UTIL_clockSpanMicro */
@ -304,7 +305,9 @@ struct FIO_prefs_s {
int ldmMinMatch;
int ldmBucketSizeLog;
int ldmHashRateLog;
size_t streamSrcSize;
size_t targetCBlockSize;
int srcSizeHint;
ZSTD_literalCompressionMode_e literalCompressionMode;

/* IO preferences */

@ -349,7 +352,9 @@ FIO_prefs_t* FIO_createPreferences(void)
ret->ldmMinMatch = 0;
ret->ldmBucketSizeLog = FIO_LDM_PARAM_NOTSET;
ret->ldmHashRateLog = FIO_LDM_PARAM_NOTSET;
ret->streamSrcSize = 0;
ret->targetCBlockSize = 0;
ret->srcSizeHint = 0;
ret->literalCompressionMode = ZSTD_lcm_auto;
return ret;
}

@ -418,10 +423,18 @@ void FIO_setRsyncable(FIO_prefs_t* const prefs, int rsyncable) {
prefs->rsyncable = rsyncable;
}

void FIO_setStreamSrcSize(FIO_prefs_t* const prefs, size_t streamSrcSize) {
prefs->streamSrcSize = streamSrcSize;
}

void FIO_setTargetCBlockSize(FIO_prefs_t* const prefs, size_t targetCBlockSize) {
prefs->targetCBlockSize = targetCBlockSize;
}

void FIO_setSrcSizeHint(FIO_prefs_t* const prefs, size_t srcSizeHint) {
prefs->srcSizeHint = (int)MIN((size_t)INT_MAX, srcSizeHint);
}

void FIO_setLiteralCompressionMode(
FIO_prefs_t* const prefs,
ZSTD_literalCompressionMode_e mode) {
@ -633,7 +646,6 @@ typedef struct {

static cRess_t FIO_createCResources(FIO_prefs_t* const prefs,
const char* dictFileName, int cLevel,
U64 srcSize,
ZSTD_compressionParameters comprParams) {
cRess_t ress;
memset(&ress, 0, sizeof(ress));

@ -667,6 +679,8 @@ static cRess_t FIO_createCResources(FIO_prefs_t* const prefs,
CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_compressionLevel, cLevel) );
/* max compressed block size */
CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_targetCBlockSize, (int)prefs->targetCBlockSize) );
/* source size hint */
CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_srcSizeHint, (int)prefs->srcSizeHint) );
/* long distance matching */
CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_enableLongDistanceMatching, prefs->ldmFlag) );
CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_ldmHashLog, prefs->ldmHashLog) );

@ -698,10 +712,7 @@ static cRess_t FIO_createCResources(FIO_prefs_t* const prefs,
CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_rsyncable, prefs->rsyncable) );
#endif
/* dictionary */
CHECK( ZSTD_CCtx_setPledgedSrcSize(ress.cctx, srcSize) );  /* set the value temporarily for dictionary loading, to adapt compression parameters */
CHECK( ZSTD_CCtx_loadDictionary(ress.cctx, dictBuffer, dictBuffSize) );
CHECK( ZSTD_CCtx_setPledgedSrcSize(ress.cctx, ZSTD_CONTENTSIZE_UNKNOWN) );  /* reset */

free(dictBuffer);
}
@ -1003,6 +1014,9 @@ FIO_compressZstdFrame(FIO_prefs_t* const prefs,
/* init */
if (fileSize != UTIL_FILESIZE_UNKNOWN) {
CHECK(ZSTD_CCtx_setPledgedSrcSize(ress.cctx, fileSize));
} else if (prefs->streamSrcSize > 0) {
/* unknown source size; use the declared stream size */
CHECK( ZSTD_CCtx_setPledgedSrcSize(ress.cctx, prefs->streamSrcSize) );
}
(void)srcFileName;

@ -1361,10 +1375,7 @@ int FIO_compressFilename(FIO_prefs_t* const prefs,
const char* dictFileName, int compressionLevel,
ZSTD_compressionParameters comprParams)
{
U64 const fileSize = UTIL_getFileSize(srcFileName);
U64 const srcSize = (fileSize == UTIL_FILESIZE_UNKNOWN) ? ZSTD_CONTENTSIZE_UNKNOWN : fileSize;

cRess_t const ress = FIO_createCResources(prefs, dictFileName, compressionLevel, srcSize, comprParams);
cRess_t const ress = FIO_createCResources(prefs, dictFileName, compressionLevel, comprParams);
int const result = FIO_compressFilename_srcFile(prefs, ress, dstFileName, srcFileName, compressionLevel);

@ -1415,10 +1426,7 @@ int FIO_compressMultipleFilenames(FIO_prefs_t* const prefs,
ZSTD_compressionParameters comprParams)
{
int error = 0;
U64 const firstFileSize = UTIL_getFileSize(inFileNamesTable[0]);
U64 const firstSrcSize = (firstFileSize == UTIL_FILESIZE_UNKNOWN) ? ZSTD_CONTENTSIZE_UNKNOWN : firstFileSize;
U64 const srcSize = (nbFiles != 1) ? ZSTD_CONTENTSIZE_UNKNOWN : firstSrcSize ;
cRess_t ress = FIO_createCResources(prefs, dictFileName, compressionLevel, srcSize, comprParams);
cRess_t ress = FIO_createCResources(prefs, dictFileName, compressionLevel, comprParams);

/* init */
assert(outFileName != NULL || suffix != NULL);

@ -1711,11 +1719,6 @@ static unsigned long long FIO_decompressZstdFrame(
}

if (readSizeHint == 0) break;   /* end of frame */
if (inBuff.size != inBuff.pos) {
DISPLAYLEVEL(1, "%s : Decoding error (37) : should consume entire input \n",
srcFileName);
return FIO_ERROR_FRAME_DECODING;
}

/* Fill input buffer */
{ size_t const toDecode = MIN(readSizeHint, ress->srcBufferSize);  /* support large skippable frames */
@ -71,7 +71,9 @@ void FIO_setOverlapLog(FIO_prefs_t* const prefs, int overlapLog);
void FIO_setRemoveSrcFile(FIO_prefs_t* const prefs, unsigned flag);
void FIO_setSparseWrite(FIO_prefs_t* const prefs, unsigned sparse);  /**< 0: no sparse; 1: disable on stdout; 2: always enabled */
void FIO_setRsyncable(FIO_prefs_t* const prefs, int rsyncable);
void FIO_setStreamSrcSize(FIO_prefs_t* const prefs, size_t streamSrcSize);
void FIO_setTargetCBlockSize(FIO_prefs_t* const prefs, size_t targetCBlockSize);
void FIO_setSrcSizeHint(FIO_prefs_t* const prefs, size_t srcSizeHint);
void FIO_setLiteralCompressionMode(
FIO_prefs_t* const prefs,
ZSTD_literalCompressionMode_e mode);

@ -92,7 +92,7 @@ extern "C" {

# if defined(__linux__) || defined(__linux)
# ifndef _POSIX_C_SOURCE
# define _POSIX_C_SOURCE 200112L  /* feature test macro : https://www.gnu.org/software/libc/manual/html_node/Feature-Test-Macros.html */
# define _POSIX_C_SOURCE 200809L  /* feature test macro : https://www.gnu.org/software/libc/manual/html_node/Feature-Test-Macros.html */
# endif
# endif
# include <unistd.h>  /* declares _POSIX_VERSION */
@ -19,12 +19,6 @@ extern "C" {
/*-****************************************
*  Dependencies
******************************************/
#include <sys/types.h>    /* utime */
#if defined(_MSC_VER)
#  include <sys/utime.h>  /* utime */
#else
#  include <utime.h>      /* utime */
#endif
#include <time.h>         /* clock_t, clock, CLOCKS_PER_SEC */

@ -54,14 +54,25 @@ int UTIL_getFileStat(const char* infilename, stat_t *statbuf)
int UTIL_setFileStat(const char *filename, stat_t *statbuf)
{
int res = 0;
struct utimbuf timebuf;

if (!UTIL_isRegularFile(filename))
return -1;

timebuf.actime = time(NULL);
timebuf.modtime = statbuf->st_mtime;
res += utime(filename, &timebuf);  /* set access and modification times */
/* set access and modification times */
#if defined(_WIN32) || (PLATFORM_POSIX_VERSION < 200809L)
{
struct utimbuf timebuf;
timebuf.actime = time(NULL);
timebuf.modtime = statbuf->st_mtime;
res += utime(filename, &timebuf);
}
#else
{
/* (atime, mtime) */
struct timespec timebuf[2] = { {0, UTIME_NOW}, statbuf->st_mtim };
res += utimensat(AT_FDCWD, filename, timebuf, 0);
}
#endif

#if !defined(_WIN32)
res += chown(filename, statbuf->st_uid, statbuf->st_gid);  /* Copy ownership */
@ -25,12 +25,17 @@ extern "C" {
#include <stdio.h>        /* fprintf */
#include <sys/types.h>    /* stat, utime */
#include <sys/stat.h>     /* stat, chmod */
#if defined(_MSC_VER)
#if defined(_WIN32)
#  include <sys/utime.h>  /* utime */
#  include <io.h>         /* _chmod */
#else
#  include <unistd.h>     /* chown, stat */
#if PLATFORM_POSIX_VERSION < 200809L
#  include <utime.h>      /* utime */
#else
#  include <fcntl.h>      /* AT_FDCWD */
#  include <sys/stat.h>   /* utimensat */
#endif
#endif
#include <time.h>         /* clock_t, clock, CLOCKS_PER_SEC, nanosleep */
#include "mem.h"          /* U32, U64 */

@ -1,5 +1,5 @@
.
.TH "ZSTD" "1" "August 2019" "zstd 1.4.3" "User Commands"
.TH "ZSTD" "1" "September 2019" "zstd 1.4.4" "User Commands"
.
.SH "NAME"
\fBzstd\fR \- zstd, zstdmt, unzstd, zstdcat \- Compress or decompress \.zst files
@ -127,6 +127,14 @@ Does not spawn a thread for compression, use a single thread for both I/O and co
\fBzstd\fR will dynamically adapt compression level to perceived I/O conditions\. Compression level adaptation can be observed live by using command \fB\-v\fR\. Adaptation can be constrained between supplied \fBmin\fR and \fBmax\fR levels\. The feature works when combined with multi\-threading and \fB\-\-long\fR mode\. It does not work with \fB\-\-single\-thread\fR\. It sets window size to 8 MB by default (can be changed manually, see \fBwlog\fR)\. Due to the chaotic nature of dynamic adaptation, compressed result is not reproducible\. \fInote\fR : at the time of this writing, \fB\-\-adapt\fR can remain stuck at low speed when combined with multiple worker threads (>=2)\.
.
.TP
\fB\-\-stream\-size=#\fR
Sets the pledged source size of input coming from a stream\. This value must be exact, as it will be included in the produced frame header\. Incorrect stream sizes will cause an error\. This information will be used to better optimize compression parameters, resulting in better and potentially faster compression, especially for smaller source sizes\.
.
.TP
\fB\-\-size\-hint=#\fR
When handling input from a stream, \fBzstd\fR must guess how large the source size will be when optimizing compression parameters\. If the stream size is relatively small, this guess may be a poor one, resulting in a higher compression ratio than expected\. This feature allows for controlling the guess when needed\. Exact guesses result in better compression ratios\. Overestimates result in slightly degraded compression ratios, while underestimates may result in significant degradation\.
.
.TP
\fB\-\-rsyncable\fR
\fBzstd\fR will periodically synchronize the compression state to make the compressed file more rsync\-friendly\. There is a negligible impact to compression ratio, and the faster compression levels will see a small compression speed hit\. This feature does not work with \fB\-\-single\-thread\fR\. You probably don\'t want to use it with long range mode, since it will decrease the effectiveness of the synchronization points, but your milage may vary\.
.
@ -144,6 +144,18 @@ the last one takes effect.
  Due to the chaotic nature of dynamic adaptation, compressed result is not reproducible.
  _note_ : at the time of this writing, `--adapt` can remain stuck at low speed
  when combined with multiple worker threads (>=2).
* `--stream-size=#` :
  Sets the pledged source size of input coming from a stream. This value must be exact, as it
  will be included in the produced frame header. Incorrect stream sizes will cause an error.
  This information will be used to better optimize compression parameters, resulting in
  better and potentially faster compression, especially for smaller source sizes.
* `--size-hint=#`:
  When handling input from a stream, `zstd` must guess how large the source size
  will be when optimizing compression parameters. If the stream size is relatively
  small, this guess may be a poor one, resulting in a higher compression ratio than
  expected. This feature allows for controlling the guess when needed.
  Exact guesses result in better compression ratios. Overestimates result in slightly
  degraded compression ratios, while underestimates may result in significant degradation.
* `--rsyncable` :
  `zstd` will periodically synchronize the compression state to make the
  compressed file more rsync-friendly. There is a negligible impact to
@ -141,6 +141,8 @@ static int usage_advanced(const char* programName)
DISPLAY( "--long[=#]: enable long distance matching with given window log (default: %u)\n", g_defaultMaxWindowLog);
DISPLAY( "--fast[=#]: switch to ultra fast compression level (default: %u)\n", 1);
DISPLAY( "--adapt : dynamically adapt compression level to I/O conditions \n");
DISPLAY( "--stream-size=# : optimize compression parameters for streaming input of given number of bytes \n");
DISPLAY( "--size-hint=# optimize compression parameters for streaming input of approximately this size\n");
DISPLAY( "--target-compressed-block-size=# : make compressed block near targeted size \n");
#ifdef ZSTD_MULTITHREAD
DISPLAY( " -T#    : spawns # compression threads (default: 1, 0==# cores) \n");

@ -588,7 +590,9 @@ int main(int argCount, const char* argv[])
const char* suffix = ZSTD_EXTENSION;
unsigned maxDictSize = g_defaultMaxDictSize;
unsigned dictID = 0;
size_t streamSrcSize = 0;
size_t targetCBlockSize = 0;
size_t srcSizeHint = 0;
int dictCLevel = g_defaultDictCLevel;
unsigned dictSelect = g_defaultSelectivityLevel;
#ifdef UTIL_HAS_CREATEFILELIST

@ -745,7 +749,9 @@ int main(int argCount, const char* argv[])
if (longCommandWArg(&argument, "--maxdict=")) { maxDictSize = readU32FromChar(&argument); continue; }
if (longCommandWArg(&argument, "--dictID=")) { dictID = readU32FromChar(&argument); continue; }
if (longCommandWArg(&argument, "--zstd=")) { if (!parseCompressionParameters(argument, &compressionParams)) CLEAN_RETURN(badusage(programName)); continue; }
if (longCommandWArg(&argument, "--stream-size=")) { streamSrcSize = readU32FromChar(&argument); continue; }
if (longCommandWArg(&argument, "--target-compressed-block-size=")) { targetCBlockSize = readU32FromChar(&argument); continue; }
if (longCommandWArg(&argument, "--size-hint=")) { srcSizeHint = readU32FromChar(&argument); continue; }
if (longCommandWArg(&argument, "--long")) {
unsigned ldmWindowLog = 0;
ldmFlag = 1;

@ -1150,7 +1156,9 @@ int main(int argCount, const char* argv[])
FIO_setAdaptMin(prefs, adaptMin);
FIO_setAdaptMax(prefs, adaptMax);
FIO_setRsyncable(prefs, rsyncable);
FIO_setStreamSrcSize(prefs, streamSrcSize);
FIO_setTargetCBlockSize(prefs, targetCBlockSize);
FIO_setSrcSizeHint(prefs, srcSizeHint);
FIO_setLiteralCompressionMode(prefs, literalCompressionMode);
if (adaptMin > cLevel) cLevel = adaptMin;
if (adaptMax < cLevel) cLevel = adaptMax;

@ -1160,7 +1168,7 @@ int main(int argCount, const char* argv[])
else
operationResult = FIO_compressMultipleFilenames(prefs, filenameTable, filenameIdx, outFileName, suffix, dictFileName, cLevel, compressionParams);
#else
(void)suffix; (void)adapt; (void)rsyncable; (void)ultra; (void)cLevel; (void)ldmFlag; (void)literalCompressionMode; (void)targetCBlockSize; /* not used when ZSTD_NOCOMPRESS set */
(void)suffix; (void)adapt; (void)rsyncable; (void)ultra; (void)cLevel; (void)ldmFlag; (void)literalCompressionMode; (void)targetCBlockSize; (void)streamSrcSize; (void)srcSizeHint; /* not used when ZSTD_NOCOMPRESS set */
DISPLAY("Compression not supported \n");
#endif
} else {  /* decompression or test */
@ -1,5 +1,5 @@
.
.TH "ZSTDGREP" "1" "August 2019" "zstd 1.4.3" "User Commands"
.TH "ZSTDGREP" "1" "September 2019" "zstd 1.4.4" "User Commands"
.
.SH "NAME"
\fBzstdgrep\fR \- print lines matching a pattern in zstandard\-compressed files

@ -1,5 +1,5 @@
.
.TH "ZSTDLESS" "1" "August 2019" "zstd 1.4.3" "User Commands"
.TH "ZSTDLESS" "1" "September 2019" "zstd 1.4.4" "User Commands"
.
.SH "NAME"
\fBzstdless\fR \- view zstandard\-compressed files

@ -758,8 +758,8 @@ static U32 generateSequences(U32* seed, frame_t* frame, seqStore_t* seqStore,
DISPLAYLEVEL(7, "  repeat offset: %d\n", (int)repIndex);
}
/* use libzstd sequence handling */
ZSTD_storeSeq(seqStore, literalLen, literals, offsetCode,
matchLen - MINMATCH);
ZSTD_storeSeq(seqStore, literalLen, literals, literals + literalLen,
offsetCode, matchLen - MINMATCH);

literalsSize -= literalLen;
excessMatch -= (matchLen - MIN_SEQ_LEN);
@ -40,8 +40,8 @@ FUZZ_LDFLAGS := -pthread $(LDFLAGS)
FUZZ_ARFLAGS := $(ARFLAGS)
FUZZ_TARGET_FLAGS = $(FUZZ_CPPFLAGS) $(FUZZ_CXXFLAGS) $(FUZZ_LDFLAGS)

FUZZ_HEADERS := fuzz_helpers.h fuzz.h zstd_helpers.h
FUZZ_SRC := $(PRGDIR)/util.c zstd_helpers.c
FUZZ_HEADERS := fuzz_helpers.h fuzz.h zstd_helpers.h fuzz_data_producer.h
FUZZ_SRC := $(PRGDIR)/util.c zstd_helpers.c fuzz_data_producer.c

ZSTDCOMMON_SRC := $(ZSTDDIR)/common/*.c
ZSTDCOMP_SRC := $(ZSTDDIR)/compress/*.c

@ -113,15 +113,6 @@ zstd_frame_info: $(FUZZ_HEADERS) $(FUZZ_OBJ) zstd_frame_info.o
libregression.a: $(FUZZ_HEADERS) $(PRGDIR)/util.h $(PRGDIR)/util.c regression_driver.o
	$(AR) $(FUZZ_ARFLAGS) $@ regression_driver.o

# Install libfuzzer (not usable for MSAN testing)
# Provided for convenience. To use this library run make libFuzzer and
# set LDFLAGS=-L.
.PHONY: libFuzzer
libFuzzer:
	@$(RM) -rf Fuzzer
	@git clone https://chromium.googlesource.com/chromium/llvm-project/compiler-rt/lib/fuzzer Fuzzer
	@cd Fuzzer && ./build.sh

corpora/%_seed_corpus.zip:
	@mkdir -p corpora
	$(DOWNLOAD) $@ $(CORPORA_URL_PREFIX)$*_seed_corpus.zip

@ -35,6 +35,8 @@ The environment variables can be overridden with the corresponding flags
`--cc`, `--cflags`, etc.
The specific fuzzing engine is selected with `LIB_FUZZING_ENGINE` or
`--lib-fuzzing-engine`, the default is `libregression.a`.
Alternatively, you can use Clang's built in fuzzing engine with
`--enable-fuzzer`.
It has flags that can easily set up sanitizers `--enable-{a,ub,m}san`, and
coverage instrumentation `--enable-coverage`.
It sets sane defaults which can be overridden with flags `--debug`,
@ -51,22 +53,25 @@ The command used to run the fuzzer is printed for debugging.
## LibFuzzer

```
# Build libfuzzer if necessary
make libFuzzer
# Build the fuzz targets
./fuzz.py build all --enable-coverage --enable-asan --enable-ubsan --lib-fuzzing-engine Fuzzer/libFuzzer.a --cc clang --cxx clang++
./fuzz.py build all --enable-fuzzer --enable-asan --enable-ubsan --cc clang --cxx clang++
# OR equivalently
CC=clang CXX=clang++ LIB_FUZZING_ENGINE=Fuzzer/libFuzzer.a ./fuzz.py build all --enable-coverage --enable-asan --enable-ubsan
CC=clang CXX=clang++ ./fuzz.py build all --enable-fuzzer --enable-asan --enable-ubsan
# Run the fuzzer
./fuzz.py libfuzzer TARGET -max_len=8192 -jobs=4
./fuzz.py libfuzzer TARGET <libfuzzer args like -jobs=4>
```

where `TARGET` could be `simple_decompress`, `stream_round_trip`, etc.

### MSAN

Fuzzing with `libFuzzer` and `MSAN` will require building a C++ standard library
and libFuzzer with MSAN.
Fuzzing with `libFuzzer` and `MSAN` is as easy as:

```
CC=clang CXX=clang++ ./fuzz.py build all --enable-fuzzer --enable-msan
./fuzz.py libfuzzer TARGET <libfuzzer args>
```

`fuzz.py` respects the environment variables / flags `MSAN_EXTRA_CPPFLAGS`,
`MSAN_EXTRA_CFLAGS`, `MSAN_EXTRA_CXXFLAGS`, `MSAN_EXTRA_LDFLAGS` to easily pass
the extra parameters only for MSAN.

@ -85,7 +90,7 @@ CC=afl-clang CXX=afl-clang++ ./fuzz.py build all --enable-asan --enable-ubsan

## Regression Testing

The regression rest supports the `all` target to run all the fuzzers in one
The regression test supports the `all` target to run all the fuzzers in one
command.

```
@ -28,8 +28,6 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
size_t const neededBufSize = ZSTD_BLOCKSIZE_MAX;

FUZZ_seed(&src, &size);

/* Allocate all buffers and contexts if not already allocated */
if (neededBufSize > bufSize) {
free(rBuf);

@ -20,21 +20,20 @@
#include <string.h>
#include "fuzz_helpers.h"
#include "zstd.h"

static const int kMaxClevel = 19;
#include "zstd_helpers.h"
#include "fuzz_data_producer.h"

static ZSTD_CCtx *cctx = NULL;
static ZSTD_DCtx *dctx = NULL;
static void* cBuf = NULL;
static void* rBuf = NULL;
static size_t bufSize = 0;
static uint32_t seed;

static size_t roundTripTest(void *result, size_t resultCapacity,
void *compressed, size_t compressedCapacity,
const void *src, size_t srcSize)
const void *src, size_t srcSize,
int cLevel)
{
int const cLevel = FUZZ_rand(&seed) % kMaxClevel;
ZSTD_parameters const params = ZSTD_getParams(cLevel, srcSize, 0);
size_t ret = ZSTD_compressBegin_advanced(cctx, NULL, 0, params, srcSize);
FUZZ_ZASSERT(ret);

@ -52,12 +51,16 @@ static size_t roundTripTest(void *result, size_t resultCapacity,

int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
size_t neededBufSize;
/* Give a random portion of src data to the producer, to use for
parameter generation. The rest will be used for (de)compression */
FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
size = FUZZ_dataProducer_reserveDataPrefix(producer);

seed = FUZZ_seed(&src, &size);
neededBufSize = size;
int const cLevel = FUZZ_dataProducer_int32Range(producer, kMinClevel, kMaxClevel);

size_t neededBufSize = size;
if (size > ZSTD_BLOCKSIZE_MAX)
return 0;
size = ZSTD_BLOCKSIZE_MAX;

/* Allocate all buffers and contexts if not already allocated */
if (neededBufSize > bufSize || !cBuf || !rBuf) {
@ -79,11 +82,13 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)

{
size_t const result =
roundTripTest(rBuf, neededBufSize, cBuf, neededBufSize, src, size);
roundTripTest(rBuf, neededBufSize, cBuf, neededBufSize, src, size,
cLevel);
FUZZ_ZASSERT(result);
FUZZ_ASSERT_MSG(result == size, "Incorrect regenerated size");
FUZZ_ASSERT_MSG(!memcmp(src, rBuf, size), "Corruption!");
}
FUZZ_dataProducer_free(producer);
#ifndef STATEFUL_FUZZING
ZSTD_freeCCtx(cctx); cctx = NULL;
ZSTD_freeDCtx(dctx); dctx = NULL;
@ -18,33 +18,37 @@
#include <stdio.h>
#include "fuzz_helpers.h"
#include "zstd_helpers.h"
#include "fuzz_data_producer.h"

static ZSTD_DCtx *dctx = NULL;

int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
uint32_t seed = FUZZ_seed(&src, &size);
/* Give a random portion of src data to the producer, to use for
parameter generation. The rest will be used for (de)compression */
FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
size = FUZZ_dataProducer_reserveDataPrefix(producer);

FUZZ_dict_t dict;
ZSTD_DDict* ddict = NULL;
int i;

if (!dctx) {
dctx = ZSTD_createDCtx();
FUZZ_ASSERT(dctx);
}
dict = FUZZ_train(src, size, &seed);
if (FUZZ_rand32(&seed, 0, 1) == 0) {
dict = FUZZ_train(src, size, producer);
if (FUZZ_dataProducer_uint32Range(producer, 0, 1) == 0) {
ddict = ZSTD_createDDict(dict.buff, dict.size);
FUZZ_ASSERT(ddict);
} else {
FUZZ_ZASSERT(ZSTD_DCtx_loadDictionary_advanced(
dctx, dict.buff, dict.size,
(ZSTD_dictLoadMethod_e)FUZZ_rand32(&seed, 0, 1),
(ZSTD_dictContentType_e)FUZZ_rand32(&seed, 0, 2)));
(ZSTD_dictLoadMethod_e)FUZZ_dataProducer_uint32Range(producer, 0, 1),
(ZSTD_dictContentType_e)FUZZ_dataProducer_uint32Range(producer, 0, 2)));
}
/* Run it 10 times over 10 output sizes. Reuse the context and dict. */
for (i = 0; i < 10; ++i) {
size_t const bufSize = FUZZ_rand32(&seed, 0, 2 * size);

{
size_t const bufSize = FUZZ_dataProducer_uint32Range(producer, 0, 10 * size);
void* rBuf = malloc(bufSize);
FUZZ_ASSERT(rBuf);
if (ddict) {

@ -55,6 +59,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
free(rBuf);
}
free(dict.buff);
FUZZ_dataProducer_free(producer);
ZSTD_freeDDict(ddict);
#ifndef STATEFUL_FUZZING
ZSTD_freeDCtx(dctx); dctx = NULL;
@ -19,22 +19,21 @@
#include <string.h>
#include "fuzz_helpers.h"
#include "zstd_helpers.h"

static const int kMaxClevel = 19;
#include "fuzz_data_producer.h"

static ZSTD_CCtx *cctx = NULL;
static ZSTD_DCtx *dctx = NULL;
static uint32_t seed;

static size_t roundTripTest(void *result, size_t resultCapacity,
void *compressed, size_t compressedCapacity,
const void *src, size_t srcSize)
const void *src, size_t srcSize,
FUZZ_dataProducer_t *producer)
{
ZSTD_dictContentType_e dictContentType = ZSTD_dct_auto;
FUZZ_dict_t dict = FUZZ_train(src, srcSize, &seed);
FUZZ_dict_t dict = FUZZ_train(src, srcSize, producer);
size_t cSize;
if ((FUZZ_rand(&seed) & 15) == 0) {
int const cLevel = FUZZ_rand(&seed) % kMaxClevel;
if (FUZZ_dataProducer_uint32Range(producer, 0, 15) == 0) {
int const cLevel = FUZZ_dataProducer_int32Range(producer, kMinClevel, kMaxClevel);

cSize = ZSTD_compress_usingDict(cctx,
compressed, compressedCapacity,

@ -42,20 +41,20 @@ static size_t roundTripTest(void *result, size_t resultCapacity,
dict.buff, dict.size,
cLevel);
} else {
dictContentType = FUZZ_rand32(&seed, 0, 2);
FUZZ_setRandomParameters(cctx, srcSize, &seed);
dictContentType = FUZZ_dataProducer_uint32Range(producer, 0, 2);
FUZZ_setRandomParameters(cctx, srcSize, producer);
/* Disable checksum so we can use sizes smaller than compress bound. */
FUZZ_ZASSERT(ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 0));
FUZZ_ZASSERT(ZSTD_CCtx_loadDictionary_advanced(
cctx, dict.buff, dict.size,
(ZSTD_dictLoadMethod_e)FUZZ_rand32(&seed, 0, 1),
(ZSTD_dictLoadMethod_e)FUZZ_dataProducer_uint32Range(producer, 0, 1),
dictContentType));
cSize = ZSTD_compress2(cctx, compressed, compressedCapacity, src, srcSize);
}
FUZZ_ZASSERT(cSize);
FUZZ_ZASSERT(ZSTD_DCtx_loadDictionary_advanced(
dctx, dict.buff, dict.size,
(ZSTD_dictLoadMethod_e)FUZZ_rand32(&seed, 0, 1),
(ZSTD_dictLoadMethod_e)FUZZ_dataProducer_uint32Range(producer, 0, 1),
dictContentType));
{
size_t const ret = ZSTD_decompressDCtx(
@ -67,17 +66,20 @@ static size_t roundTripTest(void *result, size_t resultCapacity,

int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
/* Give a random portion of src data to the producer, to use for
parameter generation. The rest will be used for (de)compression */
FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
size = FUZZ_dataProducer_reserveDataPrefix(producer);

size_t const rBufSize = size;
void* rBuf = malloc(rBufSize);
size_t cBufSize = ZSTD_compressBound(size);
void* cBuf;

seed = FUZZ_seed(&src, &size);
void *cBuf;
/* Half of the time fuzz with a 1 byte smaller output size.
* This will still succeed because we force the checksum to be disabled,
* giving us 4 bytes of overhead.
*/
cBufSize -= FUZZ_rand32(&seed, 0, 1);
cBufSize -= FUZZ_dataProducer_uint32Range(producer, 0, 1);
cBuf = malloc(cBufSize);

if (!cctx) {

@ -91,13 +93,14 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)

{
size_t const result =
roundTripTest(rBuf, rBufSize, cBuf, cBufSize, src, size);
roundTripTest(rBuf, rBufSize, cBuf, cBufSize, src, size, producer);
FUZZ_ZASSERT(result);
FUZZ_ASSERT_MSG(result == size, "Incorrect regenerated size");
FUZZ_ASSERT_MSG(!memcmp(src, rBuf, size), "Corruption!");
}
free(rBuf);
free(cBuf);
FUZZ_dataProducer_free(producer);
#ifndef STATEFUL_FUZZING
ZSTD_freeCCtx(cctx); cctx = NULL;
ZSTD_freeDCtx(dctx); dctx = NULL;
@ -17,12 +17,6 @@
* test code paths which are only executed when contexts are reused.
* WARNING: Makes reproducing crashes much harder.
* Default: Not defined.
* @param FUZZ_RNG_SEED_SIZE:
* The number of bytes of the source to look at when constructing a seed
* for the deterministic RNG. These bytes are discarded before passing
* the data to zstd functions. Every fuzzer initializes the RNG exactly
* once before doing anything else, even if it is unused.
* Default: 4.
* @param DEBUGLEVEL:
* This is a parameter for the zstd library. Defining `DEBUGLEVEL=1`
* enables assert() statements in the zstd library. Higher levels enable

@ -42,10 +36,6 @@
#ifndef FUZZ_H
#define FUZZ_H

#ifndef FUZZ_RNG_SEED_SIZE
#  define FUZZ_RNG_SEED_SIZE 4
#endif

#include <stddef.h>
#include <stdint.h>
@ -24,21 +24,38 @@ def abs_join(a, *p):
    return os.path.abspath(os.path.join(a, *p))


class InputType(object):
    RAW_DATA = 1
    COMPRESSED_DATA = 2


class FrameType(object):
    ZSTD = 1
    BLOCK = 2


class TargetInfo(object):
    def __init__(self, input_type, frame_type=FrameType.ZSTD):
        self.input_type = input_type
        self.frame_type = frame_type


# Constants
FUZZ_DIR = os.path.abspath(os.path.dirname(__file__))
CORPORA_DIR = abs_join(FUZZ_DIR, 'corpora')
TARGETS = [
    'simple_round_trip',
    'stream_round_trip',
    'block_round_trip',
    'simple_decompress',
    'stream_decompress',
    'block_decompress',
    'dictionary_round_trip',
    'dictionary_decompress',
    'zstd_frame_info',
    'simple_compress',
]
TARGET_INFO = {
    'simple_round_trip': TargetInfo(InputType.RAW_DATA),
    'stream_round_trip': TargetInfo(InputType.RAW_DATA),
    'block_round_trip': TargetInfo(InputType.RAW_DATA, FrameType.BLOCK),
    'simple_decompress': TargetInfo(InputType.COMPRESSED_DATA),
    'stream_decompress': TargetInfo(InputType.COMPRESSED_DATA),
    'block_decompress': TargetInfo(InputType.COMPRESSED_DATA, FrameType.BLOCK),
    'dictionary_round_trip': TargetInfo(InputType.RAW_DATA),
    'dictionary_decompress': TargetInfo(InputType.COMPRESSED_DATA),
    'zstd_frame_info': TargetInfo(InputType.COMPRESSED_DATA),
    'simple_compress': TargetInfo(InputType.RAW_DATA),
}
TARGETS = list(TARGET_INFO.keys())
ALL_TARGETS = TARGETS + ['all']
FUZZ_RNG_SEED_SIZE = 4
@ -67,7 +84,7 @@ MSAN_EXTRA_LDFLAGS = os.environ.get('MSAN_EXTRA_LDFLAGS', '')
def create(r):
    d = os.path.abspath(r)
    if not os.path.isdir(d):
        os.mkdir(d)
        os.makedirs(d)
    return d


@ -158,7 +175,7 @@ def compiler_version(cc, cxx):
        assert(b'clang' in cxx_version_bytes)
        compiler = 'clang'
    elif b'gcc' in cc_version_bytes:
        assert(b'gcc' in cxx_version_bytes)
        assert(b'gcc' in cxx_version_bytes or b'g++' in cxx_version_bytes)
        compiler = 'gcc'
    if compiler is not None:
        version_regex = b'([0-9])+\.([0-9])+\.([0-9])+'

@ -643,7 +660,7 @@ def gen_parser(args):
    parser.add_argument(
        '--max-size-log',
        type=int,
        default=13,
        default=18,
        help='Maximum sample size to generate')
    parser.add_argument(
        '--seed',

@ -699,10 +716,11 @@ def gen(args):
        '-o{}'.format(decompressed),
    ]

    if 'block_' in args.TARGET:
    info = TARGET_INFO[args.TARGET]
    if info.frame_type == FrameType.BLOCK:
        cmd += [
            '--gen-blocks',
            '--max-block-size-log={}'.format(args.max_size_log)
            '--max-block-size-log={}'.format(min(args.max_size_log, 17))
        ]
    else:
        cmd += ['--max-content-size-log={}'.format(args.max_size_log)]

@ -710,10 +728,11 @@ def gen(args):
    print(' '.join(cmd))
    subprocess.check_call(cmd)

    if '_round_trip' in args.TARGET:
    if info.input_type == InputType.RAW_DATA:
        print('using decompressed data in {}'.format(decompressed))
        samples = decompressed
    elif '_decompress' in args.TARGET:
    else:
        assert info.input_type == InputType.COMPRESSED_DATA
        print('using compressed data in {}'.format(compressed))
        samples = compressed

@ -721,10 +740,8 @@ def gen(args):
    for name in os.listdir(samples):
        samplename = abs_join(samples, name)
        outname = abs_join(seed, name)
        rng_seed = os.urandom(args.fuzz_rng_seed_size)
        with open(samplename, 'rb') as sample:
            with open(outname, 'wb') as out:
                out.write(rng_seed)
                CHUNK_SIZE = 131072
                chunk = sample.read(CHUNK_SIZE)
                while len(chunk) > 0:
@ -0,0 +1,85 @@
/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
*/

#include "fuzz_data_producer.h"

struct FUZZ_dataProducer_s{
const uint8_t *data;
size_t size;
};

FUZZ_dataProducer_t *FUZZ_dataProducer_create(const uint8_t *data, size_t size) {
FUZZ_dataProducer_t *producer = malloc(sizeof(FUZZ_dataProducer_t));

FUZZ_ASSERT(producer != NULL);

producer->data = data;
producer->size = size;
return producer;
}

void FUZZ_dataProducer_free(FUZZ_dataProducer_t *producer) { free(producer); }

uint32_t FUZZ_dataProducer_uint32Range(FUZZ_dataProducer_t *producer, uint32_t min,
uint32_t max) {
FUZZ_ASSERT(min <= max);

uint32_t range = max - min;
uint32_t rolling = range;
uint32_t result = 0;

while (rolling > 0 && producer->size > 0) {
uint8_t next = *(producer->data + producer->size - 1);
producer->size -= 1;
result = (result << 8) | next;
rolling >>= 8;
}

if (range == 0xffffffff) {
return result;
}

return min + result % (range + 1);
}

uint32_t FUZZ_dataProducer_uint32(FUZZ_dataProducer_t *producer) {
return FUZZ_dataProducer_uint32Range(producer, 0, 0xffffffff);
}

int32_t FUZZ_dataProducer_int32Range(FUZZ_dataProducer_t *producer,
int32_t min, int32_t max)
{
FUZZ_ASSERT(min <= max);

if (min < 0)
return (int)FUZZ_dataProducer_uint32Range(producer, 0, max - min) + min;

return FUZZ_dataProducer_uint32Range(producer, min, max);
}

size_t FUZZ_dataProducer_remainingBytes(FUZZ_dataProducer_t *producer){
return producer->size;
}

size_t FUZZ_dataProducer_contract(FUZZ_dataProducer_t *producer, size_t newSize)
{
newSize = newSize > producer->size ? producer->size : newSize;

size_t remaining = producer->size - newSize;
producer->data = producer->data + remaining;
producer->size = newSize;
return remaining;
}

size_t FUZZ_dataProducer_reserveDataPrefix(FUZZ_dataProducer_t *producer)
{
size_t producerSliceSize = FUZZ_dataProducer_uint32Range(
producer, 0, producer->size);
return FUZZ_dataProducer_contract(producer, producerSliceSize);
}
@ -0,0 +1,60 @@
/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
*/

/**
* Helper APIs for generating random data from input data stream.
The producer reads bytes from the end of the input and appends them together
to generate a random number in the requested range. If it runs out of input
data, it will keep returning the same value (min) over and over again.

*/

#ifndef FUZZ_DATA_PRODUCER_H
#define FUZZ_DATA_PRODUCER_H

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include "fuzz_helpers.h"

/* Struct used for maintaining the state of the data */
typedef struct FUZZ_dataProducer_s FUZZ_dataProducer_t;

/* Returns a data producer state struct. Use for producer initialization. */
FUZZ_dataProducer_t *FUZZ_dataProducer_create(const uint8_t *data, size_t size);

/* Frees the data producer */
void FUZZ_dataProducer_free(FUZZ_dataProducer_t *producer);

/* Returns value between [min, max] */
uint32_t FUZZ_dataProducer_uint32Range(FUZZ_dataProducer_t *producer, uint32_t min,
uint32_t max);

/* Returns a uint32 value */
uint32_t FUZZ_dataProducer_uint32(FUZZ_dataProducer_t *producer);

/* Returns a signed value between [min, max] */
int32_t FUZZ_dataProducer_int32Range(FUZZ_dataProducer_t *producer,
int32_t min, int32_t max);

/* Returns the size of the remaining bytes of data in the producer */
size_t FUZZ_dataProducer_remainingBytes(FUZZ_dataProducer_t *producer);

/* Restricts the producer to only the last newSize bytes of data.
If newSize > current data size, nothing happens. Returns the number of bytes
the producer won't use anymore, after contracting. */
size_t FUZZ_dataProducer_contract(FUZZ_dataProducer_t *producer, size_t newSize);

/* Restricts the producer to use only the last X bytes of data, where X is
a random number in the interval [0, data_size]. Returns the size of the
remaining data the producer won't use anymore (the prefix). */
size_t FUZZ_dataProducer_reserveDataPrefix(FUZZ_dataProducer_t *producer);
#endif // FUZZ_DATA_PRODUCER_H
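As a hedged sketch of how a fuzz target might consume this API (the level and flag ranges below are illustrative assumptions; the per-target `kMinClevel`/`kMaxClevel` constants added elsewhere in this commit are replaced here with literals):

```c
/* Sketch: draw fuzzing parameters from the suffix of the input, then use the
 * remaining prefix as the payload to (de)compress. */
#include <stddef.h>
#include <stdint.h>
#include "fuzz_data_producer.h"

int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
    FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
    /* Reserve a random-sized suffix for parameter generation; `size` now
     * covers only the prefix that would be fed to zstd. */
    size = FUZZ_dataProducer_reserveDataPrefix(producer);

    int const cLevel = FUZZ_dataProducer_int32Range(producer, -5, 19);
    uint32_t const useChecksum = FUZZ_dataProducer_uint32Range(producer, 0, 1);
    (void)cLevel; (void)useChecksum; (void)src;  /* feed these into zstd calls */

    FUZZ_dataProducer_free(producer);
    return 0;
}
```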
@ -14,6 +14,7 @@
#ifndef FUZZ_HELPERS_H
#define FUZZ_HELPERS_H

#include "debug.h"
#include "fuzz.h"
#include "xxhash.h"
#include "zstd.h"

@ -54,37 +55,6 @@ extern "C" {
#define FUZZ_STATIC static
#endif

/**
* Deterministically constructs a seed based on the fuzz input.
* Consumes up to the first FUZZ_RNG_SEED_SIZE bytes of the input.
*/
FUZZ_STATIC uint32_t FUZZ_seed(uint8_t const **src, size_t* size) {
uint8_t const *data = *src;
size_t const toHash = MIN(FUZZ_RNG_SEED_SIZE, *size);
*size -= toHash;
*src += toHash;
return XXH32(data, toHash, 0);
}

#define FUZZ_rotl32(x, r) (((x) << (r)) | ((x) >> (32 - (r))))

FUZZ_STATIC uint32_t FUZZ_rand(uint32_t *state) {
static const uint32_t prime1 = 2654435761U;
static const uint32_t prime2 = 2246822519U;
uint32_t rand32 = *state;
rand32 *= prime1;
rand32 += prime2;
rand32 = FUZZ_rotl32(rand32, 13);
*state = rand32;
return rand32 >> 5;
}

/* Returns a random numer in the range [min, max]. */
FUZZ_STATIC uint32_t FUZZ_rand32(uint32_t *state, uint32_t min, uint32_t max) {
uint32_t random = FUZZ_rand(state);
return min + (random % (max - min + 1));
}

#ifdef __cplusplus
}
#endif
@ -36,6 +36,7 @@ int main(int argc, char const **argv) {
fprintf(stderr, "WARNING: No files passed to %s\n", argv[0]);
for (i = 0; i < numFiles; ++i) {
char const *fileName = files[i];
DEBUGLOG(3, "Running %s", fileName);
size_t const fileSize = UTIL_getFileSize(fileName);
size_t readSize;
FILE *file;
@ -18,28 +18,33 @@
#include <stdio.h>
#include "fuzz_helpers.h"
#include "zstd.h"
#include "zstd_helpers.h"
#include "fuzz_data_producer.h"

static ZSTD_CCtx *cctx = NULL;

int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
uint32_t seed = FUZZ_seed(&src, &size);
/* Give a random portion of src data to the producer, to use for
parameter generation. The rest will be used for (de)compression */
FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
size = FUZZ_dataProducer_reserveDataPrefix(producer);

size_t const maxSize = ZSTD_compressBound(size);
int i;
size_t const bufSize = FUZZ_dataProducer_uint32Range(producer, 0, maxSize);

int const cLevel = FUZZ_dataProducer_int32Range(producer, kMinClevel, kMaxClevel);

if (!cctx) {
cctx = ZSTD_createCCtx();
FUZZ_ASSERT(cctx);
}
/* Run it 10 times over 10 output sizes. Reuse the context. */
for (i = 0; i < 10; ++i) {
int const level = (int)FUZZ_rand32(&seed, 0, 19 + 3) - 3; /* [-3, 19] */
size_t const bufSize = FUZZ_rand32(&seed, 0, maxSize);
void* rBuf = malloc(bufSize);
FUZZ_ASSERT(rBuf);
ZSTD_compressCCtx(cctx, rBuf, bufSize, src, size, level);
free(rBuf);
}

void *rBuf = malloc(bufSize);
FUZZ_ASSERT(rBuf);
ZSTD_compressCCtx(cctx, rBuf, bufSize, src, size, cLevel);
free(rBuf);
FUZZ_dataProducer_free(producer);
#ifndef STATEFUL_FUZZING
ZSTD_freeCCtx(cctx); cctx = NULL;
#endif
@ -17,26 +17,30 @@
#include <stdio.h>
#include "fuzz_helpers.h"
#include "zstd.h"
#include "fuzz_data_producer.h"

static ZSTD_DCtx *dctx = NULL;

int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
    /* Give a random portion of src data to the producer, to use for
       parameter generation. The rest will be used for (de)compression */
    FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
    size = FUZZ_dataProducer_reserveDataPrefix(producer);

    uint32_t seed = FUZZ_seed(&src, &size);
    int i;
    if (!dctx) {
        dctx = ZSTD_createDCtx();
        FUZZ_ASSERT(dctx);
    }
    /* Run it 10 times over 10 output sizes. Reuse the context. */
    for (i = 0; i < 10; ++i) {
        size_t const bufSize = FUZZ_rand32(&seed, 0, 2 * size);
        void* rBuf = malloc(bufSize);
        FUZZ_ASSERT(rBuf);
        ZSTD_decompressDCtx(dctx, rBuf, bufSize, src, size);
        free(rBuf);
    }

    size_t const bufSize = FUZZ_dataProducer_uint32Range(producer, 0, 10 * size);
    void *rBuf = malloc(bufSize);
    FUZZ_ASSERT(rBuf);

    ZSTD_decompressDCtx(dctx, rBuf, bufSize, src, size);
    free(rBuf);

    FUZZ_dataProducer_free(producer);

#ifndef STATEFUL_FUZZING
    ZSTD_freeDCtx(dctx); dctx = NULL;

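Likewise, the decompression fuzzer's core call is ZSTD_decompressDCtx on attacker-controlled bytes with an arbitrarily sized output buffer. A hedged sketch of that call shape follows; the payload and the 10x capacity are stand-ins for the producer-chosen values.

#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

int main(void) {
    const char text[] = "round-trippable payload for the decompress sketch";
    size_t const cBound = ZSTD_compressBound(sizeof(text));
    void* const cBuf = malloc(cBound);
    if (!cBuf) return 1;
    size_t const cSize = ZSTD_compress(cBuf, cBound, text, sizeof(text), 1);
    if (ZSTD_isError(cSize)) { free(cBuf); return 1; }

    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    size_t const bufSize = 10 * sizeof(text);   /* arbitrary capacity, like the fuzzer's 0..10*size draw */
    void* const rBuf = malloc(bufSize);
    if (!dctx || !rBuf) { free(cBuf); free(rBuf); ZSTD_freeDCtx(dctx); return 1; }

    /* Errors (e.g. a truncated frame or a too-small rBuf) come back as error codes, never crashes. */
    {   size_t const rSize = ZSTD_decompressDCtx(dctx, rBuf, bufSize, cBuf, cSize);
        if (ZSTD_isError(rSize))
            printf("decompression failed cleanly: %s\n", ZSTD_getErrorName(rSize));
        else
            printf("regenerated %zu bytes\n", rSize);
    }

    free(rBuf); free(cBuf);
    ZSTD_freeDCtx(dctx);
    return 0;
}
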
@ -20,23 +20,23 @@
#include <string.h>
#include "fuzz_helpers.h"
#include "zstd_helpers.h"

static const int kMaxClevel = 19;
#include "fuzz_data_producer.h"

static ZSTD_CCtx *cctx = NULL;
static ZSTD_DCtx *dctx = NULL;
static uint32_t seed;

static size_t roundTripTest(void *result, size_t resultCapacity,
                            void *compressed, size_t compressedCapacity,
                            const void *src, size_t srcSize)
                            const void *src, size_t srcSize,
                            FUZZ_dataProducer_t *producer)
{
    size_t cSize;
    if (FUZZ_rand(&seed) & 1) {
        FUZZ_setRandomParameters(cctx, srcSize, &seed);
    if (FUZZ_dataProducer_uint32Range(producer, 0, 1)) {
        FUZZ_setRandomParameters(cctx, srcSize, producer);
        cSize = ZSTD_compress2(cctx, compressed, compressedCapacity, src, srcSize);
    } else {
        int const cLevel = FUZZ_rand(&seed) % kMaxClevel;
        int const cLevel = FUZZ_dataProducer_int32Range(producer, kMinClevel, kMaxClevel);

        cSize = ZSTD_compressCCtx(
            cctx, compressed, compressedCapacity, src, srcSize, cLevel);
    }

@ -51,12 +51,17 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
    size_t cBufSize = ZSTD_compressBound(size);
    void* cBuf;

    seed = FUZZ_seed(&src, &size);
    /* Give a random portion of src data to the producer, to use for
       parameter generation. The rest will be used for (de)compression */
    FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
    size = FUZZ_dataProducer_reserveDataPrefix(producer);

    /* Half of the time fuzz with a 1 byte smaller output size.
     * This will still succeed because we don't use a dictionary, so the dictID
     * field is empty, giving us 4 bytes of overhead.
     */
    cBufSize -= FUZZ_rand32(&seed, 0, 1);
    cBufSize -= FUZZ_dataProducer_uint32Range(producer, 0, 1);

    cBuf = malloc(cBufSize);

    FUZZ_ASSERT(cBuf && rBuf);

@ -72,13 +77,14 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)

    {
        size_t const result =
            roundTripTest(rBuf, rBufSize, cBuf, cBufSize, src, size);
            roundTripTest(rBuf, rBufSize, cBuf, cBufSize, src, size, producer);
        FUZZ_ZASSERT(result);
        FUZZ_ASSERT_MSG(result == size, "Incorrect regenerated size");
        FUZZ_ASSERT_MSG(!memcmp(src, rBuf, size), "Corruption!");
    }
    free(rBuf);
    free(cBuf);
    FUZZ_dataProducer_free(producer);
#ifndef STATEFUL_FUZZING
    ZSTD_freeCCtx(cctx); cctx = NULL;
    ZSTD_freeDCtx(dctx); dctx = NULL;

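The invariant this fuzzer checks is the usual round trip: compress, decompress, and require byte-exact regeneration. A hedged minimal version with a fixed level 5 follows; the real harness instead draws the level (or full advanced parameters via FUZZ_setRandomParameters) from the producer.

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <zstd.h>

int main(void) {
    const char src[] = "data whose round trip must be byte-exact";
    size_t const srcSize = sizeof(src);
    size_t const cCap = ZSTD_compressBound(srcSize);
    void* const cBuf = malloc(cCap);
    void* const rBuf = malloc(srcSize);
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    assert(cBuf && rBuf && cctx && dctx);

    {   /* compress at a fixed level; the harness picks one in [kMinClevel, kMaxClevel] */
        size_t const cSize = ZSTD_compressCCtx(cctx, cBuf, cCap, src, srcSize, 5);
        assert(!ZSTD_isError(cSize));
        /* decompress and require exact regeneration, mirroring the FUZZ_ASSERT_MSG checks */
        {   size_t const rSize = ZSTD_decompressDCtx(dctx, rBuf, srcSize, cBuf, cSize);
            assert(!ZSTD_isError(rSize));
            assert(rSize == srcSize);
            assert(memcmp(src, rBuf, srcSize) == 0);
        }
    }

    free(cBuf); free(rBuf);
    ZSTD_freeCCtx(cctx); ZSTD_freeDCtx(dctx);
    return 0;
}
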
@ -19,6 +19,7 @@
#include <stdio.h>
#include "fuzz_helpers.h"
#include "zstd.h"
#include "fuzz_data_producer.h"

static size_t const kBufSize = ZSTD_BLOCKSIZE_MAX;

@ -26,22 +27,23 @@ static ZSTD_DStream *dstream = NULL;
static void* buf = NULL;
uint32_t seed;

static ZSTD_outBuffer makeOutBuffer(void)
static ZSTD_outBuffer makeOutBuffer(FUZZ_dataProducer_t *producer)
{
    ZSTD_outBuffer buffer = { buf, 0, 0 };

    buffer.size = (FUZZ_rand(&seed) % kBufSize) + 1;
    buffer.size = (FUZZ_dataProducer_uint32Range(producer, 1, kBufSize));
    FUZZ_ASSERT(buffer.size <= kBufSize);

    return buffer;
}

static ZSTD_inBuffer makeInBuffer(const uint8_t **src, size_t *size)
static ZSTD_inBuffer makeInBuffer(const uint8_t **src, size_t *size,
                                  FUZZ_dataProducer_t *producer)
{
    ZSTD_inBuffer buffer = { *src, 0, 0 };

    FUZZ_ASSERT(*size > 0);
    buffer.size = (FUZZ_rand(&seed) % *size) + 1;
    buffer.size = (FUZZ_dataProducer_uint32Range(producer, 1, *size));
    FUZZ_ASSERT(buffer.size <= *size);
    *src += buffer.size;
    *size -= buffer.size;

@ -51,13 +53,16 @@ static ZSTD_inBuffer makeInBuffer(const uint8_t **src, size_t *size)

int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
    seed = FUZZ_seed(&src, &size);
    /* Give a random portion of src data to the producer, to use for
       parameter generation. The rest will be used for (de)compression */
    FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
    size = FUZZ_dataProducer_reserveDataPrefix(producer);

    /* Allocate all buffers and contexts if not already allocated */
    if (!buf) {
        buf = malloc(kBufSize);
        FUZZ_ASSERT(buf);
    }
        FUZZ_ASSERT(buf);
    }

    if (!dstream) {
        dstream = ZSTD_createDStream();

@ -67,9 +72,9 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
    }

    while (size > 0) {
        ZSTD_inBuffer in = makeInBuffer(&src, &size);
        ZSTD_inBuffer in = makeInBuffer(&src, &size, producer);
        while (in.pos != in.size) {
            ZSTD_outBuffer out = makeOutBuffer();
            ZSTD_outBuffer out = makeOutBuffer(producer);
            size_t const rc = ZSTD_decompressStream(dstream, &out, &in);
            if (ZSTD_isError(rc)) goto error;
        }

@ -79,5 +84,6 @@ error:
#ifndef STATEFUL_FUZZING
    ZSTD_freeDStream(dstream); dstream = NULL;
#endif
    FUZZ_dataProducer_free(producer);
    return 0;
}

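The streaming decompressor above is driven with arbitrarily sized input and output windows. Here is a hedged sketch of the same ZSTD_decompressStream loop with a single input buffer and a fixed 64-byte output window; the harness instead sizes every window through the data producer.

#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

/* Decompress a complete frame in small output chunks; returns 0 on success, 1 on any zstd error. */
static int stream_decompress(const void* src, size_t srcSize) {
    ZSTD_DStream* const dstream = ZSTD_createDStream();
    char outChunk[64];                       /* tiny on purpose, like the fuzzer's random window */
    ZSTD_inBuffer in = { src, srcSize, 0 };
    int err = 0;
    if (!dstream) return 1;
    ZSTD_initDStream(dstream);
    for (;;) {
        ZSTD_outBuffer out = { outChunk, sizeof(outChunk), 0 };
        size_t const rc = ZSTD_decompressStream(dstream, &out, &in);
        if (ZSTD_isError(rc)) { err = 1; break; }      /* malformed data must fail cleanly */
        if (rc == 0) break;                            /* frame fully decoded and flushed */
        if (in.pos == in.size && out.pos == 0) break;  /* no further progress possible (truncated input) */
    }
    ZSTD_freeDStream(dstream);
    return err;
}

int main(void) {
    const char text[] = "streaming decompression sketch payload";
    size_t const cCap = ZSTD_compressBound(sizeof(text));
    void* const cBuf = malloc(cCap);
    size_t const cSize = cBuf ? ZSTD_compress(cBuf, cCap, text, sizeof(text), 1) : 0;
    if (!cBuf || ZSTD_isError(cSize)) { free(cBuf); return 1; }
    printf("stream decompress %s\n", stream_decompress(cBuf, cSize) ? "failed" : "ok");
    free(cBuf);
    return 0;
}
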
@ -20,31 +20,33 @@
#include <string.h>
#include "fuzz_helpers.h"
#include "zstd_helpers.h"
#include "fuzz_data_producer.h"

ZSTD_CCtx *cctx = NULL;
static ZSTD_DCtx *dctx = NULL;
static uint8_t* cBuf = NULL;
static uint8_t* rBuf = NULL;
static size_t bufSize = 0;
static uint32_t seed;

static ZSTD_outBuffer makeOutBuffer(uint8_t *dst, size_t capacity)
static ZSTD_outBuffer makeOutBuffer(uint8_t *dst, size_t capacity,
                                    FUZZ_dataProducer_t *producer)
{
    ZSTD_outBuffer buffer = { dst, 0, 0 };

    FUZZ_ASSERT(capacity > 0);
    buffer.size = (FUZZ_rand(&seed) % capacity) + 1;
    buffer.size = (FUZZ_dataProducer_uint32Range(producer, 1, capacity));
    FUZZ_ASSERT(buffer.size <= capacity);

    return buffer;
}

static ZSTD_inBuffer makeInBuffer(const uint8_t **src, size_t *size)
static ZSTD_inBuffer makeInBuffer(const uint8_t **src, size_t *size,
                                  FUZZ_dataProducer_t *producer)
{
    ZSTD_inBuffer buffer = { *src, 0, 0 };

    FUZZ_ASSERT(*size > 0);
    buffer.size = (FUZZ_rand(&seed) % *size) + 1;
    buffer.size = (FUZZ_dataProducer_uint32Range(producer, 1, *size));
    FUZZ_ASSERT(buffer.size <= *size);
    *src += buffer.size;
    *size -= buffer.size;

@ -53,23 +55,24 @@ static ZSTD_inBuffer makeInBuffer(const uint8_t **src, size_t *size)
}

static size_t compress(uint8_t *dst, size_t capacity,
                       const uint8_t *src, size_t srcSize)
                       const uint8_t *src, size_t srcSize,
                       FUZZ_dataProducer_t *producer)
{
    size_t dstSize = 0;
    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
    FUZZ_setRandomParameters(cctx, srcSize, &seed);
    FUZZ_setRandomParameters(cctx, srcSize, producer);

    while (srcSize > 0) {
        ZSTD_inBuffer in = makeInBuffer(&src, &srcSize);
        ZSTD_inBuffer in = makeInBuffer(&src, &srcSize, producer);
        /* Mode controls the action. If mode == -1 we pick a new mode */
        int mode = -1;
        while (in.pos < in.size || mode != -1) {
            ZSTD_outBuffer out = makeOutBuffer(dst, capacity);
            ZSTD_outBuffer out = makeOutBuffer(dst, capacity, producer);
            /* Previous action finished, pick a new mode. */
            if (mode == -1) mode = FUZZ_rand(&seed) % 10;
            if (mode == -1) mode = FUZZ_dataProducer_uint32Range(producer, 0, 9);
            switch (mode) {
                case 0: /* fall-though */
                case 1: /* fall-though */
                case 0: /* fall-through */
                case 1: /* fall-through */
                case 2: {
                    size_t const ret =
                        ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_flush);

@ -85,9 +88,9 @@ static size_t compress(uint8_t *dst, size_t capacity,
                    /* Reset the compressor when the frame is finished */
                    if (ret == 0) {
                        ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
                        if ((FUZZ_rand(&seed) & 7) == 0) {
                        if (FUZZ_dataProducer_uint32Range(producer, 0, 7) == 0) {
                            size_t const remaining = in.size - in.pos;
                            FUZZ_setRandomParameters(cctx, remaining, &seed);
                            FUZZ_setRandomParameters(cctx, remaining, producer);
                        }
                        mode = -1;
                    }

@ -107,7 +110,7 @@ static size_t compress(uint8_t *dst, size_t capacity,
    }
    for (;;) {
        ZSTD_inBuffer in = {NULL, 0, 0};
        ZSTD_outBuffer out = makeOutBuffer(dst, capacity);
        ZSTD_outBuffer out = makeOutBuffer(dst, capacity, producer);
        size_t const ret = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
        FUZZ_ZASSERT(ret);

@ -122,10 +125,13 @@ static size_t compress(uint8_t *dst, size_t capacity,

int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
    size_t neededBufSize;
    /* Give a random portion of src data to the producer, to use for
       parameter generation. The rest will be used for (de)compression */
    FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
    size = FUZZ_dataProducer_reserveDataPrefix(producer);

    seed = FUZZ_seed(&src, &size);
    neededBufSize = ZSTD_compressBound(size) * 2;
    size_t neededBufSize;
    neededBufSize = ZSTD_compressBound(size) * 5;

    /* Allocate all buffers and contexts if not already allocated */
    if (neededBufSize > bufSize) {

@ -146,7 +152,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
    }

    {
        size_t const cSize = compress(cBuf, neededBufSize, src, size);
        size_t const cSize = compress(cBuf, neededBufSize, src, size, producer);
        size_t const rSize =
            ZSTD_decompressDCtx(dctx, rBuf, neededBufSize, cBuf, cSize);
        FUZZ_ZASSERT(rSize);

@ -154,6 +160,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
        FUZZ_ASSERT_MSG(!memcmp(src, rBuf, size), "Corruption!");
    }

    FUZZ_dataProducer_free(producer);
#ifndef STATEFUL_FUZZING
    ZSTD_freeCCtx(cctx); cctx = NULL;
    ZSTD_freeDCtx(dctx); dctx = NULL;

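The streaming round trip hinges on ZSTD_compressStream2 being fed in pieces and closed with ZSTD_e_end. Here is a hedged sketch that streams fixed 16-byte input chunks (the harness varies chunk sizes, directives, and parameters via the producer) and then checks the frame with a one-shot decompress.

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <zstd.h>

/* Compress src into dst by streaming fixed-size input chunks, finishing with ZSTD_e_end. */
static size_t stream_compress(void* dst, size_t dstCapacity, const void* src, size_t srcSize) {
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };
    size_t consumed = 0;
    assert(cctx);
    while (consumed < srcSize) {
        size_t const chunk = (srcSize - consumed < 16) ? srcSize - consumed : 16;  /* arbitrary chunk size */
        ZSTD_inBuffer in = { (const char*)src + consumed, chunk, 0 };
        size_t const ret = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_continue);
        assert(!ZSTD_isError(ret));
        consumed += in.pos;
    }
    for (;;) {   /* flush remaining data and close the frame */
        ZSTD_inBuffer in = { NULL, 0, 0 };
        size_t const ret = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
        assert(!ZSTD_isError(ret));
        if (ret == 0) break;   /* 0 means the frame is fully written */
    }
    ZSTD_freeCCtx(cctx);
    return out.pos;
}

int main(void) {
    const char src[] = "streamed round trip: compress in chunks, decompress in one shot";
    size_t const cCap = ZSTD_compressBound(sizeof(src));
    void* const cBuf = malloc(cCap);
    char rBuf[sizeof(src)];
    assert(cBuf);
    {   size_t const cSize = stream_compress(cBuf, cCap, src, sizeof(src));
        size_t const rSize = ZSTD_decompress(rBuf, sizeof(rBuf), cBuf, cSize);
        assert(!ZSTD_isError(rSize));
        assert(rSize == sizeof(src) && memcmp(src, rBuf, sizeof(src)) == 0);
    }
    free(cBuf);
    return 0;
}
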
@ -21,10 +21,6 @@
int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
    ZSTD_frameHeader zfh;
    /* Consume the seed to be compatible with the corpora of other decompression
     * fuzzers.
     */
    FUZZ_seed(&src, &size);
    /* You can fuzz any helper functions here that are fast, and take zstd
     * compressed data as input. E.g. don't expect the input to be a dictionary,
     * so don't fuzz ZSTD_getDictID_fromDict().

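The helper most naturally exercised here is ZSTD_getFrameHeader, which parses only the frame header from compressed bytes. A hedged sketch follows; note that ZSTD_getFrameHeader and ZSTD_frameHeader live behind ZSTD_STATIC_LINKING_ONLY, and the two printed fields are simply an assumed choice of what to inspect.

#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_getFrameHeader / ZSTD_frameHeader are in the static-only API */
#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

int main(void) {
    const char text[] = "frame header inspection sketch";
    size_t const cCap = ZSTD_compressBound(sizeof(text));
    void* const cBuf = malloc(cCap);
    size_t const cSize = cBuf ? ZSTD_compress(cBuf, cCap, text, sizeof(text), 1) : 0;
    if (!cBuf || ZSTD_isError(cSize)) { free(cBuf); return 1; }

    {   ZSTD_frameHeader zfh;
        /* Returns 0 on success, an error code on corrupt input, or >0 if more header bytes are needed. */
        size_t const ret = ZSTD_getFrameHeader(&zfh, cBuf, cSize);
        if (ret == 0)
            printf("windowSize=%llu contentSize=%llu\n",
                   (unsigned long long)zfh.windowSize,
                   (unsigned long long)zfh.frameContentSize);
        else if (ZSTD_isError(ret))
            printf("invalid frame: %s\n", ZSTD_getErrorName(ret));
        else
            printf("need %zu more bytes of header\n", ret);
    }

    free(cBuf);
    return 0;
}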