[fuzz] Add dictionary_loader fuzzer

* Adds the fuzzer
* Adds an additional `InputType` for the fuzzer

I ran the fuzzer for about 10 minutes and it found 2 bugs:

* Catches the original bug without any help
* Catches an additional bug with 8-byte dictionaries (see the sketch below)
Branch: dev
Author: Nick Terrell
Date:   2019-11-01 15:16:24 -07:00
Parent: d770a2a89f
Commit: 75e7c0d107
3 changed files with 161 additions and 36 deletions
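
For orientation, the property the new target checks (load a dictionary, compress, then decompress with the same dictionary and compare) can be sketched directly against zstd's public advanced API. The snippet below is illustrative only; it is not part of this commit and does not reproduce the bugs listed above. The 8-byte dictionary contents and the by-reference, raw-content load parameters are arbitrary choices for the example, and ZSTD_STATIC_LINKING_ONLY is required for the *_loadDictionary_advanced() calls and the load-method/content-type enums.

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_*_loadDictionary_advanced(), ZSTD_dlm_*, ZSTD_dct_* */
    #include <zstd.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const unsigned char dict[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };  /* hypothetical 8-byte dictionary */
        const char src[] = "a payload that should survive the round trip";
        char cBuf[256];
        char rBuf[sizeof(src)];

        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        ZSTD_DCtx* const dctx = ZSTD_createDCtx();

        /* Load the dictionary by reference, as raw content, on both sides. */
        if (ZSTD_isError(ZSTD_CCtx_loadDictionary_advanced(
                cctx, dict, sizeof(dict), ZSTD_dlm_byRef, ZSTD_dct_rawContent)))
            return 1;   /* raw-content loads are expected to succeed */
        if (ZSTD_isError(ZSTD_DCtx_loadDictionary_advanced(
                dctx, dict, sizeof(dict), ZSTD_dlm_byRef, ZSTD_dct_rawContent)))
            return 1;

        size_t const cSize = ZSTD_compress2(cctx, cBuf, sizeof(cBuf), src, sizeof(src));
        if (ZSTD_isError(cSize)) return 1;

        size_t const rSize = ZSTD_decompressDCtx(dctx, rBuf, sizeof(rBuf), cBuf, cSize);
        printf("round trip %s\n",
               (!ZSTD_isError(rSize) && rSize == sizeof(src) && !memcmp(src, rBuf, rSize))
                   ? "ok" : "FAILED");

        ZSTD_freeCCtx(cctx);
        ZSTD_freeDCtx(dctx);
        return 0;
    }

The fuzzer added below generalizes this flow by letting the fuzzing engine pick the load method, the content type, and the buffer that doubles as dictionary and payload.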

tests/fuzz/Makefile

@@ -73,7 +73,8 @@ FUZZ_TARGETS := \
     dictionary_round_trip \
     dictionary_decompress \
     zstd_frame_info \
-    simple_compress
+    simple_compress \
+    dictionary_loader
 
 all: $(FUZZ_TARGETS)
@@ -110,6 +111,9 @@ simple_compress: $(FUZZ_HEADERS) $(FUZZ_OBJ) simple_compress.o
 zstd_frame_info: $(FUZZ_HEADERS) $(FUZZ_OBJ) zstd_frame_info.o
     $(CXX) $(FUZZ_TARGET_FLAGS) $(FUZZ_OBJ) zstd_frame_info.o $(LIB_FUZZING_ENGINE) -o $@
 
+dictionary_loader: $(FUZZ_HEADERS) $(FUZZ_OBJ) dictionary_loader.o
+    $(CXX) $(FUZZ_TARGET_FLAGS) $(FUZZ_OBJ) dictionary_loader.o $(LIB_FUZZING_ENGINE) -o $@
+
 libregression.a: $(FUZZ_HEADERS) $(PRGDIR)/util.h $(PRGDIR)/util.c regression_driver.o
     $(AR) $(FUZZ_ARFLAGS) $@ regression_driver.o
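
As with the other targets, the new rule links dictionary_loader.o against the shared fuzz objects and $(LIB_FUZZING_ENGINE) (libFuzzer, or the bundled libregression.a corpus driver), which supplies main() and repeatedly invokes the target. The only symbol a target has to export is the standard libFuzzer entry point; a bare skeleton of that contract (illustrative, not part of this commit) looks like:

    #include <stdint.h>
    #include <stddef.h>

    /* Called by the fuzzing engine once per generated input.
     * Fuzz targets should always return 0; non-zero values are reserved. */
    int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size)
    {
        (void)data;   /* feed `data`/`size` into the code under test here */
        (void)size;
        return 0;
    }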

tests/fuzz/dictionary_loader.c

@@ -0,0 +1,93 @@
/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
*/

/**
 * This fuzz target makes sure that whenever a compression dictionary can be
 * loaded, the data can be round tripped.
 */

#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include "fuzz_helpers.h"
#include "zstd_helpers.h"
#include "fuzz_data_producer.h"

/**
 * Compresses the data and returns the compressed size or an error.
 */
static size_t compress(void* compressed, size_t compressedCapacity,
                       void const* source, size_t sourceSize,
                       void const* dict, size_t dictSize,
                       ZSTD_dictLoadMethod_e dictLoadMethod,
                       ZSTD_dictContentType_e dictContentType)
{
    ZSTD_CCtx* cctx = ZSTD_createCCtx();
    FUZZ_ZASSERT(ZSTD_CCtx_loadDictionary_advanced(
            cctx, dict, dictSize, dictLoadMethod, dictContentType));
    size_t const compressedSize = ZSTD_compress2(
            cctx, compressed, compressedCapacity, source, sourceSize);
    ZSTD_freeCCtx(cctx);
    return compressedSize;
}

static size_t decompress(void* result, size_t resultCapacity,
                         void const* compressed, size_t compressedSize,
                         void const* dict, size_t dictSize,
                         ZSTD_dictLoadMethod_e dictLoadMethod,
                         ZSTD_dictContentType_e dictContentType)
{
    ZSTD_DCtx* dctx = ZSTD_createDCtx();
    FUZZ_ZASSERT(ZSTD_DCtx_loadDictionary_advanced(
            dctx, dict, dictSize, dictLoadMethod, dictContentType));
    size_t const resultSize = ZSTD_decompressDCtx(
            dctx, result, resultCapacity, compressed, compressedSize);
    FUZZ_ZASSERT(resultSize);
    ZSTD_freeDCtx(dctx);
    return resultSize;
}

int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
    FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
    ZSTD_dictLoadMethod_e const dlm =
            size = FUZZ_dataProducer_uint32Range(producer, 0, 1);
    ZSTD_dictContentType_e const dct =
            FUZZ_dataProducer_uint32Range(producer, 0, 2);
    size = FUZZ_dataProducer_remainingBytes(producer);

    DEBUGLOG(2, "Dict load method %d", dlm);
    DEBUGLOG(2, "Dict content type %d", dct);
    DEBUGLOG(2, "Dict size %u", (unsigned)size);

    void* const rBuf = malloc(size);
    FUZZ_ASSERT(rBuf);
    size_t const cBufSize = ZSTD_compressBound(size);
    void* const cBuf = malloc(cBufSize);
    FUZZ_ASSERT(cBuf);

    size_t const cSize =
            compress(cBuf, cBufSize, src, size, src, size, dlm, dct);
    /* compression failing is okay */
    if (ZSTD_isError(cSize)) {
        FUZZ_ASSERT_MSG(dct != ZSTD_dct_rawContent, "Raw must always succeed!");
        goto out;
    }
    size_t const rSize =
            decompress(rBuf, size, cBuf, cSize, src, size, dlm, dct);
    FUZZ_ASSERT_MSG(rSize == size, "Incorrect regenerated size");
    FUZZ_ASSERT_MSG(!memcmp(src, rBuf, size), "Corruption!");
out:
    free(cBuf);
    free(rBuf);
    FUZZ_dataProducer_free(producer);
    return 0;
}
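
The "compression failing is okay" branch, together with the "Raw must always succeed!" assertion, reflects how the three ZSTD_dictContentType_e values behave: ZSTD_dct_rawContent treats the buffer as plain content and is expected to always load, ZSTD_dct_fullDict requires the zstd dictionary format (magic number plus entropy tables) and may be rejected, and ZSTD_dct_auto picks between the two based on the magic number. A quick way to see which case a given buffer falls into, using only the stable API, is sketched below; this helper is an illustration, not part of the commit.

    #include <stdio.h>
    #include <zstd.h>

    /* ZSTD_getDictID_fromDict() returns 0 when the buffer does not follow the
     * zstd dictionary format; such a buffer can still be loaded as raw content
     * (or via ZSTD_dct_auto), but ZSTD_dct_fullDict would reject it. */
    static void classify_dict(const void* dict, size_t dictSize)
    {
        unsigned const id = ZSTD_getDictID_fromDict(dict, dictSize);
        if (id == 0)
            printf("content-only dictionary candidate (%zu bytes)\n", dictSize);
        else
            printf("zstd-format dictionary, dictID = %u\n", id);
    }

This is also why the fuzzer only tolerates compression failures for the non-raw content types.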

tests/fuzz/fuzz.py

@@ -27,6 +27,7 @@ def abs_join(a, *p):
 class InputType(object):
     RAW_DATA = 1
     COMPRESSED_DATA = 2
+    DICTIONARY_DATA = 3
 
 
 class FrameType(object):
@@ -54,6 +55,7 @@ TARGET_INFO = {
     'dictionary_decompress': TargetInfo(InputType.COMPRESSED_DATA),
     'zstd_frame_info': TargetInfo(InputType.COMPRESSED_DATA),
     'simple_compress': TargetInfo(InputType.RAW_DATA),
+    'dictionary_loader': TargetInfo(InputType.DICTIONARY_DATA),
 }
 TARGETS = list(TARGET_INFO.keys())
 ALL_TARGETS = TARGETS + ['all']
@@ -73,6 +75,7 @@ LIB_FUZZING_ENGINE = os.environ.get('LIB_FUZZING_ENGINE', 'libregression.a')
 AFL_FUZZ = os.environ.get('AFL_FUZZ', 'afl-fuzz')
 DECODECORPUS = os.environ.get('DECODECORPUS',
                               abs_join(FUZZ_DIR, '..', 'decodecorpus'))
+ZSTD = os.environ.get('ZSTD', abs_join(FUZZ_DIR, '..', '..', 'zstd'))
 
 # Sanitizer environment variables
 MSAN_EXTRA_CPPFLAGS = os.environ.get('MSAN_EXTRA_CPPFLAGS', '')
@@ -673,6 +676,11 @@ def gen(args):
         default=DECODECORPUS,
         help="decodecorpus binary (default: $DECODECORPUS='{}')".format(
             DECODECORPUS))
+    parser.add_argument(
+        '--zstd',
+        type=str,
+        default=ZSTD,
+        help="zstd binary (default: $ZSTD='{}')".format(ZSTD))
     parser.add_argument(
         '--fuzz-rng-seed-size',
         type=int,
@@ -707,46 +715,66 @@ def gen(args):
         return 1
     seed = create(args.seed)
-    with tmpdir() as compressed:
-        with tmpdir() as decompressed:
-            cmd = [
-                args.decodecorpus,
-                '-n{}'.format(args.number),
-                '-p{}/'.format(compressed),
-                '-o{}'.format(decompressed),
-            ]
-
-            info = TARGET_INFO[args.TARGET]
-            if info.frame_type == FrameType.BLOCK:
-                cmd += [
-                    '--gen-blocks',
-                    '--max-block-size-log={}'.format(min(args.max_size_log, 17))
-                ]
-            else:
-                cmd += ['--max-content-size-log={}'.format(args.max_size_log)]
-
-            print(' '.join(cmd))
-            subprocess.check_call(cmd)
-
-            if info.input_type == InputType.RAW_DATA:
-                print('using decompressed data in {}'.format(decompressed))
-                samples = decompressed
-            else:
-                assert info.input_type == InputType.COMPRESSED_DATA
-                print('using compressed data in {}'.format(compressed))
-                samples = compressed
-
-            # Copy the samples over and prepend the RNG seeds
-            for name in os.listdir(samples):
-                samplename = abs_join(samples, name)
-                outname = abs_join(seed, name)
-                with open(samplename, 'rb') as sample:
-                    with open(outname, 'wb') as out:
-                        CHUNK_SIZE = 131072
-                        chunk = sample.read(CHUNK_SIZE)
-                        while len(chunk) > 0:
-                            out.write(chunk)
-                            chunk = sample.read(CHUNK_SIZE)
+    with tmpdir() as compressed, tmpdir() as decompressed, tmpdir() as dict:
+        info = TARGET_INFO[args.TARGET]
+
+        if info.input_type == InputType.DICTIONARY_DATA:
+            number = max(args.number, 1000)
+        else:
+            number = args.number
+        cmd = [
+            args.decodecorpus,
+            '-n{}'.format(number),
+            '-p{}/'.format(compressed),
+            '-o{}'.format(decompressed),
+        ]
+
+        if info.frame_type == FrameType.BLOCK:
+            cmd += [
+                '--gen-blocks',
+                '--max-block-size-log={}'.format(min(args.max_size_log, 17))
+            ]
+        else:
+            cmd += ['--max-content-size-log={}'.format(args.max_size_log)]
+
+        print(' '.join(cmd))
+        subprocess.check_call(cmd)
+
+        if info.input_type == InputType.RAW_DATA:
+            print('using decompressed data in {}'.format(decompressed))
+            samples = decompressed
+        elif info.input_type == InputType.COMPRESSED_DATA:
+            print('using compressed data in {}'.format(compressed))
+            samples = compressed
+        else:
+            assert info.input_type == InputType.DICTIONARY_DATA
+            print('making dictionary data from {}'.format(decompressed))
+            samples = dict
+            min_dict_size_log = 9
+            max_dict_size_log = max(min_dict_size_log + 1, args.max_size_log)
+            for dict_size_log in range(min_dict_size_log, max_dict_size_log):
+                dict_size = 1 << dict_size_log
+                cmd = [
+                    args.zstd,
+                    '--train',
+                    '-r', decompressed,
+                    '--maxdict={}'.format(dict_size),
+                    '-o', abs_join(dict, '{}.zstd-dict'.format(dict_size))
+                ]
+                print(' '.join(cmd))
+                subprocess.check_call(cmd)
+
+        # Copy the samples over and prepend the RNG seeds
+        for name in os.listdir(samples):
+            samplename = abs_join(samples, name)
+            outname = abs_join(seed, name)
+            with open(samplename, 'rb') as sample:
+                with open(outname, 'wb') as out:
+                    CHUNK_SIZE = 131072
+                    chunk = sample.read(CHUNK_SIZE)
+                    while len(chunk) > 0:
+                        out.write(chunk)
+                        chunk = sample.read(CHUNK_SIZE)
     return 0