From db9e73cb0762d81e4463bf7951a2a3389bded053 Mon Sep 17 00:00:00 2001
From: "W. Felix Handte"
Date: Tue, 8 Dec 2020 11:54:57 -0500
Subject: [PATCH] Don't ASAN-Poison Statically-Allocated Workspaces

Addresses #2286.
---
 lib/compress/zstd_compress.c | 28 ++++++++++++++--------------
 lib/compress/zstd_cwksp.h    | 32 ++++++++++++++++++++++++--------
 2 files changed, 38 insertions(+), 22 deletions(-)

diff --git a/lib/compress/zstd_compress.c b/lib/compress/zstd_compress.c
index f6faca71..ee6a92d5 100644
--- a/lib/compress/zstd_compress.c
+++ b/lib/compress/zstd_compress.c
@@ -109,7 +109,7 @@ ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
     ZSTD_CCtx* cctx;
     if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL;  /* minimum size */
     if ((size_t)workspace & 7) return NULL;  /* must be 8-aligned */
-    ZSTD_cwksp_init(&ws, workspace, workspaceSize);
+    ZSTD_cwksp_init(&ws, workspace, workspaceSize, 1 /* static */);
 
     cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
     if (cctx == NULL) return NULL;
@@ -457,12 +457,12 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
         bounds.lowerBound = (int)ZSTD_bm_buffered;
         bounds.upperBound = (int)ZSTD_bm_stable;
         return bounds;
-        
+
     case ZSTD_c_blockDelimiters:
         bounds.lowerBound = (int)ZSTD_sf_noBlockDelimiters;
         bounds.upperBound = (int)ZSTD_sf_explicitBlockDelimiters;
         return bounds;
-        
+
     case ZSTD_c_validateSequences:
         bounds.lowerBound = 0;
         bounds.upperBound = 1;
@@ -781,12 +781,12 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
         BOUNDCHECK(ZSTD_c_stableOutBuffer, value);
         CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value;
         return CCtxParams->outBufferMode;
-        
+
     case ZSTD_c_blockDelimiters:
         BOUNDCHECK(ZSTD_c_blockDelimiters, value);
         CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value;
        return CCtxParams->blockDelimiters;
-        
+
    case ZSTD_c_validateSequences:
        BOUNDCHECK(ZSTD_c_validateSequences, value);
        CCtxParams->validateSequences = value;
@@ -3692,7 +3692,7 @@ static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
             return NULL;
     }
 
-    ZSTD_cwksp_init(&ws, workspace, workspaceSize);
+    ZSTD_cwksp_init(&ws, workspace, workspaceSize, 0 /* not static */);
 
     cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
     assert(cdict != NULL);
@@ -3836,7 +3836,7 @@ const ZSTD_CDict* ZSTD_initStaticCDict(
 {
     ZSTD_cwksp ws;
 
-    ZSTD_cwksp_init(&ws, workspace, workspaceSize);
+    ZSTD_cwksp_init(&ws, workspace, workspaceSize, 1 /* static */);
     cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
     if (cdict == NULL) return NULL;
     ZSTD_cwksp_move(&cdict->workspace, &ws);
@@ -4635,10 +4635,10 @@ static size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZS
 /* Returns the number of bytes to move the current read position back by. Only non-zero
  * if we ended up splitting a sequence. Otherwise, it may return a ZSTD error if something
  * went wrong.
- * 
+ *
  * This function will attempt to scan through blockSize bytes represented by the sequences
  * in inSeqs, storing any (partial) sequences.
- * 
+ *
  * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to
  * avoid splitting a match, or to avoid splitting a match such that it would produce a match
  * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
@@ -4659,7 +4659,7 @@ static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_seq
     U32 matchLength;
     U32 rawOffset;
     U32 offCode;
-    
+
     if (cctx->cdict) {
         dictSize = cctx->cdict->dictContentSize;
     } else if (cctx->prefixDict.dict) {
@@ -4793,7 +4793,7 @@ static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
     size_t compressedSeqsSize;
     size_t remaining = srcSize;
     ZSTD_sequencePosition seqPos = {0, 0, 0};
-    
+
     BYTE const* ip = (BYTE const*)src;
     BYTE* op = (BYTE*)dst;
     ZSTD_sequenceCopier sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
@@ -4879,7 +4879,7 @@ static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
 
         cSize += cBlockSize;
         DEBUGLOG(4, "cSize running total: %zu", cSize);
-        
+
         if (lastBlock) {
             break;
         } else {
@@ -4890,7 +4890,7 @@ static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
             cctx->isFirstBlock = 0;
         }
     }
-    
+
     return cSize;
 }
 
diff --git a/lib/compress/zstd_cwksp.h b/lib/compress/zstd_cwksp.h
index 5d07352f..3e9bb7ee 100644
--- a/lib/compress/zstd_cwksp.h
+++ b/lib/compress/zstd_cwksp.h
@@ -137,7 +137,8 @@ typedef struct {
     void* tableValidEnd;
     void* allocStart;
 
-    int allocFailed;
+    BYTE allocFailed;
+    BYTE isStatic;
     int workspaceOversizedDuration;
     ZSTD_cwksp_alloc_phase_e phase;
 } ZSTD_cwksp;
@@ -256,7 +257,9 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal(
     /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
      * either side. */
     alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
-    __asan_unpoison_memory_region(alloc, bytes);
+    if (!ws->isStatic) {
+        __asan_unpoison_memory_region(alloc, bytes);
+    }
 #endif
 
     return alloc;
@@ -302,7 +305,9 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
     ws->tableEnd = end;
 
 #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
-    __asan_unpoison_memory_region(alloc, bytes);
+    if (!ws->isStatic) {
+        __asan_unpoison_memory_region(alloc, bytes);
+    }
 #endif
 
     return alloc;
@@ -341,7 +346,9 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
      * either side. */
     alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
-    __asan_unpoison_memory_region(alloc, bytes);
+    if (!ws->isStatic) {
+        __asan_unpoison_memory_region(alloc, bytes);
+    }
 #endif
 
     return alloc;
@@ -398,7 +405,11 @@ MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
     DEBUGLOG(4, "cwksp: clearing tables!");
 
 #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
-    {
+    /* We don't do this when the workspace is statically allocated, because
+     * when that is the case, we have no capability to hook into the end of the
+     * workspace's lifecycle to unpoison the memory.
+     */
+    if (!ws->isStatic) {
         size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
         __asan_poison_memory_region(ws->objectEnd, size);
     }
@@ -427,7 +438,11 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
 #endif
 
 #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
-    {
+    /* We don't do this when the workspace is statically allocated, because
+     * when that is the case, we have no capability to hook into the end of the
+     * workspace's lifecycle to unpoison the memory.
+     */
+    if (!ws->isStatic) {
         size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
         __asan_poison_memory_region(ws->objectEnd, size);
     }
@@ -447,7 +462,7 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
  * Any existing values in the workspace are ignored (the previously managed
  * buffer, if present, must be separately freed).
  */
-MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
+MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, int isStatic) {
     DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
     assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
     ws->workspace = start;
@@ -455,6 +470,7 @@ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
     ws->objectEnd = ws->workspace;
     ws->tableValidEnd = ws->objectEnd;
     ws->phase = ZSTD_cwksp_alloc_objects;
+    ws->isStatic = !!isStatic;
     ZSTD_cwksp_clear(ws);
     ws->workspaceOversizedDuration = 0;
     ZSTD_cwksp_assert_internal_consistency(ws);
@@ -464,7 +480,7 @@ MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem
     void* workspace = ZSTD_customMalloc(size, customMem);
     DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
     RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
-    ZSTD_cwksp_init(ws, workspace, size);
+    ZSTD_cwksp_init(ws, workspace, size, 0 /* not static */);
     return 0;
 }
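
Usage note (illustrative, not part of the patch): the statically-allocated path this change
special-cases is the one reached through ZSTD_initStaticCCtx(), where the caller hands zstd a
buffer it owns for the whole lifetime of the context. The following is a minimal sketch of that
pattern; the 4 MB buffer size is an arbitrary assumption for the sketch (real callers should size
the buffer with ZSTD_estimateCCtxSize()), and ZSTD_initStaticCCtx() requires
ZSTD_STATIC_LINKING_ONLY.

#define ZSTD_STATIC_LINKING_ONLY  /* ZSTD_initStaticCCtx() is a static-only API */
#include <zstd.h>
#include <stdio.h>

/* A size_t array guarantees the 8-byte alignment ZSTD_initStaticCCtx() checks for.
 * 4 MB is an arbitrary size for this sketch; real code should size the buffer
 * using ZSTD_estimateCCtxSize(). */
static size_t workspace[(4 << 20) / sizeof(size_t)];

int main(void)
{
    char const src[] = "hello hello hello hello hello hello";
    char dst[256];
    /* The CCtx is emplaced *inside* `workspace`; zstd neither allocates nor frees it. */
    ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(workspace, sizeof(workspace));
    if (cctx == NULL) return 1;  /* workspace too small or misaligned */
    {   size_t const cSize = ZSTD_compressCCtx(cctx, dst, sizeof(dst), src, sizeof(src), 3);
        if (ZSTD_isError(cSize)) return 1;
        printf("compressed %u -> %u bytes\n", (unsigned)sizeof(src), (unsigned)cSize);
    }
    /* No ZSTD_freeCCtx() here: `workspace` simply remains owned by the caller. */
    return 0;
}

Because zstd never sees the end of `workspace`'s lifetime, there is no point at which cwksp could
unpoison it, and any ASAN poison left behind would misfire on the caller's later, legitimate uses
of the buffer; that is why the poisoning above is now gated on !ws->isStatic.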