/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* ====== Tuning parameters ====== */
#define ZSTDMT_NBTHREADS_MAX 200
#define ZSTDMT_JOBSIZE_MAX   (MEM_32bits() ? (512 MB) : (2 GB))   /* note : limited by `jobSize` type, which is `unsigned` */
#define ZSTDMT_OVERLAPLOG_DEFAULT 6

/* ====== Compiler specifics ====== */
#if defined(_MSC_VER)
#  pragma warning(disable : 4204)   /* disable: C4204: non-constant aggregate initializer */
#endif

/* ====== Dependencies ====== */
2017-06-02 18:20:48 -07:00
# include <string.h> /* memcpy, memset */
2018-01-13 13:18:57 -08:00
# include <limits.h> /* INT_MAX */
2017-06-02 18:20:48 -07:00
# include "pool.h" /* threadpool */
# include "threading.h" /* mutex */
2017-11-07 16:15:23 -08:00
# include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
2016-12-26 22:19:36 -08:00
# include "zstdmt_compress.h"
2017-01-20 12:23:30 -08:00
/* ====== Debug ====== */
#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=2)

#  include <stdio.h>
#  include <unistd.h>
#  include <sys/times.h>
#  define DEBUGLOGRAW(l, ...) if (l<=ZSTD_DEBUG) { fprintf(stderr, __VA_ARGS__); }

#  define DEBUG_PRINTHEX(l,p,n) {            \
    unsigned debug_u;                        \
    for (debug_u=0; debug_u<(n); debug_u++)  \
        DEBUGLOGRAW(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \
    DEBUGLOGRAW(l, " \n");                   \
}

static unsigned long long GetCurrentClockTimeMicroseconds(void)
{
   static clock_t _ticksPerSecond = 0;
   if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK);

   {   struct tms junk;
       clock_t newTicks = (clock_t) times(&junk);
       return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond);
   }
}

#  define MUTEX_WAIT_TIME_DLEVEL 6
#  define ZSTD_PTHREAD_MUTEX_LOCK(mutex) {                \
    if (ZSTD_DEBUG >= MUTEX_WAIT_TIME_DLEVEL) {           \
        unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \
        ZSTD_pthread_mutex_lock(mutex);                   \
        {   unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \
            unsigned long long const elapsedTime = (afterTime-beforeTime); \
            if (elapsedTime > 1000) {   /* or whatever threshold you like; I'm using 1 millisecond here */ \
                DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \
                    elapsedTime, #mutex);                 \
        }   }                                             \
    } else {                                              \
        ZSTD_pthread_mutex_lock(mutex);                   \
    }                                                     \
}
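
/* Illustrative note : with this debug instrumentation enabled,
 *     ZSTD_PTHREAD_MUTEX_LOCK(&somePool->poolMutex);
 * behaves exactly like ZSTD_pthread_mutex_lock(), but additionally logs a
 * message whenever acquiring the lock took more than 1000 microseconds. */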

#else

#  define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m)
#  define DEBUG_PRINTHEX(l,p,n) {}

#endif

/* ===== Buffer Pool ===== */
/* a single Buffer Pool can be invoked from multiple threads in parallel */

typedef struct buffer_s {
    void* start;
    size_t capacity;
} buffer_t;

static const buffer_t g_nullBuffer = { NULL, 0 };

typedef struct ZSTDMT_bufferPool_s {
    ZSTD_pthread_mutex_t poolMutex;
    size_t bufferSize;
    unsigned totalBuffers;
    unsigned nbBuffers;
    ZSTD_customMem cMem;
    buffer_t bTable[1];   /* variable size */
} ZSTDMT_bufferPool;

static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbThreads, ZSTD_customMem cMem)
{
    unsigned const maxNbBuffers = 2*nbThreads + 3;
    ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_calloc(
        sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem);
    if (bufPool==NULL) return NULL;
    if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) {
        ZSTD_free(bufPool, cMem);
        return NULL;
    }
    bufPool->bufferSize = 64 KB;
    bufPool->totalBuffers = maxNbBuffers;
    bufPool->nbBuffers = 0;
    bufPool->cMem = cMem;
    return bufPool;
}

static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
{
    unsigned u;
    DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool);
    if (!bufPool) return;   /* compatibility with free on NULL */
    for (u=0; u<bufPool->totalBuffers; u++) {
        DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->bTable[u].start);
        ZSTD_free(bufPool->bTable[u].start, bufPool->cMem);
    }
    ZSTD_pthread_mutex_destroy(&bufPool->poolMutex);
    ZSTD_free(bufPool, bufPool->cMem);
}

/* only works at initialization, not during compression */
static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool)
{
    size_t const poolSize = sizeof(*bufPool)
                          + (bufPool->totalBuffers - 1) * sizeof(buffer_t);
    unsigned u;
    size_t totalBufferSize = 0;
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
    for (u=0; u<bufPool->totalBuffers; u++)
        totalBufferSize += bufPool->bTable[u].capacity;
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);

    return poolSize + totalBufferSize;
}

/* ZSTDMT_setBufferSize() :
 * all future buffers provided by this buffer pool will have _at least_ this size
 * note : it's better for all buffers to have same size,
 * as they become freely interchangeable, reducing malloc/free usages and memory fragmentation */
static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const bSize)
{
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
    DEBUGLOG(4, "ZSTDMT_setBufferSize: bSize = %u", (U32)bSize);
    bufPool->bufferSize = bSize;
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
}
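
/* Illustrative sketch : a caller typically sizes the pool for one compressed job,
 * e.g. (hypothetical values) :
 *     ZSTDMT_setBufferSize(bufPool, ZSTD_compressBound(targetSectionSize));
 * after which ZSTDMT_getBuffer() hands out buffers of at least that capacity. */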

/** ZSTDMT_getBuffer() :
 *  assumption : bufPool must be valid
 * @return : a buffer, with start pointer and size
 *  note : allocation may fail, in this case, start==NULL and size==0 */
static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
{
    size_t const bSize = bufPool->bufferSize;
    DEBUGLOG(5, "ZSTDMT_getBuffer: bSize = %u", (U32)bufPool->bufferSize);
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
    if (bufPool->nbBuffers) {   /* try to use an existing buffer */
        buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)];
        size_t const availBufferSize = buf.capacity;
        bufPool->bTable[bufPool->nbBuffers] = g_nullBuffer;
        if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) {
            /* large enough, but not too much */
            DEBUGLOG(5, "ZSTDMT_getBuffer: provide buffer %u of size %u",
                        bufPool->nbBuffers, (U32)buf.capacity);
            ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
            return buf;
        }
        /* size conditions not respected : scratch this buffer, create new one */
        DEBUGLOG(5, "ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing");
        ZSTD_free(buf.start, bufPool->cMem);
    }
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
    /* create new buffer */
    DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer");
    {   buffer_t buffer;
        void* const start = ZSTD_malloc(bSize, bufPool->cMem);
        buffer.start = start;   /* note : start can be NULL if malloc fails ! */
        buffer.capacity = (start==NULL) ? 0 : bSize;
        if (start==NULL) {
            DEBUGLOG(5, "ZSTDMT_getBuffer: buffer allocation failure !!");
        } else {
            DEBUGLOG(5, "ZSTDMT_getBuffer: created buffer of size %u", (U32)bSize);
        }
        return buffer;
    }
}

/* store buffer for later re-use, up to pool capacity */
static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
{
    if (buf.start == NULL) return;   /* compatible with release on NULL */
    DEBUGLOG(5, "ZSTDMT_releaseBuffer");
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
    if (bufPool->nbBuffers < bufPool->totalBuffers) {
        bufPool->bTable[bufPool->nbBuffers++] = buf;   /* stored for later use */
        DEBUGLOG(5, "ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u",
                    (U32)buf.capacity, (U32)(bufPool->nbBuffers-1));
        ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
        return;
    }
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
    /* Reached bufferPool capacity (should not happen) */
    DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing");
    ZSTD_free(buf.start, bufPool->cMem);
}
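
/* Illustrative sketch of the intended get/release cycle (assuming a valid pool) :
 *     buffer_t const buf = ZSTDMT_getBuffer(bufPool);
 *     if (buf.start == NULL) { handle allocation failure }
 *     write up to buf.capacity bytes into buf.start
 *     ZSTDMT_releaseBuffer(bufPool, buf);   // stored for re-use, or freed if pool is full
 */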

/* =====   CCtx Pool   ===== */
/* a single CCtx Pool can be invoked from multiple threads in parallel */

typedef struct {
    ZSTD_pthread_mutex_t poolMutex;
    unsigned totalCCtx;
    unsigned availCCtx;
    ZSTD_customMem cMem;
    ZSTD_CCtx* cctx[1];   /* variable size */
} ZSTDMT_CCtxPool;

/* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */
static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
{
    unsigned u;
    for (u=0; u<pool->totalCCtx; u++)
        ZSTD_freeCCtx(pool->cctx[u]);   /* note : compatible with free on NULL */
    ZSTD_pthread_mutex_destroy(&pool->poolMutex);
    ZSTD_free(pool, pool->cMem);
}

/* ZSTDMT_createCCtxPool() :
 * implies nbThreads >= 1, checked by caller ZSTDMT_createCCtx() */
static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(unsigned nbThreads,
                                              ZSTD_customMem cMem)
{
    ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_calloc(
        sizeof(ZSTDMT_CCtxPool) + (nbThreads-1)*sizeof(ZSTD_CCtx*), cMem);
    if (!cctxPool) return NULL;
    if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) {
        ZSTD_free(cctxPool, cMem);
        return NULL;
    }
    cctxPool->cMem = cMem;
    cctxPool->totalCCtx = nbThreads;
    cctxPool->availCCtx = 1;   /* at least one cctx for single-thread mode */
    cctxPool->cctx[0] = ZSTD_createCCtx_advanced(cMem);
    if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; }
    DEBUGLOG(3, "cctxPool created, with %u threads", nbThreads);
    return cctxPool;
}

/* only works during initialization phase, not during compression */
static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
{
    ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
    {   unsigned const nbThreads = cctxPool->totalCCtx;
        size_t const poolSize = sizeof(*cctxPool)
                                + (nbThreads-1)*sizeof(ZSTD_CCtx*);
        unsigned u;
        size_t totalCCtxSize = 0;
        for (u=0; u<nbThreads; u++) {
            totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]);
        }
        ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
        return poolSize + totalCCtxSize;
    }
}

static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool)
{
    DEBUGLOG(5, "ZSTDMT_getCCtx");
    ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
    if (cctxPool->availCCtx) {
        cctxPool->availCCtx--;
        {   ZSTD_CCtx* const cctx = cctxPool->cctx[cctxPool->availCCtx];
            ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
            return cctx;
    }   }
    ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
    DEBUGLOG(5, "create one more CCtx");
    return ZSTD_createCCtx_advanced(cctxPool->cMem);   /* note : can be NULL, when creation fails ! */
}

static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)
{
    if (cctx==NULL) return;   /* compatibility with release on NULL */
    ZSTD_pthread_mutex_lock(&pool->poolMutex);
    if (pool->availCCtx < pool->totalCCtx)
        pool->cctx[pool->availCCtx++] = cctx;
    else {
        /* pool overflow : should not happen, since totalCCtx==nbThreads */
        DEBUGLOG(5, "CCtx pool overflow : free cctx");
        ZSTD_freeCCtx(cctx);
    }
    ZSTD_pthread_mutex_unlock(&pool->poolMutex);
}
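
/* Illustrative sketch : a worker borrows a context for the duration of one job :
 *     ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(cctxPool);   // may create a fresh one
 *     if (cctx == NULL) { handle allocation failure }
 *     compress with cctx
 *     ZSTDMT_releaseCCtx(cctxPool, cctx);   // returned to the pool, or freed on overflow
 */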

/* ------------------------------------------ */
/* =====          Worker thread          ===== */
/* ------------------------------------------ */

typedef struct {
    size_t   consumed;                 /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */
    size_t   cSize;                    /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */
    ZSTD_pthread_mutex_t job_mutex;    /* Thread-safe - used by mtctx and worker */
    ZSTD_pthread_cond_t job_cond;      /* Thread-safe - used by mtctx and worker */
    ZSTDMT_CCtxPool* cctxPool;         /* Thread-safe - used by mtctx and (all) workers */
    ZSTDMT_bufferPool* bufPool;        /* Thread-safe - used by mtctx and (all) workers */
    buffer_t dstBuff;                  /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */
    buffer_t srcBuff;                  /* set by mtctx, then released by worker => no barrier */
    const void* prefixStart;           /* set by mtctx, then read and set0 by worker => no barrier */
    size_t   prefixSize;               /* set by mtctx, then read by worker => no barrier */
    size_t   srcSize;                  /* set by mtctx, then read by worker & mtctx => no barrier */
    unsigned firstChunk;               /* set by mtctx, then read by worker => no barrier */
    unsigned lastChunk;                /* set by mtctx, then read by worker => no barrier */
    ZSTD_CCtx_params params;           /* set by mtctx, then read by worker => no barrier */
    const ZSTD_CDict* cdict;           /* set by mtctx, then read by worker => no barrier */
    unsigned long long fullFrameSize;  /* set by mtctx, then read by worker => no barrier */
    size_t   dstFlushed;               /* used only by mtctx */
    unsigned frameChecksumNeeded;      /* used only by mtctx */
} ZSTDMT_jobDescription;
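
/* Illustrative note : the SHARED fields above (consumed, cSize) are only touched
 * under job_mutex once the worker has started. A minimal reader, mirroring
 * ZSTDMT_waitForAllJobsCompleted() below, looks like :
 *     ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
 *     while (job->consumed < job->srcSize)
 *         ZSTD_pthread_cond_wait(&job->job_cond, &job->job_mutex);   // wait for worker progress
 *     ZSTD_pthread_mutex_unlock(&job->job_mutex);
 */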

/* ZSTDMT_compressChunk() is a POOL_function type */
void ZSTDMT_compressChunk(void* jobDescription)
{
    ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
    ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool);
    const void* const src = (const char*)job->prefixStart + job->prefixSize;
    buffer_t dstBuff = job->dstBuff;

    /* resources */
    if (cctx==NULL) {
        job->cSize = ERROR(memory_allocation);
        goto _endJob;
    }
    if (dstBuff.start == NULL) {   /* streaming job : doesn't provide a dstBuffer */
        dstBuff = ZSTDMT_getBuffer(job->bufPool);
        if (dstBuff.start==NULL) {
            job->cSize = ERROR(memory_allocation);
            goto _endJob;
        }
        job->dstBuff = dstBuff;   /* this value can be read in ZSTDMT_flush, when it copies the whole job */
    }

    /* init */
    if (job->cdict) {
        size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dm_auto, job->cdict, job->params, job->fullFrameSize);
        assert(job->firstChunk);   /* only allowed for first job */
        if (ZSTD_isError(initError)) { job->cSize = initError; goto _endJob; }
    } else {   /* srcStart points at reloaded section */
        U64 const pledgedSrcSize = job->firstChunk ? job->fullFrameSize : job->srcSize;
        ZSTD_CCtx_params jobParams = job->params;   /* do not modify job->params ! copy it, modify the copy */
        {   size_t const forceWindowError = ZSTD_CCtxParam_setParameter(&jobParams, ZSTD_p_forceMaxWindow, !job->firstChunk);
            if (ZSTD_isError(forceWindowError)) {
                job->cSize = forceWindowError;
                goto _endJob;
        }   }
        {   size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
                        job->prefixStart, job->prefixSize, ZSTD_dm_rawContent,   /* load dictionary in "content-only" mode (no header analysis) */
                        NULL,   /*cdict*/
                        jobParams, pledgedSrcSize);
            if (ZSTD_isError(initError)) {
                job->cSize = initError;
                goto _endJob;
    }   }   }
    if (!job->firstChunk) {   /* flush and overwrite frame header when it's not first job */
        size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.capacity, src, 0);
        if (ZSTD_isError(hSize)) { job->cSize = hSize; /* save error code */ goto _endJob; }
        DEBUGLOG(5, "ZSTDMT_compressChunk: flush and overwrite %u bytes of frame header (not first chunk)", (U32)hSize);
        ZSTD_invalidateRepCodes(cctx);
    }

    /* compress */
    {   size_t const blockSize = ZSTD_BLOCKSIZE_MAX;
        int const nbBlocks = (int)((job->srcSize + (blockSize-1)) / blockSize);
        const BYTE* ip = (const BYTE*) src;
        BYTE* const ostart = (BYTE*)dstBuff.start;
        BYTE* op = ostart;
        BYTE* oend = op + dstBuff.capacity;
        int blockNb;
        if (sizeof(size_t) > sizeof(int)) assert(job->srcSize < ((size_t)INT_MAX) * blockSize);   /* check overflow */
        DEBUGLOG(5, "ZSTDMT_compressChunk: compress %u bytes in %i blocks", (U32)job->srcSize, nbBlocks);
        assert(job->cSize == 0);
        for (blockNb = 1; blockNb < nbBlocks; blockNb++) {
            size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, blockSize);
            if (ZSTD_isError(cSize)) { job->cSize = cSize; goto _endJob; }
            ip += blockSize;
            op += cSize; assert(op < oend);
            /* stats */
            ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);   /* note : it's a mtctx mutex */
            job->cSize += cSize;
            job->consumed = blockSize * blockNb;
            DEBUGLOG(5, "ZSTDMT_compressChunk: compress new block : cSize==%u bytes (total: %u)",
                        (U32)cSize, (U32)job->cSize);
            ZSTD_pthread_cond_signal(&job->job_cond);   /* warns some more data is ready to be flushed */
            ZSTD_pthread_mutex_unlock(&job->job_mutex);
        }
        /* last block */
        assert(blockSize > 0); assert((blockSize & (blockSize - 1)) == 0);   /* blockSize must be power of 2 for mask==(blockSize-1) to work */
        if ((nbBlocks > 0) | job->lastChunk /* must output a "last block" flag */ ) {
            size_t const lastBlockSize1 = job->srcSize & (blockSize-1);
            size_t const lastBlockSize = ((lastBlockSize1==0) & (job->srcSize>=blockSize)) ? blockSize : lastBlockSize1;
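            /* worked example : blockSize==128 KB==131072, so srcSize==300000 gives
             * lastBlockSize1 == 300000 & 131071 == 37856 ; an exact multiple, such as
             * srcSize==262144, gives lastBlockSize1==0, so the full blockSize is used instead */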
            size_t const cSize = (job->lastChunk) ?
                 ZSTD_compressEnd     (cctx, op, oend-op, ip, lastBlockSize) :
                 ZSTD_compressContinue(cctx, op, oend-op, ip, lastBlockSize);
            if (ZSTD_isError(cSize)) { job->cSize = cSize; goto _endJob; }
            /* stats */
            ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
            job->cSize += cSize;
            ZSTD_pthread_mutex_unlock(&job->job_mutex);
    }   }

_endJob:
    /* release resources */
    ZSTDMT_releaseCCtx(job->cctxPool, cctx);
    ZSTDMT_releaseBuffer(job->bufPool, job->srcBuff);
    job->srcBuff = g_nullBuffer; job->prefixStart = NULL;
    /* report */
    ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
    job->consumed = job->srcSize;
    ZSTD_pthread_cond_signal(&job->job_cond);
    ZSTD_pthread_mutex_unlock(&job->job_mutex);
}

/* ------------------------------------------ */
/* =====   Multi-threaded compression   ===== */
/* ------------------------------------------ */

typedef struct {
  buffer_t buffer;
  size_t targetCapacity;   /* note : buffers provided by the pool may be larger than target capacity */
  size_t prefixSize;
  size_t filled;
} inBuff_t;

struct ZSTDMT_CCtx_s {
    POOL_ctx* factory;
    ZSTDMT_jobDescription* jobs;
    ZSTDMT_bufferPool* bufPool;
    ZSTDMT_CCtxPool* cctxPool;
    ZSTD_CCtx_params params;
    size_t targetSectionSize;
    size_t targetPrefixSize;
    inBuff_t inBuff;
    int jobReady;   /* 1 => one job is already prepared, but pool has shortage of workers. Don't create another one. */
    XXH64_state_t xxhState;
    unsigned singleBlockingThread;
    unsigned jobIDMask;
    unsigned doneJobID;
    unsigned nextJobID;
    unsigned frameEnded;
    unsigned allJobsCompleted;
    unsigned long long frameContentSize;
    unsigned long long consumed;
    unsigned long long produced;
    ZSTD_customMem cMem;
    ZSTD_CDict* cdictLocal;
    const ZSTD_CDict* cdict;
};

static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem)
{
    U32 jobNb;
    if (jobTable == NULL) return;
    for (jobNb=0; jobNb<nbJobs; jobNb++) {
        ZSTD_pthread_mutex_destroy(&jobTable[jobNb].job_mutex);
        ZSTD_pthread_cond_destroy(&jobTable[jobNb].job_cond);
    }
    ZSTD_free(jobTable, cMem);
}

/* ZSTDMT_createJobsTable() :
 * allocate and init a job table.
 * update *nbJobsPtr to next power of 2 value, as size of table */
static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_customMem cMem)
{
    U32 const nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1;
    U32 const nbJobs = 1 << nbJobsLog2;
    U32 jobNb;
    ZSTDMT_jobDescription* const jobTable = (ZSTDMT_jobDescription*) ZSTD_calloc(
                nbJobs * sizeof(ZSTDMT_jobDescription), cMem);
    int initError = 0;
    if (jobTable==NULL) return NULL;
    *nbJobsPtr = nbJobs;
    for (jobNb=0; jobNb<nbJobs; jobNb++) {
        initError |= ZSTD_pthread_mutex_init(&jobTable[jobNb].job_mutex, NULL);
        initError |= ZSTD_pthread_cond_init(&jobTable[jobNb].job_cond, NULL);
    }
    if (initError != 0) {
        ZSTDMT_freeJobsTable(jobTable, nbJobs, cMem);
        return NULL;
    }
    return jobTable;
}
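
/* Illustrative note : ZSTD_highbit32(n) returns the position of the highest set bit,
 * so e.g. *nbJobsPtr==6 gives nbJobsLog2 == 2+1 == 3, and the table is rounded up to
 * nbJobs==8 ; a power-of-2 size lets job IDs wrap around with a simple mask
 * (jobIDMask == nbJobs-1). */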

/* ZSTDMT_CCtxParam_setNbThreads():
 * Internal use only */
size_t ZSTDMT_CCtxParam_setNbThreads(ZSTD_CCtx_params* params, unsigned nbThreads)
{
    if (nbThreads > ZSTDMT_NBTHREADS_MAX) nbThreads = ZSTDMT_NBTHREADS_MAX;
    if (nbThreads < 1) nbThreads = 1;
    params->nbThreads = nbThreads;
    params->overlapSizeLog = ZSTDMT_OVERLAPLOG_DEFAULT;
    params->jobSize = 0;
    return nbThreads;
}
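
/* Illustrative sketch : out-of-range values are clamped, and the effective value
 * is returned, e.g. :
 *     ZSTD_CCtx_params params;
 *     size_t const effectiveNb = ZSTDMT_CCtxParam_setNbThreads(&params, 0);
 *     assert(effectiveNb == 1);   // 0 is clamped up to 1
 */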

ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbThreads, ZSTD_customMem cMem)
{
    ZSTDMT_CCtx* mtctx;
    U32 nbJobs = nbThreads + 2;
    DEBUGLOG(3, "ZSTDMT_createCCtx_advanced (nbThreads = %u)", nbThreads);

    if (nbThreads < 1) return NULL;
    nbThreads = MIN(nbThreads, ZSTDMT_NBTHREADS_MAX);
    if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL))
        /* invalid custom allocator */
        return NULL;

    mtctx = (ZSTDMT_CCtx*) ZSTD_calloc(sizeof(ZSTDMT_CCtx), cMem);
    if (!mtctx) return NULL;
    ZSTDMT_CCtxParam_setNbThreads(&mtctx->params, nbThreads);
    mtctx->cMem = cMem;
    mtctx->allJobsCompleted = 1;
    mtctx->factory = POOL_create_advanced(nbThreads, 0, cMem);
    mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem);
    assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0);   /* ensure nbJobs is a power of 2 */
    mtctx->jobIDMask = nbJobs - 1;
    mtctx->bufPool = ZSTDMT_createBufferPool(nbThreads, cMem);
    mtctx->cctxPool = ZSTDMT_createCCtxPool(nbThreads, cMem);
    if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool) {
        ZSTDMT_freeCCtx(mtctx);
        return NULL;
    }
    DEBUGLOG(3, "mt_cctx created, for %u threads", nbThreads);
    return mtctx;
}

ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbThreads)
{
    return ZSTDMT_createCCtx_advanced(nbThreads, ZSTD_defaultCMem);
}
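
/* Illustrative lifecycle sketch (ZSTDMT_compressCCtx() is declared in zstdmt_compress.h) :
 *     ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(4);   // 4 worker threads
 *     if (mtctx != NULL) {
 *         size_t const cSize = ZSTDMT_compressCCtx(mtctx, dst, dstCapacity, src, srcSize, 3);
 *         // check ZSTD_isError(cSize) ; mtctx can then be re-used or released
 *         ZSTDMT_freeCCtx(mtctx);
 *     }
 */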

/* ZSTDMT_releaseAllJobResources() :
 * note : ensure all workers are killed first ! */
static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
{
    unsigned jobID;
    DEBUGLOG(3, "ZSTDMT_releaseAllJobResources");
    for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {
        DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start);
        ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
        mtctx->jobs[jobID].dstBuff = g_nullBuffer;
        mtctx->jobs[jobID].cSize = 0;
        DEBUGLOG(4, "job%02u: release src address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].srcBuff.start);
        ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].srcBuff);
        mtctx->jobs[jobID].srcBuff = g_nullBuffer;
    }
    memset(mtctx->jobs, 0, (mtctx->jobIDMask+1)*sizeof(ZSTDMT_jobDescription));
    DEBUGLOG(4, "input: release address %08X", (U32)(size_t)mtctx->inBuff.buffer.start);
    ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->inBuff.buffer);
    mtctx->inBuff.buffer = g_nullBuffer;
    mtctx->allJobsCompleted = 1;
}

static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx)
{
    DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted");
    while (mtctx->doneJobID < mtctx->nextJobID) {
        unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask;
        ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
        while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].srcSize) {
            DEBUGLOG(5, "waiting for jobCompleted signal from chunk %u", mtctx->doneJobID);   /* we want to block when waiting for data to flush */
            ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
        }
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
        mtctx->doneJobID++;
    }
}

size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
{
    if (mtctx==NULL) return 0;   /* compatible with free on NULL */
    POOL_free(mtctx->factory);   /* stop and free worker threads */
    ZSTDMT_releaseAllJobResources(mtctx);   /* release job resources into pools first */
    ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
    ZSTDMT_freeBufferPool(mtctx->bufPool);
    ZSTDMT_freeCCtxPool(mtctx->cctxPool);
    ZSTD_freeCDict(mtctx->cdictLocal);
    ZSTD_free(mtctx, mtctx->cMem);
    return 0;
}

size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx)
{
    if (mtctx == NULL) return 0;   /* supports sizeof NULL */
    return sizeof(*mtctx)
            + POOL_sizeof(mtctx->factory)
            + ZSTDMT_sizeof_bufferPool(mtctx->bufPool)
            + (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription)
            + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool)
            + ZSTD_sizeof_CDict(mtctx->cdictLocal);
}

/* Internal only */
size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params,
                                ZSTDMT_parameter parameter, unsigned value) {
    DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter");
    switch(parameter)
    {
    case ZSTDMT_p_jobSize :
        DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter : set jobSize to %u", value);
        if ( (value > 0)   /* value==0 => automatic job size */
           & (value < ZSTDMT_JOBSIZE_MIN) )
            value = ZSTDMT_JOBSIZE_MIN;
        params->jobSize = value;
        return value;

    case ZSTDMT_p_overlapSectionLog :
        if (value > 9) value = 9;
2017-08-31 18:25:56 -07:00
        DEBUGLOG(4, "ZSTDMT_p_overlapSectionLog : %u", value);
2017-08-25 13:14:51 -07:00
        params->overlapSizeLog = (value >= 9) ? 9 : value;
2017-11-16 12:18:56 -08:00
        return value;
2017-01-24 17:02:26 -08:00
    default :
2017-07-13 17:12:16 -07:00
        return ERROR(parameter_unsupported);
2017-01-24 17:02:26 -08:00
}
}
2017-08-25 16:13:40 -07:00
size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, unsigned value)
2017-08-25 13:14:51 -07:00
{
2017-12-12 16:20:51 -08:00
    DEBUGLOG(4, "ZSTDMT_setMTCtxParameter");
2017-08-25 13:14:51 -07:00
    switch(parameter)
    {
2017-12-12 16:20:51 -08:00
    case ZSTDMT_p_jobSize :
2017-08-25 13:14:51 -07:00
        return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value);
    case ZSTDMT_p_overlapSectionLog :
        return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value);
    default :
        return ERROR(parameter_unsupported);
    }
}
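/* Illustrative usage sketch (not part of the library; assumes the public
 * ZSTDMT API declared in zstdmt_compress.h, error checks elided) :
 * tuning job size and overlap on a multi-threading context before use.
 *
 *   ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(4);           // 4 worker threads
 *   ZSTDMT_setMTCtxParameter(mtctx, ZSTDMT_p_jobSize, 1 MB);   // arbitrary example value
 *   ZSTDMT_setMTCtxParameter(mtctx, ZSTDMT_p_overlapSectionLog, 6);
 *   ...
 *   ZSTDMT_freeCCtx(mtctx);
 */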
2017-01-24 17:02:26 -08:00
2018-01-26 17:08:58 -08:00
/* Sets parameters relevant to the compression job, initializing others to
 * default values. Notably, nbThreads should probably be zero. */
static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
{
    ZSTD_CCtx_params jobParams;
    memset(&jobParams, 0, sizeof(jobParams));
    jobParams.cParams = params.cParams;
    jobParams.fParams = params.fParams;
    jobParams.compressionLevel = params.compressionLevel;
    jobParams.ldmParams = params.ldmParams;
    return jobParams;
}
2018-01-17 16:39:02 -08:00
/* ZSTDMT_getNbThreads():
 * @return nb threads currently active in mtctx.
 * mtctx must be valid */
unsigned ZSTDMT_getNbThreads(const ZSTDMT_CCtx* mtctx)
{
    assert(mtctx != NULL);
    return mtctx->params.nbThreads;
}
/* ZSTDMT_getFrameProgression():
 * tells how much data has been consumed (input) and produced (output) for current frame.
 * able to count progression inside worker threads.
2018-01-18 11:15:23 -08:00
 * Note : mutex will be acquired during statistics collection. */
2018-01-17 16:39:02 -08:00
ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx)
{
2018-01-26 13:00:14 -08:00
    ZSTD_frameProgression fps;
2018-01-19 10:01:40 -08:00
    DEBUGLOG(6, "ZSTDMT_getFrameProgression");
2018-01-26 13:00:14 -08:00
    fps.consumed = mtctx->consumed;
    fps.produced = mtctx->produced;
    assert(mtctx->inBuff.filled >= mtctx->inBuff.prefixSize);
    fps.ingested = mtctx->consumed + (mtctx->inBuff.filled - mtctx->inBuff.prefixSize);
2018-01-17 16:39:02 -08:00
    {   unsigned jobNb;
2018-01-19 10:01:40 -08:00
        unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1);
        DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)",
                    mtctx->doneJobID, lastJobNb, mtctx->jobReady);
        for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) {
2018-01-17 16:39:02 -08:00
            unsigned const wJobID = jobNb & mtctx->jobIDMask;
2018-01-26 17:48:33 -08:00
            ZSTD_pthread_mutex_lock(&mtctx->jobs[wJobID].job_mutex);
            {   size_t const cResult = mtctx->jobs[wJobID].cSize;
                size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
                fps.consumed += mtctx->jobs[wJobID].consumed;
                fps.ingested += mtctx->jobs[wJobID].srcSize;
                fps.produced += produced;
            }
            ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
2018-01-17 16:39:02 -08:00
        }
    }
2018-01-26 13:00:14 -08:00
    return fps;
2018-01-17 16:39:02 -08:00
}
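/* Illustrative sketch (assumes only the declaration above; the polling loop
 * is hypothetical caller code) : since statistics collection only holds each
 * job mutex briefly, a monitoring thread may poll progression periodically.
 *
 *   ZSTD_frameProgression const fp = ZSTDMT_getFrameProgression(mtctx);
 *   printf("ingested:%llu consumed:%llu produced:%llu \n",
 *          fp.ingested, fp.consumed, fp.produced);
 */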
2017-01-24 17:02:26 -08:00
/* ------------------------------------------ */
/* ===== Multi-threaded compression ===== */
/* ------------------------------------------ */
2016-12-30 21:04:25 -08:00
2018-01-16 15:28:43 -08:00
static unsigned ZSTDMT_computeNbChunks(size_t srcSize, unsigned windowLog, unsigned nbThreads) {
    assert(nbThreads > 0);
    {   size_t const chunkSizeTarget = (size_t)1 << (windowLog + 2);
        size_t const chunkMaxSize = chunkSizeTarget << 2;
        size_t const passSizeMax = chunkMaxSize * nbThreads;
        unsigned const multiplier = (unsigned)(srcSize / passSizeMax) + 1;
        unsigned const nbChunksLarge = multiplier * nbThreads;
        unsigned const nbChunksMax = (unsigned)(srcSize / chunkSizeTarget) + 1;
        unsigned const nbChunksSmall = MIN(nbChunksMax, nbThreads);
        return (multiplier > 1) ? nbChunksLarge : nbChunksSmall;
}   }
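/* Worked example (illustrative numbers) : with srcSize = 100 MB, windowLog = 23
 * and nbThreads = 4 : chunkSizeTarget = 32 MB, chunkMaxSize = 128 MB,
 * passSizeMax = 512 MB, hence multiplier == 1 and the "small source" path is
 * taken : nbChunks = MIN(100/32 + 1, 4) = 4, i.e. one chunk per thread. */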
2017-07-03 16:23:36 -07:00
2018-01-16 15:28:43 -08:00
/* ZSTDMT_compress_advanced_internal() :
 * This is a blocking function : it will only give back control to caller after finishing its compression job.
 */
2017-08-22 14:24:47 -07:00
static size_t ZSTDMT_compress_advanced_internal(
2017-08-18 16:17:24 -07:00
                ZSTDMT_CCtx* mtctx,
                void* dst, size_t dstCapacity,
          const void* src, size_t srcSize,
          const ZSTD_CDict* cdict,
2017-08-25 13:14:51 -07:00
                ZSTD_CCtx_params const params)
2016-12-26 22:19:36 -08:00
{
2018-01-16 15:28:43 -08:00
    ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(params);
2017-08-25 13:14:51 -07:00
    unsigned const overlapRLog = (params.overlapSizeLog > 9) ? 0 : 9 - params.overlapSizeLog;
    size_t const overlapSize = (overlapRLog >= 9) ? 0 : (size_t)1 << (params.cParams.windowLog - overlapRLog);
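    /* Numerical example (illustrative) : overlapSizeLog = 6 with windowLog = 23
     * gives overlapRLog = 3, hence overlapSize = 1 << 20 = 1 MB of overlap
     * between consecutive chunks. */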
2018-01-16 15:28:43 -08:00
    unsigned nbChunks = ZSTDMT_computeNbChunks(srcSize, params.cParams.windowLog, params.nbThreads);
2017-01-11 16:25:46 -08:00
    size_t const proposedChunkSize = (srcSize + (nbChunks-1)) / nbChunks;
2017-12-12 14:01:54 -08:00
    size_t const avgChunkSize = (((proposedChunkSize-1) & 0x1FFFF) < 0x7FFF) ? proposedChunkSize + 0xFFFF : proposedChunkSize;   /* avoid too small last block */
2016-12-26 22:19:36 -08:00
    const char* const srcStart = (const char*)src;
2017-06-30 15:44:57 -07:00
    size_t remainingSrcSize = srcSize;
2017-03-31 18:27:03 -07:00
    unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ? nbChunks : (unsigned)(dstCapacity / ZSTD_compressBound(avgChunkSize));   /* presumes avgChunkSize >= 256 KB, which should be the case */
    size_t frameStartPos = 0, dstBufferPos = 0;
2017-07-11 17:18:26 -07:00
    XXH64_state_t xxh64;
2017-08-25 13:14:51 -07:00
    assert(jobParams.nbThreads == 0);
2017-08-25 13:23:16 -07:00
    assert(mtctx->cctxPool->totalCCtx == params.nbThreads);
2016-12-30 21:04:25 -08:00
2018-01-16 15:28:43 -08:00
    DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: nbChunks=%2u (rawSize=%u bytes; fixedSize=%u)",
2017-12-12 14:01:54 -08:00
                nbChunks, (U32)proposedChunkSize, (U32)avgChunkSize);
2018-01-16 15:28:43 -08:00
    if ((nbChunks==1) | (params.nbThreads<=1)) {   /* fallback to single-thread mode : this is a blocking invocation anyway */
2017-01-23 01:43:58 -08:00
        ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0];
2017-08-25 11:36:17 -07:00
        if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, jobParams.fParams);
        return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, jobParams);
2017-01-23 00:56:54 -08:00
    }
2018-01-16 15:28:43 -08:00
    assert(avgChunkSize >= 256 KB);   /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), required to compress directly into Dst (no additional buffer) */
2017-07-11 15:17:25 -07:00
    ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgChunkSize));
2017-07-11 17:18:26 -07:00
    XXH64_reset(&xxh64, 0);
2017-01-23 00:56:54 -08:00
2017-06-30 15:44:57 -07:00
    if (nbChunks > mtctx->jobIDMask+1) {   /* enlarge job table */
        U32 nbJobs = nbChunks;
2018-01-26 17:48:33 -08:00
        ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
2017-06-30 15:44:57 -07:00
        mtctx->jobIDMask = 0;
2018-01-26 17:48:33 -08:00
        mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem);
2017-06-30 15:44:57 -07:00
        if (mtctx->jobs==NULL) return ERROR(memory_allocation);
2018-01-16 15:28:43 -08:00
        assert((nbJobs != 0) && ((nbJobs & (nbJobs - 1)) == 0));   /* ensure nbJobs is a power of 2 */
2017-06-30 15:44:57 -07:00
        mtctx->jobIDMask = nbJobs - 1;
    }
2016-12-26 22:19:36 -08:00
    {   unsigned u;
2017-01-11 16:25:46 -08:00
        for (u=0; u<nbChunks; u++) {
            size_t const chunkSize = MIN(remainingSrcSize, avgChunkSize);
2017-03-31 18:27:03 -07:00
            size_t const dstBufferCapacity = ZSTD_compressBound(chunkSize);
            buffer_t const dstAsBuffer = { (char*)dst + dstBufferPos, dstBufferCapacity };
2017-07-11 14:59:10 -07:00
            buffer_t const dstBuffer = u < compressWithinDst ? dstAsBuffer : g_nullBuffer;
2017-03-30 15:51:58 -07:00
            size_t dictSize = u ? overlapSize : 0;
2017-01-11 17:01:28 -08:00
2018-01-26 10:44:09 -08:00
            mtctx->jobs[u].srcBuff = g_nullBuffer;
            mtctx->jobs[u].prefixStart = srcStart + frameStartPos - dictSize;
2017-12-12 14:01:54 -08:00
            mtctx->jobs[u].prefixSize = dictSize;
2018-01-25 17:35:49 -08:00
            mtctx->jobs[u].srcSize = chunkSize; assert(chunkSize > 0);   /* avoid job.srcSize == 0 */
2018-01-17 17:18:19 -08:00
            mtctx->jobs[u].consumed = 0;
            mtctx->jobs[u].cSize = 0;
2017-10-13 18:32:06 -07:00
            mtctx->jobs[u].cdict = (u==0) ? cdict : NULL;
2017-01-11 09:21:25 -08:00
            mtctx->jobs[u].fullFrameSize = srcSize;
2017-08-25 11:36:17 -07:00
            mtctx->jobs[u].params = jobParams;
2017-06-30 14:51:01 -07:00
            /* do not calculate checksum within sections, but write it in header for first section */
2017-06-30 15:44:57 -07:00
            if (u!=0) mtctx->jobs[u].params.fParams.checksumFlag = 0;
2016-12-30 21:04:25 -08:00
            mtctx->jobs[u].dstBuff = dstBuffer;
2017-07-10 16:30:55 -07:00
            mtctx->jobs[u].cctxPool = mtctx->cctxPool;
2017-07-11 14:59:10 -07:00
            mtctx->jobs[u].bufPool = mtctx->bufPool;
2017-01-11 16:25:46 -08:00
            mtctx->jobs[u].firstChunk = (u==0);
            mtctx->jobs[u].lastChunk = (u==nbChunks-1);
2016-12-30 21:04:25 -08:00
2017-08-25 13:14:51 -07:00
            if (params.fParams.checksumFlag) {
2017-07-11 17:18:26 -07:00
                XXH64_update(&xxh64, srcStart + frameStartPos, chunkSize);
            }
fix a subtle issue in continue mode
The deep fuzzer tests caught a subtle bug that was probably there for a long time.
The impact of the bug is not a crash, or any other clear error signal,
rather, it reduces performance, by cutting data into smaller blocks.
Eventually, the following test would fail because it produces too many 1-byte blocks,
requiring more space than the buffer can provide :
`./zstreamtest_asan --mt -s3514 -t1678312 -i1678314`
The root scenario is as follows :
- Create a context, initialize it using explicit parameters or a `cdict` to pin them down, set `pledgedSrcSize=1`
- The compression parameters will not be adapted, but `windowSize` and `blockSize` will be automatically set to `1`.
  `windowSize` and `blockSize` are dynamic values, set within `ZSTD_resetCCtx_internal()`.
  The automatic adaptation makes it possible to generate smaller contexts for smaller input sizes.
- Complete the compression
- Start a new compression with the same context, using the same parameters, but `pledgedSrcSize=ZSTD_CONTENTSIZE_UNKNOWN`,
  triggering "continue mode"
- Continue mode doesn't modify blockSize, because it used to depend on `windowLog` only,
  but in fact, it also depends on `pledgedSrcSize`.
- The "old" blockSize (1) is still there :
  the next compression will use this value to cut input into blocks,
  resulting in more blocks and worse performance than necessary.
Given the scenario, and its possible variants, I'm surprised it did not show up before.
But I suspect it did show up, it's just that it never triggered an error, because "worse performance" is not a trigger.
The above test is a special corner case, where performance is so impacted that it reaches an error case.
The fix works, but I'm not completely pleased.
I think the current code relies too much on implied relations between variables.
This will likely break again in the future when some related part of the code changes.
Unfortunately, there is no time to make larger changes if we want to keep the release target for zstd v1.3.3.
So a longer-term fix will have to be considered after the release.
To do : create a reliable test case which triggers this scenario for CI tests.
2017-12-19 00:43:03 -08:00
            DEBUGLOG(5, "ZSTDMT_compress_advanced_internal: posting job %u (%u bytes)", u, (U32)chunkSize);
2018-01-26 10:44:09 -08:00
            DEBUG_PRINTHEX(6, mtctx->jobs[u].prefixStart, 12);
2017-01-11 16:25:46 -08:00
            POOL_add(mtctx->factory, ZSTDMT_compressChunk, &mtctx->jobs[u]);
2016-12-30 21:04:25 -08:00
2017-01-11 16:25:46 -08:00
            frameStartPos += chunkSize;
2017-03-31 18:27:03 -07:00
            dstBufferPos += dstBufferCapacity;
2017-01-11 16:25:46 -08:00
            remainingSrcSize -= chunkSize;
2016-12-26 22:19:36 -08:00
    }   }
2016-12-30 21:04:25 -08:00
2017-07-03 15:52:19 -07:00
    /* collect result */
2017-07-11 17:18:26 -07:00
    {   size_t error = 0, dstPos = 0;
        unsigned chunkID;
2017-01-11 16:25:46 -08:00
        for (chunkID=0; chunkID<nbChunks; chunkID++) {
2017-06-19 18:25:35 -07:00
            DEBUGLOG(5, "waiting for chunk %u ", chunkID);
2018-01-26 17:48:33 -08:00
            ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[chunkID].job_mutex);
2018-01-25 17:35:49 -08:00
            while (mtctx->jobs[chunkID].consumed < mtctx->jobs[chunkID].srcSize) {
2017-06-19 18:25:35 -07:00
                DEBUGLOG(5, "waiting for jobCompleted signal from chunk %u", chunkID);
2018-01-26 17:48:33 -08:00
                ZSTD_pthread_cond_wait(&mtctx->jobs[chunkID].job_cond, &mtctx->jobs[chunkID].job_mutex);
2016-12-30 21:04:25 -08:00
            }
2018-01-26 17:48:33 -08:00
            ZSTD_pthread_mutex_unlock(&mtctx->jobs[chunkID].job_mutex);
2017-06-19 18:25:35 -07:00
            DEBUGLOG(5, "ready to write chunk %u ", chunkID);
2017-01-01 08:31:33 -08:00
2018-01-26 10:44:09 -08:00
            mtctx->jobs[chunkID].prefixStart = NULL;
2017-01-11 16:25:46 -08:00
            {   size_t const cSize = mtctx->jobs[chunkID].cSize;
2017-01-11 18:06:35 -08:00
                if (ZSTD_isError(cSize)) error = cSize;
                if ((!error) && (dstPos + cSize > dstCapacity)) error = ERROR(dstSize_tooSmall);
2017-07-03 15:52:19 -07:00
                if (chunkID) {   /* note : chunk 0 is written directly at dst, which is correct position */
2017-03-31 18:27:03 -07:00
                    if (!error)
2017-07-03 15:52:19 -07:00
                        memmove((char*)dst + dstPos, mtctx->jobs[chunkID].dstBuff.start, cSize);   /* may overlap when chunk compressed within dst */
                    if (chunkID >= compressWithinDst) {   /* chunk compressed into its own buffer, which must be released */
2017-06-30 14:51:01 -07:00
                        DEBUGLOG(5, "releasing buffer %u>=%u", chunkID, compressWithinDst);
2017-07-11 14:59:10 -07:00
                        ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[chunkID].dstBuff);
2017-09-28 23:01:31 -07:00
                }   }
                mtctx->jobs[chunkID].dstBuff = g_nullBuffer;
2018-01-26 14:35:54 -08:00
                mtctx->jobs[chunkID].cSize = 0;
2016-12-30 21:04:25 -08:00
                dstPos += cSize;
            }
2017-07-11 14:59:10 -07:00
        }   /* for (chunkID=0; chunkID<nbChunks; chunkID++) */
2017-07-11 17:18:26 -07:00
2017-08-25 13:14:51 -07:00
        DEBUGLOG(4, "checksumFlag : %u ", params.fParams.checksumFlag);
        if (params.fParams.checksumFlag) {
2017-07-11 17:18:26 -07:00
            U32 const checksum = (U32)XXH64_digest(&xxh64);
            if (dstPos + 4 > dstCapacity) {
                error = ERROR(dstSize_tooSmall);
            } else {
                DEBUGLOG(4, "writing checksum : %08X \n", checksum);
                MEM_writeLE32((char*)dst + dstPos, checksum);
                dstPos += 4;
        }   }
2017-06-19 18:25:35 -07:00
        if (!error) DEBUGLOG(4, "compressed size : %u  ", (U32)dstPos);
2017-01-11 18:06:35 -08:00
        return error ? error : dstPos;
2016-12-30 21:04:25 -08:00
    }
2017-08-18 16:17:24 -07:00
}
size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
                                void* dst, size_t dstCapacity,
                          const void* src, size_t srcSize,
                          const ZSTD_CDict* cdict,
                                ZSTD_parameters const params,
                                unsigned overlapLog)
{
    ZSTD_CCtx_params cctxParams = mtctx->params;
    cctxParams.cParams = params.cParams;
    cctxParams.fParams = params.fParams;
2017-08-25 13:14:51 -07:00
    cctxParams.overlapSizeLog = overlapLog;
2017-08-22 14:24:47 -07:00
    return ZSTDMT_compress_advanced_internal(mtctx,
                                             dst, dstCapacity,
                                             src, srcSize,
2017-08-25 13:14:51 -07:00
                                             cdict, cctxParams);
2017-06-30 14:51:01 -07:00
}
2016-12-26 22:19:36 -08:00
2017-06-30 14:51:01 -07:00
size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
                           void* dst, size_t dstCapacity,
                     const void* src, size_t srcSize,
                           int compressionLevel)
{
2017-07-13 02:22:58 -07:00
    U32 const overlapLog = (compressionLevel >= ZSTD_maxCLevel()) ? 9 : ZSTDMT_OVERLAPLOG_DEFAULT;
2017-06-30 14:51:01 -07:00
    ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0);
    params.fParams.contentSizeFlag = 1;
2017-07-13 02:22:58 -07:00
    return ZSTDMT_compress_advanced(mtctx, dst, dstCapacity, src, srcSize, NULL, params, overlapLog);
2016-12-26 22:19:36 -08:00
}
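/* Illustrative one-shot usage sketch (assumes the public ZSTDMT API from
 * zstdmt_compress.h; buffer management and error handling are the caller's) :
 *
 *   ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(4);   // 4 worker threads
 *   size_t const bound = ZSTD_compressBound(srcSize);
 *   size_t const cSize = ZSTDMT_compressCCtx(mtctx, dst, bound,
 *                                            src, srcSize, 3);   // level 3
 *   if (ZSTD_isError(cSize)) { ... }   // handle error
 *   ZSTDMT_freeCCtx(mtctx);
 */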
2017-01-11 16:25:46 -08:00
/* ====================================== */
/* ======= Streaming API ======= */
/* ====================================== */
2017-08-22 14:24:47 -07:00
size_t ZSTDMT_initCStream_internal(
2018-01-25 17:35:49 -08:00
        ZSTDMT_CCtx* mtctx,
2017-08-30 14:36:54 -07:00
        const void* dict, size_t dictSize, ZSTD_dictMode_e dictMode,
2017-08-25 13:14:51 -07:00
        const ZSTD_CDict* cdict, ZSTD_CCtx_params params,
2017-08-18 16:17:24 -07:00
        unsigned long long pledgedSrcSize)
2017-01-22 16:40:06 -08:00
{
2017-11-16 12:18:56 -08:00
    DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u)", (U32)pledgedSrcSize);
2017-06-03 01:15:02 -07:00
    /* params are supposed to be fully validated at this point */
2017-08-25 13:14:51 -07:00
    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
2017-06-03 01:15:02 -07:00
    assert(!((dict) && (cdict)));   /* either dict or cdict, not both */
2018-01-25 17:35:49 -08:00
    assert(mtctx->cctxPool->totalCCtx == params.nbThreads);
    mtctx->singleBlockingThread = (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN);   /* do not trigger multi-threading when srcSize is too small */
2018-01-17 12:39:58 -08:00
    if (params.jobSize == 0) {
        if (params.cParams.windowLog >= 29)
            params.jobSize = ZSTDMT_JOBSIZE_MAX;
        else
            params.jobSize = 1 << (params.cParams.windowLog + 2);
    }
    if (params.jobSize > ZSTDMT_JOBSIZE_MAX) params.jobSize = ZSTDMT_JOBSIZE_MAX;
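    /* e.g. (illustrative) : windowLog = 24 gives a default jobSize of
     * 1 << 26 = 64 MB; for windowLog >= 29 the shifted value could exceed what
     * the (unsigned) jobSize type can hold, so it is pinned to ZSTDMT_JOBSIZE_MAX. */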
2017-06-03 01:15:02 -07:00
2018-01-25 17:35:49 -08:00
    if (mtctx->singleBlockingThread) {
2018-01-16 15:28:43 -08:00
        ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(params);
2018-01-17 11:39:07 -08:00
        DEBUGLOG(4, "ZSTDMT_initCStream_internal: switch to single blocking thread mode");
2017-08-25 13:14:51 -07:00
        assert(singleThreadParams.nbThreads == 0);
2018-01-25 17:35:49 -08:00
        return ZSTD_initCStream_internal(mtctx->cctxPool->cctx[0],
2017-08-21 18:10:44 -07:00
                                         dict, dictSize, cdict,
2017-08-25 13:14:51 -07:00
                                         singleThreadParams, pledgedSrcSize);
2017-06-03 01:15:02 -07:00
    }
2018-01-17 11:39:07 -08:00
    DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u threads", params.nbThreads);
2017-06-02 18:20:48 -07:00
2018-01-25 17:35:49 -08:00
    if (mtctx->allJobsCompleted == 0) {   /* previous compression not correctly finished */
        ZSTDMT_waitForAllJobsCompleted(mtctx);
        ZSTDMT_releaseAllJobResources(mtctx);
        mtctx->allJobsCompleted = 1;
2017-01-18 15:18:17 -08:00
    }
2017-06-03 01:15:02 -07:00
2018-01-25 17:35:49 -08:00
    mtctx->params = params;
    mtctx->frameContentSize = pledgedSrcSize;
2017-06-03 01:15:02 -07:00
    if (dict) {
2018-01-25 17:35:49 -08:00
        ZSTD_freeCDict(mtctx->cdictLocal);
        mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
2017-08-30 14:36:54 -07:00
                                                      ZSTD_dlm_byCopy, dictMode,   /* note : a loadPrefix becomes an internal CDict */
2018-01-25 17:35:49 -08:00
                                                      params.cParams, mtctx->cMem);
        mtctx->cdict = mtctx->cdictLocal;
        if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation);
2017-06-03 01:15:02 -07:00
    } else {
2018-01-25 17:35:49 -08:00
        ZSTD_freeCDict(mtctx->cdictLocal);
        mtctx->cdictLocal = NULL;
        mtctx->cdict = cdict;
2017-06-03 01:15:02 -07:00
    }
2017-11-16 12:18:56 -08:00
    assert(params.overlapSizeLog <= 9);
2018-01-25 17:35:49 -08:00
    mtctx->targetPrefixSize = (params.overlapSizeLog == 0) ? 0 : (size_t)1 << (params.cParams.windowLog - (9 - params.overlapSizeLog));
    DEBUGLOG(4, "overlapLog=%u => %u KB", params.overlapSizeLog, (U32)(mtctx->targetPrefixSize>>10));
    mtctx->targetSectionSize = params.jobSize;
    if (mtctx->targetSectionSize < ZSTDMT_JOBSIZE_MIN) mtctx->targetSectionSize = ZSTDMT_JOBSIZE_MIN;
    if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize;   /* job size must be >= overlap size */
    DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), params.jobSize);
2018-01-26 13:00:14 -08:00
    mtctx->inBuff.targetCapacity = mtctx->targetPrefixSize + mtctx->targetSectionSize;
    DEBUGLOG(4, "inBuff Size : %u KB", (U32)(mtctx->inBuff.targetCapacity>>10));
    ZSTDMT_setBufferSize(mtctx->bufPool, MAX(mtctx->inBuff.targetCapacity, ZSTD_compressBound(mtctx->targetSectionSize)));
2018-01-25 17:35:49 -08:00
    mtctx->inBuff.buffer = g_nullBuffer;
2018-01-26 13:00:14 -08:00
    mtctx->inBuff.prefixSize = 0;
2018-01-25 17:35:49 -08:00
    mtctx->doneJobID = 0;
    mtctx->nextJobID = 0;
    mtctx->frameEnded = 0;
    mtctx->allJobsCompleted = 0;
    mtctx->consumed = 0;
    mtctx->produced = 0;
    if (params.fParams.checksumFlag) XXH64_reset(&mtctx->xxhState, 0);
2017-01-11 16:25:46 -08:00
    return 0;
2017-08-18 16:17:24 -07:00
}
2017-06-03 01:15:02 -07:00
size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
2017-07-11 14:59:10 -07:00
                                   const void* dict, size_t dictSize,
                                   ZSTD_parameters params,
                                   unsigned long long pledgedSrcSize)
2017-01-22 16:40:06 -08:00
{
2018-01-17 11:39:07 -08:00
    ZSTD_CCtx_params cctxParams = mtctx->params;   /* retrieve sticky params */
    DEBUGLOG(4, "ZSTDMT_initCStream_advanced (pledgedSrcSize=%u)", (U32)pledgedSrcSize);
2017-08-22 14:24:47 -07:00
    cctxParams.cParams = params.cParams;
    cctxParams.fParams = params.fParams;
2017-08-30 14:36:54 -07:00
    return ZSTDMT_initCStream_internal(mtctx, dict, dictSize, ZSTD_dm_auto, NULL,
2017-08-22 14:24:47 -07:00
                                       cctxParams, pledgedSrcSize);
2017-06-03 01:15:02 -07:00
}
size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
                               const ZSTD_CDict* cdict,
                                     ZSTD_frameParameters fParams,
                                     unsigned long long pledgedSrcSize)
{
2017-08-25 13:14:51 -07:00
    ZSTD_CCtx_params cctxParams = mtctx->params;
2018-01-17 11:39:07 -08:00
    if (cdict==NULL) return ERROR(dictionary_wrong);   /* method incompatible with NULL cdict */
2017-08-25 17:58:28 -07:00
    cctxParams.cParams = ZSTD_getCParamsFromCDict(cdict);
2017-08-25 13:14:51 -07:00
    cctxParams.fParams = fParams;
2017-08-30 14:36:54 -07:00
    return ZSTDMT_initCStream_internal(mtctx, NULL, 0 /*dictSize*/, ZSTD_dm_auto, cdict,
2017-08-25 13:14:51 -07:00
                                       cctxParams, pledgedSrcSize);
2017-01-22 16:40:06 -08:00
}
2017-06-03 01:15:02 -07:00
2017-01-19 16:59:56 -08:00
/* ZSTDMT_resetCStream() :
2017-10-17 14:07:43 -07:00
 * pledgedSrcSize can be zero == unknown (for the time being)
 * prefer using ZSTD_CONTENTSIZE_UNKNOWN,
 * as `0` might mean "empty" in the future */
2018-01-25 17:35:49 -08:00
size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize)
2017-01-19 16:59:56 -08:00
{
2017-10-17 14:07:43 -07:00
    if (!pledgedSrcSize) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
2018-01-25 17:35:49 -08:00
    if (mtctx->params.nbThreads==1)
        return ZSTD_resetCStream(mtctx->cctxPool->cctx[0], pledgedSrcSize);
    return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dm_auto, 0, mtctx->params,
2017-08-22 14:24:47 -07:00
                                       pledgedSrcSize);
2017-01-19 16:59:56 -08:00
}
2018-01-25 17:35:49 -08:00
size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel) {
2018-01-16 15:28:43 -08:00
    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0);
2018-01-25 17:35:49 -08:00
    ZSTD_CCtx_params cctxParams = mtctx->params;   /* retrieve sticky params */
2018-01-17 11:39:07 -08:00
    DEBUGLOG(4, "ZSTDMT_initCStream (cLevel=%i)", compressionLevel);
2017-08-22 14:24:47 -07:00
    cctxParams.cParams = params.cParams;
    cctxParams.fParams = params.fParams;
2018-01-25 17:35:49 -08:00
    return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dm_auto, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN);
2017-01-19 15:32:07 -08:00
}
2017-01-11 16:25:46 -08:00
2018-01-25 17:35:49 -08:00
/* ZSTDMT_writeLastEmptyBlock()
2018-01-26 11:06:34 -08:00
 * Write a single empty block with an end-of-frame to finish a frame.
 * Job must be created from streaming variant.
 * This function is always successful if expected conditions are fulfilled.
2018-01-25 17:35:49 -08:00
 */
2018-01-26 11:06:34 -08:00
static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job)
2018-01-25 17:35:49 -08:00
{
    assert(job->lastChunk == 1);
2018-01-26 10:44:09 -08:00
    assert(job->srcSize == 0);      /* last chunk is empty -> will be simplified into a last empty block */
    assert(job->firstChunk == 0);   /* cannot be first chunk, as it also needs to create frame header */
    /* A job created by streaming variant starts with a src buffer, but no dst buffer.
     * It summons a dstBuffer itself, compresses into it, then releases srcBuffer, and gives result to mtctx.
     * When done, srcBuffer is empty, while dstBuffer is filled, and will be released by mtctx.
     * This shortcut will simply switch srcBuffer for dstBuffer, providing same outcome as a normal job */
2018-01-26 11:06:34 -08:00
    assert(job->dstBuff.start == NULL);   /* invoked from streaming variant only (otherwise, dstBuff might be user's output) */
    assert(job->srcBuff.start != NULL);   /* invoked from streaming variant only (otherwise, srcBuff might be user's input) */
2018-01-26 13:00:14 -08:00
    assert(job->srcBuff.capacity >= ZSTD_blockHeaderSize);   /* no buffer should ever be that small */
2018-01-26 10:44:09 -08:00
    job->dstBuff = job->srcBuff;
    job->srcBuff = g_nullBuffer;
2018-01-26 13:00:14 -08:00
    job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity);
2018-01-26 10:44:09 -08:00
    assert(!ZSTD_isError(job->cSize));
    assert(job->consumed == 0);
2018-01-25 17:35:49 -08:00
}
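/* Note (illustrative) : the empty last block written above is just a block
 * header with the last-block flag set and a zero content size, which is why a
 * capacity of ZSTD_blockHeaderSize bytes is enough (see the assert above). */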
static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZSTD_EndDirective endOp)
2017-01-24 17:41:49 -08:00
{
2018-01-25 17:35:49 -08:00
    unsigned const jobID = mtctx->nextJobID & mtctx->jobIDMask;
2018-01-23 15:52:40 -08:00
    int const endFrame = (endOp == ZSTD_e_end);
2017-01-24 17:41:49 -08:00
2018-01-25 17:35:49 -08:00
    if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) {
2018-01-19 17:35:08 -08:00
        DEBUGLOG(5, "ZSTDMT_createCompressionJob: will not create new job : table is full");
2018-01-25 17:35:49 -08:00
        assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask));
2018-01-19 17:35:08 -08:00
        return 0;
    }
2018-01-19 13:19:59 -08:00
2018-01-25 17:35:49 -08:00
    if (!mtctx->jobReady) {
2018-01-19 10:01:40 -08:00
        DEBUGLOG(5, "ZSTDMT_createCompressionJob: preparing job %u to compress %u bytes with %u preload",
2018-01-26 13:00:14 -08:00
                    mtctx->nextJobID, (U32)srcSize, (U32)mtctx->inBuff.prefixSize);
2018-01-26 11:06:34 -08:00
        assert(mtctx->jobs[jobID].srcBuff.start == NULL);   /* no buffer left : supposed already released */
2018-01-26 10:44:09 -08:00
        mtctx->jobs[jobID].srcBuff = mtctx->inBuff.buffer;
        mtctx->jobs[jobID].prefixStart = mtctx->inBuff.buffer.start;
2018-01-26 13:00:14 -08:00
        mtctx->jobs[jobID].prefixSize = mtctx->inBuff.prefixSize;
2018-01-25 17:35:49 -08:00
        mtctx->jobs[jobID].srcSize = srcSize;
2018-01-26 13:00:14 -08:00
        assert(mtctx->inBuff.filled >= srcSize + mtctx->inBuff.prefixSize);
2018-01-25 17:35:49 -08:00
        mtctx->jobs[jobID].consumed = 0;
        mtctx->jobs[jobID].cSize = 0;
        mtctx->jobs[jobID].params = mtctx->params;
2018-01-19 10:01:40 -08:00
        /* do not calculate checksum within sections, but write it in header for first section */
2018-01-25 17:35:49 -08:00
        if (mtctx->nextJobID) mtctx->jobs[jobID].params.fParams.checksumFlag = 0;
        mtctx->jobs[jobID].cdict = mtctx->nextJobID==0 ? mtctx->cdict : NULL;
        mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize;
        mtctx->jobs[jobID].dstBuff = g_nullBuffer;
        mtctx->jobs[jobID].cctxPool = mtctx->cctxPool;
        mtctx->jobs[jobID].bufPool = mtctx->bufPool;
        mtctx->jobs[jobID].firstChunk = (mtctx->nextJobID == 0);
        mtctx->jobs[jobID].lastChunk = endFrame;
        mtctx->jobs[jobID].frameChecksumNeeded = endFrame && (mtctx->nextJobID>0) && mtctx->params.fParams.checksumFlag;
        mtctx->jobs[jobID].dstFlushed = 0;
        if (mtctx->params.fParams.checksumFlag)
2018-01-26 13:00:14 -08:00
            XXH64_update(&mtctx->xxhState, (const char*)mtctx->inBuff.buffer.start + mtctx->inBuff.prefixSize, srcSize);
2018-01-19 10:01:40 -08:00
        /* get a new buffer for next input */
        if (!endFrame) {
2018-01-26 13:00:14 -08:00
            size_t const newPrefixSize = MIN(mtctx->inBuff.filled, mtctx->targetPrefixSize);
2018-01-25 17:35:49 -08:00
            mtctx->inBuff.buffer = ZSTDMT_getBuffer(mtctx->bufPool);
2018-01-26 13:00:14 -08:00
            if (mtctx->inBuff.buffer.start == NULL) {   /* not enough memory to allocate a new input buffer */
2018-01-25 17:35:49 -08:00
                mtctx->jobs[jobID].srcSize = mtctx->jobs[jobID].consumed = 0;
                mtctx->nextJobID++;
                ZSTDMT_waitForAllJobsCompleted(mtctx);
                ZSTDMT_releaseAllJobResources(mtctx);
2018-01-19 10:01:40 -08:00
                return ERROR(memory_allocation);
            }
2018-01-26 13:00:14 -08:00
            mtctx->inBuff.filled -= (mtctx->inBuff.prefixSize + srcSize) - newPrefixSize;
2018-01-25 17:35:49 -08:00
            memmove(mtctx->inBuff.buffer.start,   /* copy end of current job into next job, as "prefix" */
2018-01-26 13:00:14 -08:00
                    (const char*)mtctx->jobs[jobID].prefixStart + mtctx->inBuff.prefixSize + srcSize - newPrefixSize,
2018-01-25 17:35:49 -08:00
                    mtctx->inBuff.filled);
2018-01-26 13:00:14 -08:00
            mtctx->inBuff.prefixSize = newPrefixSize;
2018-01-19 10:01:40 -08:00
        } else {   /* endFrame==1 => no need for another input buffer */
2018-01-25 17:35:49 -08:00
            mtctx->inBuff.buffer = g_nullBuffer;
            mtctx->inBuff.filled = 0;
2018-01-26 13:00:14 -08:00
            mtctx->inBuff.prefixSize = 0;
2018-01-25 17:35:49 -08:00
            mtctx->frameEnded = endFrame;
            if (mtctx->nextJobID == 0) {
                /* single chunk exception : checksum is already calculated directly within worker thread */
                mtctx->params.fParams.checksumFlag = 0;
        }   }
2017-01-24 17:41:49 -08:00
2018-01-25 17:35:49 -08:00
        if ( (srcSize == 0)
           & (mtctx->nextJobID>0) /*single chunk must also write frame header*/ ) {
2018-01-26 12:15:43 -08:00
            DEBUGLOG(5, "ZSTDMT_createCompressionJob: creating a last empty block to end frame");
2018-01-25 17:35:49 -08:00
            assert(endOp == ZSTD_e_end);   /* only possible case : need to end the frame with an empty last block */
2018-01-26 11:06:34 -08:00
            ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID);
2018-01-25 17:35:49 -08:00
            mtctx->nextJobID++;
            return 0;
        }
    }
2018-01-26 12:15:43 -08:00
    DEBUGLOG(5, "ZSTDMT_createCompressionJob: posting job %u : %u bytes  (end:%u, jobNb == %u (mod:%u))",
2018-01-25 17:35:49 -08:00
                mtctx->nextJobID,
                (U32)mtctx->jobs[jobID].srcSize,
                mtctx->jobs[jobID].lastChunk,
2018-01-26 12:15:43 -08:00
                mtctx->nextJobID,
                jobID);
2018-01-25 17:35:49 -08:00
    if (POOL_tryAdd(mtctx->factory, ZSTDMT_compressChunk, &mtctx->jobs[jobID])) {
        mtctx->nextJobID++;
        mtctx->jobReady = 0;
2018-01-19 10:01:40 -08:00
    } else {
2018-01-25 17:35:49 -08:00
        DEBUGLOG(5, "ZSTDMT_createCompressionJob: no worker available for job %u", mtctx->nextJobID);
        mtctx->jobReady = 1;
2018-01-19 10:01:40 -08:00
    }
2017-01-24 17:41:49 -08:00
    return 0;
}
2018-01-19 17:35:08 -08:00
/*! ZSTDMT_flushProduced() :
 * `output` : `pos` will be updated with amount of data flushed.
2018-01-18 16:20:26 -08:00
 * `blockToFlush` : if >0, the function will block and wait if there is no data available to flush.
 * @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */
2018-01-25 17:35:49 -08:00
static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, unsigned blockToFlush, ZSTD_EndDirective end)
2017-01-23 11:43:51 -08:00
{
2018-01-25 17:35:49 -08:00
    unsigned const wJobID = mtctx->doneJobID & mtctx->jobIDMask;
2018-01-26 12:15:43 -08:00
    DEBUGLOG(5, "ZSTDMT_flushProduced (blocking:%u , job %u <= %u)",
                blockToFlush, mtctx->doneJobID, mtctx->nextJobID);
2018-01-19 17:35:08 -08:00
    assert(output->size >= output->pos);
2018-01-26 17:48:33 -08:00
    ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
2018-01-26 12:15:43 -08:00
    if (  blockToFlush
      & (mtctx->doneJobID < mtctx->nextJobID) ) {
2018-01-25 17:35:49 -08:00
        assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize);
2018-01-26 12:15:43 -08:00
        while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) {   /* nothing to flush */
2018-01-25 17:35:49 -08:00
            if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].srcSize) {
2018-01-26 12:15:43 -08:00
                DEBUGLOG(5, "job %u is completely consumed (%u == %u) => don't wait for cond, there will be none",
2018-01-25 17:35:49 -08:00
                            mtctx->doneJobID, (U32)mtctx->jobs[wJobID].consumed, (U32)mtctx->jobs[wJobID].srcSize);
                break;
            }
2018-01-19 17:35:08 -08:00
            DEBUGLOG(5, "waiting for something to flush from job %u (currently flushed: %u bytes)",
2018-01-25 17:35:49 -08:00
                        mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
2018-01-26 17:48:33 -08:00
            ZSTD_pthread_cond_wait(&mtctx->jobs[wJobID].job_cond, &mtctx->jobs[wJobID].job_mutex);   /* block when nothing to flush but some to come */
2018-01-19 17:35:08 -08:00
    }   }
2018-01-18 11:03:27 -08:00
2018-01-26 12:15:43 -08:00
    /* try to flush something */
2018-01-26 17:08:58 -08:00
    {   size_t cSize = mtctx->jobs[wJobID].cSize;                  /* shared */
        size_t const srcConsumed = mtctx->jobs[wJobID].consumed;   /* shared */
        size_t const srcSize = mtctx->jobs[wJobID].srcSize;        /* read-only, could be done after mutex lock, but no-declaration-after-statement */
2018-01-26 17:48:33 -08:00
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
2018-01-26 12:15:43 -08:00
        if (ZSTD_isError(cSize)) {
2018-01-19 17:35:08 -08:00
            DEBUGLOG(5, "ZSTDMT_flushProduced: job %u : compression error detected : %s",
2018-01-26 12:15:43 -08:00
                        mtctx->doneJobID, ZSTD_getErrorName(cSize));
2018-01-25 17:35:49 -08:00
            ZSTDMT_waitForAllJobsCompleted(mtctx);
            ZSTDMT_releaseAllJobResources(mtctx);
2018-01-26 12:15:43 -08:00
            return cSize;
2018-01-18 11:03:27 -08:00
        }
2018-01-19 17:35:08 -08:00
        /* add frame checksum if necessary (can only happen once) */
2018-01-26 17:08:58 -08:00
        assert(srcConsumed <= srcSize);
        if ( (srcConsumed == srcSize)   /* job completed -> worker no longer active */
2018-01-26 12:15:43 -08:00
          && mtctx->jobs[wJobID].frameChecksumNeeded ) {
2018-01-25 17:35:49 -08:00
            U32 const checksum = (U32)XXH64_digest(&mtctx->xxhState);
2018-01-23 15:19:11 -08:00
            DEBUGLOG(4, "ZSTDMT_flushProduced: writing checksum : %08X \n", checksum);
2018-01-26 12:15:43 -08:00
            MEM_writeLE32((char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum);
            cSize += 4;
            mtctx->jobs[wJobID].cSize += 4;   /* can write this shared value, as worker is no longer active */
2018-01-25 17:35:49 -08:00
            mtctx->jobs[wJobID].frameChecksumNeeded = 0;
2017-01-24 11:48:40 -08:00
        }
2018-01-26 14:35:54 -08:00
        if (cSize > 0) {   /* compression is ongoing or completed */
2018-01-26 17:08:58 -08:00
            size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos);
2018-01-26 14:35:54 -08:00
            DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)",
2018-01-26 17:08:58 -08:00
                        (U32)toFlush, mtctx->doneJobID, (U32)srcConsumed, (U32)srcSize, (U32)cSize);
2018-01-26 14:35:54 -08:00
            assert(mtctx->doneJobID < mtctx->nextJobID);
2018-01-26 12:15:43 -08:00
            assert(cSize >= mtctx->jobs[wJobID].dstFlushed);
2018-01-26 14:35:54 -08:00
            assert(mtctx->jobs[wJobID].dstBuff.start != NULL);
2018-01-26 17:08:58 -08:00
            memcpy((char*)output->dst + output->pos,
                   (const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed,
                   toFlush);
            output->pos += toFlush;
            mtctx->jobs[wJobID].dstFlushed += toFlush;   /* can write : this value is only used by mtctx */
2018-01-19 17:35:08 -08:00
2018-01-26 17:08:58 -08:00
            if ( (srcConsumed == srcSize)   /* job completed */
2018-01-26 12:15:43 -08:00
              && (mtctx->jobs[wJobID].dstFlushed == cSize) ) {   /* output buffer fully flushed => free this job position */
2018-01-19 17:35:08 -08:00
                DEBUGLOG(5, "Job %u completed (%u bytes), moving to next one",
2018-01-26 12:15:43 -08:00
                            mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
                assert(mtctx->jobs[wJobID].srcBuff.start == NULL);   /* srcBuff supposed already released */
                ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff);
2018-01-25 17:35:49 -08:00
                mtctx->jobs[wJobID].dstBuff = g_nullBuffer;
2018-01-26 12:15:43 -08:00
                mtctx->jobs[wJobID].cSize = 0;   /* ensure this job slot is considered "not started" in future check */
2018-01-26 17:08:58 -08:00
                mtctx->consumed += srcSize;
2018-01-26 12:15:43 -08:00
                mtctx->produced += cSize;
2018-01-25 17:35:49 -08:00
                mtctx->doneJobID++;
2018-01-19 17:35:08 -08:00
        }   }
2018-01-18 16:20:26 -08:00
        /* return value : how many bytes left in buffer ; fake it to 1 when unknown but >0 */
2018-01-26 12:15:43 -08:00
        if (cSize > mtctx->jobs[wJobID].dstFlushed) return (cSize - mtctx->jobs[wJobID].dstFlushed);
2018-01-26 17:08:58 -08:00
        if (srcSize > srcConsumed) return 1;   /* current job not completely compressed */
2018-01-19 17:35:08 -08:00
    }
2018-01-26 12:15:43 -08:00
    if (mtctx->doneJobID < mtctx->nextJobID) return 1;   /* some more jobs ongoing */
    if (mtctx->jobReady) return 1;   /* one job is ready to push, just not yet in the list */
    if (mtctx->inBuff.filled > 0) return 1;   /* input is not empty, and still needs to be converted into a job */
    mtctx->allJobsCompleted = mtctx->frameEnded;   /* all chunks are entirely flushed => if this one is last one, frame is completed */
    if (end == ZSTD_e_end) return !mtctx->frameEnded;   /* for ZSTD_e_end, question becomes : is frame completed ? instead of : are internal buffers fully flushed ? */
    return 0;   /* internal buffers fully flushed */
2018-01-19 17:35:08 -08:00
}
2017-01-23 11:43:51 -08:00
2017-06-30 14:51:01 -07:00
/** ZSTDMT_compressStream_generic() :
2017-09-28 11:46:19 -07:00
 * internal use only - exposed to be invoked from zstd_compress.c
2017-06-30 14:51:01 -07:00
 * assumption : output and input are valid (pos <= size)
 * @return : minimum amount of data remaining to flush, 0 if none */
size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
                                     ZSTD_outBuffer* output,
                                     ZSTD_inBuffer* input,
                                     ZSTD_EndDirective endOp)
{
2018-01-26 13:00:14 -08:00
    size_t const newJobThreshold = mtctx->inBuff.prefixSize + mtctx->targetSectionSize;
2017-09-28 11:46:19 -07:00
    unsigned forwardInputProgress = 0;
2018-01-26 14:35:54 -08:00
    DEBUGLOG(5, "ZSTDMT_compressStream_generic (endOp=%u, srcSize=%u)",
                (U32)endOp, (U32)(input->size - input->pos));
2017-06-30 14:51:01 -07:00
    assert(output->pos <= output->size);
    assert(input->pos <= input->size);
2017-11-16 12:18:56 -08:00
2018-01-16 15:28:43 -08:00
    if (mtctx->singleBlockingThread) {   /* delegate to single-thread (synchronous) */
2017-11-16 12:18:56 -08:00
        return ZSTD_compressStream_generic(mtctx->cctxPool->cctx[0], output, input, endOp);
    }
2017-06-30 14:51:01 -07:00
    if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
2017-09-28 02:14:48 -07:00
        /* current frame being ended. Only flush/end are allowed */
2017-06-30 14:51:01 -07:00
        return ERROR(stage_wrong);
    }
2017-06-30 15:44:57 -07:00
2017-09-28 11:46:19 -07:00
    /* single-pass shortcut (note : synchronous-mode) */
2018-01-26 12:15:43 -08:00
    if ( (mtctx->nextJobID == 0)      /* just started */
      && (mtctx->inBuff.filled == 0)  /* nothing buffered */
      && (!mtctx->jobReady)           /* no job already created */
      && (endOp == ZSTD_e_end)        /* end order */
2018-01-25 14:52:34 -08:00
      && (output->size - output->pos >= ZSTD_compressBound(input->size - input->pos)) ) {   /* enough space in dst */
2017-08-22 14:24:47 -07:00
        size_t const cSize = ZSTDMT_compress_advanced_internal(mtctx,
2017-06-30 14:51:01 -07:00
                (char*)output->dst + output->pos, output->size - output->pos,
                (const char*)input->src + input->pos, input->size - input->pos,
2017-08-25 13:14:51 -07:00
                mtctx->cdict, mtctx->params);
2017-06-30 14:51:01 -07:00
        if (ZSTD_isError(cSize)) return cSize;
        input->pos = input->size;
        output->pos += cSize;
2017-07-11 14:59:10 -07:00
        ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->inBuff.buffer);   /* was allocated in initStream */
2017-06-30 14:51:01 -07:00
        mtctx->allJobsCompleted = 1;
        mtctx->frameEnded = 1;
        return 0;
2017-07-04 10:36:41 -07:00
    }
2017-06-30 14:51:01 -07:00
    /* fill input buffer */
2018-01-19 10:01:40 -08:00
    if ( (!mtctx->jobReady)
      && (input->size > input->pos) ) {   /* support NULL input */
2017-07-10 17:16:41 -07:00
        if (mtctx->inBuff.buffer.start == NULL) {
2018-01-26 13:00:14 -08:00
            mtctx->inBuff.buffer = ZSTDMT_getBuffer(mtctx->bufPool);   /* note : allocation can fail, in which case, buffer.start==NULL */
2017-07-10 17:16:41 -07:00
            mtctx->inBuff.filled = 0;
2018-01-26 13:00:14 -08:00
            if ( (mtctx->inBuff.buffer.start == NULL)        /* allocation failure */
2018-01-17 12:10:15 -08:00
              && (mtctx->doneJobID == mtctx->nextJobID) ) {  /* and nothing to flush */
2018-01-26 13:00:14 -08:00
                return ERROR(memory_allocation);   /* no forward progress possible => output an error */
            }
            assert(mtctx->inBuff.buffer.capacity >= mtctx->inBuff.targetCapacity);   /* pool must provide a buffer >= targetCapacity */
        }
        if (mtctx->inBuff.buffer.start != NULL) {   /* no buffer for input, but it's possible to flush, and then reclaim the buffer */
            size_t const toLoad = MIN(input->size - input->pos, mtctx->inBuff.targetCapacity - mtctx->inBuff.filled);
2018-01-19 10:01:40 -08:00
            DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u",
2018-01-26 13:00:14 -08:00
                        (U32)toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->inBuff.targetCapacity);
2017-07-10 17:16:41 -07:00
            memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, toLoad);
            input->pos += toLoad;
            mtctx->inBuff.filled += toLoad;
2017-09-28 11:46:19 -07:00
            forwardInputProgress = toLoad>0;
2018-01-23 13:12:40 -08:00
        }
        if ((input->pos < input->size) && (endOp == ZSTD_e_end))
            endOp = ZSTD_e_flush;   /* can't end now : not all input consumed */
    }
2017-06-30 14:51:01 -07:00
2018-01-19 10:01:40 -08:00
    if ( (mtctx->jobReady)
2018-01-19 13:19:59 -08:00
      || (mtctx->inBuff.filled >= newJobThreshold)   /* filled enough : let's compress */
2018-01-23 13:12:40 -08:00
      || ((endOp != ZSTD_e_continue) && (mtctx->inBuff.filled > 0))   /* something to flush : let's go */
      || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) {   /* must finish the frame with a zero-size block */
2018-01-26 13:00:14 -08:00
        size_t const jobSize = MIN(mtctx->inBuff.filled - mtctx->inBuff.prefixSize, mtctx->targetSectionSize);
2018-01-23 15:52:40 -08:00
        CHECK_F( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) );
2017-06-30 14:51:01 -07:00
}
/* check for potential compressed data ready to be flushed */
2018-01-23 15:19:11 -08:00
    {   size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp);   /* block if there was no forward input progress */
2018-01-25 14:52:34 -08:00
        if (input->pos < input->size) return MAX(remainingToFlush, 1);   /* input not consumed : do not end flush yet */
2018-01-19 13:19:59 -08:00
        return remainingToFlush;
2017-06-30 14:51:01 -07:00
}
}
2018-01-25 17:35:49 -08:00
size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
2017-01-11 16:25:46 -08:00
{
2018-01-25 17:35:49 -08:00
    CHECK_F( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) );
2017-01-24 11:48:40 -08:00
2017-01-11 16:25:46 -08:00
    /* recommended next input size : fill current input buffer */
2018-01-26 13:00:14 -08:00
    return mtctx->inBuff.targetCapacity - mtctx->inBuff.filled;   /* note : could be zero when input buffer is fully filled and no more availability to create new job */
2017-01-17 15:31:16 -08:00
}
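/* Illustrative sketch, not part of the library : using the size hint returned
 * above to size the next read. `srcFile` and the chunk size are assumptions;
 * output draining is omitted for brevity. */
#if 0
#include <stdio.h>   /* FILE, fread, feof */
static size_t example_feed_with_hint(ZSTDMT_CCtx* mtctx, FILE* srcFile, ZSTD_outBuffer* output)
{
    char in[1 << 17];            /* input chunk : arbitrary */
    size_t hint = sizeof(in);    /* first read : fill the whole chunk */
    while (!feof(srcFile)) {
        ZSTD_inBuffer input = { in, 0, 0 };
        input.size = fread(in, 1, MIN(hint, sizeof(in)), srcFile);
        hint = ZSTDMT_compressStream(mtctx, output, &input);
        if (ZSTD_isError(hint)) return hint;
        if (hint == 0) hint = sizeof(in);   /* hint==0 : internal buffer full; read any amount next time */
    }
    return 0;   /* caller still must flush/end the frame (see below) */
}
#endif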
2017-01-17 16:15:18 -08:00
2018-01-23 15:19:11 -08:00
static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_EndDirective endFrame)
2017-01-17 15:31:16 -08:00
{
2018-01-26 13:00:14 -08:00
    size_t const srcSize = mtctx->inBuff.filled - mtctx->inBuff.prefixSize;
2017-12-12 16:20:51 -08:00
DEBUGLOG ( 5 , " ZSTDMT_flushStream_internal " ) ;
2017-01-17 15:31:16 -08:00
2018-01-19 13:19:59 -08:00
    if (  mtctx->jobReady            /* one job ready for a worker to pick up */
       || (srcSize > 0)              /* still some data within input buffer */
2018-01-23 15:19:11 -08:00
       || ((endFrame == ZSTD_e_end) && !mtctx->frameEnded) ) {   /* need a last 0-size block to end frame */
2018-01-19 17:35:08 -08:00
        DEBUGLOG(5, "ZSTDMT_flushStream_internal : create a new job (%u bytes, end:%u)",
2018-01-23 15:19:11 -08:00
                    (U32)srcSize, (U32)endFrame);
2017-11-16 12:18:56 -08:00
        CHECK_F( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) );
2017-01-17 15:31:16 -08:00
}
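    /* note : blockToFlush==1 below means ZSTDMT_flushProduced() may wait for the
     * oldest active job to produce some output (or complete), so this call can
     * block rather than return immediately with an empty output buffer. */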
/* check if there is any data available to flush */
2018-01-23 15:19:11 -08:00
    return ZSTDMT_flushProduced(mtctx, output, 1 /* blockToFlush */, endFrame);
2017-01-17 15:31:16 -08:00
}
2017-11-16 12:18:56 -08:00
size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
2017-01-17 15:31:16 -08:00
{
2017-06-19 18:25:35 -07:00
DEBUGLOG ( 5 , " ZSTDMT_flushStream " ) ;
2018-01-16 15:28:43 -08:00
    if (mtctx->singleBlockingThread)
2017-11-16 12:18:56 -08:00
        return ZSTD_flushStream(mtctx->cctxPool->cctx[0], output);
2018-01-23 15:19:11 -08:00
    return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_flush);
2017-01-17 15:31:16 -08:00
}
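/* Illustrative sketch, not part of the library : drain currently-buffered data
 * without ending the frame. `dstFile` and the scratch buffer size are
 * hypothetical caller-side choices. */
#if 0
#include <stdio.h>   /* FILE, fwrite */
static size_t example_flush_all(ZSTDMT_CCtx* mtctx, FILE* dstFile)
{
    char out[1 << 17];   /* scratch : arbitrary size */
    size_t remaining;
    do {
        ZSTD_outBuffer output = { out, sizeof(out), 0 };
        remaining = ZSTDMT_flushStream(mtctx, &output);
        if (ZSTD_isError(remaining)) return remaining;
        fwrite(out, 1, output.pos, dstFile);
    } while (remaining != 0);   /* 0 : everything produced so far is flushed */
    return 0;
}
#endif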
2017-11-16 12:18:56 -08:00
size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
2017-01-17 15:31:16 -08:00
{
2017-06-30 14:51:01 -07:00
DEBUGLOG ( 4 , " ZSTDMT_endStream " ) ;
2018-01-16 15:28:43 -08:00
    if (mtctx->singleBlockingThread)
2017-11-16 12:18:56 -08:00
        return ZSTD_endStream(mtctx->cctxPool->cctx[0], output);
2018-01-23 15:19:11 -08:00
    return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_end);
2017-01-11 16:25:46 -08:00
}
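/* Illustrative sketch, not part of the library : unlike the flush loop above,
 * ZSTDMT_endStream() also emits the frame's terminating zero-size block, so a
 * return of 0 here means the frame is complete. `dstFile` is a hypothetical sink. */
#if 0
#include <stdio.h>   /* FILE, fwrite */
static size_t example_end_frame(ZSTDMT_CCtx* mtctx, FILE* dstFile)
{
    char out[1 << 17];   /* scratch : arbitrary size */
    size_t remaining;
    do {
        ZSTD_outBuffer output = { out, sizeof(out), 0 };
        remaining = ZSTDMT_endStream(mtctx, &output);
        if (ZSTD_isError(remaining)) return remaining;
        fwrite(out, 1, output.pos, dstFile);
    } while (remaining != 0);   /* keep draining until the frame is fully written */
    return 0;
}
#endif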