rateLimiter does not "catch up" when input speed is slow

Yann Collet 2018-08-13 11:38:55 -07:00
parent e7a49c6683
commit f3aa510738
3 changed files with 15 additions and 5 deletions

@@ -837,9 +837,13 @@ FIO_compressZstdFrame(const cRess_t* ressPtr,
             csuzfp = zfp;
             lastFlushedSize = compressedfilesize;
             assert(inputPresented > 0);
+            DISPLAYLEVEL(6, "input blocked %u/%u(%.2f) - ingested:%u vs %u:consumed - flushed:%u vs %u:produced \n",
+                            inputBlocked, inputPresented, (double)inputBlocked/inputPresented*100,
+                            (U32)newlyIngested, (U32)newlyConsumed,
+                            (U32)newlyFlushed, (U32)newlyProduced);
             if ( (inputBlocked > inputPresented / 8)         /* input is waiting often, because input buffers is full : compression or output too slow */
-              && (newlyFlushed * 17 / 16 > newlyProduced)    /* flush everything that is produced */
-              && (newlyIngested * 17 / 16 > newlyConsumed)   /* can't keep up with input speed */
+              && (newlyFlushed * 33 / 32 > newlyProduced)    /* flush everything that is produced */
+              && (newlyIngested * 33 / 32 > newlyConsumed)   /* input speed as fast or faster than compression speed */
               ) {
                 DISPLAYLEVEL(6, "recommend faster as in(%llu) >= (%llu)comp(%llu) <= out(%llu) \n",
                             newlyIngested, newlyConsumed, newlyProduced, newlyFlushed);
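
The three-way condition gates the adaptive mode's "recommend faster" decision: input waits often (its buffers are full), nearly everything produced has been flushed, and ingestion keeps pace with consumption. Tightening the tolerance from 17/16 (flushed or ingested may trail by about 6%) to 33/32 (about 3%) means the recommendation fires only when output and input really keep up. A minimal sketch of the check, written here in Python for readability (the standalone function and its name are illustrative, not zstd's API):

    def should_recommend_faster(input_blocked, input_presented,
                                newly_flushed, newly_produced,
                                newly_ingested, newly_consumed):
        # Sketch of the condition above, not the actual zstd source.
        # 1) input waited more than 1/8 of the times it was presented:
        #    compression or output is the bottleneck, not the producer.
        # 2) flushed * 33/32 > produced: output keeps up; flushed trails
        #    produced by at most ~3% (the old 17/16 allowed ~6%).
        # 3) ingested * 33/32 > consumed: input arrives at least as fast
        #    as compression consumes it.
        if input_presented == 0:
            return False  # the C code asserts inputPresented > 0 instead
        return (input_blocked > input_presented / 8
                and newly_flushed * 33 // 32 > newly_produced
                and newly_ingested * 33 // 32 > newly_consumed)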

@@ -139,6 +139,7 @@ the last one takes effect.
     The current compression level can be observed live by using command `-v`.
     Works with multi-threading and `--long` mode.
     Does not work with `--single-thread`.
+    Due to the chaotic nature of dynamic adaptation, compressed result is not reproducible.
 * `-D file`:
     use `file` as Dictionary to compress or decompress FILE(s)
 * `--no-dictID`:

@@ -19,7 +19,7 @@ import time
 MB = 1024 * 1024
 rate = float(sys.argv[1]) * MB
-rate *= 1.25 # compensation for excluding write time (experimentally determined)
+rate *= 1.4 # compensation for excluding i/o time (experimentally determined)
 start = time.time()
 total_read = 0
@@ -29,9 +29,14 @@ while len(buf):
     to_read = max(int(rate * (now - start) - total_read), 1)
     max_buf_size = 1 * MB
     to_read = min(to_read, max_buf_size)
+    read_start = time.time()
     buf = sys.stdin.buffer.read(to_read)
-    write_start = time.time()
+    write_start = read_end = time.time()
     sys.stdout.buffer.write(buf)
     write_end = time.time()
-    start += write_end - write_start # exclude write delay
+    wait_time = max(read_end - read_start, write_end - write_start)
+    start += wait_time # exclude delay of the slowest
     total_read += len(buf)
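
Taken together, the loop now timestamps the read as well as the write, and advances `start` by whichever stall was longer. Time spent blocked on a slow producer is thus excluded from the schedule, so the limiter no longer answers a slow stretch of input with a burst of oversized catch-up reads. A self-contained sketch of the patched script (only the lines shown in the hunks above are verbatim; the shebang, imports, and loop setup are assumptions):

    #!/usr/bin/env python3
    # Copy stdin to stdout at roughly argv[1] MB/s.
    import sys
    import time

    MB = 1024 * 1024
    rate = float(sys.argv[1]) * MB
    rate *= 1.4 # compensation for excluding i/o time (experimentally determined)
    start = time.time()
    total_read = 0

    buf = b' '  # assumed non-empty seed so the loop body runs at least once
    while len(buf):
        now = time.time()
        # bytes needed to stay on schedule: at least 1, at most 1 MB
        to_read = max(int(rate * (now - start) - total_read), 1)
        max_buf_size = 1 * MB
        to_read = min(to_read, max_buf_size)
        read_start = time.time()
        buf = sys.stdin.buffer.read(to_read)
        write_start = read_end = time.time()
        sys.stdout.buffer.write(buf)
        write_end = time.time()
        # credit back only the slowest side; counting a long read stall
        # as elapsed budget is what used to produce the catch-up bursts
        wait_time = max(read_end - read_start, write_end - write_start)
        start += wait_time # exclude delay of the slowest
        total_read += len(buf)

A plausible use, e.g. feeding `zstd --adapt` at a controlled pace (script and file names illustrative): `python rate_limiter.py 10 < big.file | zstd --adapt -o big.file.zst`.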