-rw-r--r--   NEWS                         6
-rw-r--r--   doc/lz4_Block_format.md     63
-rw-r--r--   doc/lz4_manual.html         12
-rw-r--r--   doc/lz4frame_manual.html    63
-rw-r--r--   examples/frameCompress.c     3
-rw-r--r--   lib/Makefile                 6
-rw-r--r--   lib/lz4.c                  292
-rw-r--r--   lib/lz4.h                   12
-rw-r--r--   lib/lz4frame.c              62
-rw-r--r--   lib/lz4frame.h             129
-rw-r--r--   lib/lz4frame_static.h      117
-rw-r--r--   lib/lz4hc.c                253
-rw-r--r--   lib/lz4hc.h                101
-rw-r--r--   programs/lz4cli.c            2
-rw-r--r--   programs/lz4io.c            10
-rw-r--r--   programs/lz4io.h             7
-rw-r--r--   programs/util.h              3
-rw-r--r--   tests/Makefile               3
-rw-r--r--   tests/frametest.c            6
-rw-r--r--   tests/fullbench.c           18
-rw-r--r--   tests/fuzzer.c              75
21 files changed, 798 insertions, 445 deletions
diff --git a/NEWS b/NEWS
index 9ac41f2..c3b47f1 100644
--- a/NEWS
+++ b/NEWS
@@ -1,7 +1,13 @@
v1.8.2
+perf: *much* faster dictionary compression on small files, by @felixhandte
perf: slightly faster HC compression and decompression speed
perf: very small compression ratio improvement
+fix : compression compatible with low memory addresses (< 0xFFFF)
+fix : decompression segfault when provided with NULL input, by @terrelln
+cli : new command --favor-decSpeed
cli : benchmark mode more accurate for small inputs
+fullbench : can measure _destSize() variants, by @felixhandte
+doc : clarified block format parsing restrictions, by Alexey Tourbin (@svpv)
v1.8.1
perf : faster and stronger ultra modes (levels 10+)
diff --git a/doc/lz4_Block_format.md b/doc/lz4_Block_format.md
index 4e39b41..5438730 100644
--- a/doc/lz4_Block_format.md
+++ b/doc/lz4_Block_format.md
@@ -1,6 +1,6 @@
LZ4 Block Format Description
============================
-Last revised: 2015-05-07.
+Last revised: 2018-04-25.
Author : Yann Collet
@@ -29,8 +29,8 @@ An LZ4 compressed block is composed of sequences.
A sequence is a suite of literals (not-compressed bytes),
followed by a match copy.
-Each sequence starts with a token.
-The token is a one byte value, separated into two 4-bits fields.
+Each sequence starts with a `token`.
+The `token` is a one-byte value, separated into two 4-bit fields.
Therefore each field ranges from 0 to 15.
@@ -42,46 +42,46 @@ If it is 15, then we need to add some more bytes to indicate the full length.
Each additional byte then represents a value from 0 to 255,
which is added to the previous value to produce a total length.
When the byte value is 255, another byte is output.
-There can be any number of bytes following the token. There is no "size limit".
+There can be any number of bytes following `token`. There is no "size limit".
(Side note : this is why a non-compressible input block is expanded by 0.4%).
-Example 1 : A length of 48 will be represented as :
+Example 1 : A literal length of 48 will be represented as :
- 15 : value for the 4-bits High field
- 33 : (=48-15) remaining length to reach 48
-Example 2 : A length of 280 will be represented as :
+Example 2 : A literal length of 280 will be represented as :
- 15 : value for the 4-bits High field
- 255 : following byte is maxed, since 280-15 >= 255
- 10 : (=280 - 15 - 255) remaining length to reach 280
-Example 3 : A length of 15 will be represented as :
+Example 3 : A literal length of 15 will be represented as :
- 15 : value for the 4-bits High field
- 0 : (=15-15) yes, the zero must be output
-Following the token and optional length bytes, are the literals themselves.
+Following `token` and optional length bytes are the literals themselves.
They are exactly as numerous as previously decoded (length of literals).
It's possible that there are zero literals.
Following the literals is the match copy operation.
-It starts by the offset.
+It starts by the `offset`.
This is a 2-byte value, in little-endian format
(the 1st byte is the "low" byte, the 2nd one is the "high" byte).
-The offset represents the position of the match to be copied from.
+The `offset` represents the position of the match to be copied from.
1 means "current position - 1 byte".
-The maximum offset value is 65535, 65536 cannot be coded.
+The maximum `offset` value is 65535, 65536 cannot be coded.
Note that 0 is an invalid value, not used.
-Then we need to extract the match length.
+Then we need to extract the `matchlength`.
For this, we use the second token field, the low 4-bits.
Value, obviously, ranges from 0 to 15.
However here, 0 means that the copy operation will be minimal.
-The minimum length of a match, called minmatch, is 4.
+The minimum length of a match, called `minmatch`, is 4.
As a consequence, a 0 value means 4 bytes, and a value of 15 means 19+ bytes.
Similar to literal length, on reaching the highest possible value (15),
we output additional bytes, one at a time, with values ranging from 0 to 255.
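To make the length scheme above concrete, here is a minimal sketch (not the library's actual decoder) of reading a length that starts as a 4-bit token field; next_byte() is a hypothetical helper returning the next input byte.

    /* Sketch only: decode a literal or match length from its 4-bit token field. */
    static unsigned decode_length(unsigned fourBitField)
    {
        unsigned length = fourBitField;
        if (fourBitField == 15) {        /* field is saturated: more bytes follow */
            unsigned b;
            do {
                b = next_byte();         /* each extra byte adds 0..255 */
                length += b;
            } while (b == 255);          /* 255 means yet another byte follows */
        }
        return length;                   /* for matches, add minmatch (4) afterwards */
    }

With this sketch, the field 15 followed by bytes 255 and 10 yields 15+255+10 = 280, matching Example 2 above.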
@@ -90,18 +90,18 @@ A 255 value means there is another byte to read and add.
There is no limit to the number of optional bytes that can be output this way.
(This points towards a maximum achievable compression ratio of about 250).
-Decoding the matchlength reaches the end of current sequence.
+Decoding the `matchlength` reaches the end of the current sequence.
Next byte will be the start of another sequence.
But before moving to next sequence,
it's time to use the decoded match position and length.
-The decoder copies matchlength bytes from match position to current position.
+The decoder copies `matchlength` bytes from the match position to the current position.
-In some cases, matchlength is larger than offset.
-Therefore, match pos + match length > current pos,
+In some cases, `matchlength` is larger than `offset`.
+Therefore, `match_pos + matchlength > current_pos`,
which means that later bytes to copy are not yet decoded.
This is called an "overlap match", and must be handled with special care.
-The most common case is an offset of 1,
-meaning the last byte is repeated matchlength times.
+A common case is an offset of 1,
+meaning the last byte is repeated `matchlength` times.
Parsing restrictions
@@ -109,15 +109,28 @@ Parsing restrictions
There are specific parsing rules to respect in order to remain compatible
with assumptions made by the decoder :
-1. The last 5 bytes are always literals
+1. The last 5 bytes are always literals. In other words, the last five bytes
+ from the uncompressed input (or all bytes, if the input has less than five
+ bytes) must be encoded as literals on behalf of the last sequence.
+ The last sequence is incomplete, and stops right after the literals.
2. The last match must start at least 12 bytes before end of block.
- Consequently, a block with less than 13 bytes cannot be compressed.
+ The last match is part of the penultimate sequence,
+ since the last sequence stops right after literals.
+ Note that, as a consequence, blocks < 13 bytes cannot be compressed.
These rules are in place to ensure that the decoder
-will never read beyond the input buffer, nor write beyond the output buffer.
-
-Note that the last sequence is also incomplete,
-and stops right after literals.
+can speculatively execute copy instructions
+without ever reading or writing beyond the provided I/O buffers.
+
+1. To copy literals from a non-last sequence, an 8-byte copy instruction
+ can always be safely issued (without reading past the input),
+ because literals are followed by a 2-byte offset,
+ and the last sequence is at least 1+5 bytes long.
+2. Similarly, a match operation can speculatively copy up to 12 bytes
+ while remaining within output buffer boundaries.
+
+Empty inputs can be represented with a zero byte,
+interpreted as a token without literals and without a match.
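To illustrate why overlap matches need care, and why the restrictions above make speculative wide copies safe, a byte-by-byte match copy such as the sketch below is always correct; the library's optimized 8/16-byte copy loops are only valid because the guarantees above leave enough room. This is a simplified sketch, not the actual implementation.

    /* Sketch: copy matchLength bytes from (op - offset) to op.
     * Correct even when offset < matchLength (overlap); e.g. offset==1
     * repeats the previous byte matchLength times. */
    static void copy_match(unsigned char* op, size_t offset, size_t matchLength)
    {
        const unsigned char* match = op - offset;
        while (matchLength--) *op++ = *match++;
    }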
Additional notes
diff --git a/doc/lz4_manual.html b/doc/lz4_manual.html
index f8639fe..ddd2724 100644
--- a/doc/lz4_manual.html
+++ b/doc/lz4_manual.html
@@ -126,15 +126,17 @@ int LZ4_compress_fast_extState (void* state, const char* src, char* dst, int src
<pre><b>int LZ4_decompress_fast (const char* src, char* dst, int originalSize);
</b><p>This function is a bit faster than LZ4_decompress_safe(),
-but doesn't provide any security guarantee.
+but it may misbehave on malformed input because it doesn't perform full validation of compressed data.
originalSize : is the uncompressed size to regenerate
Destination buffer must be already allocated, and its size must be >= 'originalSize' bytes.
return : number of bytes read from source buffer (== compressed size).
If the source stream is detected malformed, the function stops decoding and returns a negative result.
- note : This function respects memory boundaries for *properly formed* compressed data.
- However, it does not provide any protection against malicious input.
- It also doesn't know 'src' size, and implies it's >= compressed size.
- Use this function in trusted environment **only**.
+ note : This function is only usable if the originalSize of uncompressed data is known in advance.
+ The caller should also check that all the compressed input has been consumed properly,
+ i.e. that the return value matches the size of the buffer with compressed input.
+ The function never writes past the output buffer. However, since it doesn't know its 'src' size,
+ it may read past the intended input. Also, because match offsets are not validated during decoding,
+ reads from 'src' may underflow. Use this function in trusted environment **only**.
</p></pre><BR>
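A minimal usage sketch of the check recommended above (buffer and size names are placeholders; trusted input assumed):

    /* Sketch: decompress a block whose original size is known, then verify
     * that exactly compressedSize bytes were consumed. Trusted input only. */
    int const readSize = LZ4_decompress_fast(compressedBuf, decompressedBuf, originalSize);
    if (readSize < 0 || (size_t)readSize != compressedSize) {
        /* malformed or truncated input : reject the block */
    }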
<pre><b>int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity);
diff --git a/doc/lz4frame_manual.html b/doc/lz4frame_manual.html
index 459bac8..53ea7eb 100644
--- a/doc/lz4frame_manual.html
+++ b/doc/lz4frame_manual.html
@@ -18,6 +18,7 @@
<li><a href="#Chapter8">Compression</a></li>
<li><a href="#Chapter9">Decompression functions</a></li>
<li><a href="#Chapter10">Streaming decompression functions</a></li>
+<li><a href="#Chapter11">Bulk processing dictionary API</a></li>
</ol>
<hr>
<a name="Chapter1"></a><h2>Introduction</h2><pre>
@@ -89,12 +90,13 @@
<pre><b>typedef struct {
LZ4F_frameInfo_t frameInfo;
- int compressionLevel; </b>/* 0: default (fast mode); values > LZ4HC_CLEVEL_MAX count as LZ4HC_CLEVEL_MAX; values < 0 trigger "fast acceleration" */<b>
- unsigned autoFlush; </b>/* 1 == always flush, to reduce usage of internal buffers */<b>
- unsigned reserved[4]; </b>/* must be zero for forward compatibility */<b>
+ int compressionLevel; </b>/* 0: default (fast mode); values > LZ4HC_CLEVEL_MAX count as LZ4HC_CLEVEL_MAX; values < 0 trigger "fast acceleration" */<b>
+ unsigned autoFlush; </b>/* 1: always flush, to reduce usage of internal buffers */<b>
+ unsigned favorDecSpeed; </b>/* 1: parser favors decompression speed vs compression ratio. Only works for high compression modes (>= LZ4HC_CLEVEL_OPT_MIN) */ /* >= v1.8.2 */<b>
+ unsigned reserved[3]; </b>/* must be zero for forward compatibility */<b>
} LZ4F_preferences_t;
</b><p> makes it possible to supply detailed compression parameters to the stream interface.
- It's not required to set all fields, as long as the structure was initially memset() to zero.
+ Structure is presumed initially memset() to zero, representing default settings.
All reserved fields must be set to zero.
</p></pre><BR>
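A sketch of how a caller might fill the updated structure, consistent with the zero-initialization requirement (field values are illustrative; assumes lz4frame.h and string.h are included):

    /* Sketch: zero-init the preferences, then set only the fields of interest. */
    LZ4F_preferences_t prefs;
    memset(&prefs, 0, sizeof(prefs));   /* reserved fields stay zero */
    prefs.compressionLevel = 9;         /* HC mode */
    prefs.autoFlush        = 1;
    prefs.favorDecSpeed    = 1;         /* only effective at levels >= LZ4HC_CLEVEL_OPT_MIN */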
@@ -293,5 +295,58 @@ LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx);
and start a new one using same context resources.
</p></pre><BR>
+<pre><b>typedef enum { LZ4F_LIST_ERRORS(LZ4F_GENERATE_ENUM) } LZ4F_errorCodes;
+</b></pre><BR>
+<a name="Chapter11"></a><h2>Bulk processing dictionary API</h2><pre></pre>
+
+<pre><b>LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize);
+LZ4FLIB_STATIC_API void LZ4F_freeCDict(LZ4F_CDict* CDict);
+</b><p> When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
+ LZ4_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
+ LZ4_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
+ `dictBuffer` can be released after LZ4_CDict creation, since its content is copied within CDict
+</p></pre><BR>
+
+<pre><b>LZ4FLIB_STATIC_API size_t LZ4F_compressFrame_usingCDict(
+ LZ4F_cctx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const LZ4F_CDict* cdict,
+ const LZ4F_preferences_t* preferencesPtr);
+</b><p> Compress an entire srcBuffer into a valid LZ4 frame using a digested Dictionary.
+ cctx must point to a context created by LZ4F_createCompressionContext().
+ If cdict==NULL, compress without a dictionary.
+ dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
+ If this condition is not respected, function will fail (@return an errorCode).
+ The LZ4F_preferences_t structure is optional : you may provide NULL as argument,
+ but it's not recommended, as it's the only way to provide dictID in the frame header.
+ @return : number of bytes written into dstBuffer.
+ or an error code if it fails (can be tested using LZ4F_isError())
+</p></pre><BR>
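A possible calling sequence for the dictionary API described above (error handling abbreviated; dictBuf, dictLen and the I/O buffers are assumptions):

    /* Sketch: compress an input against a digested dictionary. */
    LZ4F_CDict* const cdict = LZ4F_createCDict(dictBuf, dictLen);
    LZ4F_cctx* cctx = NULL;
    LZ4F_createCompressionContext(&cctx, LZ4F_VERSION);
    {   size_t const r = LZ4F_compressFrame_usingCDict(cctx, dst, dstCapacity,
                                                       src, srcSize, cdict, NULL);
        if (LZ4F_isError(r)) { /* handle error */ }
    }
    LZ4F_freeCompressionContext(cctx);
    LZ4F_freeCDict(cdict);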
+
+<pre><b>LZ4FLIB_STATIC_API size_t LZ4F_compressBegin_usingCDict(
+ LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const LZ4F_CDict* cdict,
+ const LZ4F_preferences_t* prefsPtr);
+</b><p> Inits streaming dictionary compression, and writes the frame header into dstBuffer.
+ dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
+ `prefsPtr` is optional : you may provide NULL as argument,
+ however, it's the only way to provide dictID in the frame header.
+ @return : number of bytes written into dstBuffer for the header,
+ or an error code (which can be tested using LZ4F_isError())
+</p></pre><BR>
+
+<pre><b>LZ4FLIB_STATIC_API size_t LZ4F_decompress_usingDict(
+ LZ4F_dctx* dctxPtr,
+ void* dstBuffer, size_t* dstSizePtr,
+ const void* srcBuffer, size_t* srcSizePtr,
+ const void* dict, size_t dictSize,
+ const LZ4F_decompressOptions_t* decompressOptionsPtr);
+</b><p> Same as LZ4F_decompress(), using a predefined dictionary.
+ Dictionary is used "in place", without any preprocessing.
+ It must remain accessible throughout the entire frame decoding.
+</p></pre><BR>
+
</html>
</body>
diff --git a/examples/frameCompress.c b/examples/frameCompress.c
index 972f716..9bfea48 100644
--- a/examples/frameCompress.c
+++ b/examples/frameCompress.c
@@ -21,7 +21,8 @@ static const LZ4F_preferences_t kPrefs = {
0 /* unknown content size */, 0 /* no dictID */ , LZ4F_noBlockChecksum },
0, /* compression level; 0 == default */
0, /* autoflush */
- { 0, 0, 0, 0 }, /* reserved, must be set to 0 */
+ 0, /* favor decompression speed */
+ { 0, 0, 0 }, /* reserved, must be set to 0 */
};
diff --git a/lib/Makefile b/lib/Makefile
index 7b31239..bb45582 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -42,6 +42,7 @@ LIBVER_MINOR := $(shell echo $(LIBVER_MINOR_SCRIPT))
LIBVER_PATCH := $(shell echo $(LIBVER_PATCH_SCRIPT))
LIBVER := $(shell echo $(LIBVER_SCRIPT))
+BUILD_SHARED:=yes
BUILD_STATIC:=yes
CPPFLAGS+= -DXXH_NAMESPACE=LZ4_
@@ -92,6 +93,7 @@ ifeq ($(BUILD_STATIC),yes) # can be disabled on command line
endif
$(LIBLZ4): $(SRCFILES)
+ifeq ($(BUILD_SHARED),yes) # can be disabled on command line
@echo compiling dynamic library $(LIBVER)
ifneq (,$(filter Windows%,$(OS)))
@$(CC) $(FLAGS) -DLZ4_DLL_EXPORT=1 -shared $^ -o dll\$@.dll
@@ -102,6 +104,7 @@ else
@ln -sf $@ liblz4.$(SHARED_EXT_MAJOR)
@ln -sf $@ liblz4.$(SHARED_EXT)
endif
+endif
liblz4: $(LIBLZ4)
@@ -163,9 +166,11 @@ ifeq ($(BUILD_STATIC),yes)
@$(INSTALL_DATA) liblz4.a $(DESTDIR)$(LIBDIR)/liblz4.a
@$(INSTALL_DATA) lz4frame_static.h $(DESTDIR)$(INCLUDEDIR)/lz4frame_static.h
endif
+ifeq ($(BUILD_SHARED),yes)
@$(INSTALL_PROGRAM) liblz4.$(SHARED_EXT_VER) $(DESTDIR)$(LIBDIR)
@ln -sf liblz4.$(SHARED_EXT_VER) $(DESTDIR)$(LIBDIR)/liblz4.$(SHARED_EXT_MAJOR)
@ln -sf liblz4.$(SHARED_EXT_VER) $(DESTDIR)$(LIBDIR)/liblz4.$(SHARED_EXT)
+endif
@echo Installing headers in $(INCLUDEDIR)
@$(INSTALL_DATA) lz4.h $(DESTDIR)$(INCLUDEDIR)/lz4.h
@$(INSTALL_DATA) lz4hc.h $(DESTDIR)$(INCLUDEDIR)/lz4hc.h
@@ -181,6 +186,7 @@ uninstall:
@$(RM) $(DESTDIR)$(INCLUDEDIR)/lz4.h
@$(RM) $(DESTDIR)$(INCLUDEDIR)/lz4hc.h
@$(RM) $(DESTDIR)$(INCLUDEDIR)/lz4frame.h
+ @$(RM) $(DESTDIR)$(INCLUDEDIR)/lz4frame_static.h
@echo lz4 libraries successfully uninstalled
endif
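Given the "can be disabled on command line" comments above, a static-only build could presumably be produced with an invocation such as `make BUILD_SHARED=no` from the lib/ directory, mirroring the existing BUILD_STATIC switch; this command is an assumption based on the variable introduced in this patch.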
diff --git a/lib/lz4.c b/lib/lz4.c
index 916acf0..1e0d8e9 100644
--- a/lib/lz4.c
+++ b/lib/lz4.c
@@ -162,11 +162,11 @@
* Memory routines
**************************************/
#include <stdlib.h> /* malloc, calloc, free */
-#define ALLOC(s) malloc(s)
+#define ALLOC(s) malloc(s)
#define ALLOC_AND_ZERO(s) calloc(1,s)
-#define FREEMEM free
+#define FREEMEM(p) free(p)
#include <string.h> /* memset, memcpy */
-#define MEM_INIT memset
+#define MEM_INIT(p,v,s) memset((p),(v),(s))
/*-************************************
@@ -454,7 +454,7 @@ static const U32 LZ4_skipTrigger = 6; /* Increase this value ==> compression ru
/*-************************************
* Local Structures and types
**************************************/
-typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
+typedef enum { notLimited = 0, limitedOutput = 1, fillOutput = 2 } limitedOutput_directive;
typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
/**
@@ -638,6 +638,7 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
const char* const source,
char* const dest,
const int inputSize,
+ int *inputConsumed, /* only written when outputLimited == fillOutput */
const int maxOutputSize,
const limitedOutput_directive outputLimited,
const tableType_t tableType,
@@ -680,6 +681,7 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, tableType=%u", inputSize, tableType);
/* Init conditions */
+ if (outputLimited == fillOutput && maxOutputSize < 1) return 0; /* Impossible to store anything */
if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported inputSize, too large (or negative) */
if (tableType==byPtr) assert(dictDirective==noDict); /* only supported use case with byPtr */
assert(acceleration >= 1);
@@ -777,9 +779,10 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
forwardH = LZ4_hashPosition(forwardIp, tableType);
LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
- if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) continue; /* match outside of valid area */
- if ((tableType != byU16) && (current - matchIndex > MAX_DISTANCE)) continue; /* too far - note: works even if matchIndex overflows */
- if (tableType == byU16) assert((current - matchIndex) <= MAX_DISTANCE); /* too_far presumed impossible with byU16 */
+ if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) continue; /* match outside of valid area */
+ assert(matchIndex < current);
+ if ((tableType != byU16) && (matchIndex+MAX_DISTANCE < current)) continue; /* too far */
+ if (tableType == byU16) assert((current - matchIndex) <= MAX_DISTANCE); /* too_far presumed impossible with byU16 */
if (LZ4_read32(match) == LZ4_read32(ip)) {
if (maybe_extMem) offset = current - matchIndex;
@@ -795,9 +798,14 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
/* Encode Literals */
{ unsigned const litLength = (unsigned)(ip - anchor);
token = op++;
- if ((outputLimited) && /* Check output buffer overflow */
+ if ((outputLimited == limitedOutput) && /* Check output buffer overflow */
(unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
return 0;
+ if ((outputLimited == fillOutput) &&
+ (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {
+ op--;
+ goto _last_literals;
+ }
if (litLength >= RUN_MASK) {
int len = (int)litLength-RUN_MASK;
*token = (RUN_MASK<<ML_BITS);
@@ -822,6 +830,13 @@ _next_match:
* - token and *token : position to write 4-bits for match length; higher 4-bits for literal length supposed already written
*/
+ if ((outputLimited == fillOutput) &&
+ (op + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit)) {
+ /* the match was too close to the end, rewind and go to last literals */
+ op = token;
+ goto _last_literals;
+ }
+
/* Encode Offset */
if (maybe_extMem) { /* static test */
DEBUGLOG(6, " with offset=%u (ext if > %i)", offset, (int)(ip - (const BYTE*)source));
@@ -855,9 +870,17 @@ _next_match:
DEBUGLOG(6, " with matchLength=%u", matchCode+MINMATCH);
}
- if ( outputLimited && /* Check output buffer overflow */
- (unlikely(op + (1 + LASTLITERALS) + (matchCode>>8) > olimit)) )
- return 0;
+ if ((outputLimited) && /* Check output buffer overflow */
+ (unlikely(op + (1 + LASTLITERALS) + (matchCode>>8) > olimit)) ) {
+ if (outputLimited == limitedOutput)
+ return 0;
+ if (outputLimited == fillOutput) {
+ /* Match description too long : reduce it */
+ U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 2 - 1 - LASTLITERALS) * 255;
+ ip -= matchCode - newMatchCode;
+ matchCode = newMatchCode;
+ }
+ }
if (matchCode >= ML_MASK) {
*token += ML_MASK;
matchCode -= ML_MASK;
@@ -919,8 +942,9 @@ _next_match:
match = base + matchIndex;
}
LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
+ assert(matchIndex < current);
if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1)
- && ((tableType==byU16) ? 1 : (current - matchIndex <= MAX_DISTANCE))
+ && ((tableType==byU16) ? 1 : (matchIndex+MAX_DISTANCE >= current))
&& (LZ4_read32(match) == LZ4_read32(ip)) ) {
token=op++;
*token=0;
@@ -938,10 +962,17 @@ _next_match:
_last_literals:
/* Encode Last Literals */
- { size_t const lastRun = (size_t)(iend - anchor);
+ { size_t lastRun = (size_t)(iend - anchor);
if ( (outputLimited) && /* Check output buffer overflow */
- ((op - (BYTE*)dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize) )
- return 0;
+ (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) {
+ if (outputLimited == fillOutput) {
+ /* adapt lastRun to fill 'dst' */
+ lastRun = (olimit-op) - 1;
+ lastRun -= (lastRun+240)/255;
+ }
+ if (outputLimited == limitedOutput)
+ return 0;
+ }
if (lastRun >= RUN_MASK) {
size_t accumulator = lastRun - RUN_MASK;
*op++ = RUN_MASK << ML_BITS;
@@ -951,9 +982,13 @@ _last_literals:
*op++ = (BYTE)(lastRun<<ML_BITS);
}
memcpy(op, anchor, lastRun);
+ ip = anchor + lastRun;
op += lastRun;
}
+ if (outputLimited == fillOutput) {
+ *inputConsumed = (int) (((const char*)ip)-source);
+ }
return (int)(((char*)op) - dest);
}
@@ -965,17 +1000,17 @@ int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int
LZ4_resetStream((LZ4_stream_t*)state);
if (maxOutputSize >= LZ4_compressBound(inputSize)) {
if (inputSize < LZ4_64Klimit) {
- return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
} else {
const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > MAX_DISTANCE)) ? byPtr : byU32;
- return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
}
} else {
if (inputSize < LZ4_64Klimit) {;
- return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
} else {
const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > MAX_DISTANCE)) ? byPtr : byU32;
- return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
}
}
}
@@ -999,28 +1034,28 @@ int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst
const tableType_t tableType = byU16;
LZ4_prepareTable(ctx, srcSize, tableType);
if (ctx->currentOffset) {
- return LZ4_compress_generic(ctx, src, dst, srcSize, 0, notLimited, tableType, noDict, dictSmall, acceleration);
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration);
} else {
- return LZ4_compress_generic(ctx, src, dst, srcSize, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
}
} else {
const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > MAX_DISTANCE)) ? byPtr : byU32;
LZ4_prepareTable(ctx, srcSize, tableType);
- return LZ4_compress_generic(ctx, src, dst, srcSize, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
}
} else {
if (srcSize < LZ4_64Klimit) {
const tableType_t tableType = byU16;
LZ4_prepareTable(ctx, srcSize, tableType);
if (ctx->currentOffset) {
- return LZ4_compress_generic(ctx, src, dst, srcSize, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration);
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration);
} else {
- return LZ4_compress_generic(ctx, src, dst, srcSize, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
}
} else {
const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > MAX_DISTANCE)) ? byPtr : byU32;
LZ4_prepareTable(ctx, srcSize, tableType);
- return LZ4_compress_generic(ctx, src, dst, srcSize, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
}
}
}
@@ -1059,172 +1094,15 @@ int LZ4_compress_fast_force(const char* source, char* dest, int inputSize, int m
LZ4_resetStream(&ctx);
if (inputSize < LZ4_64Klimit)
- return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
+ return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
else
- return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, sizeof(void*)==8 ? byU32 : byPtr, noDict, noDictIssue, acceleration);
-}
-
-
-/*-******************************
-* *_destSize() variant
-********************************/
-
-static int LZ4_compress_destSize_generic(
- LZ4_stream_t_internal* const ctx,
- const char* const src,
- char* const dst,
- int* const srcSizePtr,
- const int targetDstSize,
- const tableType_t tableType)
-{
- const BYTE* ip = (const BYTE*) src;
- const BYTE* base = (const BYTE*) src;
- const BYTE* lowLimit = (const BYTE*) src;
- const BYTE* anchor = ip;
- const BYTE* const iend = ip + *srcSizePtr;
- const BYTE* const mflimit = iend - MFLIMIT;
- const BYTE* const matchlimit = iend - LASTLITERALS;
-
- BYTE* op = (BYTE*) dst;
- BYTE* const oend = op + targetDstSize;
- BYTE* const oMaxLit = op + targetDstSize - 2 /* offset */ - 8 /* because 8+MINMATCH==MFLIMIT */ - 1 /* token */;
- BYTE* const oMaxMatch = op + targetDstSize - (LASTLITERALS + 1 /* token */);
- BYTE* const oMaxSeq = oMaxLit - 1 /* token */;
-
- U32 forwardH;
-
-
- /* Init conditions */
- if (targetDstSize < 1) return 0; /* Impossible to store anything */
- if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size, too large (or negative) */
- if ((tableType == byU16) && (*srcSizePtr>=LZ4_64Klimit)) return 0; /* Size too large (not within 64K limit) */
- if (*srcSizePtr<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
-
- /* First Byte */
- *srcSizePtr = 0;
- LZ4_putPosition(ip, ctx->hashTable, tableType, base);
- ip++; forwardH = LZ4_hashPosition(ip, tableType);
-
- /* Main Loop */
- for ( ; ; ) {
- const BYTE* match;
- BYTE* token;
-
- /* Find a match */
- { const BYTE* forwardIp = ip;
- unsigned step = 1;
- unsigned searchMatchNb = 1 << LZ4_skipTrigger;
-
- do {
- U32 h = forwardH;
- ip = forwardIp;
- forwardIp += step;
- step = (searchMatchNb++ >> LZ4_skipTrigger);
-
- if (unlikely(forwardIp > mflimit)) goto _last_literals;
-
- match = LZ4_getPositionOnHash(h, ctx->hashTable, tableType, base);
- forwardH = LZ4_hashPosition(forwardIp, tableType);
- LZ4_putPositionOnHash(ip, h, ctx->hashTable, tableType, base);
-
- } while ( ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
- || (LZ4_read32(match) != LZ4_read32(ip)) );
- }
-
- /* Catch up */
- while ((ip>anchor) && (match > lowLimit) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
-
- /* Encode Literal length */
- { unsigned litLength = (unsigned)(ip - anchor);
- token = op++;
- if (op + ((litLength+240)/255) + litLength > oMaxLit) {
- /* Not enough space for a last match */
- op--;
- goto _last_literals;
- }
- if (litLength>=RUN_MASK) {
- unsigned len = litLength - RUN_MASK;
- *token=(RUN_MASK<<ML_BITS);
- for(; len >= 255 ; len-=255) *op++ = 255;
- *op++ = (BYTE)len;
- }
- else *token = (BYTE)(litLength<<ML_BITS);
-
- /* Copy Literals */
- LZ4_wildCopy(op, anchor, op+litLength);
- op += litLength;
- }
-
-_next_match:
- /* Encode Offset */
- LZ4_writeLE16(op, (U16)(ip-match)); op+=2;
-
- /* Encode MatchLength */
- { size_t matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
-
- if (op + ((matchLength+240)/255) > oMaxMatch) {
- /* Match description too long : reduce it */
- matchLength = (15-1) + (oMaxMatch-op) * 255;
- }
- ip += MINMATCH + matchLength;
-
- if (matchLength>=ML_MASK) {
- *token += ML_MASK;
- matchLength -= ML_MASK;
- while (matchLength >= 255) { matchLength-=255; *op++ = 255; }
- *op++ = (BYTE)matchLength;
- }
- else *token += (BYTE)(matchLength);
- }
-
- anchor = ip;
-
- /* Test end of block */
- if (ip > mflimit) break;
- if (op > oMaxSeq) break;
-
- /* Fill table */
- LZ4_putPosition(ip-2, ctx->hashTable, tableType, base);
-
- /* Test next position */
- match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
- LZ4_putPosition(ip, ctx->hashTable, tableType, base);
- if ( (match+MAX_DISTANCE>=ip)
- && (LZ4_read32(match)==LZ4_read32(ip)) )
- { token=op++; *token=0; goto _next_match; }
-
- /* Prepare next loop */
- forwardH = LZ4_hashPosition(++ip, tableType);
- }
-
-_last_literals:
- /* Encode Last Literals */
- { size_t lastRunSize = (size_t)(iend - anchor);
- if (op + 1 /* token */ + ((lastRunSize+240)/255) /* litLength */ + lastRunSize /* literals */ > oend) {
- /* adapt lastRunSize to fill 'dst' */
- lastRunSize = (oend-op) - 1;
- lastRunSize -= (lastRunSize+240)/255;
- }
- ip = anchor + lastRunSize;
-
- if (lastRunSize >= RUN_MASK) {
- size_t accumulator = lastRunSize - RUN_MASK;
- *op++ = RUN_MASK << ML_BITS;
- for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
- *op++ = (BYTE) accumulator;
- } else {
- *op++ = (BYTE)(lastRunSize<<ML_BITS);
- }
- memcpy(op, anchor, lastRunSize);
- op += lastRunSize;
- }
-
- /* End */
- *srcSizePtr = (int) (((const char*)ip)-src);
- return (int) (((char*)op)-dst);
+ return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, sizeof(void*)==8 ? byU32 : byPtr, noDict, noDictIssue, acceleration);
}
+/* Note!: This function leaves the stream in an unclean/broken state!
+ * It is not safe to subsequently use the same state with a _fastReset() or
+ * _continue() call without resetting it. */
static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
LZ4_resetStream(state);
@@ -1233,10 +1111,10 @@ static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src,
return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
} else {
if (*srcSizePtr < LZ4_64Klimit) {
- return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, byU16);
+ return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1);
} else {
tableType_t const tableType = ((sizeof(void*)==4) && ((uptrval)src > MAX_DISTANCE)) ? byPtr : byU32;
- return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, tableType);
+ return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, tableType, noDict, noDictIssue, 1);
} }
}
@@ -1305,7 +1183,11 @@ int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);
- LZ4_prepareTable(dict, 0, tableType);
+ /* It's necessary to reset the context,
+ * and not just continue it with prepareTable()
+ * to avoid any risk of generating overflowing matchIndex
+ * when compressing using this dictionary */
+ LZ4_resetStream(LZ4_dict);
/* We always increment the offset by 64 KB, since, if the dict is longer,
* we truncate it to the last 64k, and if it's shorter, we still want to
@@ -1392,9 +1274,9 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, ch
/* prefix mode : source data follows dictionary */
if (dictEnd == (const BYTE*)source) {
if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
- return LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);
+ return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);
else
- return LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);
+ return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);
}
/* external dictionary mode */
@@ -1412,15 +1294,15 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, ch
* so that the compression loop is only looking into one table.
*/
memcpy(streamPtr, streamPtr->dictCtx, sizeof(LZ4_stream_t));
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
} else {
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);
}
} else {
if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration);
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration);
} else {
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
}
}
streamPtr->dictionary = (const BYTE*)source;
@@ -1439,9 +1321,9 @@ int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char*
LZ4_renormDictT(streamPtr, srcSize);
if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
- result = LZ4_compress_generic(streamPtr, source, dest, srcSize, 0, notLimited, byU32, usingExtDict, dictSmall, 1);
+ result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1);
} else {
- result = LZ4_compress_generic(streamPtr, source, dest, srcSize, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
+ result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
}
streamPtr->dictionary = (const BYTE*)source;
@@ -1521,6 +1403,7 @@ LZ4_FORCE_INLINE int LZ4_decompress_generic(
if ((partialDecoding) && (oexit > oend-MFLIMIT)) oexit = oend-MFLIMIT; /* targetOutputSize too high => just decode everything */
if ((endOnInput) && (unlikely(outputSize==0))) return ((srcSize==1) && (*ip==0)) ? 0 : -1; /* Empty output buffer */
if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1);
+ if ((endOnInput) && unlikely(srcSize==0)) return -1;
/* Main Loop : decode sequences */
while (1) {
@@ -1530,13 +1413,17 @@ LZ4_FORCE_INLINE int LZ4_decompress_generic(
unsigned const token = *ip++;
+ assert(!endOnInput || ip <= iend); /* ip < iend before the increment */
/* shortcut for common case :
* in most circumstances, we expect to decode small matches (<= 18 bytes) separated by few literals (<= 14 bytes).
* this shortcut was tested on x86 and x64, where it improves decoding speed.
- * it has not yet been benchmarked on ARM, Power, mips, etc. */
- if (((ip + 14 /*maxLL*/ + 2 /*offset*/ <= iend)
- & (op + 14 /*maxLL*/ + 18 /*maxML*/ <= oend))
- & ((token < (15<<ML_BITS)) & ((token & ML_MASK) != 15)) ) {
+ * it has not yet been benchmarked on ARM, Power, mips, etc.
+ * NOTE: The loop begins with a read, so we must have one byte left at the end. */
+ if (endOnInput
+ && ((ip + 14 /*maxLL*/ + 2 /*offset*/ < iend)
+ & (op + 14 /*maxLL*/ + 18 /*maxML*/ <= oend)
+ & (token < (15<<ML_BITS))
+ & ((token & ML_MASK) != 15) ) ) {
size_t const ll = token >> ML_BITS;
size_t const off = LZ4_readLE16(ip+ll);
const BYTE* const matchPtr = op + ll - off; /* pointer underflow risk ? */
@@ -1554,6 +1441,7 @@ LZ4_FORCE_INLINE int LZ4_decompress_generic(
/* decode literal length */
if ((length=(token>>ML_BITS)) == RUN_MASK) {
unsigned s;
+ if (unlikely(endOnInput ? ip >= iend-RUN_MASK : 0)) goto _output_error; /* overflow detection */
do {
s = *ip++;
length += s;
diff --git a/lib/lz4.h b/lib/lz4.h
index 0dfa19e..db6353c 100644
--- a/lib/lz4.h
+++ b/lib/lz4.h
@@ -206,15 +206,17 @@ LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePt
/*!
LZ4_decompress_fast() : **unsafe!**
This function is a bit faster than LZ4_decompress_safe(),
-but doesn't provide any security guarantee.
+but it may misbehave on malformed input because it doesn't perform full validation of compressed data.
originalSize : is the uncompressed size to regenerate
Destination buffer must be already allocated, and its size must be >= 'originalSize' bytes.
return : number of bytes read from source buffer (== compressed size).
If the source stream is detected malformed, the function stops decoding and returns a negative result.
- note : This function respects memory boundaries for *properly formed* compressed data.
- However, it does not provide any protection against malicious input.
- It also doesn't know 'src' size, and implies it's >= compressed size.
- Use this function in trusted environment **only**.
+ note : This function is only usable if the originalSize of uncompressed data is known in advance.
+ The caller should also check that all the compressed input has been consumed properly,
+ i.e. that the return value matches the size of the buffer with compressed input.
+ The function never writes past the output buffer. However, since it doesn't know its 'src' size,
+ it may read past the intended input. Also, because match offsets are not validated during decoding,
+ reads from 'src' may underflow. Use this function in trusted environment **only**.
*/
LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize);
diff --git a/lib/lz4frame.c b/lib/lz4frame.c
index 507e4fe..6cf2a06 100644
--- a/lib/lz4frame.c
+++ b/lib/lz4frame.c
@@ -73,7 +73,8 @@ You can contact the author at :
/*-************************************
* Includes
**************************************/
-#include "lz4frame_static.h"
+#define LZ4F_STATIC_LINKING_ONLY
+#include "lz4frame.h"
#define LZ4_STATIC_LINKING_ONLY
#include "lz4.h"
#define LZ4_HC_STATIC_LINKING_ONLY
@@ -456,7 +457,7 @@ struct LZ4F_CDict_s {
LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize)
{
const char* dictStart = (const char*)dictBuffer;
- LZ4F_CDict* cdict = (LZ4F_CDict*) malloc(sizeof(*cdict));
+ LZ4F_CDict* cdict = (LZ4F_CDict*) ALLOC(sizeof(*cdict));
if (!cdict) return NULL;
if (dictSize > 64 KB) {
dictStart += dictSize - 64 KB;
@@ -470,9 +471,8 @@ LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize)
return NULL;
}
memcpy(cdict->dictContent, dictStart, dictSize);
- LZ4_resetStream(cdict->fastCtx);
LZ4_loadDict (cdict->fastCtx, (const char*)cdict->dictContent, (int)dictSize);
- LZ4_resetStreamHC(cdict->HCCtx, LZ4HC_CLEVEL_DEFAULT);
+ LZ4_setCompressionLevel(cdict->HCCtx, LZ4HC_CLEVEL_DEFAULT);
LZ4_loadDictHC(cdict->HCCtx, (const char*)cdict->dictContent, (int)dictSize);
return cdict;
}
@@ -527,19 +527,23 @@ LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_compressionContext_t LZ4F_comp
}
-static void LZ4F_applyCDict(void* ctx,
+/**
+ * This function prepares the internal LZ4(HC) stream for a new compression,
+ * resetting the context and attaching the dictionary, if there is one.
+ *
+ * It needs to be called at the beginning of each independent compression
+ * stream (i.e., at the beginning of a frame in blockLinked mode, or at the
+ * beginning of each block in blockIndependent mode).
+ */
+static void LZ4F_initStream(void* ctx,
const LZ4F_CDict* cdict,
int level) {
if (level < LZ4HC_CLEVEL_MIN) {
LZ4_resetStream_fast((LZ4_stream_t *)ctx);
LZ4_attach_dictionary((LZ4_stream_t *)ctx, cdict ? cdict->fastCtx : NULL);
} else {
- if (cdict) {
- memcpy(ctx, cdict->HCCtx, sizeof(*cdict->HCCtx));
- LZ4_setCompressionLevel((LZ4_streamHC_t*)ctx, level);
- } else {
- LZ4_resetStreamHC((LZ4_streamHC_t*)(ctx), level);
- }
+ LZ4_resetStreamHC_fast((LZ4_streamHC_t*)ctx, level);
+ LZ4_attach_HC_dictionary((LZ4_streamHC_t *)ctx, cdict ? cdict->HCCtx : NULL);
}
}
@@ -613,7 +617,10 @@ size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
cctxPtr->cdict = cdict;
if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) {
/* frame init only for blockLinked : blockIndependent will be init at each block */
- LZ4F_applyCDict(cctxPtr->lz4CtxPtr, cdict, cctxPtr->prefs.compressionLevel);
+ LZ4F_initStream(cctxPtr->lz4CtxPtr, cdict, cctxPtr->prefs.compressionLevel);
+ }
+ if (preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN) {
+ LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, (int)preferencesPtr->favorDecSpeed);
}
/* Magic Number */
@@ -708,7 +715,7 @@ static size_t LZ4F_makeBlock(void* dst, const void* src, size_t srcSize,
static int LZ4F_compressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
{
int const acceleration = (level < -1) ? -level : 1;
- LZ4F_applyCDict(ctx, cdict, level);
+ LZ4F_initStream(ctx, cdict, level);
if (cdict) {
return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration);
} else {
@@ -725,11 +732,11 @@ static int LZ4F_compressBlock_continue(void* ctx, const char* src, char* dst, in
static int LZ4F_compressBlockHC(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
{
+ LZ4F_initStream(ctx, cdict, level);
if (cdict) {
- LZ4F_applyCDict(ctx, cdict, level);
return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity);
}
- return LZ4_compress_HC_extStateHC(ctx, src, dst, srcSize, dstCapacity, level);
+ return LZ4_compress_HC_extStateHC_fastReset(ctx, src, dst, srcSize, dstCapacity, level);
}
static int LZ4F_compressBlockHC_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
@@ -1505,11 +1512,19 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
} }
if ((size_t)(dstEnd-dstPtr) >= dctx->maxBlockSize) {
+ const char *dict = (const char *)dctx->dict;
+ size_t dictSize = dctx->dictSize;
+ int decodedSize;
+ if (dict && dictSize > 1 GB) {
+ /* the dictSize param is an int, avoid truncation / sign issues */
+ dict += dictSize - 1 GB;
+ dictSize = 1 GB;
+ }
/* enough capacity in `dst` to decompress directly there */
- int const decodedSize = LZ4_decompress_safe_usingDict(
+ decodedSize = LZ4_decompress_safe_usingDict(
(const char*)selectedIn, (char*)dstPtr,
(int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
- (const char*)dctx->dict, (int)dctx->dictSize);
+ dict, (int)dictSize);
if (decodedSize < 0) return err0r(LZ4F_ERROR_GENERIC); /* decompression failed */
if (dctx->frameInfo.contentChecksumFlag)
XXH32_update(&(dctx->xxh), dstPtr, decodedSize);
@@ -1541,10 +1556,19 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
}
/* Decode block */
- { int const decodedSize = LZ4_decompress_safe_usingDict(
+ {
+ const char *dict = (const char *)dctx->dict;
+ size_t dictSize = dctx->dictSize;
+ int decodedSize;
+ if (dict && dictSize > 1 GB) {
+ /* the dictSize param is an int, avoid truncation / sign issues */
+ dict += dictSize - 1 GB;
+ dictSize = 1 GB;
+ }
+ decodedSize = LZ4_decompress_safe_usingDict(
(const char*)selectedIn, (char*)dctx->tmpOut,
(int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
- (const char*)dctx->dict, (int)dctx->dictSize);
+ dict, (int)dictSize);
if (decodedSize < 0) /* decompression failed */
return err0r(LZ4F_ERROR_decompressionFailed);
if (dctx->frameInfo.contentChecksumFlag)
diff --git a/lib/lz4frame.h b/lib/lz4frame.h
index e80b1a1..fb434ff 100644
--- a/lib/lz4frame.h
+++ b/lib/lz4frame.h
@@ -173,13 +173,14 @@ typedef struct {
/*! LZ4F_preferences_t :
* makes it possible to supply detailed compression parameters to the stream interface.
- * It's not required to set all fields, as long as the structure was initially memset() to zero.
+ * Structure is presumed initially memset() to zero, representing default settings.
* All reserved fields must be set to zero. */
typedef struct {
LZ4F_frameInfo_t frameInfo;
- int compressionLevel; /* 0: default (fast mode); values > LZ4HC_CLEVEL_MAX count as LZ4HC_CLEVEL_MAX; values < 0 trigger "fast acceleration" */
- unsigned autoFlush; /* 1 == always flush, to reduce usage of internal buffers */
- unsigned reserved[4]; /* must be zero for forward compatibility */
+ int compressionLevel; /* 0: default (fast mode); values > LZ4HC_CLEVEL_MAX count as LZ4HC_CLEVEL_MAX; values < 0 trigger "fast acceleration" */
+ unsigned autoFlush; /* 1: always flush, to reduce usage of internal buffers */
+ unsigned favorDecSpeed; /* 1: parser favors decompression speed vs compression ratio. Only works for high compression modes (>= LZ4HC_CLEVEL_OPT_MIN) */ /* >= v1.8.2 */
+ unsigned reserved[3]; /* must be zero for forward compatibility */
} LZ4F_preferences_t;
LZ4FLIB_API int LZ4F_compressionLevel_max(void);
@@ -409,3 +410,123 @@ LZ4FLIB_API void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx); /* always su
#endif
#endif /* LZ4F_H_09782039843 */
+
+#if defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843)
+#define LZ4F_H_STATIC_09782039843
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* These declarations are not stable and may change in the future. They are
+ * therefore only safe to depend on when the caller is statically linked
+ * against the library. To access their declarations, define
+ * LZ4F_STATIC_LINKING_ONLY.
+ *
+ * There is a further protection mechanism where these symbols aren't published
+ * into shared/dynamic libraries. You can override this behavior and force
+ * them to be published by defining LZ4F_PUBLISH_STATIC_FUNCTIONS. Use at
+ * your own risk.
+ */
+#ifdef LZ4F_PUBLISH_STATIC_FUNCTIONS
+#define LZ4FLIB_STATIC_API LZ4FLIB_API
+#else
+#define LZ4FLIB_STATIC_API
+#endif
+
+
+/* --- Error List --- */
+#define LZ4F_LIST_ERRORS(ITEM) \
+ ITEM(OK_NoError) \
+ ITEM(ERROR_GENERIC) \
+ ITEM(ERROR_maxBlockSize_invalid) \
+ ITEM(ERROR_blockMode_invalid) \
+ ITEM(ERROR_contentChecksumFlag_invalid) \
+ ITEM(ERROR_compressionLevel_invalid) \
+ ITEM(ERROR_headerVersion_wrong) \
+ ITEM(ERROR_blockChecksum_invalid) \
+ ITEM(ERROR_reservedFlag_set) \
+ ITEM(ERROR_allocation_failed) \
+ ITEM(ERROR_srcSize_tooLarge) \
+ ITEM(ERROR_dstMaxSize_tooSmall) \
+ ITEM(ERROR_frameHeader_incomplete) \
+ ITEM(ERROR_frameType_unknown) \
+ ITEM(ERROR_frameSize_wrong) \
+ ITEM(ERROR_srcPtr_wrong) \
+ ITEM(ERROR_decompressionFailed) \
+ ITEM(ERROR_headerChecksum_invalid) \
+ ITEM(ERROR_contentChecksum_invalid) \
+ ITEM(ERROR_frameDecoding_alreadyStarted) \
+ ITEM(ERROR_maxCode)
+
+#define LZ4F_GENERATE_ENUM(ENUM) LZ4F_##ENUM,
+
+/* enum list is exposed, to handle specific errors */
+typedef enum { LZ4F_LIST_ERRORS(LZ4F_GENERATE_ENUM) } LZ4F_errorCodes;
+
+LZ4FLIB_STATIC_API LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult);
+
+
+
+/**********************************
+ * Bulk processing dictionary API
+ *********************************/
+typedef struct LZ4F_CDict_s LZ4F_CDict;
+
+/*! LZ4_createCDict() :
+ * When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
+ * LZ4_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
+ * LZ4_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
+ * `dictBuffer` can be released after LZ4_CDict creation, since its content is copied within CDict */
+LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize);
+LZ4FLIB_STATIC_API void LZ4F_freeCDict(LZ4F_CDict* CDict);
+
+
+/*! LZ4_compressFrame_usingCDict() :
+ * Compress an entire srcBuffer into a valid LZ4 frame using a digested Dictionary.
+ * cctx must point to a context created by LZ4F_createCompressionContext().
+ * If cdict==NULL, compress without a dictionary.
+ * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
+ * If this condition is not respected, function will fail (@return an errorCode).
+ * The LZ4F_preferences_t structure is optional : you may provide NULL as argument,
+ * but it's not recommended, as it's the only way to provide dictID in the frame header.
+ * @return : number of bytes written into dstBuffer.
+ * or an error code if it fails (can be tested using LZ4F_isError()) */
+LZ4FLIB_STATIC_API size_t LZ4F_compressFrame_usingCDict(
+ LZ4F_cctx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const LZ4F_CDict* cdict,
+ const LZ4F_preferences_t* preferencesPtr);
+
+
+/*! LZ4F_compressBegin_usingCDict() :
+ * Inits streaming dictionary compression, and writes the frame header into dstBuffer.
+ * dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
+ * `prefsPtr` is optional : you may provide NULL as argument,
+ * however, it's the only way to provide dictID in the frame header.
+ * @return : number of bytes written into dstBuffer for the header,
+ * or an error code (which can be tested using LZ4F_isError()) */
+LZ4FLIB_STATIC_API size_t LZ4F_compressBegin_usingCDict(
+ LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const LZ4F_CDict* cdict,
+ const LZ4F_preferences_t* prefsPtr);
+
+
+/*! LZ4F_decompress_usingDict() :
+ * Same as LZ4F_decompress(), using a predefined dictionary.
+ * Dictionary is used "in place", without any preprocessing.
+ * It must remain accessible throughout the entire frame decoding. */
+LZ4FLIB_STATIC_API size_t LZ4F_decompress_usingDict(
+ LZ4F_dctx* dctxPtr,
+ void* dstBuffer, size_t* dstSizePtr,
+ const void* srcBuffer, size_t* srcSizePtr,
+ const void* dict, size_t dictSize,
+ const LZ4F_decompressOptions_t* decompressOptionsPtr);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843) */
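With this layout, callers that link statically reach the additional declarations by defining the guard macro before the include, as lz4frame_static.h itself now does:

    /* Access the experimental declarations when statically linking. */
    #define LZ4F_STATIC_LINKING_ONLY
    #include "lz4frame.h"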
diff --git a/lib/lz4frame_static.h b/lib/lz4frame_static.h
index be587e6..925a2c5 100644
--- a/lib/lz4frame_static.h
+++ b/lib/lz4frame_static.h
@@ -36,121 +36,12 @@
#ifndef LZ4FRAME_STATIC_H_0398209384
#define LZ4FRAME_STATIC_H_0398209384
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/* lz4frame_static.h should be used solely in the context of static linking.
- * It contains definitions which are not stable and may change in the future.
- * Never use it in the context of DLL linking.
- *
- * Defining LZ4F_PUBLISH_STATIC_FUNCTIONS allows one to override this. Use at
- * your own risk.
+/* The declarations that formerly were made here have been merged into
+ * lz4frame.h, protected by the LZ4F_STATIC_LINKING_ONLY macro. Going forward,
+ * it is recommended to simply include that header directly.
*/
-#ifdef LZ4F_PUBLISH_STATIC_FUNCTIONS
-#define LZ4FLIB_STATIC_API LZ4FLIB_API
-#else
-#define LZ4FLIB_STATIC_API
-#endif
-
-/* --- Dependency --- */
+#define LZ4F_STATIC_LINKING_ONLY
#include "lz4frame.h"
-
-/* --- Error List --- */
-#define LZ4F_LIST_ERRORS(ITEM) \
- ITEM(OK_NoError) \
- ITEM(ERROR_GENERIC) \
- ITEM(ERROR_maxBlockSize_invalid) \
- ITEM(ERROR_blockMode_invalid) \
- ITEM(ERROR_contentChecksumFlag_invalid) \
- ITEM(ERROR_compressionLevel_invalid) \
- ITEM(ERROR_headerVersion_wrong) \
- ITEM(ERROR_blockChecksum_invalid) \
- ITEM(ERROR_reservedFlag_set) \
- ITEM(ERROR_allocation_failed) \
- ITEM(ERROR_srcSize_tooLarge) \
- ITEM(ERROR_dstMaxSize_tooSmall) \
- ITEM(ERROR_frameHeader_incomplete) \
- ITEM(ERROR_frameType_unknown) \
- ITEM(ERROR_frameSize_wrong) \
- ITEM(ERROR_srcPtr_wrong) \
- ITEM(ERROR_decompressionFailed) \
- ITEM(ERROR_headerChecksum_invalid) \
- ITEM(ERROR_contentChecksum_invalid) \
- ITEM(ERROR_frameDecoding_alreadyStarted) \
- ITEM(ERROR_maxCode)
-
-#define LZ4F_GENERATE_ENUM(ENUM) LZ4F_##ENUM,
-
-/* enum list is exposed, to handle specific errors */
-typedef enum { LZ4F_LIST_ERRORS(LZ4F_GENERATE_ENUM) } LZ4F_errorCodes;
-
-LZ4FLIB_STATIC_API LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult);
-
-
-
-/**********************************
- * Bulk processing dictionary API
- *********************************/
-typedef struct LZ4F_CDict_s LZ4F_CDict;
-
-/*! LZ4_createCDict() :
- * When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
- * LZ4_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
- * LZ4_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
- * `dictBuffer` can be released after LZ4_CDict creation, since its content is copied within CDict */
-LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize);
-LZ4FLIB_STATIC_API void LZ4F_freeCDict(LZ4F_CDict* CDict);
-
-
-/*! LZ4_compressFrame_usingCDict() :
- * Compress an entire srcBuffer into a valid LZ4 frame using a digested Dictionary.
- * cctx must point to a context created by LZ4F_createCompressionContext().
- * If cdict==NULL, compress without a dictionary.
- * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
- * If this condition is not respected, function will fail (@return an errorCode).
- * The LZ4F_preferences_t structure is optional : you may provide NULL as argument,
- * but it's not recommended, as it's the only way to provide dictID in the frame header.
- * @return : number of bytes written into dstBuffer.
- * or an error code if it fails (can be tested using LZ4F_isError()) */
-LZ4FLIB_STATIC_API size_t LZ4F_compressFrame_usingCDict(
- LZ4F_cctx* cctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- const LZ4F_CDict* cdict,
- const LZ4F_preferences_t* preferencesPtr);
-
-
-/*! LZ4F_compressBegin_usingCDict() :
- * Inits streaming dictionary compression, and writes the frame header into dstBuffer.
- * dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
- * `prefsPtr` is optional : you may provide NULL as argument,
- * however, it's the only way to provide dictID in the frame header.
- * @return : number of bytes written into dstBuffer for the header,
- * or an error code (which can be tested using LZ4F_isError()) */
-LZ4FLIB_STATIC_API size_t LZ4F_compressBegin_usingCDict(
- LZ4F_cctx* cctx,
- void* dstBuffer, size_t dstCapacity,
- const LZ4F_CDict* cdict,
- const LZ4F_preferences_t* prefsPtr);
-
-
-/*! LZ4F_decompress_usingDict() :
- * Same as LZ4F_decompress(), using a predefined dictionary.
- * Dictionary is used "in place", without any preprocessing.
- * It must remain accessible throughout the entire frame decoding. */
-LZ4FLIB_STATIC_API size_t LZ4F_decompress_usingDict(
- LZ4F_dctx* dctxPtr,
- void* dstBuffer, size_t* dstSizePtr,
- const void* srcBuffer, size_t* srcSizePtr,
- const void* dict, size_t dictSize,
- const LZ4F_decompressOptions_t* decompressOptionsPtr);
-
-
-#if defined (__cplusplus)
-}
-#endif
-
#endif /* LZ4FRAME_STATIC_H_0398209384 */
diff --git a/lib/lz4hc.c b/lib/lz4hc.c
index 111a09b..948d66d 100644
--- a/lib/lz4hc.c
+++ b/lib/lz4hc.c
@@ -79,21 +79,33 @@
static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)); }
+/*=== Enums ===*/
+typedef enum { noDictCtx, usingDictCtx } dictCtx_directive;
/**************************************
* HC Compression
**************************************/
-static void LZ4HC_init (LZ4HC_CCtx_internal* hc4, const BYTE* start)
+static void LZ4HC_clearTables (LZ4HC_CCtx_internal* hc4)
{
MEM_INIT((void*)hc4->hashTable, 0, sizeof(hc4->hashTable));
MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
- hc4->nextToUpdate = 64 KB;
- hc4->base = start - 64 KB;
+}
+
+static void LZ4HC_init (LZ4HC_CCtx_internal* hc4, const BYTE* start)
+{
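+    /* Carry the index space forward instead of wiping the tables :
+     * entries from previous sessions simply fall below the new lowLimit.
+     * A full clear only happens once the accumulated offset gets large
+     * (> 1 GB), keeping indices comfortably within 32-bit range. */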
+ uptrval startingOffset = hc4->end - hc4->base;
+ if (startingOffset > 1 GB) {
+ LZ4HC_clearTables(hc4);
+ startingOffset = 0;
+ }
+ startingOffset += 64 KB;
+ hc4->nextToUpdate = (U32) startingOffset;
+ hc4->base = start - startingOffset;
hc4->end = start;
- hc4->dictBase = start - 64 KB;
- hc4->dictLimit = 64 KB;
- hc4->lowLimit = 64 KB;
+ hc4->dictBase = start - startingOffset;
+ hc4->dictLimit = (U32) startingOffset;
+ hc4->lowLimit = (U32) startingOffset;
}
@@ -187,6 +199,7 @@ LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern)
}
typedef enum { rep_untested, rep_not, rep_confirmed } repeat_state_e;
+typedef enum { favorCompressionRatio=0, favorDecompressionSpeed } HCfavor_e;
LZ4_FORCE_INLINE int
LZ4HC_InsertAndGetWiderMatch (
@@ -198,19 +211,24 @@ LZ4HC_InsertAndGetWiderMatch (
const BYTE** matchpos,
const BYTE** startpos,
const int maxNbAttempts,
- const int patternAnalysis)
+ const int patternAnalysis,
+ const dictCtx_directive dict,
+ const HCfavor_e favorDecSpeed)
{
U16* const chainTable = hc4->chainTable;
U32* const HashTable = hc4->hashTable;
+ const LZ4HC_CCtx_internal * const dictCtx = hc4->dictCtx;
const BYTE* const base = hc4->base;
const U32 dictLimit = hc4->dictLimit;
const BYTE* const lowPrefixPtr = base + dictLimit;
- const U32 lowLimit = (hc4->lowLimit + 64 KB > (U32)(ip-base)) ? hc4->lowLimit : (U32)(ip - base) - MAX_DISTANCE;
+ const U32 ipIndex = (U32)(ip - base);
+ const U32 lowLimit = (hc4->lowLimit + 64 KB > ipIndex) ? hc4->lowLimit : ipIndex - MAX_DISTANCE;
const BYTE* const dictBase = hc4->dictBase;
int const delta = (int)(ip-iLowLimit);
int nbAttempts = maxNbAttempts;
U32 const pattern = LZ4_read32(ip);
U32 matchIndex;
+ U32 dictMatchIndex;
repeat_state_e repeat = rep_untested;
size_t srcPatternLength = 0;
@@ -224,7 +242,10 @@ LZ4HC_InsertAndGetWiderMatch (
while ((matchIndex>=lowLimit) && (nbAttempts)) {
DEBUGLOG(7, "remaining attempts : %i", nbAttempts);
nbAttempts--;
- if (matchIndex >= dictLimit) {
+ assert(matchIndex < ipIndex);
+ if (favorDecSpeed && (ipIndex - matchIndex < 8)) {
+ /* do nothing */
+ } else if (matchIndex >= dictLimit) {
const BYTE* const matchPtr = base + matchIndex;
assert(longest >= 1);
if (LZ4_read16(iLowLimit + longest - 1) == LZ4_read16(matchPtr - delta + longest - 1)) {
@@ -287,6 +308,35 @@ LZ4HC_InsertAndGetWiderMatch (
} } } }
} /* while ((matchIndex>=lowLimit) && (nbAttempts)) */
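+    /* Optionally continue the search inside the attached dictionary context.
+     * Dictionary positions are translated into the current index space
+     * (dictMatchIndex + lowLimit - dictEndOffset), so the usual
+     * MAX_DISTANCE window check keeps applying. */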
+ if (dict == usingDictCtx && nbAttempts && ipIndex - lowLimit < MAX_DISTANCE) {
+ size_t const dictEndOffset = dictCtx->end - dictCtx->base;
+ assert(dictEndOffset <= 1 GB);
+ dictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)];
+ matchIndex = dictMatchIndex + lowLimit - (U32)dictEndOffset;
+ while (ipIndex - matchIndex <= MAX_DISTANCE && nbAttempts--) {
+ const BYTE* const matchPtr = dictCtx->base + dictMatchIndex;
+
+ if (LZ4_read32(matchPtr) == pattern) {
+ int mlt;
+ int back = 0;
+ const BYTE* vLimit = ip + (dictEndOffset - dictMatchIndex);
+ if (vLimit > iHighLimit) vLimit = iHighLimit;
+ mlt = LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
+ back = delta ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->base + dictCtx->dictLimit) : 0;
+ mlt -= back;
+ if (mlt > longest) {
+ longest = mlt;
+ *matchpos = base + matchIndex + back;
+ *startpos = ip + back;
+ }
+ }
+
+ { U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, dictMatchIndex);
+ dictMatchIndex -= nextOffset;
+ matchIndex -= nextOffset;
+ }
+ }
+ }
return longest;
}
@@ -295,13 +345,14 @@ int LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4, /* Index tabl
const BYTE* const ip, const BYTE* const iLimit,
const BYTE** matchpos,
const int maxNbAttempts,
- const int patternAnalysis)
+ const int patternAnalysis,
+ const dictCtx_directive dict)
{
const BYTE* uselessPtr = ip;
/* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
* but this won't be the case here, as we define iLowLimit==ip,
* so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
- return LZ4HC_InsertAndGetWiderMatch(hc4, ip, ip, iLimit, MINMATCH-1, matchpos, &uselessPtr, maxNbAttempts, patternAnalysis);
+ return LZ4HC_InsertAndGetWiderMatch(hc4, ip, ip, iLimit, MINMATCH-1, matchpos, &uselessPtr, maxNbAttempts, patternAnalysis, dict, favorCompressionRatio);
}
@@ -384,14 +435,15 @@ LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
return 0;
}
-static int LZ4HC_compress_hashChain (
+LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
LZ4HC_CCtx_internal* const ctx,
const char* const source,
char* const dest,
int* srcSizePtr,
int const maxOutputSize,
unsigned maxNbAttempts,
- limitedOutput_directive limit
+ const limitedOutput_directive limit,
+ const dictCtx_directive dict
)
{
const int inputSize = *srcSizePtr;
@@ -423,7 +475,7 @@ static int LZ4HC_compress_hashChain (
/* Main Loop */
while (ip <= mflimit) {
- ml = LZ4HC_InsertAndFindBestMatch (ctx, ip, matchlimit, &ref, maxNbAttempts, patternAnalysis);
+ ml = LZ4HC_InsertAndFindBestMatch (ctx, ip, matchlimit, &ref, maxNbAttempts, patternAnalysis, dict);
if (ml<MINMATCH) { ip++; continue; }
/* saved, in case we would skip too much */
@@ -435,7 +487,7 @@ _Search2:
if (ip+ml <= mflimit)
ml2 = LZ4HC_InsertAndGetWiderMatch(ctx,
ip + ml - 2, ip + 0, matchlimit, ml, &ref2, &start2,
- maxNbAttempts, patternAnalysis);
+ maxNbAttempts, patternAnalysis, dict, favorCompressionRatio);
else
ml2 = ml;
@@ -482,7 +534,7 @@ _Search3:
if (start2 + ml2 <= mflimit)
ml3 = LZ4HC_InsertAndGetWiderMatch(ctx,
start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3,
- maxNbAttempts, patternAnalysis);
+ maxNbAttempts, patternAnalysis, dict, favorCompressionRatio);
else
ml3 = ml2;
@@ -602,21 +654,25 @@ _dest_overflow:
return 0;
}
+
static int LZ4HC_compress_optimal( LZ4HC_CCtx_internal* ctx,
const char* const source, char* dst,
int* srcSizePtr, int dstCapacity,
int const nbSearches, size_t sufficient_len,
- limitedOutput_directive limit, int const fullUpdate);
+ const limitedOutput_directive limit, int const fullUpdate,
+ const dictCtx_directive dict,
+ HCfavor_e favorDecSpeed);
-static int LZ4HC_compress_generic (
+LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
LZ4HC_CCtx_internal* const ctx,
const char* const src,
char* const dst,
int* const srcSizePtr,
int const dstCapacity,
int cLevel,
- limitedOutput_directive limit
+ const limitedOutput_directive limit,
+ const dictCtx_directive dict
)
{
typedef enum { lz4hc, lz4opt } lz4hc_strat_e;
@@ -641,6 +697,8 @@ static int LZ4HC_compress_generic (
{ lz4opt,8192, LZ4_OPT_NUM }, /* 12==LZ4HC_CLEVEL_MAX */
};
+ DEBUGLOG(4, "LZ4HC_compress_generic(%p, %p, %d)", ctx, src, *srcSizePtr);
+
if (limit == limitedDestSize && dstCapacity < 1) return 0; /* Impossible to store anything */
if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size (too large or negative) */
@@ -650,25 +708,86 @@ static int LZ4HC_compress_generic (
assert(cLevel >= 0);
assert(cLevel <= LZ4HC_CLEVEL_MAX);
{ cParams_t const cParam = clTable[cLevel];
+ HCfavor_e const favor = ctx->favorDecSpeed ? favorDecompressionSpeed : favorCompressionRatio;
if (cParam.strat == lz4hc)
return LZ4HC_compress_hashChain(ctx,
src, dst, srcSizePtr, dstCapacity,
- cParam.nbSearches, limit);
+ cParam.nbSearches, limit, dict);
assert(cParam.strat == lz4opt);
return LZ4HC_compress_optimal(ctx,
src, dst, srcSizePtr, dstCapacity,
cParam.nbSearches, cParam.targetLength, limit,
- cLevel == LZ4HC_CLEVEL_MAX); /* ultra mode */
+ cLevel == LZ4HC_CLEVEL_MAX, /* ultra mode */
+ dict, favor);
+ }
+}
+
+static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock);
+
+static int LZ4HC_compress_generic_noDictCtx (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const src,
+ char* const dst,
+ int* const srcSizePtr,
+ int const dstCapacity,
+ int cLevel,
+ limitedOutput_directive limit
+ )
+{
+ assert(ctx->dictCtx == NULL);
+ return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, noDictCtx);
+}
+
+static int LZ4HC_compress_generic_dictCtx (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const src,
+ char* const dst,
+ int* const srcSizePtr,
+ int const dstCapacity,
+ int cLevel,
+ limitedOutput_directive limit
+ )
+{
+ const size_t position = ctx->end - ctx->base - ctx->lowLimit;
+ assert(ctx->dictCtx != NULL);
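+    /* Three strategies, depending on how much prefix the working context
+     * already holds :
+     * - >= 64 KB of prefix : the attached dictionary is outside the match
+     *   window anyway, so it is detached and ignored.
+     * - empty context and a reasonably large input : copy the dictionary
+     *   context into the working context once, which is cheaper than doing
+     *   external-dictionary lookups throughout the block.
+     * - otherwise : compress while referencing the dictionary context in place. */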
+ if (position >= 64 KB) {
+ ctx->dictCtx = NULL;
+ return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ } else if (position == 0 && *srcSizePtr > 4 KB) {
+ memcpy(ctx, ctx->dictCtx, sizeof(LZ4HC_CCtx_internal));
+ LZ4HC_setExternalDict(ctx, (const BYTE *)src);
+ ctx->compressionLevel = (short)cLevel;
+ return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ } else {
+ return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, usingDictCtx);
+ }
+}
+
+static int LZ4HC_compress_generic (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const src,
+ char* const dst,
+ int* const srcSizePtr,
+ int const dstCapacity,
+ int cLevel,
+ limitedOutput_directive limit
+ )
+{
+ if (ctx->dictCtx == NULL) {
+ return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ } else {
+ return LZ4HC_compress_generic_dictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
}
}
int LZ4_sizeofStateHC(void) { return sizeof(LZ4_streamHC_t); }
-int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
+int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
{
LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)state)->internal_donotuse;
if (((size_t)(state)&(sizeof(void*)-1)) != 0) return 0; /* Error : state is not aligned for pointers (32 or 64 bits) */
+ LZ4_resetStreamHC_fast((LZ4_streamHC_t*)state, compressionLevel);
LZ4HC_init (ctx, (const BYTE*)src);
if (dstCapacity < LZ4_compressBound(srcSize))
return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, limitedOutput);
@@ -676,10 +795,17 @@ int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int src
return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, noLimit);
}
+int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
+{
+ if (((size_t)(state)&(sizeof(void*)-1)) != 0) return 0; /* Error : state is not aligned for pointers (32 or 64 bits) */
+ LZ4_resetStreamHC ((LZ4_streamHC_t*)state, compressionLevel);
+ return LZ4_compress_HC_extStateHC_fastReset(state, src, dst, srcSize, dstCapacity, compressionLevel);
+}
+
int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
{
#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
- LZ4_streamHC_t* const statePtr = (LZ4_streamHC_t*)malloc(sizeof(LZ4_streamHC_t));
+ LZ4_streamHC_t* const statePtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t));
#else
LZ4_streamHC_t state;
LZ4_streamHC_t* const statePtr = &state;
@@ -696,6 +822,7 @@ int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, in
int LZ4_compress_HC_destSize(void* LZ4HC_Data, const char* source, char* dest, int* sourceSizePtr, int targetDestSize, int cLevel)
{
LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse;
+ LZ4_resetStreamHC((LZ4_streamHC_t*)LZ4HC_Data, cLevel);
LZ4HC_init(ctx, (const BYTE*) source);
return LZ4HC_compress_generic(ctx, source, dest, sourceSizePtr, targetDestSize, cLevel, limitedDestSize);
}
@@ -706,8 +833,15 @@ int LZ4_compress_HC_destSize(void* LZ4HC_Data, const char* source, char* dest, i
* Streaming Functions
**************************************/
/* allocation */
-LZ4_streamHC_t* LZ4_createStreamHC(void) { return (LZ4_streamHC_t*)malloc(sizeof(LZ4_streamHC_t)); }
-int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr) {
+LZ4_streamHC_t* LZ4_createStreamHC(void) {
+ LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t));
+ if (LZ4_streamHCPtr==NULL) return NULL;
+ LZ4_resetStreamHC(LZ4_streamHCPtr, LZ4HC_CLEVEL_DEFAULT);
+ return LZ4_streamHCPtr;
+}
+
+int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr) {
+ DEBUGLOG(4, "LZ4_freeStreamHC(%p)", LZ4_streamHCPtr);
if (!LZ4_streamHCPtr) return 0; /* support free on NULL */
free(LZ4_streamHCPtr);
return 0;
@@ -718,7 +852,20 @@ int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr) {
void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
{
LZ4_STATIC_ASSERT(sizeof(LZ4HC_CCtx_internal) <= sizeof(size_t) * LZ4_STREAMHCSIZE_SIZET); /* if compilation fails here, LZ4_STREAMHCSIZE must be increased */
+ DEBUGLOG(4, "LZ4_resetStreamHC(%p, %d)", LZ4_streamHCPtr, compressionLevel);
+ LZ4_streamHCPtr->internal_donotuse.end = (const BYTE *)(ptrdiff_t)-1;
LZ4_streamHCPtr->internal_donotuse.base = NULL;
+ LZ4_streamHCPtr->internal_donotuse.dictCtx = NULL;
+ LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = 0;
+ LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
+}
+
+void LZ4_resetStreamHC_fast (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
+{
+ DEBUGLOG(4, "LZ4_resetStreamHC_fast(%p, %d)", LZ4_streamHCPtr, compressionLevel);
+ LZ4_streamHCPtr->internal_donotuse.end -= (uptrval)LZ4_streamHCPtr->internal_donotuse.base;
+ LZ4_streamHCPtr->internal_donotuse.base = NULL;
+ LZ4_streamHCPtr->internal_donotuse.dictCtx = NULL;
LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
}
@@ -726,27 +873,38 @@ void LZ4_setCompressionLevel(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLev
{
if (compressionLevel < 1) compressionLevel = LZ4HC_CLEVEL_DEFAULT;
if (compressionLevel > LZ4HC_CLEVEL_MAX) compressionLevel = LZ4HC_CLEVEL_MAX;
- LZ4_streamHCPtr->internal_donotuse.compressionLevel = compressionLevel;
+ LZ4_streamHCPtr->internal_donotuse.compressionLevel = (short)compressionLevel;
+}
+
+void LZ4_favorDecompressionSpeed(LZ4_streamHC_t* LZ4_streamHCPtr, int favor)
+{
+ LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = (favor!=0);
}
int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, const char* dictionary, int dictSize)
{
LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
+ DEBUGLOG(4, "LZ4_loadDictHC(%p, %p, %d)", LZ4_streamHCPtr, dictionary, dictSize);
if (dictSize > 64 KB) {
dictionary += dictSize - 64 KB;
dictSize = 64 KB;
}
+ LZ4_resetStreamHC(LZ4_streamHCPtr, ctxPtr->compressionLevel);
LZ4HC_init (ctxPtr, (const BYTE*)dictionary);
ctxPtr->end = (const BYTE*)dictionary + dictSize;
if (dictSize >= 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3);
return dictSize;
}
+void LZ4_attach_HC_dictionary(LZ4_streamHC_t *working_stream, const LZ4_streamHC_t *dictionary_stream) {
+ working_stream->internal_donotuse.dictCtx = dictionary_stream != NULL ? &(dictionary_stream->internal_donotuse) : NULL;
+}
/* compression */
static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock)
{
+ DEBUGLOG(4, "LZ4HC_setExternalDict(%p, %p)", ctxPtr, newBlock);
if (ctxPtr->end >= ctxPtr->base + 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3); /* Referencing remaining dictionary content */
/* Only one memory segment for extDict, so any previous extDict is lost at this stage */
@@ -764,6 +922,7 @@ static int LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
limitedOutput_directive limit)
{
LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
+ DEBUGLOG(4, "LZ4_compressHC_continue_generic(%p, %p, %d)", LZ4_streamHCPtr, src, *srcSizePtr);
/* auto-init if forgotten */
if (ctxPtr->base == NULL) LZ4HC_init (ctxPtr, (const BYTE*) src);
@@ -812,6 +971,7 @@ int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictS
{
LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
int const prefixSize = (int)(streamPtr->end - (streamPtr->base + streamPtr->dictLimit));
+ DEBUGLOG(4, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize);
if (dictSize > 64 KB) dictSize = 64 KB;
if (dictSize < 4) dictSize = 0;
if (dictSize > prefixSize) dictSize = prefixSize;
@@ -851,8 +1011,8 @@ int LZ4_resetStreamStateHC(void* state, char* inputBuffer)
{
LZ4HC_CCtx_internal *ctx = &((LZ4_streamHC_t*)state)->internal_donotuse;
if ((((size_t)state) & (sizeof(void*)-1)) != 0) return 1; /* Error : pointer is not aligned for pointer (32 or 64 bits) */
+ LZ4_resetStreamHC((LZ4_streamHC_t*)state, ((LZ4_streamHC_t*)state)->internal_donotuse.compressionLevel);
LZ4HC_init(ctx, (const BYTE*)inputBuffer);
- ctx->inputBuffer = inputBuffer;
return 0;
}
@@ -860,9 +1020,8 @@ void* LZ4_createHC (const char* inputBuffer)
{
LZ4_streamHC_t* hc4 = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t));
if (hc4 == NULL) return NULL; /* not enough memory */
+ LZ4_resetStreamHC(hc4, 0 /* compressionLevel */);
LZ4HC_init (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
- assert(sizeof(size_t) == sizeof(void*));
- hc4->internal_donotuse.inputBuffer = (void*)(size_t)inputBuffer; /* ugly hack, circumvent -Wcast-qual */
return hc4;
}
@@ -884,9 +1043,11 @@ int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* src, c
char* LZ4_slideInputBufferHC(void* LZ4HC_Data)
{
- LZ4HC_CCtx_internal* const hc4 = &((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse;
- int const dictSize = LZ4_saveDictHC((LZ4_streamHC_t*)LZ4HC_Data, (char*)(hc4->inputBuffer), 64 KB);
- return (char*)(hc4->inputBuffer) + dictSize;
+ LZ4_streamHC_t *ctx = (LZ4_streamHC_t*)LZ4HC_Data;
+ const BYTE *bufferStart = ctx->internal_donotuse.base + ctx->internal_donotuse.lowLimit;
+ LZ4_resetStreamHC_fast(ctx, ctx->internal_donotuse.compressionLevel);
+ /* avoid const char * -> char * conversion warning :( */
+ return (char *)(uptrval)bufferStart;
}
@@ -932,23 +1093,27 @@ typedef struct {
LZ4_FORCE_INLINE LZ4HC_match_t
LZ4HC_FindLongerMatch(LZ4HC_CCtx_internal* const ctx,
const BYTE* ip, const BYTE* const iHighLimit,
- int minLen, int nbSearches)
+ int minLen, int nbSearches,
+ const dictCtx_directive dict,
+ const HCfavor_e favorDecSpeed)
{
LZ4HC_match_t match = { 0 , 0 };
const BYTE* matchPtr = NULL;
/* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
* but this won't be the case here, as we define iLowLimit==ip,
* so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
- int const matchLength = LZ4HC_InsertAndGetWiderMatch(ctx,
+ int matchLength = LZ4HC_InsertAndGetWiderMatch(ctx,
ip, ip, iHighLimit, minLen, &matchPtr, &ip,
- nbSearches, 1 /* patternAnalysis */);
+ nbSearches, 1 /* patternAnalysis */, dict, favorDecSpeed);
if (matchLength <= minLen) return match;
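+    /* favorDecSpeed : mid-size matches (19-36 bytes) are truncated to 18,
+     * the largest length a token can encode without extra length bytes,
+     * which keeps the decoder on its fast shortcut path. */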
+ if (favorDecSpeed) {
+ if ((matchLength>18) & (matchLength<=36)) matchLength=18; /* favor shortcut */
+ }
match.len = matchLength;
match.off = (int)(ip-matchPtr);
return match;
}
-
static int LZ4HC_compress_optimal (
LZ4HC_CCtx_internal* ctx,
const char* const source,
@@ -957,8 +1122,10 @@ static int LZ4HC_compress_optimal (
int dstCapacity,
int const nbSearches,
size_t sufficient_len,
- limitedOutput_directive limit,
- int const fullUpdate
+ const limitedOutput_directive limit,
+ int const fullUpdate,
+ const dictCtx_directive dict,
+ const HCfavor_e favorDecSpeed
)
{
#define TRAILING_LITERALS 3
@@ -986,7 +1153,7 @@ static int LZ4HC_compress_optimal (
int best_mlen, best_off;
int cur, last_match_pos = 0;
- LZ4HC_match_t const firstMatch = LZ4HC_FindLongerMatch(ctx, ip, matchlimit, MINMATCH-1, nbSearches);
+ LZ4HC_match_t const firstMatch = LZ4HC_FindLongerMatch(ctx, ip, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed);
if (firstMatch.len==0) { ip++; continue; }
if ((size_t)firstMatch.len > sufficient_len) {
@@ -1056,10 +1223,10 @@ static int LZ4HC_compress_optimal (
DEBUGLOG(7, "search at rPos:%u", cur);
if (fullUpdate)
- newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, MINMATCH-1, nbSearches);
+ newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed);
else
/* only test matches of minimum length; slightly faster, but misses a few bytes */
- newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, last_match_pos - cur, nbSearches);
+ newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, last_match_pos - cur, nbSearches, dict, favorDecSpeed);
if (!newMatch.len) continue;
if ( ((size_t)newMatch.len > sufficient_len)
@@ -1107,7 +1274,9 @@ static int LZ4HC_compress_optimal (
price = opt[cur].price + LZ4HC_sequencePrice(0, ml);
}
- if (pos > last_match_pos+TRAILING_LITERALS || price <= opt[pos].price) {
+ assert((U32)favorDecSpeed <= 1);
+ if (pos > last_match_pos+TRAILING_LITERALS
+ || price <= opt[pos].price - (int)favorDecSpeed) {
DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i)",
pos, price, ml);
assert(pos < LZ4_OPT_NUM);
diff --git a/lib/lz4hc.h b/lib/lz4hc.h
index 7a25bee..bb5e073 100644
--- a/lib/lz4hc.h
+++ b/lib/lz4hc.h
@@ -141,35 +141,39 @@ LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, in
#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#include <stdint.h>
-typedef struct
+typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
+struct LZ4HC_CCtx_internal
{
uint32_t hashTable[LZ4HC_HASHTABLESIZE];
uint16_t chainTable[LZ4HC_MAXD];
const uint8_t* end; /* next block here to continue on current prefix */
const uint8_t* base; /* All index relative to this position */
const uint8_t* dictBase; /* alternate base for extDict */
- void* inputBuffer; /* deprecated */
uint32_t dictLimit; /* below that point, need extDict */
uint32_t lowLimit; /* below that point, no more dict */
uint32_t nextToUpdate; /* index from which to continue dictionary update */
- int compressionLevel;
-} LZ4HC_CCtx_internal;
+ short compressionLevel;
+ short favorDecSpeed;
+ const LZ4HC_CCtx_internal* dictCtx;
+};
#else
-typedef struct
+typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
+struct LZ4HC_CCtx_internal
{
unsigned int hashTable[LZ4HC_HASHTABLESIZE];
unsigned short chainTable[LZ4HC_MAXD];
const unsigned char* end; /* next block here to continue on current prefix */
const unsigned char* base; /* All index relative to this position */
const unsigned char* dictBase; /* alternate base for extDict */
- void* inputBuffer; /* deprecated */
unsigned int dictLimit; /* below that point, need extDict */
unsigned int lowLimit; /* below that point, no more dict */
unsigned int nextToUpdate; /* index from which to continue dictionary update */
- int compressionLevel;
-} LZ4HC_CCtx_internal;
+ short compressionLevel;
+ short favorDecSpeed;
+ const LZ4HC_CCtx_internal* dictCtx;
+};
#endif
@@ -206,7 +210,14 @@ LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_co
LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize);
LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
-/* Deprecated Streaming functions; should no longer be used */
+/* Obsolete streaming functions; degraded functionality; do not use!
+ *
+ * In order to perform streaming compression, these functions depended on data
+ * that is no longer tracked in the state. They have been preserved as well as
+ * possible: using them will still produce a correct output. However, use of
+ * LZ4_slideInputBufferHC() will truncate the history of the stream, rather
+ * than preserve a window-sized chunk of history.
+ */
LZ4_DEPRECATED("use LZ4_createStreamHC() instead") LZ4LIB_API void* LZ4_createHC (const char* inputBuffer);
LZ4_DEPRECATED("use LZ4_saveDictHC() instead") LZ4LIB_API char* LZ4_slideInputBufferHC (void* LZ4HC_Data);
LZ4_DEPRECATED("use LZ4_freeStreamHC() instead") LZ4LIB_API int LZ4_freeHC (void* LZ4HC_Data);
@@ -244,9 +255,9 @@ LZ4_DEPRECATED("use LZ4_resetStreamHC() instead") LZ4LIB_API int LZ4_resetStr
 * `srcSizePtr` : value will be updated to indicate how many bytes were read from `src`
*/
int LZ4_compress_HC_destSize(void* LZ4HC_Data,
- const char* src, char* dst,
- int* srcSizePtr, int targetDstSize,
- int compressionLevel);
+ const char* src, char* dst,
+ int* srcSizePtr, int targetDstSize,
+ int compressionLevel);
/*! LZ4_compress_HC_continue_destSize() : v1.8.0 (experimental)
* Similar as LZ4_compress_HC_continue(),
@@ -266,7 +277,71 @@ int LZ4_compress_HC_continue_destSize(LZ4_streamHC_t* LZ4_streamHCPtr,
*/
void LZ4_setCompressionLevel(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel);
-
+/*! LZ4_favorDecompressionSpeed() : v1.8.2 (experimental)
+ * The parser will select decisions favoring decompression speed over compression ratio.
+ * Only works at highest compression settings (level >= LZ4HC_CLEVEL_OPT_MIN).
+ */
+void LZ4_favorDecompressionSpeed(LZ4_streamHC_t* LZ4_streamHCPtr, int favor);
+
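A minimal usage sketch (editorial addition, not part of the patch); `src`, `dst` and their sizes are placeholders, and the level must reach LZ4HC_CLEVEL_OPT_MIN for the option to have any effect:

    #define LZ4_HC_STATIC_LINKING_ONLY
    #include "lz4hc.h"

    int hc_compress_favor_decspeed(const char* src, int srcSize,
                                   char* dst, int dstCapacity)
    {
        LZ4_streamHC_t* const s = LZ4_createStreamHC();
        int cSize;
        if (s == NULL) return 0;
        LZ4_setCompressionLevel(s, LZ4HC_CLEVEL_OPT_MIN);  /* option only matters at levels >= 10 */
        LZ4_favorDecompressionSpeed(s, 1);                  /* a full reset would clear this flag */
        cSize = LZ4_compress_HC_continue(s, src, dst, srcSize, dstCapacity);
        LZ4_freeStreamHC(s);
        return cSize;   /* 0 means dst was too small */
    }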
+/*! LZ4_resetStreamHC_fast() :
+ * When an LZ4_streamHC_t is known to be in an internally coherent state,
+ * it can often be prepared for a new compression with almost no work, only
+ * sometimes falling back to the full, expensive reset that is always required
+ * when the stream is in an indeterminate state (i.e., the reset performed by
+ * LZ4_resetStreamHC()).
+ *
+ * LZ4_streamHCs are guaranteed to be in a valid state when:
+ * - returned from LZ4_createStreamHC()
+ * - reset by LZ4_resetStreamHC()
+ * - memset(stream, 0, sizeof(LZ4_streamHC_t))
+ * - the stream was in a valid state and was reset by LZ4_resetStreamHC_fast()
+ * - the stream was in a valid state and was then used in any compression call
+ * that returned success
+ * - the stream was in an indeterminate state and was used in a compression
+ * call that fully reset the state (LZ4_compress_HC_extStateHC()) and that
+ * returned success
+ */
+void LZ4_resetStreamHC_fast(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel);
+
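A sketch of the intended pattern (editorial; the helper name and buffer parameters are invented): one stream is created once, then cheaply re-prepared between independent inputs, relying on the guarantee that a successful compression leaves the stream in a valid state:

    #define LZ4_HC_STATIC_LINKING_ONLY
    #include "lz4hc.h"

    /* Compresses n independent buffers, reusing a single stream.
     * dsts[i] is assumed to hold at least LZ4_compressBound(srcSizes[i]) bytes. */
    static void hc_compress_blocks(LZ4_streamHC_t* stream,
                                   const char* const* srcs, const int* srcSizes,
                                   char* const* dsts, int* cSizes, int n)
    {
        int i;
        for (i = 0; i < n; i++) {
            LZ4_resetStreamHC_fast(stream, LZ4HC_CLEVEL_DEFAULT);  /* cheap re-init */
            cSizes[i] = LZ4_compress_HC_continue(stream, srcs[i], dsts[i],
                                                 srcSizes[i],
                                                 LZ4_compressBound(srcSizes[i]));
        }
    }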
+/*! LZ4_compress_HC_extStateHC_fastReset() :
+ * A variant of LZ4_compress_HC_extStateHC().
+ *
+ * Using this variant avoids an expensive initialization step. It is only safe
+ * to call if the state buffer is known to be correctly initialized already
+ * (see above comment on LZ4_resetStreamHC_fast() for a definition of
+ * "correctly initialized"). From a high level, the difference is that this
+ * function initializes the provided state with a call to
+ * LZ4_resetStreamHC_fast() while LZ4_compress_HC_extStateHC() starts with a
+ * call to LZ4_resetStreamHC().
+ */
+int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel);
+
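A sketch of the external-state variant (editorial; `calloc` is used so the state starts zero-filled, which the list above counts as correctly initialized):

    #include <stdlib.h>   /* calloc, free */
    #define LZ4_HC_STATIC_LINKING_ONLY
    #include "lz4hc.h"

    static int hc_compress_many(const char* const* srcs, const int* srcSizes,
                                char* const* dsts, int* cSizes,
                                int n, int dstCapacity)
    {
        /* zero-filled memory counts as a valid initial state (see list above) */
        void* const state = calloc(1, (size_t)LZ4_sizeofStateHC());
        int i;
        if (state == NULL) return -1;
        for (i = 0; i < n; i++) {
            cSizes[i] = LZ4_compress_HC_extStateHC_fastReset(state,
                            srcs[i], dsts[i], srcSizes[i], dstCapacity,
                            LZ4HC_CLEVEL_DEFAULT);
        }
        free(state);
        return 0;
    }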
+/*! LZ4_attach_HC_dictionary() :
+ * This is an experimental API that allows for the efficient use of a
+ * static dictionary many times.
+ *
+ * Rather than re-loading the dictionary buffer into a working context before
+ * each compression, or copying a pre-loaded dictionary's LZ4_streamHC_t into a
+ * working LZ4_streamHC_t, this function introduces a no-copy setup mechanism,
+ * in which the working stream references the dictionary stream in-place.
+ *
+ * Several assumptions are made about the state of the dictionary stream.
+ * Currently, only streams which have been prepared by LZ4_loadDictHC() should
+ * be expected to work.
+ *
+ * Alternatively, the provided dictionary stream pointer may be NULL, in which
+ * case any existing dictionary stream is unset.
+ *
+ * A dictionary should only be attached to a stream without any history (i.e.,
+ * a stream that has just been reset).
+ *
+ * The dictionary will remain attached to the working stream only for the
+ * current stream session. Calls to LZ4_resetStreamHC(_fast) will remove the
+ * dictionary context association from the working stream. The dictionary
+ * stream (and source buffer) must remain in-place / accessible / unchanged
+ * through the lifetime of the stream session.
+ */
+LZ4LIB_API void LZ4_attach_HC_dictionary(LZ4_streamHC_t *working_stream, const LZ4_streamHC_t *dictionary_stream);
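A usage sketch consistent with the fuzzer test added by this patch (editorial; `msg`, `dst` and the sizes are placeholders): the dictionary stream is prepared once with LZ4_loadDictHC(), then each message gets a reset + attach + compress cycle:

    #define LZ4_HC_STATIC_LINKING_ONLY
    #include "lz4hc.h"

    /* dictStream must have been prepared once via LZ4_loadDictHC(). */
    static int hc_compress_one_msg(LZ4_streamHC_t* workStream,
                                   const LZ4_streamHC_t* dictStream,
                                   const char* msg, int msgSize,
                                   char* dst, int dstCapacity)
    {
        LZ4_resetStreamHC_fast(workStream, LZ4HC_CLEVEL_DEFAULT); /* history-free stream */
        LZ4_attach_HC_dictionary(workStream, dictStream);         /* no-copy, in-place reference */
        return LZ4_compress_HC_continue(workStream, msg, dst, msgSize, dstCapacity);
    }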
#endif /* LZ4_HC_SLO_098092834 */
#endif /* LZ4_HC_STATIC_LINKING_ONLY */
diff --git a/programs/lz4cli.c b/programs/lz4cli.c
index 42392eb..ba519b4 100644
--- a/programs/lz4cli.c
+++ b/programs/lz4cli.c
@@ -140,6 +140,7 @@ static int usage_advanced(const char* exeName)
DISPLAY( "--no-frame-crc : disable stream checksum (default:enabled) \n");
DISPLAY( "--content-size : compressed frame includes original size (default:not present)\n");
DISPLAY( "--[no-]sparse : sparse mode (default:enabled on file, disabled on stdout)\n");
+ DISPLAY( "--favor-decSpeed: compressed files decompress faster, but are less compressed \n");
DISPLAY( "Benchmark arguments : \n");
DISPLAY( " -b# : benchmark file(s), using # compression level (default : 1) \n");
DISPLAY( " -e# : test all compression levels from -bX to # (default : 1)\n");
@@ -355,6 +356,7 @@ int main(int argc, const char** argv)
if (!strcmp(argument, "--no-content-size")) { LZ4IO_setContentSize(0); continue; }
if (!strcmp(argument, "--sparse")) { LZ4IO_setSparseFile(2); continue; }
if (!strcmp(argument, "--no-sparse")) { LZ4IO_setSparseFile(0); continue; }
+ if (!strcmp(argument, "--favor-decSpeed")) { LZ4IO_favorDecSpeed(1); continue; }
if (!strcmp(argument, "--verbose")) { displayLevel++; continue; }
if (!strcmp(argument, "--quiet")) { if (displayLevel) displayLevel--; continue; }
if (!strcmp(argument, "--version")) { DISPLAY(WELCOME_MESSAGE); return 0; }
diff --git a/programs/lz4io.c b/programs/lz4io.c
index 6d0d0d0..b52c1f3 100644
--- a/programs/lz4io.c
+++ b/programs/lz4io.c
@@ -56,8 +56,8 @@
#include "lz4io.h"
#include "lz4.h" /* still required for legacy format */
#include "lz4hc.h" /* still required for legacy format */
+#define LZ4F_STATIC_LINKING_ONLY
#include "lz4frame.h"
-#include "lz4frame_static.h"
/*****************************
@@ -116,6 +116,7 @@ static int g_blockIndependence = 1;
static int g_sparseFileSupport = 1;
static int g_contentSizeFlag = 0;
static int g_useDictionary = 0;
+static unsigned g_favorDecSpeed = 0;
static const char* g_dictionaryFilename = NULL;
@@ -221,6 +222,12 @@ int LZ4IO_setContentSize(int enable)
return g_contentSizeFlag;
}
+/* Default setting : 0 (disabled) */
+void LZ4IO_favorDecSpeed(int favor)
+{
+ g_favorDecSpeed = (favor!=0);
+}
+
static U32 g_removeSrcFile = 0;
void LZ4IO_setRemoveSrcFile(unsigned flag) { g_removeSrcFile = (flag>0); }
@@ -548,6 +555,7 @@ static int LZ4IO_compressFilename_extRess(cRess_t ress, const char* srcFileName,
prefs.frameInfo.blockSizeID = (LZ4F_blockSizeID_t)g_blockSizeId;
prefs.frameInfo.blockChecksumFlag = (LZ4F_blockChecksum_t)g_blockChecksum;
prefs.frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)g_streamChecksum;
+ prefs.favorDecSpeed = g_favorDecSpeed;
if (g_contentSizeFlag) {
U64 const fileSize = UTIL_getFileSize(srcFileName);
prefs.frameInfo.contentSize = fileSize; /* == 0 if input == stdin */
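For reference, a library-level equivalent of what the CLI does here (an editorial sketch; it relies on the `favorDecSpeed` preference field this patch adds to lz4frame, and `dstCapacity` is assumed to be at least LZ4F_compressFrameBound(srcSize, &prefs)):

    #include <string.h>   /* memset */
    #include "lz4frame.h"

    static size_t frame_compress_favor_decspeed(void* dst, size_t dstCapacity,
                                                const void* src, size_t srcSize)
    {
        LZ4F_preferences_t prefs;
        memset(&prefs, 0, sizeof(prefs));
        prefs.compressionLevel = 10;  /* favorDecSpeed only matters at opt levels */
        prefs.favorDecSpeed = 1;
        return LZ4F_compressFrame(dst, dstCapacity, src, srcSize, &prefs);
        /* the result should be checked with LZ4F_isError() */
    }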
diff --git a/programs/lz4io.h b/programs/lz4io.h
index b21b8b6..22c5e3e 100644
--- a/programs/lz4io.h
+++ b/programs/lz4io.h
@@ -94,10 +94,15 @@ int LZ4IO_setNotificationLevel(int level);
/* Default setting : 0 (disabled) */
int LZ4IO_setSparseFile(int enable);
-/* Default setting : 0 (disabled) */
+/* Default setting : 0 == no content size present in frame header */
int LZ4IO_setContentSize(int enable);
+/* Default setting : 0 == src file preserved */
void LZ4IO_setRemoveSrcFile(unsigned flag);
+/* Default setting : 0 == favor compression ratio
+ * Note : 1 only works for high compression levels (10+) */
+void LZ4IO_favorDecSpeed(int favor);
+
#endif /* LZ4IO_H_237902873 */
diff --git a/programs/util.h b/programs/util.h
index ff25106..ef6ca77 100644
--- a/programs/util.h
+++ b/programs/util.h
@@ -30,8 +30,9 @@ extern "C" {
* Dependencies
******************************************/
#include "platform.h" /* PLATFORM_POSIX_VERSION */
-#include <stdlib.h> /* malloc */
#include <stddef.h> /* size_t, ptrdiff_t */
+#include <stdlib.h> /* malloc */
+#include <string.h> /* strlen, strncpy */
#include <stdio.h> /* fprintf */
#include <sys/types.h> /* stat, utime */
#include <sys/stat.h> /* stat */
diff --git a/tests/Makefile b/tests/Makefile
index 2b93c9f..d4847b1 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -80,7 +80,8 @@ lz4c32: # create a 32-bits version for 32/64 interop tests
%.o : $(LZ4DIR)/%.c $(LZ4DIR)/%.h
$(CC) -c $(CFLAGS) $(CPPFLAGS) $< -o $@
-fullbench : lz4.o lz4hc.o lz4frame.o xxhash.o fullbench.c
+fullbench : DEBUGLEVEL=0
+fullbench : lz4.o lz4hc.o lz4frame.o xxhash.o fullbench.c
$(CC) $(FLAGS) $^ -o $@$(EXT)
$(LZ4DIR)/liblz4.a:
diff --git a/tests/frametest.c b/tests/frametest.c
index 74d9c88..7d69ff7 100644
--- a/tests/frametest.c
+++ b/tests/frametest.c
@@ -41,7 +41,11 @@
#include <string.h> /* strcmp */
#include <time.h> /* clock_t, clock(), CLOCKS_PER_SEC */
#include <assert.h>
-#include "lz4frame_static.h"
+#include "lz4frame.h" /* include multiple times to test correctness/safety */
+#include "lz4frame.h"
+#define LZ4F_STATIC_LINKING_ONLY
+#include "lz4frame.h"
+#include "lz4frame.h"
#include "lz4.h" /* LZ4_VERSION_STRING */
#define XXH_STATIC_LINKING_ONLY
#include "xxhash.h" /* XXH64 */
diff --git a/tests/fullbench.c b/tests/fullbench.c
index 1939aeb..ee2966f 100644
--- a/tests/fullbench.c
+++ b/tests/fullbench.c
@@ -184,6 +184,11 @@ static int local_LZ4_compress_default_small(const char* in, char* out, int inSiz
return LZ4_compress_default(in, out, inSize, LZ4_compressBound(inSize)-1);
}
+static int local_LZ4_compress_destSize(const char* in, char* out, int inSize)
+{
+ return LZ4_compress_destSize(in, out, &inSize, LZ4_compressBound(inSize)-1);
+}
+
static int local_LZ4_compress_fast0(const char* in, char* out, int inSize)
{
return LZ4_compress_fast(in, out, inSize, LZ4_compressBound(inSize), 0);
@@ -422,12 +427,13 @@ int fullSpeedBench(const char** fileNamesTable, int nbFiles)
case 0 : DISPLAY("Compression functions : \n"); continue;
case 1 : compressionFunction = local_LZ4_compress_default_large; compressorName = "LZ4_compress_default"; break;
case 2 : compressionFunction = local_LZ4_compress_default_small; compressorName = "LZ4_compress_default(small dst)"; break;
- case 3 : compressionFunction = local_LZ4_compress_fast0; compressorName = "LZ4_compress_fast(0)"; break;
- case 4 : compressionFunction = local_LZ4_compress_fast1; compressorName = "LZ4_compress_fast(1)"; break;
- case 5 : compressionFunction = local_LZ4_compress_fast2; compressorName = "LZ4_compress_fast(2)"; break;
- case 6 : compressionFunction = local_LZ4_compress_fast17; compressorName = "LZ4_compress_fast(17)"; break;
- case 7 : compressionFunction = local_LZ4_compress_fast_extState0; compressorName = "LZ4_compress_fast_extState(0)"; break;
- case 8 : compressionFunction = local_LZ4_compress_fast_continue0; initFunction = local_LZ4_createStream; compressorName = "LZ4_compress_fast_continue(0)"; break;
+ case 3 : compressionFunction = local_LZ4_compress_destSize; compressorName = "LZ4_compress_destSize"; break;
+ case 4 : compressionFunction = local_LZ4_compress_fast0; compressorName = "LZ4_compress_fast(0)"; break;
+ case 5 : compressionFunction = local_LZ4_compress_fast1; compressorName = "LZ4_compress_fast(1)"; break;
+ case 6 : compressionFunction = local_LZ4_compress_fast2; compressorName = "LZ4_compress_fast(2)"; break;
+ case 7 : compressionFunction = local_LZ4_compress_fast17; compressorName = "LZ4_compress_fast(17)"; break;
+ case 8 : compressionFunction = local_LZ4_compress_fast_extState0; compressorName = "LZ4_compress_fast_extState(0)"; break;
+ case 9 : compressionFunction = local_LZ4_compress_fast_continue0; initFunction = local_LZ4_createStream; compressorName = "LZ4_compress_fast_continue(0)"; break;
case 10: compressionFunction = local_LZ4_compress_HC; compressorName = "LZ4_compress_HC"; break;
case 12: compressionFunction = local_LZ4_compress_HC_extStateHC; compressorName = "LZ4_compress_HC_extStateHC"; break;
diff --git a/tests/fuzzer.c b/tests/fuzzer.c
index cadda21..1fbda8a 100644
--- a/tests/fuzzer.c
+++ b/tests/fuzzer.c
@@ -446,7 +446,12 @@ static int FUZ_test(U32 seed, U32 nbCycles, const U32 startCycle, const double c
/* Test compression HC using external state */
FUZ_DISPLAYTEST("test LZ4_compress_HC_extStateHC()");
ret = LZ4_compress_HC_extStateHC(stateLZ4HC, block, compressedBuffer, blockSize, (int)compressedBufferSize, compressionLevel);
- FUZ_CHECKTEST(ret==0, "LZ4_compress_HC_extStateHC() failed");
+  FUZ_CHECKTEST(ret==0, "LZ4_compress_HC_extStateHC() failed");
+
+ /* Test compression HC using fast reset external state */
+ FUZ_DISPLAYTEST("test LZ4_compress_HC_extStateHC_fastReset()");
+ ret = LZ4_compress_HC_extStateHC_fastReset(stateLZ4HC, block, compressedBuffer, blockSize, (int)compressedBufferSize, compressionLevel);
+ FUZ_CHECKTEST(ret==0, "LZ4_compress_HC_extStateHC_fastReset() failed");
/* Test compression using external state */
FUZ_DISPLAYTEST("test LZ4_compress_fast_extState()");
@@ -487,6 +492,31 @@ static int FUZ_test(U32 seed, U32 nbCycles, const U32 startCycle, const double c
ret = LZ4_decompress_fast(compressedBuffer, decodedBuffer, blockSize+1);
FUZ_CHECKTEST(ret>=0, "LZ4_decompress_fast should have failed, due to Output Size being too large");
+ /* Test decoding with empty input */
+ FUZ_DISPLAYTEST("LZ4_decompress_safe() with empty input");
+ LZ4_decompress_safe(NULL, decodedBuffer, 0, blockSize);
+
+ /* Test decoding with a one byte input */
+ FUZ_DISPLAYTEST("LZ4_decompress_safe() with one byte input");
+ { char const tmp = 0xFF;
+ LZ4_decompress_safe(&tmp, decodedBuffer, 1, blockSize);
+ }
+
+ /* Test decoding shortcut edge case */
+ FUZ_DISPLAYTEST("LZ4_decompress_safe() with shortcut edge case");
+ { char tmp[17];
+ /* 14 bytes of literals, followed by a 14 byte match.
+ * Should not read beyond the end of the buffer.
+ * See https://github.com/lz4/lz4/issues/508. */
+ *tmp = 0xEE;
+ memset(tmp + 1, 0, 14);
+ tmp[15] = 14;
+ tmp[16] = 0;
+ ret = LZ4_decompress_safe(tmp, decodedBuffer, sizeof(tmp), blockSize);
+ FUZ_CHECKTEST(ret >= 0, "LZ4_decompress_safe() should fail");
+ }
+
+
/* Test decoding with output size exactly what's necessary => must work */
FUZ_DISPLAYTEST();
decodedBuffer[blockSize] = 0;
@@ -810,6 +840,49 @@ static int FUZ_test(U32 seed, U32 nbCycles, const U32 startCycle, const double c
FUZ_CHECKTEST(crcCheck!=crcOrig, "LZ4_decompress_safe_usingDict corrupted decoded data");
}
+ /* Compress HC using external dictionary stream */
+ FUZ_DISPLAYTEST();
+ {
+ LZ4_streamHC_t LZ4_streamHC;
+
+ LZ4_resetStreamHC (&LZ4dictHC, compressionLevel);
+ LZ4_loadDictHC(&LZ4dictHC, dict, dictSize);
+ LZ4_resetStreamHC (&LZ4_streamHC, compressionLevel);
+ LZ4_attach_HC_dictionary(&LZ4_streamHC, &LZ4dictHC);
+ blockContinueCompressedSize = LZ4_compress_HC_continue(&LZ4_streamHC, block, compressedBuffer, blockSize, (int)compressedBufferSize);
+ FUZ_CHECKTEST(blockContinueCompressedSize==0, "LZ4_compress_HC_continue with ExtDictCtx failed");
+
+ FUZ_DISPLAYTEST();
+ LZ4_resetStreamHC (&LZ4_streamHC, compressionLevel);
+ LZ4_attach_HC_dictionary(&LZ4_streamHC, &LZ4dictHC);
+ ret = LZ4_compress_HC_continue(&LZ4_streamHC, block, compressedBuffer, blockSize, blockContinueCompressedSize-1);
+ FUZ_CHECKTEST(ret>0, "LZ4_compress_HC_continue using ExtDictCtx should fail : one missing byte for output buffer (%i != %i)", ret, blockContinueCompressedSize);
+
+ FUZ_DISPLAYTEST();
+ LZ4_resetStreamHC (&LZ4_streamHC, compressionLevel);
+ LZ4_attach_HC_dictionary(&LZ4_streamHC, &LZ4dictHC);
+ ret = LZ4_compress_HC_continue(&LZ4_streamHC, block, compressedBuffer, blockSize, blockContinueCompressedSize);
+ FUZ_CHECKTEST(ret!=blockContinueCompressedSize, "LZ4_compress_HC_continue using ExtDictCtx size is different (%i != %i)", ret, blockContinueCompressedSize);
+ FUZ_CHECKTEST(ret<=0, "LZ4_compress_HC_continue using ExtDictCtx should work : enough size available within output buffer");
+
+ FUZ_DISPLAYTEST();
+ LZ4_resetStreamHC_fast (&LZ4_streamHC, compressionLevel);
+ LZ4_attach_HC_dictionary(&LZ4_streamHC, &LZ4dictHC);
+ ret = LZ4_compress_HC_continue(&LZ4_streamHC, block, compressedBuffer, blockSize, blockContinueCompressedSize);
+ FUZ_CHECKTEST(ret!=blockContinueCompressedSize, "LZ4_compress_HC_continue using ExtDictCtx and fast reset size is different (%i != %i)", ret, blockContinueCompressedSize);
+ FUZ_CHECKTEST(ret<=0, "LZ4_compress_HC_continue using ExtDictCtx and fast reset should work : enough size available within output buffer");
+
+ FUZ_DISPLAYTEST();
+ decodedBuffer[blockSize] = 0;
+ ret = LZ4_decompress_safe_usingDict(compressedBuffer, decodedBuffer, blockContinueCompressedSize, blockSize, dict, dictSize);
+ FUZ_CHECKTEST(ret!=blockSize, "LZ4_decompress_safe_usingDict did not regenerate original data");
+ FUZ_CHECKTEST(decodedBuffer[blockSize], "LZ4_decompress_safe_usingDict overrun specified output buffer size");
+ { U32 const crcCheck = XXH32(decodedBuffer, blockSize, 0);
+ if (crcCheck!=crcOrig) FUZ_findDiff(block, decodedBuffer);
+ FUZ_CHECKTEST(crcCheck!=crcOrig, "LZ4_decompress_safe_usingDict corrupted decoded data");
+ }
+ }
+
/* Compress HC continue destSize */
FUZ_DISPLAYTEST();
{ int const availableSpace = (FUZ_rand(&randState) % blockSize) + 5;