Diffstat (limited to 'lib/lz4.c')
 lib/lz4.c | 1181 +++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 766 insertions(+), 415 deletions(-)
diff --git a/lib/lz4.c b/lib/lz4.c
index 213b085..e51a3e0 100644
--- a/lib/lz4.c
+++ b/lib/lz4.c
@@ -69,9 +69,11 @@
* Prefer these methods in priority order (0 > 1 > 2)
*/
#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */
-# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+# if defined(__GNUC__) && \
+ ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \
+ || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
# define LZ4_FORCE_MEMORY_ACCESS 2
-# elif defined(__INTEL_COMPILER) || defined(__GNUC__)
+# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
# define LZ4_FORCE_MEMORY_ACCESS 1
# endif
#endif
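As an aside, a build can pin this choice instead of relying on the detection above; a minimal sketch, assuming the value semantics documented at the top of this block in lz4.c (0 = memcpy, 1 = __packed access, 2 = direct cast):

    /* defined before lz4.c is compiled, e.g. via -DLZ4_FORCE_MEMORY_ACCESS=1,
     * this forces the __packed-based access path regardless of detection */
    #define LZ4_FORCE_MEMORY_ACCESS 1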
@@ -80,7 +82,7 @@
* LZ4_FORCE_SW_BITCOUNT
* Define this parameter if your target system or compiler does not support hardware bit count
*/
-#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for Windows CE does not support Hardware bit count */
+#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */
# define LZ4_FORCE_SW_BITCOUNT
#endif
@@ -89,6 +91,8 @@
/*-************************************
* Dependency
**************************************/
+#define LZ4_STATIC_LINKING_ONLY
+#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
#include "lz4.h"
/* see also "memory routines" below */
@@ -146,18 +150,23 @@
# define expect(expr,value) (expr)
#endif
+#ifndef likely
#define likely(expr) expect((expr) != 0, 1)
+#endif
+#ifndef unlikely
#define unlikely(expr) expect((expr) != 0, 0)
+#endif
/*-************************************
* Memory routines
**************************************/
#include <stdlib.h> /* malloc, calloc, free */
-#define ALLOCATOR(n,s) calloc(n,s)
-#define FREEMEM free
+#define ALLOC(s) malloc(s)
+#define ALLOC_AND_ZERO(s) calloc(1,s)
+#define FREEMEM(p) free(p)
#include <string.h> /* memset, memcpy */
-#define MEM_INIT memset
+#define MEM_INIT(p,v,s) memset((p),(v),(s))
/*-************************************
@@ -270,11 +279,6 @@ static void LZ4_writeLE16(void* memPtr, U16 value)
}
}
-static void LZ4_copy8(void* dst, const void* src)
-{
- memcpy(dst,src,8);
-}
-
/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
LZ4_FORCE_O2_INLINE_GCC_PPC64LE
void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
@@ -283,7 +287,7 @@ void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
const BYTE* s = (const BYTE*)srcPtr;
BYTE* const e = (BYTE*)dstEnd;
- do { LZ4_copy8(d,s); d+=8; s+=8; } while (d<e);
+ do { memcpy(d,s,8); d+=8; s+=8; } while (d<e);
}
@@ -321,7 +325,7 @@ static const int LZ4_minLength = (MFLIMIT+1);
# endif
#endif
-#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use after variable declarations */
#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
# include <stdio.h>
@@ -450,10 +454,33 @@ static const U32 LZ4_skipTrigger = 6; /* Increase this value ==> compression ru
/*-************************************
* Local Structures and types
**************************************/
-typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
-typedef enum { byPtr, byU32, byU16 } tableType_t;
-
-typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
+typedef enum { notLimited = 0, limitedOutput = 1, fillOutput = 2 } limitedOutput_directive;
+typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
+
+/**
+ * This enum distinguishes several different modes of accessing previous
+ * content in the stream.
+ *
+ * - noDict : There is no preceding content.
+ * - withPrefix64k : Table entries up to ctx->dictSize before the current
+ * blob being compressed are valid and refer to the preceding content
+ * (of length ctx->dictSize), which is available contiguously in memory,
+ * immediately preceding the content currently being compressed.
+ * - usingExtDict : Like withPrefix64k, but the preceding content is
+ * somewhere else in memory, starting at ctx->dictionary with length
+ * ctx->dictSize.
+ * - usingDictCtx : Like usingExtDict, but everything concerning the
+ * preceding content is in a separate context, pointed to by ctx->dictCtx.
+ * ctx->dictionary, ctx->dictSize, and table entries in the current context
+ * that refer to positions preceding the beginning of the current
+ * compression are ignored. Instead, ctx->dictCtx->dictionary and
+ * ctx->dictCtx->dictSize describe the location and size of the preceding
+ * content, and matches are found by looking in ctx->dictCtx->hashTable.
+ */
+typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
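As an illustration (not part of the patch), a hedged sketch of how these modes arise through the streaming API introduced in this diff; buffer names are illustrative and error handling is omitted:

    LZ4_stream_t dictStream, workStream;
    LZ4_resetStream(&dictStream);
    LZ4_loadDict(&dictStream, dictBuf, dictLen);      /* build a dictionary context */

    LZ4_resetStream(&workStream);
    LZ4_attach_dictionary(&workStream, &dictStream);  /* first block: usingDictCtx */
    LZ4_compress_fast_continue(&workStream, src, dst, srcLen, dstCap, 1);
    /* a next block contiguous in memory with the previous one: withPrefix64k;
     * a next block elsewhere in memory: usingExtDict;
     * a fresh stream with no history: noDict */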
@@ -497,10 +524,25 @@ LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tab
return LZ4_hash4(LZ4_read32(p), tableType);
}
-static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t const tableType, const BYTE* srcBase)
+static void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
+{
+ switch (tableType)
+ {
+ default: /* fallthrough */
+ case clearedTable: /* fallthrough */
+ case byPtr: { /* illegal! */ assert(0); return; }
+ case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }
+ case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }
+ }
+}
+
+static void LZ4_putPositionOnHash(const BYTE* p, U32 h,
+ void* tableBase, tableType_t const tableType,
+ const BYTE* srcBase)
{
switch (tableType)
{
+ case clearedTable: { /* illegal! */ assert(0); return; }
case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
@@ -513,19 +555,81 @@ LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_
LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
}
-static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+/* LZ4_getIndexOnHash() :
+ * Returns the index of a match position registered in the hash table.
+ * The match position itself must be reconstructed as base+index, or dictBase+index.
+ * Assumption 1 : only valid if tableType == byU32 or byU16.
+ * Assumption 2 : h is presumed valid (within limits of the hash table)
+ */
+static U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
+{
+ LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
+ if (tableType == byU32) {
+ const U32* const hashTable = (const U32*) tableBase;
+ assert(h < (1U << (LZ4_MEMORY_USAGE-2)));
+ return hashTable[h];
+ }
+ if (tableType == byU16) {
+ const U16* const hashTable = (const U16*) tableBase;
+ assert(h < (1U << (LZ4_MEMORY_USAGE-1)));
+ return hashTable[h];
+ }
+ assert(0); return 0; /* forbidden case */
+}
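To make the base+index / dictBase+index rule concrete, a sketch of how a caller reconstructs the match pointer (this mirrors the byU32 logic added later in this diff; names are those used in LZ4_compress_generic()):

    U32 const matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, byU32);
    const BYTE* const match = (matchIndex >= startIndex)
                            ? base + matchIndex       /* within current prefix */
                            : dictBase + matchIndex;  /* within extDict */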
+
+static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
- if (tableType == byPtr) { const BYTE** hashTable = (const BYTE**) tableBase; return hashTable[h]; }
- if (tableType == byU32) { const U32* const hashTable = (U32*) tableBase; return hashTable[h] + srcBase; }
- { const U16* const hashTable = (U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
+ if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
+ if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; }
+ { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
}
-LZ4_FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+LZ4_FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p,
+ const void* tableBase, tableType_t tableType,
+ const BYTE* srcBase)
{
U32 const h = LZ4_hashPosition(p, tableType);
return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
}
+LZ4_FORCE_INLINE void LZ4_prepareTable(
+ LZ4_stream_t_internal* const cctx,
+ const int inputSize,
+ const tableType_t tableType) {
+ /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
+ * therefore safe to use no matter what mode we're in. Otherwise, we figure
+ * out if it's safe to leave as is or whether it needs to be reset.
+ */
+ if (cctx->tableType != clearedTable) {
+ if (cctx->tableType != tableType
+ || (tableType == byU16 && cctx->currentOffset + inputSize >= 0xFFFFU)
+ || (tableType == byU32 && cctx->currentOffset > 1 GB)
+ || tableType == byPtr
+ || inputSize >= 4 KB)
+ {
+ DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
+ MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
+ cctx->currentOffset = 0;
+ cctx->tableType = clearedTable;
+ } else {
+ DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
+ }
+ }
+
+ /* Adding a gap, so that all previous table entries are > MAX_DISTANCE back,
+ * is faster than compressing without a gap. However, compressing with
+ * currentOffset == 0 is faster still, so we preserve that case.
+ */
+ if (cctx->currentOffset != 0 && tableType == byU32) {
+ DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
+ cctx->currentOffset += 64 KB;
+ }
+
+ /* Finally, clear history */
+ cctx->dictCtx = NULL;
+ cctx->dictionary = NULL;
+ cctx->dictSize = 0;
+}
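LZ4_prepareTable() is what makes the new fast-reset entry points cheap; a usage sketch, assuming the state was fully initialized once with LZ4_resetStream():

    LZ4_stream_t state;
    LZ4_resetStream(&state);         /* full, one-time initialization */
    for (int i = 0; i < nbBlocks; i++) {
        /* LZ4_prepareTable() runs inside, and decides whether the hash
         * table can be reused as-is or must be cleared */
        LZ4_compress_fast_extState_fastReset(&state, src[i], dst[i],
                                             srcSize[i], dstCapacity[i], 1);
    }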
/** LZ4_compress_generic() :
inlined, to ensure branches are decided at compilation time */
@@ -534,50 +638,70 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
const char* const source,
char* const dest,
const int inputSize,
+ int *inputConsumed, /* only written when outputLimited == fillOutput */
const int maxOutputSize,
const limitedOutput_directive outputLimited,
const tableType_t tableType,
- const dict_directive dict,
+ const dict_directive dictDirective,
const dictIssue_directive dictIssue,
const U32 acceleration)
{
const BYTE* ip = (const BYTE*) source;
- const BYTE* base;
+
+ U32 const startIndex = cctx->currentOffset;
+ const BYTE* base = (const BYTE*) source - startIndex;
const BYTE* lowLimit;
- const BYTE* const lowRefLimit = ip - cctx->dictSize;
- const BYTE* const dictionary = cctx->dictionary;
- const BYTE* const dictEnd = dictionary + cctx->dictSize;
- const ptrdiff_t dictDelta = dictEnd - (const BYTE*)source;
+
+ const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
+ const BYTE* const dictionary =
+ dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
+ const U32 dictSize =
+ dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
+ const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with index in current context */
+
+ int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
+ U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */
+ const BYTE* const dictEnd = dictionary + dictSize;
const BYTE* anchor = (const BYTE*) source;
const BYTE* const iend = ip + inputSize;
- const BYTE* const mflimit = iend - MFLIMIT;
+ const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
const BYTE* const matchlimit = iend - LASTLITERALS;
+ /* the dictCtx currentOffset is indexed on the start of the dictionary,
+ * while a dictionary in the current context precedes the currentOffset */
+ const BYTE* dictBase = dictDirective == usingDictCtx ?
+ dictionary + dictSize - dictCtx->currentOffset :
+ dictionary + dictSize - startIndex;
+
BYTE* op = (BYTE*) dest;
BYTE* const olimit = op + maxOutputSize;
+ U32 offset = 0;
U32 forwardH;
+ DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, tableType=%u", inputSize, tableType);
/* Init conditions */
+ if (outputLimited == fillOutput && maxOutputSize < 1) return 0; /* Impossible to store anything */
if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported inputSize, too large (or negative) */
- switch(dict)
- {
- case noDict:
- default:
- base = (const BYTE*)source;
- lowLimit = (const BYTE*)source;
- break;
- case withPrefix64k:
- base = (const BYTE*)source - cctx->currentOffset;
- lowLimit = (const BYTE*)source - cctx->dictSize;
- break;
- case usingExtDict:
- base = (const BYTE*)source - cctx->currentOffset;
- lowLimit = (const BYTE*)source;
- break;
+ if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0; /* Size too large (not within 64K limit) */
+ if (tableType==byPtr) assert(dictDirective==noDict); /* only supported use case with byPtr */
+ assert(acceleration >= 1);
+
+ lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);
+
+ /* Update context state */
+ if (dictDirective == usingDictCtx) {
+ /* Subsequent linked blocks can't use the dictionary. */
+ /* Instead, they use the block we just compressed. */
+ cctx->dictCtx = NULL;
+ cctx->dictSize = (U32)inputSize;
+ } else {
+ cctx->dictSize += (U32)inputSize;
}
- if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0; /* Size too large (not within 64K limit) */
- if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
+ cctx->currentOffset += (U32)inputSize;
+ cctx->tableType = tableType;
+
+ if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
/* First Byte */
LZ4_putPosition(ip, cctx->hashTable, tableType, base);
@@ -585,12 +709,12 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
/* Main Loop */
for ( ; ; ) {
- ptrdiff_t refDelta = 0;
const BYTE* match;
BYTE* token;
/* Find a match */
- { const BYTE* forwardIp = ip;
+ if (tableType == byPtr) {
+ const BYTE* forwardIp = ip;
unsigned step = 1;
unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
do {
@@ -599,34 +723,89 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
forwardIp += step;
step = (searchMatchNb++ >> LZ4_skipTrigger);
- if (unlikely(forwardIp > mflimit)) goto _last_literals;
+ if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
+ assert(ip < mflimitPlusOne);
match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
- if (dict==usingExtDict) {
- if (match < (const BYTE*)source) {
- refDelta = dictDelta;
+ forwardH = LZ4_hashPosition(forwardIp, tableType);
+ LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
+
+ } while ( (match+MAX_DISTANCE < ip)
+ || (LZ4_read32(match) != LZ4_read32(ip)) );
+
+ } else { /* byU32, byU16 */
+
+ const BYTE* forwardIp = ip;
+ unsigned step = 1;
+ unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
+ do {
+ U32 const h = forwardH;
+ U32 const current = (U32)(forwardIp - base);
+ U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
+ assert(matchIndex <= current);
+ assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));
+ ip = forwardIp;
+ forwardIp += step;
+ step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+ if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
+ assert(ip < mflimitPlusOne);
+
+ if (dictDirective == usingDictCtx) {
+ if (matchIndex < startIndex) {
+ /* there was no match, try the dictionary */
+ assert(tableType == byU32);
+ matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
+ match = dictBase + matchIndex;
+ matchIndex += dictDelta; /* make dictCtx index comparable with current context */
+ lowLimit = dictionary;
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE*)source;
+ }
+ } else if (dictDirective==usingExtDict) {
+ if (matchIndex < startIndex) {
+ DEBUGLOG(7, "extDict candidate: matchIndex=%5u < startIndex=%5u", matchIndex, startIndex);
+ assert(startIndex - matchIndex >= MINMATCH);
+ match = dictBase + matchIndex;
lowLimit = dictionary;
} else {
- refDelta = 0;
+ match = base + matchIndex;
lowLimit = (const BYTE*)source;
- } }
+ }
+ } else { /* single continuous memory segment */
+ match = base + matchIndex;
+ }
forwardH = LZ4_hashPosition(forwardIp, tableType);
- LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
+ LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
+
+ if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) continue; /* match outside of valid area */
+ assert(matchIndex < current);
+ if ((tableType != byU16) && (matchIndex+MAX_DISTANCE < current)) continue; /* too far */
+ if (tableType == byU16) assert((current - matchIndex) <= MAX_DISTANCE); /* too_far presumed impossible with byU16 */
- } while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0)
- || ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
- || (LZ4_read32(match+refDelta) != LZ4_read32(ip)) );
+ if (LZ4_read32(match) == LZ4_read32(ip)) {
+ if (maybe_extMem) offset = current - matchIndex;
+ break; /* match found */
+ }
+
+ } while(1);
}
/* Catch up */
- while (((ip>anchor) & (match+refDelta > lowLimit)) && (unlikely(ip[-1]==match[refDelta-1]))) { ip--; match--; }
+ while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
/* Encode Literals */
{ unsigned const litLength = (unsigned)(ip - anchor);
token = op++;
- if ((outputLimited) && /* Check output buffer overflow */
+ if ((outputLimited == limitedOutput) && /* Check output buffer overflow */
(unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
return 0;
+ if ((outputLimited == fillOutput) &&
+ (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {
+ op--;
+ goto _last_literals;
+ }
if (litLength >= RUN_MASK) {
int len = (int)litLength-RUN_MASK;
*token = (RUN_MASK<<ML_BITS);
@@ -638,35 +817,70 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
/* Copy Literals */
LZ4_wildCopy(op, anchor, op+litLength);
op+=litLength;
+ DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
+ (int)(anchor-(const BYTE*)source), litLength, (int)(ip-(const BYTE*)source));
}
_next_match:
+ /* at this stage, the following variables must be correctly set :
+ * - ip : at start of LZ operation
+ * - match : at start of previous pattern occurrence; can be within current prefix, or within extDict
+ * - offset : only set if maybe_extMem==1 (constant)
+ * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
+ * - token and *token : position to write the 4-bit match length; the higher 4 bits (literal length) are assumed already written
+ */
+
+ if ((outputLimited == fillOutput) &&
+ (op + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit)) {
+ /* the match was too close to the end, rewind and go to last literals */
+ op = token;
+ goto _last_literals;
+ }
+
/* Encode Offset */
- LZ4_writeLE16(op, (U16)(ip-match)); op+=2;
+ if (maybe_extMem) { /* static test */
+ DEBUGLOG(6, " with offset=%u (ext if > %i)", offset, (int)(ip - (const BYTE*)source));
+ assert(offset <= MAX_DISTANCE && offset > 0);
+ LZ4_writeLE16(op, (U16)offset); op+=2;
+ } else {
+ DEBUGLOG(6, " with offset=%u (same segment)", (U32)(ip - match));
+ assert(ip-match <= MAX_DISTANCE);
+ LZ4_writeLE16(op, (U16)(ip - match)); op+=2;
+ }
/* Encode MatchLength */
{ unsigned matchCode;
- if ((dict==usingExtDict) && (lowLimit==dictionary)) {
- const BYTE* limit;
- match += refDelta;
- limit = ip + (dictEnd-match);
+ if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)
+ && (lowLimit==dictionary) /* match within extDict */ ) {
+ const BYTE* limit = ip + (dictEnd-match);
+ assert(dictEnd > match);
if (limit > matchlimit) limit = matchlimit;
matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
ip += MINMATCH + matchCode;
if (ip==limit) {
- unsigned const more = LZ4_count(ip, (const BYTE*)source, matchlimit);
+ unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);
matchCode += more;
ip += more;
}
+ DEBUGLOG(6, " with matchLength=%u starting in extDict", matchCode+MINMATCH);
} else {
matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
ip += MINMATCH + matchCode;
+ DEBUGLOG(6, " with matchLength=%u", matchCode+MINMATCH);
}
- if ( outputLimited && /* Check output buffer overflow */
- (unlikely(op + (1 + LASTLITERALS) + (matchCode>>8) > olimit)) )
- return 0;
+ if ((outputLimited) && /* Check output buffer overflow */
+ (unlikely(op + (1 + LASTLITERALS) + (matchCode>>8) > olimit)) ) {
+ if (outputLimited == limitedOutput)
+ return 0;
+ if (outputLimited == fillOutput) {
+ /* Match description too long : reduce it */
+ U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 2 - 1 - LASTLITERALS) * 255;
+ ip -= matchCode - newMatchCode;
+ matchCode = newMatchCode;
+ }
+ }
if (matchCode >= ML_MASK) {
*token += ML_MASK;
matchCode -= ML_MASK;
@@ -685,37 +899,80 @@ _next_match:
anchor = ip;
/* Test end of chunk */
- if (ip > mflimit) break;
+ if (ip >= mflimitPlusOne) break;
/* Fill table */
LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);
/* Test next position */
- match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
- if (dict==usingExtDict) {
- if (match < (const BYTE*)source) {
- refDelta = dictDelta;
- lowLimit = dictionary;
- } else {
- refDelta = 0;
- lowLimit = (const BYTE*)source;
- } }
- LZ4_putPosition(ip, cctx->hashTable, tableType, base);
- if ( ((dictIssue==dictSmall) ? (match>=lowRefLimit) : 1)
- && (match+MAX_DISTANCE>=ip)
- && (LZ4_read32(match+refDelta)==LZ4_read32(ip)) )
- { token=op++; *token=0; goto _next_match; }
+ if (tableType == byPtr) {
+
+ match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
+ LZ4_putPosition(ip, cctx->hashTable, tableType, base);
+ if ( (match+MAX_DISTANCE >= ip)
+ && (LZ4_read32(match) == LZ4_read32(ip)) )
+ { token=op++; *token=0; goto _next_match; }
+
+ } else { /* byU32, byU16 */
+
+ U32 const h = LZ4_hashPosition(ip, tableType);
+ U32 const current = (U32)(ip-base);
+ U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
+ assert(matchIndex < current);
+ if (dictDirective == usingDictCtx) {
+ if (matchIndex < startIndex) {
+ /* there was no match, try the dictionary */
+ matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
+ match = dictBase + matchIndex;
+ lowLimit = dictionary; /* required for match length counter */
+ matchIndex += dictDelta;
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE*)source; /* required for match length counter */
+ }
+ } else if (dictDirective==usingExtDict) {
+ if (matchIndex < startIndex) {
+ match = dictBase + matchIndex;
+ lowLimit = dictionary; /* required for match length counter */
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE*)source; /* required for match length counter */
+ }
+ } else { /* single memory segment */
+ match = base + matchIndex;
+ }
+ LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
+ assert(matchIndex < current);
+ if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1)
+ && ((tableType==byU16) ? 1 : (matchIndex+MAX_DISTANCE >= current))
+ && (LZ4_read32(match) == LZ4_read32(ip)) ) {
+ token=op++;
+ *token=0;
+ if (maybe_extMem) offset = current - matchIndex;
+ DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
+ (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));
+ goto _next_match;
+ }
+ }
/* Prepare next loop */
forwardH = LZ4_hashPosition(++ip, tableType);
+
}
_last_literals:
/* Encode Last Literals */
- { size_t const lastRun = (size_t)(iend - anchor);
+ { size_t lastRun = (size_t)(iend - anchor);
if ( (outputLimited) && /* Check output buffer overflow */
- ((op - (BYTE*)dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize) )
- return 0;
+ (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) {
+ if (outputLimited == fillOutput) {
+ /* adapt lastRun to fill 'dst' */
+ lastRun = (olimit-op) - 1;
+ lastRun -= (lastRun+240)/255;
+ }
+ if (outputLimited == limitedOutput)
+ return 0;
+ }
if (lastRun >= RUN_MASK) {
size_t accumulator = lastRun - RUN_MASK;
*op++ = RUN_MASK << ML_BITS;
@@ -725,44 +982,97 @@ _last_literals:
*op++ = (BYTE)(lastRun<<ML_BITS);
}
memcpy(op, anchor, lastRun);
+ ip = anchor + lastRun;
op += lastRun;
}
- /* End */
- return (int) (((char*)op)-dest);
+ if (outputLimited == fillOutput) {
+ *inputConsumed = (int) (((const char*)ip)-source);
+ }
+ DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, (int)(((char*)op) - dest));
+ return (int)(((char*)op) - dest);
}
int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
+ if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
LZ4_resetStream((LZ4_stream_t*)state);
+ if (maxOutputSize >= LZ4_compressBound(inputSize)) {
+ if (inputSize < LZ4_64Klimit) {
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
+ } else {
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > MAX_DISTANCE)) ? byPtr : byU32;
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+ }
+ } else {
+ if (inputSize < LZ4_64Klimit) {
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
+ } else {
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > MAX_DISTANCE)) ? byPtr : byU32;
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+ }
+ }
+}
+
+/**
+ * LZ4_compress_fast_extState_fastReset() :
+ * A variant of LZ4_compress_fast_extState().
+ *
+ * Using this variant avoids an expensive initialization step. It is only safe
+ * to call if the state buffer is known to be correctly initialized already
+ * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
+ * "correctly initialized").
+ */
+int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
+{
+ LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
- if (maxOutputSize >= LZ4_compressBound(inputSize)) {
- if (inputSize < LZ4_64Klimit)
- return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
- else
- return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration);
+ if (dstCapacity >= LZ4_compressBound(srcSize)) {
+ if (srcSize < LZ4_64Klimit) {
+ const tableType_t tableType = byU16;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ if (ctx->currentOffset) {
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration);
+ } else {
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+ }
+ } else {
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > MAX_DISTANCE)) ? byPtr : byU32;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+ }
} else {
- if (inputSize < LZ4_64Klimit)
- return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
- else
- return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration);
+ if (srcSize < LZ4_64Klimit) {
+ const tableType_t tableType = byU16;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ if (ctx->currentOffset) {
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration);
+ } else {
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+ }
+ } else {
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > MAX_DISTANCE)) ? byPtr : byU32;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+ }
}
}
int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
+ int result;
#if (LZ4_HEAPMODE)
- void* ctxPtr = ALLOCATOR(1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
+ LZ4_stream_t* ctxPtr = ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
+ if (ctxPtr == NULL) return 0;
#else
LZ4_stream_t ctx;
- void* const ctxPtr = &ctx;
+ LZ4_stream_t* const ctxPtr = &ctx;
#endif
-
- int const result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
+ result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
#if (LZ4_HEAPMODE)
FREEMEM(ctxPtr);
@@ -785,172 +1095,15 @@ int LZ4_compress_fast_force(const char* source, char* dest, int inputSize, int m
LZ4_resetStream(&ctx);
if (inputSize < LZ4_64Klimit)
- return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
+ return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
else
- return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, sizeof(void*)==8 ? byU32 : byPtr, noDict, noDictIssue, acceleration);
-}
-
-
-/*-******************************
-* *_destSize() variant
-********************************/
-
-static int LZ4_compress_destSize_generic(
- LZ4_stream_t_internal* const ctx,
- const char* const src,
- char* const dst,
- int* const srcSizePtr,
- const int targetDstSize,
- const tableType_t tableType)
-{
- const BYTE* ip = (const BYTE*) src;
- const BYTE* base = (const BYTE*) src;
- const BYTE* lowLimit = (const BYTE*) src;
- const BYTE* anchor = ip;
- const BYTE* const iend = ip + *srcSizePtr;
- const BYTE* const mflimit = iend - MFLIMIT;
- const BYTE* const matchlimit = iend - LASTLITERALS;
-
- BYTE* op = (BYTE*) dst;
- BYTE* const oend = op + targetDstSize;
- BYTE* const oMaxLit = op + targetDstSize - 2 /* offset */ - 8 /* because 8+MINMATCH==MFLIMIT */ - 1 /* token */;
- BYTE* const oMaxMatch = op + targetDstSize - (LASTLITERALS + 1 /* token */);
- BYTE* const oMaxSeq = oMaxLit - 1 /* token */;
-
- U32 forwardH;
-
-
- /* Init conditions */
- if (targetDstSize < 1) return 0; /* Impossible to store anything */
- if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size, too large (or negative) */
- if ((tableType == byU16) && (*srcSizePtr>=LZ4_64Klimit)) return 0; /* Size too large (not within 64K limit) */
- if (*srcSizePtr<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
-
- /* First Byte */
- *srcSizePtr = 0;
- LZ4_putPosition(ip, ctx->hashTable, tableType, base);
- ip++; forwardH = LZ4_hashPosition(ip, tableType);
-
- /* Main Loop */
- for ( ; ; ) {
- const BYTE* match;
- BYTE* token;
-
- /* Find a match */
- { const BYTE* forwardIp = ip;
- unsigned step = 1;
- unsigned searchMatchNb = 1 << LZ4_skipTrigger;
-
- do {
- U32 h = forwardH;
- ip = forwardIp;
- forwardIp += step;
- step = (searchMatchNb++ >> LZ4_skipTrigger);
-
- if (unlikely(forwardIp > mflimit)) goto _last_literals;
-
- match = LZ4_getPositionOnHash(h, ctx->hashTable, tableType, base);
- forwardH = LZ4_hashPosition(forwardIp, tableType);
- LZ4_putPositionOnHash(ip, h, ctx->hashTable, tableType, base);
-
- } while ( ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
- || (LZ4_read32(match) != LZ4_read32(ip)) );
- }
-
- /* Catch up */
- while ((ip>anchor) && (match > lowLimit) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
-
- /* Encode Literal length */
- { unsigned litLength = (unsigned)(ip - anchor);
- token = op++;
- if (op + ((litLength+240)/255) + litLength > oMaxLit) {
- /* Not enough space for a last match */
- op--;
- goto _last_literals;
- }
- if (litLength>=RUN_MASK) {
- unsigned len = litLength - RUN_MASK;
- *token=(RUN_MASK<<ML_BITS);
- for(; len >= 255 ; len-=255) *op++ = 255;
- *op++ = (BYTE)len;
- }
- else *token = (BYTE)(litLength<<ML_BITS);
-
- /* Copy Literals */
- LZ4_wildCopy(op, anchor, op+litLength);
- op += litLength;
- }
-
-_next_match:
- /* Encode Offset */
- LZ4_writeLE16(op, (U16)(ip-match)); op+=2;
-
- /* Encode MatchLength */
- { size_t matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
-
- if (op + ((matchLength+240)/255) > oMaxMatch) {
- /* Match description too long : reduce it */
- matchLength = (15-1) + (oMaxMatch-op) * 255;
- }
- ip += MINMATCH + matchLength;
-
- if (matchLength>=ML_MASK) {
- *token += ML_MASK;
- matchLength -= ML_MASK;
- while (matchLength >= 255) { matchLength-=255; *op++ = 255; }
- *op++ = (BYTE)matchLength;
- }
- else *token += (BYTE)(matchLength);
- }
-
- anchor = ip;
-
- /* Test end of block */
- if (ip > mflimit) break;
- if (op > oMaxSeq) break;
-
- /* Fill table */
- LZ4_putPosition(ip-2, ctx->hashTable, tableType, base);
-
- /* Test next position */
- match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
- LZ4_putPosition(ip, ctx->hashTable, tableType, base);
- if ( (match+MAX_DISTANCE>=ip)
- && (LZ4_read32(match)==LZ4_read32(ip)) )
- { token=op++; *token=0; goto _next_match; }
-
- /* Prepare next loop */
- forwardH = LZ4_hashPosition(++ip, tableType);
- }
-
-_last_literals:
- /* Encode Last Literals */
- { size_t lastRunSize = (size_t)(iend - anchor);
- if (op + 1 /* token */ + ((lastRunSize+240)/255) /* litLength */ + lastRunSize /* literals */ > oend) {
- /* adapt lastRunSize to fill 'dst' */
- lastRunSize = (oend-op) - 1;
- lastRunSize -= (lastRunSize+240)/255;
- }
- ip = anchor + lastRunSize;
-
- if (lastRunSize >= RUN_MASK) {
- size_t accumulator = lastRunSize - RUN_MASK;
- *op++ = RUN_MASK << ML_BITS;
- for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
- *op++ = (BYTE) accumulator;
- } else {
- *op++ = (BYTE)(lastRunSize<<ML_BITS);
- }
- memcpy(op, anchor, lastRunSize);
- op += lastRunSize;
- }
-
- /* End */
- *srcSizePtr = (int) (((const char*)ip)-src);
- return (int) (((char*)op)-dst);
+ return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, sizeof(void*)==8 ? byU32 : byPtr, noDict, noDictIssue, acceleration);
}
+/* Note!: This function leaves the stream in an unclean/broken state!
+ * It is not safe to subsequently use the same state with a _fastReset() or
+ * _continue() call without resetting it. */
static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
LZ4_resetStream(state);
@@ -958,18 +1111,20 @@ static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src,
if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */
return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
} else {
- if (*srcSizePtr < LZ4_64Klimit)
- return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, byU16);
- else
- return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, sizeof(void*)==8 ? byU32 : byPtr);
- }
+ if (*srcSizePtr < LZ4_64Klimit) {
+ return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1);
+ } else {
+ tableType_t const tableType = ((sizeof(void*)==4) && ((uptrval)src > MAX_DISTANCE)) ? byPtr : byU32;
+ return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, tableType, noDict, noDictIssue, 1);
+ } }
}
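For context, the public wrapper below drives this fillOutput path; a minimal usage sketch:

    int srcSize = (int)inputLength;  /* in: bytes available; out: bytes consumed */
    int const cSize = LZ4_compress_destSize(src, dst, &srcSize, dstCapacity);
    /* on success, cSize (<= dstCapacity) bytes were written to dst, and
     * srcSize was updated to the number of source bytes actually consumed */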
int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
#if (LZ4_HEAPMODE)
- LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOCATOR(1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
+ LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
+ if (ctx == NULL) return 0;
#else
LZ4_stream_t ctxBody;
LZ4_stream_t* ctx = &ctxBody;
@@ -991,21 +1146,28 @@ int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targe
LZ4_stream_t* LZ4_createStream(void)
{
- LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOCATOR(8, LZ4_STREAMSIZE_U64);
+ LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal)); /* A compilation error here means LZ4_STREAMSIZE is not large enough */
+ DEBUGLOG(4, "LZ4_createStream %p", lz4s);
+ if (lz4s == NULL) return NULL;
LZ4_resetStream(lz4s);
return lz4s;
}
void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
{
- DEBUGLOG(4, "LZ4_resetStream");
+ DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
}
+void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
+ LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
+}
+
int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
{
if (!LZ4_stream) return 0; /* support free on NULL */
+ DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
FREEMEM(LZ4_stream);
return (0);
}
@@ -1015,43 +1177,70 @@ int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
{
LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
+ const tableType_t tableType = byU32;
const BYTE* p = (const BYTE*)dictionary;
const BYTE* const dictEnd = p + dictSize;
const BYTE* base;
- if ((dict->initCheck) || (dict->currentOffset > 1 GB)) /* Uninitialized structure, or reuse overflow */
- LZ4_resetStream(LZ4_dict);
+ DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);
- if (dictSize < (int)HASH_UNIT) {
- dict->dictionary = NULL;
- dict->dictSize = 0;
- return 0;
- }
+ /* It's necessary to reset the context,
+ * and not just continue it with LZ4_prepareTable(),
+ * to avoid any risk of generating an overflowing matchIndex
+ * when compressing using this dictionary */
+ LZ4_resetStream(LZ4_dict);
+
+ /* We always increment the offset by 64 KB, since, if the dict is longer,
+ * we truncate it to the last 64k, and if it's shorter, we still want to
+ * advance by a whole window length so we can provide the guarantee that
+ * there are only valid offsets in the window, which allows an optimization
+ * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
+ * dictionary isn't a full 64k. */
if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
- dict->currentOffset += 64 KB;
- base = p - dict->currentOffset;
+ base = dictEnd - 64 KB - dict->currentOffset;
dict->dictionary = p;
dict->dictSize = (U32)(dictEnd - p);
- dict->currentOffset += dict->dictSize;
+ dict->currentOffset += 64 KB;
+ dict->tableType = tableType;
+
+ if (dictSize < (int)HASH_UNIT) {
+ return 0;
+ }
while (p <= dictEnd-HASH_UNIT) {
- LZ4_putPosition(p, dict->hashTable, byU32, base);
+ LZ4_putPosition(p, dict->hashTable, tableType, base);
p+=3;
}
return dict->dictSize;
}
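A short sketch of the truncation behavior described above (sizes illustrative):

    LZ4_stream_t s;
    LZ4_resetStream(&s);
    int const loaded = LZ4_loadDict(&s, hugeDict, 200 * 1024);
    /* loaded <= 64 KB : only the last 64 KB window of the dictionary is
     * indexed, and currentOffset still advances by a full 64 KB */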
+void LZ4_attach_dictionary(LZ4_stream_t *working_stream, const LZ4_stream_t *dictionary_stream) {
+ if (dictionary_stream != NULL) {
+ /* If the current offset is zero, we will never look in the
+ * external dictionary context, since there is no value a table
+ * entry can take that indicates a miss. In that case, we need
+ * to bump the offset to something non-zero.
+ */
+ if (working_stream->internal_donotuse.currentOffset == 0) {
+ working_stream->internal_donotuse.currentOffset = 64 KB;
+ }
+ working_stream->internal_donotuse.dictCtx = &(dictionary_stream->internal_donotuse);
+ } else {
+ working_stream->internal_donotuse.dictCtx = NULL;
+ }
+}
+
-static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
+static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
{
- if ((LZ4_dict->currentOffset > 0x80000000) ||
- ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) { /* address space overflow */
+ if (LZ4_dict->currentOffset + nextSize > 0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */
/* rescale hash table */
U32 const delta = LZ4_dict->currentOffset - 64 KB;
const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
int i;
+ DEBUGLOG(4, "LZ4_renormDictT");
for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
else LZ4_dict->hashTable[i] -= delta;
@@ -1065,15 +1254,25 @@ static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
+ const tableType_t tableType = byU32;
LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
- const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
+ const BYTE* dictEnd = streamPtr->dictionary + streamPtr->dictSize;
+
+ DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i)", inputSize);
- const BYTE* smallest = (const BYTE*) source;
if (streamPtr->initCheck) return 0; /* Uninitialized structure detected */
- if ((streamPtr->dictSize>0) && (smallest>dictEnd)) smallest = dictEnd;
- LZ4_renormDictT(streamPtr, smallest);
+ LZ4_renormDictT(streamPtr, inputSize); /* avoid index overflow */
if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
+ /* invalidate tiny dictionaries */
+ if ( (streamPtr->dictSize-1 < 4) /* intentional underflow */
+ && (dictEnd != (const BYTE*)source) ) {
+ DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, streamPtr->dictionary);
+ streamPtr->dictSize = 0;
+ streamPtr->dictionary = (const BYTE*)source;
+ dictEnd = (const BYTE*)source;
+ }
+
/* Check overlapping input/dictionary space */
{ const BYTE* sourceEnd = (const BYTE*) source + inputSize;
if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
@@ -1086,46 +1285,61 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, ch
/* prefix mode : source data follows dictionary */
if (dictEnd == (const BYTE*)source) {
- int result;
if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, dictSmall, acceleration);
+ return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);
else
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, noDictIssue, acceleration);
- streamPtr->dictSize += (U32)inputSize;
- streamPtr->currentOffset += (U32)inputSize;
- return result;
+ return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);
}
/* external dictionary mode */
{ int result;
- if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, dictSmall, acceleration);
- else
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, noDictIssue, acceleration);
+ if (streamPtr->dictCtx) {
+ /* We depend here on the fact that dictCtx'es (produced by
+ * LZ4_loadDict) guarantee that their tables contain no references
+ * to offsets between dictCtx->currentOffset - 64 KB and
+ * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe
+ * to use noDictIssue even when the dict isn't a full 64 KB.
+ */
+ if (inputSize > 4 KB) {
+ /* For compressing large blobs, it is faster to pay the setup
+ * cost to copy the dictionary's tables into the active context,
+ * so that the compression loop is only looking into one table.
+ */
+ memcpy(streamPtr, streamPtr->dictCtx, sizeof(LZ4_stream_t));
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
+ } else {
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);
+ }
+ } else {
+ if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration);
+ } else {
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
+ }
+ }
streamPtr->dictionary = (const BYTE*)source;
streamPtr->dictSize = (U32)inputSize;
- streamPtr->currentOffset += (U32)inputSize;
return result;
}
}
-/* Hidden debug function, to force external dictionary mode */
-int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int inputSize)
+/* Hidden debug function, to force-test external dictionary mode */
+int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)
{
LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
int result;
- const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
- const BYTE* smallest = dictEnd;
- if (smallest > (const BYTE*) source) smallest = (const BYTE*) source;
- LZ4_renormDictT(streamPtr, smallest);
+ LZ4_renormDictT(streamPtr, srcSize);
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
+ if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
+ result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1);
+ } else {
+ result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
+ }
streamPtr->dictionary = (const BYTE*)source;
- streamPtr->dictSize = (U32)inputSize;
- streamPtr->currentOffset += (U32)inputSize;
+ streamPtr->dictSize = (U32)srcSize;
return result;
}
@@ -1196,41 +1410,72 @@ LZ4_FORCE_INLINE int LZ4_decompress_generic(
const int safeDecode = (endOnInput==endOnInputSize);
const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));
+ /* Set up the "end" pointers for the shortcut. */
+ const BYTE* const shortiend = iend - (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
+ const BYTE* const shortoend = oend - (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;
+
+ DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i)", srcSize);
/* Special cases */
if ((partialDecoding) && (oexit > oend-MFLIMIT)) oexit = oend-MFLIMIT; /* targetOutputSize too high => just decode everything */
if ((endOnInput) && (unlikely(outputSize==0))) return ((srcSize==1) && (*ip==0)) ? 0 : -1; /* Empty output buffer */
if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1);
+ if ((endOnInput) && unlikely(srcSize==0)) return -1;
/* Main Loop : decode sequences */
while (1) {
- size_t length;
const BYTE* match;
size_t offset;
unsigned const token = *ip++;
-
- /* shortcut for common case :
- * in most circumstances, we expect to decode small matches (<= 18 bytes) separated by few literals (<= 14 bytes).
- * this shortcut was tested on x86 and x64, where it improves decoding speed.
- * it has not yet been benchmarked on ARM, Power, mips, etc. */
- if (((ip + 14 /*maxLL*/ + 2 /*offset*/ <= iend)
- & (op + 14 /*maxLL*/ + 18 /*maxML*/ <= oend))
- & ((token < (15<<ML_BITS)) & ((token & ML_MASK) != 15)) ) {
- size_t const ll = token >> ML_BITS;
- size_t const off = LZ4_readLE16(ip+ll);
- const BYTE* const matchPtr = op + ll - off; /* pointer underflow risk ? */
- if ((off >= 18) /* do not deal with overlapping matches */ & (matchPtr >= lowPrefix)) {
- size_t const ml = (token & ML_MASK) + MINMATCH;
- memcpy(op, ip, 16); op += ll; ip += ll + 2 /*offset*/;
- memcpy(op, matchPtr, 18); op += ml;
+ size_t length = token >> ML_BITS; /* literal length */
+
+ assert(!endOnInput || ip <= iend); /* ip < iend before the increment */
+
+ /* A two-stage shortcut for the most common case:
+ * 1) If the literal length is 0..14, and there is enough space,
+ * enter the shortcut and copy 16 bytes on behalf of the literals
+ * (in the fast mode, only 8 bytes can be safely copied this way).
+ * 2) Further if the match length is 4..18, copy 18 bytes in a similar
+ * manner; but we ensure that there's enough space in the output for
+ * those 18 bytes earlier, upon entering the shortcut (in other words,
+ * there is a combined check for both stages).
+ */
+ if ( (endOnInput ? length != RUN_MASK : length <= 8)
+ /* strictly "less than" on input, to re-enter the loop with at least one byte */
+ && likely((endOnInput ? ip < shortiend : 1) & (op <= shortoend)) ) {
+ /* Copy the literals */
+ memcpy(op, ip, endOnInput ? 16 : 8);
+ op += length; ip += length;
+
+ /* The second stage: prepare for match copying, decode full info.
+ * If it doesn't work out, the info won't be wasted. */
+ length = token & ML_MASK; /* match length */
+ offset = LZ4_readLE16(ip); ip += 2;
+ match = op - offset;
+
+ /* Do not deal with overlapping matches. */
+ if ( (length != ML_MASK)
+ && (offset >= 8)
+ && (dict==withPrefix64k || match >= lowPrefix) ) {
+ /* Copy the match. */
+ memcpy(op + 0, match + 0, 8);
+ memcpy(op + 8, match + 8, 8);
+ memcpy(op +16, match +16, 2);
+ op += length + MINMATCH;
+ /* Both stages worked, load the next token. */
continue;
}
+
+ /* The second stage didn't work out, but the info is ready.
+ * Propel it right to the point of match copying. */
+ goto _copy_match;
}
/* decode literal length */
- if ((length=(token>>ML_BITS)) == RUN_MASK) {
+ if (length == RUN_MASK) {
unsigned s;
+ if (unlikely(endOnInput ? ip >= iend-RUN_MASK : 0)) goto _output_error; /* overflow detection */
do {
s = *ip++;
length += s;
@@ -1262,11 +1507,14 @@ LZ4_FORCE_INLINE int LZ4_decompress_generic(
/* get offset */
offset = LZ4_readLE16(ip); ip+=2;
match = op - offset;
- if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */
- LZ4_write32(op, (U32)offset); /* costs ~1%; silence an msan warning when offset==0 */
/* get matchlength */
length = token & ML_MASK;
+
+_copy_match:
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */
+ LZ4_write32(op, (U32)offset); /* costs ~1%; silence an msan warning when offset==0 */
+
if (length == ML_MASK) {
unsigned s;
do {
@@ -1313,7 +1561,7 @@ LZ4_FORCE_INLINE int LZ4_decompress_generic(
match += inc32table[offset];
memcpy(op+4, match, 4);
match -= dec64table[offset];
- } else { LZ4_copy8(op, match); match+=8; }
+ } else { memcpy(op, match, 8); match+=8; }
op += 8;
if (unlikely(cpy>oend-12)) {
@@ -1326,7 +1574,7 @@ LZ4_FORCE_INLINE int LZ4_decompress_generic(
}
while (op<cpy) *op++ = *match++;
} else {
- LZ4_copy8(op, match);
+ memcpy(op, match, 8);
if (length>16) LZ4_wildCopy(op+8, match+8, cpy);
}
op = cpy; /* correction */
@@ -1344,30 +1592,105 @@ _output_error:
}
+/*===== Instantiate the API decoding functions. =====*/
+
LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
{
- return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, full, 0, noDict, (BYTE*)dest, NULL, 0);
+ return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,
+ endOnInputSize, full, 0, noDict,
+ (BYTE*)dest, NULL, 0);
}
LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize)
{
- return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, partial, targetOutputSize, noDict, (BYTE*)dest, NULL, 0);
+ return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,
+ endOnInputSize, partial, targetOutputSize,
+ noDict, (BYTE*)dest, NULL, 0);
}
LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
{
- return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), NULL, 64 KB);
+ return LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, full, 0, withPrefix64k,
+ (BYTE*)dest - 64 KB, NULL, 0);
}
+/*===== Instantiate a few more decoding cases, used more than once. =====*/
+
+LZ4_FORCE_O2_GCC_PPC64LE /* Exported, an obsolete API function. */
+int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ endOnInputSize, full, 0, withPrefix64k,
+ (BYTE*)dest - 64 KB, NULL, 0);
+}
+
+/* Another obsolete API function, paired with the previous one. */
+int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
+{
+ /* LZ4_decompress_fast doesn't validate match offsets,
+ * and thus serves well with any prefixed dictionary. */
+ return LZ4_decompress_fast(source, dest, originalSize);
+}
+
+LZ4_FORCE_O2_GCC_PPC64LE
+static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize,
+ size_t prefixSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ endOnInputSize, full, 0, noDict,
+ (BYTE*)dest-prefixSize, NULL, 0);
+}
+
+LZ4_FORCE_O2_GCC_PPC64LE /* Exported under another name, for tests/fullbench.c */
+#define LZ4_decompress_safe_extDict LZ4_decompress_safe_forceExtDict
+int LZ4_decompress_safe_extDict(const char* source, char* dest, int compressedSize, int maxOutputSize,
+ const void* dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ endOnInputSize, full, 0, usingExtDict,
+ (BYTE*)dest, (const BYTE*)dictStart, dictSize);
+}
+
+LZ4_FORCE_O2_GCC_PPC64LE
+static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize,
+ const void* dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, full, 0, usingExtDict,
+ (BYTE*)dest, (const BYTE*)dictStart, dictSize);
+}
+
+/* The "double dictionary" mode, for use with e.g. ring buffers: the first part
+ * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
+ * These routines are used only once, in LZ4_decompress_*_continue().
+ */
+LZ4_FORCE_INLINE
+int LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize,
+ size_t prefixSize, const void* dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ endOnInputSize, full, 0, usingExtDict,
+ (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
+}
+
+LZ4_FORCE_INLINE
+int LZ4_decompress_fast_doubleDict(const char* source, char* dest, int originalSize,
+ size_t prefixSize, const void* dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, full, 0, usingExtDict,
+ (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
+}
/*===== streaming decompression functions =====*/
LZ4_streamDecode_t* LZ4_createStreamDecode(void)
{
- LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOCATOR(1, sizeof(LZ4_streamDecode_t));
+ LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));
return lz4s;
}
@@ -1378,12 +1701,11 @@ int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
return 0;
}
-/*!
- * LZ4_setStreamDecode() :
- * Use this function to instruct where to find the dictionary.
- * This function is not necessary if previous data is still available where it was decoded.
- * Loading a size of 0 is allowed (same effect as no dictionary).
- * Return : 1 if OK, 0 if error
+/*! LZ4_setStreamDecode() :
+ * Use this function to instruct where to find the dictionary.
+ * This function is not necessary if previous data is still available where it was decoded.
+ * Loading a size of 0 is allowed (same effect as no dictionary).
+ * @return : 1 if OK, 0 if error
*/
int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
{
@@ -1395,6 +1717,25 @@ int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dicti
return 1;
}
+/*! LZ4_decoderRingBufferSize() :
+ *  When using a ring buffer for streaming decompression (an optional scenario),
+ *  this function provides the minimum size the ring buffer must have
+ *  to be compatible with any source respecting the maxBlockSize condition.
+ *  Note : in a ring buffer scenario,
+ *  blocks are presumed to be decompressed next to each other.
+ *  When not enough space remains for the next block (remainingSize < maxBlockSize),
+ *  decoding resumes from the beginning of the ring buffer.
+ * @return : minimum ring buffer size,
+ *           or 0 if there is an error (invalid maxBlockSize).
+ */
+int LZ4_decoderRingBufferSize(int maxBlockSize)
+{
+ if (maxBlockSize < 0) return 0;
+ if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0;
+ if (maxBlockSize < 16) maxBlockSize = 16;
+ return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize);
+}
+
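For sizing such a buffer, a short sketch, under the assumption that the companion macro LZ4_DECODER_RING_BUFFER_SIZE() ships in lz4.h alongside this function; the 4 KB block size is an arbitrary example:

#include "lz4.h"
#include <stdlib.h>

/* Static variant : block size known at compile time. */
static char g_ring[LZ4_DECODER_RING_BUFFER_SIZE(4096)];

/* Dynamic variant : validates maxBlockSize at run time. */
static char* alloc_ring(int maxBlockSize, int* ringSize)
{
    int const size = LZ4_decoderRingBufferSize(maxBlockSize);
    if (size == 0) return NULL;          /* invalid maxBlockSize */
    *ringSize = size;
    return (char*)malloc((size_t)size);
}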
/*
*_continue() :
These decoding functions allow decompression of multiple blocks in "streaming" mode.
@@ -1408,19 +1749,32 @@ int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const ch
LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
int result;
- if (lz4sd->prefixEnd == (BYTE*)dest) {
- result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
- endOnInputSize, full, 0,
- usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
+ if (lz4sd->prefixSize == 0) {
+ /* The first call, no dictionary yet. */
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize = result;
+ lz4sd->prefixEnd = (BYTE*)dest + result;
+ } else if (lz4sd->prefixEnd == (BYTE*)dest) {
+ /* The caller is appending to the current segment. */
+ if (lz4sd->prefixSize >= 64 KB - 1)
+ result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
+ else if (lz4sd->extDictSize == 0)
+ result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize,
+ lz4sd->prefixSize);
+ else
+ result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize,
+ lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0) return result;
lz4sd->prefixSize += result;
lz4sd->prefixEnd += result;
} else {
+ /* The buffer wraps around, or the caller has switched to another buffer. */
lz4sd->extDictSize = lz4sd->prefixSize;
lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
- result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
- endOnInputSize, full, 0,
- usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
+ result = LZ4_decompress_safe_extDict(source, dest, compressedSize, maxOutputSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0) return result;
lz4sd->prefixSize = result;
lz4sd->prefixEnd = (BYTE*)dest + result;
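The dispatch above rewards contiguous decoding: as long as each block is written immediately after the previous one, prefixEnd keeps matching dest and the cheaper prefix variants are selected. A sketch of that usage pattern; next_block() and the 4 KB block bound are placeholders:

#include "lz4.h"

extern int next_block(char* cmp, int cmpCapacity);   /* hypothetical block source */

int decode_contiguous(char* dst, int dstCapacity)
{
    LZ4_streamDecode_t sd;
    char cmp[LZ4_COMPRESSBOUND(4096)];
    int total = 0;

    LZ4_setStreamDecode(&sd, NULL, 0);
    for (;;) {
        int const cmpSize = next_block(cmp, (int)sizeof(cmp));
        if (cmpSize <= 0) return total;              /* end of input */
        {   int const d = LZ4_decompress_safe_continue(&sd, cmp,
                               dst + total, cmpSize, dstCapacity - total);
            if (d <= 0) return d;                    /* decoding error */
            total += d;                              /* dest stays contiguous */
        }
    }
}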
@@ -1435,19 +1789,26 @@ int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const ch
LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
int result;
- if (lz4sd->prefixEnd == (BYTE*)dest) {
- result = LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, full, 0,
- usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
+ if (lz4sd->prefixSize == 0) {
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_fast(source, dest, originalSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize = originalSize;
+ lz4sd->prefixEnd = (BYTE*)dest + originalSize;
+ } else if (lz4sd->prefixEnd == (BYTE*)dest) {
+ if (lz4sd->prefixSize >= 64 KB - 1 || lz4sd->extDictSize == 0)
+ result = LZ4_decompress_fast(source, dest, originalSize);
+ else
+ result = LZ4_decompress_fast_doubleDict(source, dest, originalSize,
+ lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0) return result;
lz4sd->prefixSize += originalSize;
lz4sd->prefixEnd += originalSize;
} else {
lz4sd->extDictSize = lz4sd->prefixSize;
lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
- result = LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, full, 0,
- usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
+ result = LZ4_decompress_fast_extDict(source, dest, originalSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0) return result;
lz4sd->prefixSize = originalSize;
lz4sd->prefixEnd = (BYTE*)dest + originalSize;
@@ -1464,36 +1825,23 @@ Advanced decoding functions :
the dictionary must be explicitly provided within parameters
*/
-LZ4_FORCE_O2_GCC_PPC64LE
-LZ4_FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest, int compressedSize, int maxOutputSize, int safe, const char* dictStart, int dictSize)
+int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
{
if (dictSize==0)
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest, NULL, 0);
+ return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
if (dictStart+dictSize == dest) {
- if (dictSize >= (int)(64 KB - 1))
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, withPrefix64k, (BYTE*)dest-64 KB, NULL, 0);
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest-dictSize, NULL, 0);
+ if (dictSize >= 64 KB - 1)
+ return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
+ return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, dictSize);
}
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
+ return LZ4_decompress_safe_extDict(source, dest, compressedSize, maxOutputSize, dictStart, dictSize);
}
-LZ4_FORCE_O2_GCC_PPC64LE
-int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
-{
- return LZ4_decompress_usingDict_generic(source, dest, compressedSize, maxOutputSize, 1, dictStart, dictSize);
-}
-
-LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
{
- return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize, 0, dictStart, dictSize);
-}
-
-/* debug function */
-LZ4_FORCE_O2_GCC_PPC64LE
-int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
-{
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
+ if (dictSize==0 || dictStart+dictSize == dest)
+ return LZ4_decompress_fast(source, dest, originalSize);
+ return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, dictSize);
}
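Callers see only the single entry point; the placement of the dictionary relative to dest decides which specialization runs. A minimal sketch, with dictData, workspace and their sizes being illustrative names: placing the dictionary directly before the output region lets the prefix paths above apply.

#include "lz4.h"
#include <string.h>

int decode_after_dict(const char* src, int srcSize,
                      const char* dictData, int dictSize,
                      char* workspace, int workCapacity)
{
    memcpy(workspace, dictData, (size_t)dictSize);  /* dictionary first ... */
    return LZ4_decompress_safe_usingDict(src,
                    workspace + dictSize, srcSize,  /* ... output right after it */
                    workCapacity - dictSize,
                    workspace, dictSize);
}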
@@ -1501,64 +1849,67 @@ int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compres
* Obsolete Functions
***************************************************/
/* obsolete compression functions */
-int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) { return LZ4_compress_default(source, dest, inputSize, maxOutputSize); }
-int LZ4_compress(const char* source, char* dest, int inputSize) { return LZ4_compress_default(source, dest, inputSize, LZ4_compressBound(inputSize)); }
-int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); }
-int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); }
-int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, maxDstSize, 1); }
-int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) { return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); }
+int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
+{
+ return LZ4_compress_default(source, dest, inputSize, maxOutputSize);
+}
+int LZ4_compress(const char* source, char* dest, int inputSize)
+{
+ return LZ4_compress_default(source, dest, inputSize, LZ4_compressBound(inputSize));
+}
+int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)
+{
+ return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);
+}
+int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize)
+{
+ return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1);
+}
+int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity)
+{
+ return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1);
+}
+int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize)
+{
+ return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1);
+}
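As the wrappers make explicit, each obsolete entry point maps one-to-one onto a current function. A sketch of the direct replacement for a plain LZ4_compress() call; supplying the destination capacity is the only new obligation:

#include "lz4.h"

int compress_preferred(const char* src, char* dst, int srcSize, int dstCapacity)
{
    /* dstCapacity >= LZ4_compressBound(srcSize) guarantees compression succeeds */
    return LZ4_compress_default(src, dst, srcSize, dstCapacity);
}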
/*
-These function names are deprecated and should no longer be used.
+These decompression functions are deprecated and should no longer be used.
They are only provided here for compatibility with older user programs.
- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
*/
-int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); }
-int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); }
-
+int LZ4_uncompress (const char* source, char* dest, int outputSize)
+{
+ return LZ4_decompress_fast(source, dest, outputSize);
+}
+int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize)
+{
+ return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
+}
/* Obsolete Streaming functions */
int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; }
-static void LZ4_init(LZ4_stream_t* lz4ds, BYTE* base)
-{
- MEM_INIT(lz4ds, 0, sizeof(LZ4_stream_t));
- lz4ds->internal_donotuse.bufferStart = base;
-}
-
int LZ4_resetStreamState(void* state, char* inputBuffer)
{
- if ((((uptrval)state) & 3) != 0) return 1; /* Error : pointer is not aligned on 4-bytes boundary */
- LZ4_init((LZ4_stream_t*)state, (BYTE*)inputBuffer);
+ (void)inputBuffer;
+ LZ4_resetStream((LZ4_stream_t*)state);
return 0;
}
void* LZ4_create (char* inputBuffer)
{
- LZ4_stream_t* lz4ds = (LZ4_stream_t*)ALLOCATOR(8, sizeof(LZ4_stream_t));
- LZ4_init (lz4ds, (BYTE*)inputBuffer);
- return lz4ds;
+ (void)inputBuffer;
+ return LZ4_createStream();
}
-char* LZ4_slideInputBuffer (void* LZ4_Data)
-{
- LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)LZ4_Data)->internal_donotuse;
- int dictSize = LZ4_saveDict((LZ4_stream_t*)LZ4_Data, (char*)ctx->bufferStart, 64 KB);
- return (char*)(ctx->bufferStart + dictSize);
-}
-
-/* Obsolete streaming decompression functions */
-
-int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
-{
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
-}
-
-int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
+char* LZ4_slideInputBuffer (void* state)
{
- return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
+ /* avoid const char * -> char * conversion warning */
+ return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;
}
#endif /* LZ4_COMMONDEFS_ONLY */