author    Yann Collet <yann.collet.73@gmail.com>  2014-05-19 23:40:29 (GMT)
committer Yann Collet <yann.collet.73@gmail.com>  2014-05-19 23:40:29 (GMT)
commit    4db6b03fceac50961a8f127aa2eda73d3373a1fe (patch)
tree      f7e81ed5b643e430126fa7a8c82d16e6f3b33799
parent    7bcb3b2e9f36ad6adef2cb43858a8f3adb39c527 (diff)
First version of Block Streaming API : LZ4_compress_usingDict()
-rw-r--r--  lz4.c                 537
-rw-r--r--  lz4.h                 144
-rw-r--r--  programs/Makefile      19
-rw-r--r--  programs/fullbench.c   68
-rw-r--r--  programs/fuzzer.c      63
-rw-r--r--  programs/lz4.1          3
-rw-r--r--  programs/lz4cli.c       3
7 files changed, 543 insertions, 294 deletions
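Note : a minimal sketch of how the new block-streaming API is meant to be chained, reconstructed from the declarations this commit adds to lz4.h. Caller-side names (compress_chained, block1, out1, ...) are illustrative only, not part of the commit; output buffers are assumed LZ4_compressBound()-sized since this first version of LZ4_compress_usingDict() has no output-size limit.

    #include <string.h>
    #include "lz4.h"

    /* Sketch only : compress two blocks so the second can reference the first,
     * then decode the second block with the first as an external dictionary. */
    int compress_chained(const char* block1, int size1, char* out1,
                         const char* block2, int size2, char* out2)
    {
        LZ4_dict_t dict;
        int c1, c2;
        memset(&dict, 0, sizeof(dict));      /* mandatory before first use (see lz4.h) */

        c1 = LZ4_compress_usingDict(&dict, block1, out1, size1);
        /* block1 is assumed to still be present at the same address for the next call */
        c2 = LZ4_compress_usingDict(&dict, block2, out2, size2);
        if (c1==0 || c2==0) return 0;        /* 0 signals a compression error */

        /* decoding block2 later needs block1 (or its decoded copy) as dictionary :
           LZ4_decompress_safe_usingDict(out2, dst, c2, size2, block1, size1);   */
        return c2;
    }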
diff --git a/lz4.c b/lz4.c
index 8001edb..98a6ea8 100644
--- a/lz4.c
+++ b/lz4.c
@@ -35,15 +35,6 @@
Tuning parameters
**************************************/
/*
- * MEMORY_USAGE :
- * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
- * Increasing memory usage improves compression ratio
- * Reduced memory usage can improve speed, due to cache effect
- * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
- */
-#define MEMORY_USAGE 14
-
-/*
* HEAPMODE :
* Select how default compression functions will allocate memory for their hash table,
* in memory stack (0:default, fastest), or in memory heap (1:requires memory allocation (malloc)).
@@ -118,7 +109,6 @@
#endif
#ifdef _MSC_VER /* Visual Studio */
-# define FORCE_INLINE static __forceinline
# include <intrin.h> /* For Visual 2005 */
# if LZ4_ARCH64 /* 64-bits */
# pragma intrinsic(_BitScanForward64) /* For Visual 2005 */
@@ -128,15 +118,6 @@
# pragma intrinsic(_BitScanReverse) /* For Visual 2005 */
# endif
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
-#else
-# ifdef __GNUC__
-# define FORCE_INLINE static inline __attribute__((always_inline))
-# else
-# define FORCE_INLINE static inline
-# endif
-#endif
-
-#ifdef _MSC_VER /* Visual Studio */
# define lz4_bswap16(x) _byteswap_ushort(x)
#else
# define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
@@ -224,9 +205,9 @@ typedef struct {size_t v;} _PACKED size_t_S;
/**************************************
Constants
**************************************/
-#define LZ4_HASHLOG (MEMORY_USAGE-2)
-#define HASHTABLESIZE (1 << MEMORY_USAGE)
-#define HASHNBCELLS4 (1 << LZ4_HASHLOG)
+#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
+#define HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
+#define HASH_SIZE_U32 (1 << LZ4_HASHLOG)
#define MINMATCH 4
@@ -255,16 +236,24 @@ static const int LZ4_minLength = (MFLIMIT+1);
Structures and local types
**************************************/
typedef struct {
- U32 hashTable[HASHNBCELLS4];
+ U32 hashTable[HASH_SIZE_U32];
const BYTE* bufferStart;
const BYTE* base;
const BYTE* nextBlock;
} LZ4_Data_Structure;
+typedef struct {
+ U32 hashTable[HASH_SIZE_U32];
+ U32 currentOffset;
+ U32 initCheck;
+ const BYTE* dictionary;
+ U32 dictSize;
+} LZ4_dict_t_internal;
+
typedef enum { notLimited = 0, limited = 1 } limitedOutput_directive;
typedef enum { byPtr, byU32, byU16 } tableType_t;
-typedef enum { noDict = 0, withPrefix64k = 1, withExtDict=2 } dict_directive;
+typedef enum { noDict = 0, withPrefix64k = 1, usingDict = 2 } dict_directive;
typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
typedef enum { full = 0, partial = 1 } earlyEnd_directive;
@@ -289,12 +278,12 @@ typedef enum { full = 0, partial = 1 } earlyEnd_directive;
/**************************************
Macros
**************************************/
+#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(!!(c)) }; } /* use only *after* variable declarations */
#if LZ4_ARCH64 || !defined(__GNUC__)
-# define LZ4_WILDCOPY(d,s,e) { do { LZ4_COPY8(d,s) } while (d<e); } /* at the end, d>=e; */
+# define LZ4_WILDCOPY(d,s,e) { do { LZ4_COPY8(d,s) } while (d<e); } /* at the end, d>=e; */
#else
-# define LZ4_WILDCOPY(d,s,e) { if (likely(e-d <= 8)) LZ4_COPY8(d,s) else do { LZ4_COPY8(d,s) } while (d<e); }
+# define LZ4_WILDCOPY(d,s,e) { if (likely(e-d <= 8)) LZ4_COPY8(d,s) else do { LZ4_COPY8(d,s) } while (d<e); }
#endif
-#define LZ4_SECURECOPY(d,s,e) { if (d<e) LZ4_WILDCOPY(d,s,e); }
/****************************
@@ -302,7 +291,7 @@ typedef enum { full = 0, partial = 1 } earlyEnd_directive;
****************************/
#if LZ4_ARCH64
-FORCE_INLINE int LZ4_NbCommonBytes (register U64 val)
+int LZ4_NbCommonBytes (register U64 val)
{
# if defined(LZ4_BIG_ENDIAN)
# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
@@ -334,7 +323,7 @@ FORCE_INLINE int LZ4_NbCommonBytes (register U64 val)
#else
-FORCE_INLINE int LZ4_NbCommonBytes (register U32 val)
+int LZ4_NbCommonBytes (register U32 val)
{
# if defined(LZ4_BIG_ENDIAN)
# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
@@ -371,7 +360,7 @@ FORCE_INLINE int LZ4_NbCommonBytes (register U32 val)
****************************/
int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
-FORCE_INLINE int LZ4_hashSequence(U32 sequence, tableType_t tableType)
+static int LZ4_hashSequence(U32 sequence, tableType_t tableType)
{
if (tableType == byU16)
return (((sequence) * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
@@ -379,9 +368,9 @@ FORCE_INLINE int LZ4_hashSequence(U32 sequence, tableType_t tableType)
return (((sequence) * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
}
-FORCE_INLINE int LZ4_hashPosition(const BYTE* p, tableType_t tableType) { return LZ4_hashSequence(A32(p), tableType); }
+static int LZ4_hashPosition(const BYTE* p, tableType_t tableType) { return LZ4_hashSequence(A32(p), tableType); }
-FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
switch (tableType)
{
@@ -391,27 +380,45 @@ FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, t
}
}
-FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+static void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
U32 h = LZ4_hashPosition(p, tableType);
LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
}
-FORCE_INLINE const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
if (tableType == byPtr) { const BYTE** hashTable = (const BYTE**) tableBase; return hashTable[h]; }
if (tableType == byU32) { U32* hashTable = (U32*) tableBase; return hashTable[h] + srcBase; }
{ U16* hashTable = (U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
}
-FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+static const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
U32 h = LZ4_hashPosition(p, tableType);
return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
}
+static unsigned LZ4_count(const BYTE* pIn, const BYTE* pRef, const BYTE* pInLimit)
+{
+ const BYTE* const pStart = pIn;
+
+ while (likely(pIn<pInLimit-(STEPSIZE-1)))
+ {
+ size_t diff = AARCH(pRef) ^ AARCH(pIn);
+ if (!diff) { pIn+=STEPSIZE; pRef+=STEPSIZE; continue; }
+ pIn += LZ4_NbCommonBytes(diff);
+ return (unsigned)(pIn - pStart);
+ }
+ if (LZ4_ARCH64) if ((pIn<(pInLimit-3)) && (A32(pRef) == A32(pIn))) { pIn+=4; pRef+=4; }
+ if ((pIn<(pInLimit-1)) && (A16(pRef) == A16(pIn))) { pIn+=2; pRef+=2; }
+ if ((pIn<pInLimit) && (*pRef == *pIn)) pIn++;
+
+ return (unsigned)(pIn - pStart);
+}
+
-FORCE_INLINE int LZ4_compress_generic(
+int LZ4_compress_generic(
void* ctx,
const char* source,
char* dest,
@@ -423,7 +430,7 @@ FORCE_INLINE int LZ4_compress_generic(
dict_directive dict)
{
const BYTE* ip = (const BYTE*) source;
- const BYTE* const base = (dict==withPrefix64k) ? ((LZ4_Data_Structure*)ctx)->base : (const BYTE*) source;
+ const BYTE* base;
const BYTE* const lowLimit = ((dict==withPrefix64k) ? ((LZ4_Data_Structure*)ctx)->bufferStart : (const BYTE*)source);
const BYTE* anchor = (const BYTE*) source;
const BYTE* const iend = ip + inputSize;
@@ -433,16 +440,27 @@ FORCE_INLINE int LZ4_compress_generic(
BYTE* op = (BYTE*) dest;
BYTE* const oend = op + maxOutputSize;
- int length;
const int skipStrength = SKIPSTRENGTH;
U32 forwardH;
/* Init conditions */
- if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size, too large (or negative) */
- if ((dict==withPrefix64k) && (ip != ((LZ4_Data_Structure*)ctx)->nextBlock)) return 0; /* must continue from end of previous block */
- if (dict==withPrefix64k) ((LZ4_Data_Structure*)ctx)->nextBlock=iend; /* do it now, due to potential early exit */
- if ((tableType == byU16) && (inputSize>=(int)LZ4_64KLIMIT)) return 0; /* Size too large (not within 64K limit) */
- if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
+ //if (tableType==byPtr) tableType=byU32;
+ if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size, too large (or negative) */
+ switch(dict)
+ {
+ case noDict:
+ default:
+ base = (const BYTE*)source; break;
+ case withPrefix64k:
+ base =((LZ4_Data_Structure*)ctx)->base;
+ if (ip != ((LZ4_Data_Structure*)ctx)->nextBlock) return 0; /* must continue from end of previous block */
+ ((LZ4_Data_Structure*)ctx)->nextBlock = iend; /* do it now, due to potential early exit */
+ break;
+ case usingDict:
+ base = (const BYTE*)source - ((LZ4_dict_t_internal*)ctx)->currentOffset; break;
+ }
+ if ((tableType == byU16) && (inputSize>=(int)LZ4_64KLIMIT)) return 0; /* Size too large (not within 64K limit) */
+ if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
/* First Byte */
LZ4_putPosition(ip, ctx, tableType, base);
@@ -451,22 +469,21 @@ FORCE_INLINE int LZ4_compress_generic(
/* Main Loop */
for ( ; ; )
{
- int findMatchAttempts = (1U << skipStrength) + 3;
+ int searchMatchNb = (1U << skipStrength) + 3;
const BYTE* forwardIp = ip;
const BYTE* ref;
BYTE* token;
/* Find a match */
do {
+ int step = searchMatchNb++ >> skipStrength;
U32 h = forwardH;
- int step = findMatchAttempts++ >> skipStrength;
ip = forwardIp;
- forwardIp = ip + step;
+ forwardIp += step;
- if (unlikely(forwardIp > mflimit)) { goto _last_literals; }
-
- forwardH = LZ4_hashPosition(forwardIp, tableType);
ref = LZ4_getPositionOnHash(h, ctx, tableType, base);
+ if (unlikely(ip >= mflimit)) goto _last_literals;
+ forwardH = LZ4_hashPosition(forwardIp, tableType);
LZ4_putPositionOnHash(ip, h, ctx, tableType, base);
} while ((ref + MAX_DISTANCE < ip) || (A32(ref) != A32(ip)));
@@ -474,56 +491,50 @@ FORCE_INLINE int LZ4_compress_generic(
/* Catch up */
while ((ip>anchor) && (ref > lowLimit) && (unlikely(ip[-1]==ref[-1]))) { ip--; ref--; }
- /* Encode Literal length */
- length = (int)(ip - anchor);
- token = op++;
- if ((limitedOutput) && (unlikely(op + length + (2 + 1 + LASTLITERALS) + (length/255) > oend))) return 0; /* Check output limit */
- if (length>=(int)RUN_MASK)
{
- int len = length-RUN_MASK;
- *token=(RUN_MASK<<ML_BITS);
- for(; len >= 255 ; len-=255) *op++ = 255;
- *op++ = (BYTE)len;
- }
- else *token = (BYTE)(length<<ML_BITS);
+ unsigned litLength;
- /* Copy Literals */
- { BYTE* end=(op)+(length); LZ4_WILDCOPY(op,anchor,end); op=end; }
+ /* Encode Literal length */
+ litLength = (ip - anchor);
+ token = op++;
+ if ((limitedOutput) && (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > oend))) return 0; /* Check output limit */
+ if (litLength>=RUN_MASK)
+ {
+ int len = (int)litLength-RUN_MASK;
+ *token=(RUN_MASK<<ML_BITS);
+ for(; len >= 255 ; len-=255) *op++ = 255;
+ *op++ = (BYTE)len;
+ }
+ else *token = (BYTE)(litLength<<ML_BITS);
+
+ /* Copy Literals */
+ { BYTE* end=(op)+(litLength); LZ4_WILDCOPY(op,anchor,end); op=end; }
+ }
_next_match:
/* Encode Offset */
LZ4_WRITE_LITTLEENDIAN_16(op,(U16)(ip-ref));
- /* Start Counting */
- ip+=MINMATCH; ref+=MINMATCH; /* MinMatch already verified */
- anchor = ip;
- while (likely(ip<matchlimit-(STEPSIZE-1)))
- {
- size_t diff = AARCH(ref) ^ AARCH(ip);
- if (!diff) { ip+=STEPSIZE; ref+=STEPSIZE; continue; }
- ip += LZ4_NbCommonBytes(diff);
- goto _endCount;
- }
- if (LZ4_ARCH64) if ((ip<(matchlimit-3)) && (A32(ref) == A32(ip))) { ip+=4; ref+=4; }
- if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; }
- if ((ip<matchlimit) && (*ref == *ip)) ip++;
-_endCount:
-
/* Encode MatchLength */
- length = (int)(ip - anchor);
- if ((limitedOutput) && (unlikely(op + (1 + LASTLITERALS) + (length>>8) > oend))) return 0; /* Check output limit */
- if (length>=(int)ML_MASK)
{
- *token += ML_MASK;
- length -= ML_MASK;
- for (; length > 509 ; length-=510) { *op++ = 255; *op++ = 255; }
- if (length >= 255) { length-=255; *op++ = 255; }
- *op++ = (BYTE)length;
+ unsigned matchLength = LZ4_count(ip+MINMATCH, ref+MINMATCH, matchlimit);
+ ip += matchLength + MINMATCH;
+ if (matchLength>=ML_MASK)
+ {
+ if ((limitedOutput) && (unlikely(op + (1 + LASTLITERALS) + (matchLength>>8) > oend))) return 0; /* Check output limit */
+ *token += ML_MASK;
+ matchLength -= ML_MASK;
+ for (; matchLength > 509 ; matchLength-=510) { *op++ = 255; *op++ = 255; }
+ if (matchLength >= 255) { matchLength-=255; *op++ = 255; }
+ *op++ = (BYTE)matchLength;
+ }
+ else *token += (BYTE)(matchLength);
}
- else *token += (BYTE)(length);
+
+ anchor = ip;
/* Test end of chunk */
- if (ip > mflimit) { anchor = ip; break; }
+ if (ip > mflimit) break;
/* Fill table */
LZ4_putPosition(ip-2, ctx, tableType, base);
@@ -534,8 +545,7 @@ _endCount:
if ((ref + MAX_DISTANCE >= ip) && (A32(ref) == A32(ip))) { token = op++; *token=0; goto _next_match; }
/* Prepare next loop */
- anchor = ip++;
- forwardH = LZ4_hashPosition(ip, tableType);
+ forwardH = LZ4_hashPosition(++ip, tableType);
}
_last_literals:
@@ -557,9 +567,9 @@ _last_literals:
int LZ4_compress(const char* source, char* dest, int inputSize)
{
#if (HEAPMODE)
- void* ctx = ALLOCATOR(HASHNBCELLS4, 4); /* Aligned on 4-bytes boundaries */
+ void* ctx = ALLOCATOR(LZ4_DICTSIZE_U32, 4); /* Aligned on 4-bytes boundaries */
#else
- U32 ctx[1U<<(MEMORY_USAGE-2)] = {0}; /* Ensure data is aligned on 4-bytes boundaries */
+ U32 ctx[LZ4_DICTSIZE_U32] = {0}; /* Ensure data is aligned on 4-bytes boundaries */
#endif
int result;
@@ -577,9 +587,9 @@ int LZ4_compress(const char* source, char* dest, int inputSize)
int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
{
#if (HEAPMODE)
- void* ctx = ALLOCATOR(HASHNBCELLS4, 4); /* Aligned on 4-bytes boundaries */
+ void* ctx = ALLOCATOR(LZ4_DICTSIZE_U32, 4); /* Aligned on 4-bytes boundaries */
#else
- U32 ctx[1U<<(MEMORY_USAGE-2)] = {0}; /* Ensure data is aligned on 4-bytes boundaries */
+ U32 ctx[LZ4_DICTSIZE_U32] = {0}; /* Ensure data is aligned on 4-bytes boundaries */
#endif
int result;
@@ -596,10 +606,10 @@ int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, in
/*****************************
- Using external allocation
+ User-allocated state
*****************************/
-int LZ4_sizeofState() { return 1 << MEMORY_USAGE; }
+int LZ4_sizeofState() { return LZ4_DICTSIZE; }
int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize)
@@ -626,87 +636,223 @@ int LZ4_compress_limitedOutput_withState (void* state, const char* source, char*
}
-/****************************
- Stream functions
-****************************/
+/***************************************
+ Experimental : Streaming functions
+***************************************/
-int LZ4_sizeofStreamState()
+void LZ4_renormDictT(LZ4_dict_t_internal* LZ4_dict, const char* source)
{
- return sizeof(LZ4_Data_Structure);
+ if ((source - LZ4_dict->currentOffset > source)
+ || (LZ4_dict->currentOffset > 0x80000000))
+ {
+ /* rescale hash table */
+ U32 delta = LZ4_dict->currentOffset - 64 KB;
+ int i;
+ for (i=0; i<HASH_SIZE_U32; i++)
+ {
+ if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
+ else LZ4_dict->hashTable[i] -= delta;
+ }
+ LZ4_dict->currentOffset = 64 KB;
+ }
}
-FORCE_INLINE void LZ4_init(LZ4_Data_Structure* lz4ds, const BYTE* base)
-{
- MEM_INIT(lz4ds->hashTable, 0, sizeof(lz4ds->hashTable));
- lz4ds->bufferStart = base;
- lz4ds->base = base;
- lz4ds->nextBlock = base;
-}
-int LZ4_resetStreamState(void* state, const char* inputBuffer)
+int LZ4_compress_usingDict (LZ4_dict_t* LZ4_dict, const char* source, char* dest, int inputSize)
{
- if ((((size_t)state) & 3) != 0) return 1; /* Error : pointer is not aligned on 4-bytes boundary */
- LZ4_init((LZ4_Data_Structure*)state, (const BYTE*)inputBuffer);
- return 0;
-}
+ LZ4_dict_t_internal* const streamPtr = (LZ4_dict_t_internal*)LZ4_dict;
+ const int maxOutputSize = 0;
+ const limitedOutput_directive limitedOutput = notLimited;
+ const tableType_t tableType = byU32;
+ U32 currentOffset;
+ const U32 dictSize = streamPtr->dictSize;
+ const BYTE* const dictionary = streamPtr->dictionary;
-void* LZ4_create (const char* inputBuffer)
-{
- void* lz4ds = ALLOCATOR(1, sizeof(LZ4_Data_Structure));
- LZ4_init ((LZ4_Data_Structure*)lz4ds, (const BYTE*)inputBuffer);
- return lz4ds;
-}
+ if (streamPtr->initCheck) return 0; /* structure not initialized */
+ LZ4_renormDictT(streamPtr, source);
+ currentOffset = streamPtr->currentOffset;
-int LZ4_free (void* LZ4_Data)
-{
- FREEMEM(LZ4_Data);
- return (0);
-}
+ streamPtr->dictionary = (const BYTE*)source;
+ streamPtr->dictSize = (U32)inputSize;
+ streamPtr->currentOffset += inputSize;
+ {
+ U32 ipIndex = currentOffset;
+ const BYTE* ip = (const BYTE*) source;
+ const BYTE* anchor = (const BYTE*) source;
+ const BYTE* const iend = ip + inputSize;
+ const BYTE* const base = (const BYTE*)source - currentOffset;
+ const BYTE* const dictEnd = dictionary + dictSize;
+ const BYTE* const dictBase = dictEnd - currentOffset;
+ const BYTE* const mflimit = iend - MFLIMIT;
+ const U32 indexLimit = ipIndex + inputSize - MFLIMIT;
+ const BYTE* const matchlimit = iend - LASTLITERALS;
+
+ BYTE* op = (BYTE*) dest;
+ BYTE* const oend = op + maxOutputSize;
+
+ const int skipStrength = SKIPSTRENGTH;
+
+ /* Init conditions */
+ if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size, too large (or negative) */
+ if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
+
+ /* First Byte */
+ if (ipIndex==0) ip++;
+
+ /* Main Loop */
+ for ( ; ; )
+ {
+ int searchMatchNb = (1U << skipStrength) - 1;
+ const BYTE* ref;
+ const BYTE* lowLimit;
+ BYTE* token;
+ U32 refIndex;
+ ipIndex = (U32)(ip - base);
+
+ /* Find a match */
+ do {
+ U32 h;
+
+ ipIndex += searchMatchNb++ >> skipStrength;
+ h = LZ4_hashPosition(base + ipIndex, tableType);
+ refIndex = streamPtr->hashTable[h];
+ streamPtr->hashTable[h] = ipIndex;
+
+ if (unlikely(ipIndex > indexLimit)) goto _last_literals;
+
+ ip = base + ipIndex;
+ if (refIndex < currentOffset)
+ {
+ ref = dictBase + refIndex;
+ lowLimit = dictionary;
+ }
+ else
+ {
+ ref = base + refIndex;
+ lowLimit = (const BYTE*)source;
+ }
-char* LZ4_slideInputBuffer (void* LZ4_Data)
-{
- LZ4_Data_Structure* lz4ds = (LZ4_Data_Structure*)LZ4_Data;
- size_t delta = lz4ds->nextBlock - (lz4ds->bufferStart + 64 KB);
+ } while ((refIndex + MAX_DISTANCE < ipIndex) || (A32(ref) != A32(ip)));
- if ( (lz4ds->base - delta > lz4ds->base) /* underflow control */
- || ((size_t)(lz4ds->nextBlock - lz4ds->base) > 0xE0000000) ) /* close to 32-bits limit */
- {
- size_t deltaLimit = (lz4ds->nextBlock - 64 KB) - lz4ds->base;
- int nH;
+ /* Catch up */
+ while ((ip>anchor) && (ref>lowLimit) && (unlikely(ip[-1]==ref[-1]))) { ip--; ref--; }
+
+ {
+ /* Encode Literal length */
+ unsigned litLength = (unsigned)(ip - anchor);
+
+ token = op++;
+ if ((limitedOutput) && (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > oend))) return 0; /* Check output limit */
+ if (litLength>=RUN_MASK)
+ {
+ unsigned remaininglength = litLength - RUN_MASK;
+ *token=(RUN_MASK<<ML_BITS);
+ for(; remaininglength >= 255 ; remaininglength-=255) *op++ = 255;
+ *op++ = (BYTE)remaininglength;
+ }
+ else *token = (BYTE)(litLength << ML_BITS);
+
+ /* Copy Literals */
+ { BYTE* end=op+litLength; LZ4_WILDCOPY(op,anchor,end); op=end; }
+ }
+
+ /* Encode Offset */
+ LZ4_WRITE_LITTLEENDIAN_16(op,(U16)(ipIndex-refIndex));
+
+ /* Encode MatchLength */
+ {
+ unsigned matchLength;
+ if (refIndex >= currentOffset)
+ matchLength = LZ4_count(ip+MINMATCH, ref+MINMATCH, matchlimit);
+ else
+ {
+ matchLength = 0;
+ const BYTE* dicLimit = ip + (dictEnd - ref);
+ if (dicLimit > matchlimit) dicLimit = matchlimit;
+ matchLength = LZ4_count(ip+MINMATCH, ref+MINMATCH, dicLimit);
+ if (ref + MINMATCH + matchLength == dictEnd)
+ matchLength += LZ4_count(ip+MINMATCH+matchLength, (const BYTE*)source, matchlimit);
+ }
+ ip += matchLength + MINMATCH;
+ if (matchLength>=ML_MASK)
+ {
+ if ((limitedOutput) && (unlikely(op + (1 + LASTLITERALS) + (matchLength>>8) > oend))) return 0; /* Check output limit */
+ *token += ML_MASK;
+ matchLength -= ML_MASK;
+ for (; matchLength > 509 ; matchLength-=510) { *op++ = 255; *op++ = 255; }
+ if (matchLength >= 255) { matchLength-=255; *op++ = 255; }
+ *op++ = (BYTE)matchLength;
+ }
+ else *token += (BYTE)(matchLength);
+ }
+
+ anchor = ip;
+
+ /* Test end of chunk */
+ if (ip > mflimit) break;
+
+ /* Fill table */
+ LZ4_putPosition(ip-2, streamPtr->hashTable, tableType, base);
+ }
- for (nH=0; nH < HASHNBCELLS4; nH++)
+ _last_literals:
+ /* Encode Last Literals */
{
- if ((size_t)(lz4ds->hashTable[nH]) < deltaLimit) lz4ds->hashTable[nH] = 0;
- else lz4ds->hashTable[nH] -= (U32)deltaLimit;
+ int lastRun = (int)(iend - anchor);
+ if ((limitedOutput) && (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize)) return 0; /* Check output limit */
+ if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun >= 255 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
+ else *op++ = (BYTE)(lastRun<<ML_BITS);
+ memcpy(op, anchor, iend - anchor);
+ op += iend-anchor;
}
- memcpy((void*)(lz4ds->bufferStart), (const void*)(lz4ds->nextBlock - 64 KB), 64 KB);
- lz4ds->base = lz4ds->bufferStart;
- lz4ds->nextBlock = lz4ds->base + 64 KB;
- }
- else
- {
- memcpy((void*)(lz4ds->bufferStart), (const void*)(lz4ds->nextBlock - 64 KB), 64 KB);
- lz4ds->nextBlock -= delta;
- lz4ds->base -= delta;
- }
- return (char*)(lz4ds->nextBlock);
+ /* End */
+ return (int) (((char*)op)-dest);
+ }
}
-int LZ4_compress_continue (void* LZ4_Data, const char* source, char* dest, int inputSize)
+int LZ4_setDictPos (LZ4_dict_t* LZ4_dict, const char* dictionary, int dictSize)
{
- return LZ4_compress_generic(LZ4_Data, source, dest, inputSize, 0, notLimited, byU32, withPrefix64k);
+ LZ4_dict_t_internal* dict = (LZ4_dict_t_internal*) LZ4_dict;
+
+ dict->dictionary = (const BYTE*)dictionary;
+ dict->dictSize = (U32)dictSize;
+ if (dict->currentOffset < dict->dictSize) return 0;
+ return 1;
}
-int LZ4_compress_limitedOutput_continue (void* LZ4_Data, const char* source, char* dest, int inputSize, int maxOutputSize)
+
+int LZ4_loadDict (LZ4_dict_t* LZ4_dict, const char* dictionary, int dictSize)
{
- return LZ4_compress_generic(LZ4_Data, source, dest, inputSize, maxOutputSize, limited, byU32, withPrefix64k);
+ LZ4_dict_t_internal* dict = (LZ4_dict_t_internal*) LZ4_dict;
+ const BYTE* p = (const BYTE*)dictionary;
+ const BYTE* const dictEnd = p + dictSize;
+ const BYTE* base;
+
+ LZ4_STATIC_ASSERT(LZ4_DICTSIZE >= sizeof(LZ4_dict_t_internal)); /* A compilation error here means LZ4_DICTSIZE is not large enough */
+ if (dict->initCheck) MEM_INIT(dict, 0, sizeof(LZ4_dict_t_internal));
+
+ if (p <= dictEnd - 64 KB) p = dictEnd - 64 KB;
+ if (dictSize < MINMATCH) p = dictEnd;
+ base = p - dict->currentOffset;
+ dict->dictionary = p;
+ dict->dictSize = (U32)(dictEnd - p);
+ dict->currentOffset += dict->dictSize;
+
+ while (p <= dictEnd-MINMATCH)
+ {
+ LZ4_putPosition(p, dict, byU32, base);
+ p+=3;
+ }
+
+ return 1;
}
+
/****************************
Decompression functions
****************************/
@@ -716,7 +862,7 @@ int LZ4_compress_limitedOutput_continue (void* LZ4_Data, const char* source, cha
* Note that it is essential this generic function is really inlined,
* in order to remove useless branches during compilation optimisation.
*/
-FORCE_INLINE int LZ4_decompress_generic(
+int LZ4_decompress_generic(
const char* source,
char* dest,
int inputSize,
@@ -740,7 +886,7 @@ FORCE_INLINE int LZ4_decompress_generic(
BYTE* cpy;
BYTE* oexit = op + targetOutputSize;
- const BYTE* const dictEnd = (dict==withExtDict) ? (const BYTE*)dictStart + dictSize : NULL;
+ const BYTE* const dictEnd = (dict==usingDict) ? (const BYTE*)dictStart + dictSize : NULL;
const size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0}; /* static reduces speed for LZ4_decompress_safe() on GCC64 */
static const size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
@@ -814,7 +960,7 @@ FORCE_INLINE int LZ4_decompress_generic(
}
/* check external dictionary */
- if ((dict==withExtDict) && (unlikely(ref < (BYTE* const)dest)))
+ if ((dict==usingDict) && (unlikely(ref < (BYTE* const)dest)))
{
if (unlikely(op+length+MINMATCH > oend-LASTLITERALS)) goto _output_error;
@@ -830,7 +976,7 @@ FORCE_INLINE int LZ4_decompress_generic(
memcpy(op, dictEnd - copySize, copySize);
op += copySize;
copySize = length+MINMATCH - copySize;
- if (copySize > (size_t)((char*)op-dest))
+ if (copySize > (size_t)((char*)op-dest)) /* overlap */
{
BYTE* const cpy = op + copySize;
const BYTE* ref = (BYTE*)dest;
@@ -862,7 +1008,7 @@ FORCE_INLINE int LZ4_decompress_generic(
if (unlikely(cpy>oend-COPYLENGTH-(STEPSIZE-4)))
{
if (cpy > oend-LASTLITERALS) goto _output_error; /* Error : last 5 bytes must be literals */
- LZ4_SECURECOPY(op, ref, (oend-COPYLENGTH));
+ if (op<oend-COPYLENGTH) LZ4_WILDCOPY(op, ref, (oend-COPYLENGTH));
while(op<cpy) *op++=*ref++;
op=cpy;
continue;
@@ -895,7 +1041,7 @@ int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compre
int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
{
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withExtDict, dictStart, dictSize);
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingDict, dictStart, dictSize);
}
int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxOutputSize)
@@ -919,7 +1065,7 @@ int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int origin
int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
{
- return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withExtDict, dictStart, dictSize);
+ return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, usingDict, dictStart, dictSize);
}
@@ -932,3 +1078,80 @@ They are provided here for compatibility with existing user programs.
*/
int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); }
int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); }
+
+/* Obsolete Streaming functions */
+
+int LZ4_sizeofStreamState()
+{
+ return sizeof(LZ4_Data_Structure);
+}
+
+void LZ4_init(LZ4_Data_Structure* lz4ds, const BYTE* base)
+{
+ MEM_INIT(lz4ds->hashTable, 0, sizeof(lz4ds->hashTable));
+ lz4ds->bufferStart = base;
+ lz4ds->base = base;
+ lz4ds->nextBlock = base;
+}
+
+int LZ4_resetStreamState(void* state, const char* inputBuffer)
+{
+ if ((((size_t)state) & 3) != 0) return 1; /* Error : pointer is not aligned on 4-bytes boundary */
+ LZ4_init((LZ4_Data_Structure*)state, (const BYTE*)inputBuffer);
+ return 0;
+}
+
+void* LZ4_create (const char* inputBuffer)
+{
+ void* lz4ds = ALLOCATOR(1, sizeof(LZ4_Data_Structure));
+ LZ4_init ((LZ4_Data_Structure*)lz4ds, (const BYTE*)inputBuffer);
+ return lz4ds;
+}
+
+int LZ4_free (void* LZ4_Data)
+{
+ FREEMEM(LZ4_Data);
+ return (0);
+}
+
+
+char* LZ4_slideInputBuffer (void* LZ4_Data)
+{
+ LZ4_Data_Structure* lz4ds = (LZ4_Data_Structure*)LZ4_Data;
+ size_t delta = lz4ds->nextBlock - (lz4ds->bufferStart + 64 KB);
+
+ if ( (lz4ds->base - delta > lz4ds->base) /* underflow control */
+ || ((size_t)(lz4ds->nextBlock - lz4ds->base) > 0xE0000000) ) /* close to 32-bits limit */
+ {
+ size_t deltaLimit = (lz4ds->nextBlock - 64 KB) - lz4ds->base;
+ int nH;
+
+ for (nH=0; nH < HASH_SIZE_U32; nH++)
+ {
+ if ((size_t)(lz4ds->hashTable[nH]) < deltaLimit) lz4ds->hashTable[nH] = 0;
+ else lz4ds->hashTable[nH] -= (U32)deltaLimit;
+ }
+ memcpy((void*)(lz4ds->bufferStart), (const void*)(lz4ds->nextBlock - 64 KB), 64 KB);
+ lz4ds->base = lz4ds->bufferStart;
+ lz4ds->nextBlock = lz4ds->base + 64 KB;
+ }
+ else
+ {
+ memcpy((void*)(lz4ds->bufferStart), (const void*)(lz4ds->nextBlock - 64 KB), 64 KB);
+ lz4ds->nextBlock -= delta;
+ lz4ds->base -= delta;
+ }
+
+ return (char*)(lz4ds->nextBlock);
+}
+
+
+int LZ4_compress_continue (void* LZ4_Data, const char* source, char* dest, int inputSize)
+{
+ return LZ4_compress_generic(LZ4_Data, source, dest, inputSize, 0, notLimited, byU32, withPrefix64k);
+}
+
+int LZ4_compress_limitedOutput_continue (void* LZ4_Data, const char* source, char* dest, int inputSize, int maxOutputSize)
+{
+ return LZ4_compress_generic(LZ4_Data, source, dest, inputSize, maxOutputSize, limited, byU32, withPrefix64k);
+}
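For reference, with the default LZ4_MEMORY_USAGE of 14 (moved into lz4.h by this commit), the renamed sizing constants above work out as follows; this is just the arithmetic spelled out, not code from the patch:

    /* LZ4_MEMORY_USAGE = 14  ->  hash table of 2^14 bytes = 16 KB              */
    /* LZ4_HASHLOG      = 14 - 2 = 12                                           */
    /* HASH_SIZE_U32    = 1 << 12 = 4096 U32 cells = 16384 bytes                */
    /* LZ4_DICTSIZE_U32 = (1 << 12) + 6 = 4102, i.e. the hash table plus the    */
    /*                    extra tracking fields of LZ4_dict_t_internal          */
    /* LZ4_DICTSIZE     = 4102 * sizeof(unsigned int) = 16408 bytes             */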
diff --git a/lz4.h b/lz4.h
index 37bd282..5e695c7 100644
--- a/lz4.h
+++ b/lz4.h
@@ -28,8 +28,8 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
- LZ4 source repository : http://code.google.com/p/lz4/
+ - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
*/
#pragma once
@@ -47,11 +47,16 @@ extern "C" {
/**************************************
- Compiler Options
+ Tuning parameter
**************************************/
-#if (defined(__GNUC__) && defined(__STRICT_ANSI__)) || (defined(_MSC_VER) && !defined(__cplusplus)) /* Visual Studio */
-# define inline __inline /* Visual C is not C99, but supports some kind of inline */
-#endif
+/*
+ * LZ4_MEMORY_USAGE :
+ * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+ * Increasing memory usage improves compression ratio
+ * Reduced memory usage can improve speed, due to cache effect
+ * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
+ */
+#define LZ4_MEMORY_USAGE 14
/**************************************
@@ -72,10 +77,14 @@ LZ4_compress() :
or 0 if the compression fails
LZ4_decompress_safe() :
- maxOutputSize : is the size of the destination buffer (which must be already allocated)
+ compressedSize : is obviously the source size
+ maxOutputSize : is the size of the destination buffer, which must be already allocated.
return : the number of bytes decoded in the destination buffer (necessarily <= maxOutputSize)
+ If the destination buffer is not large enough, decoding will stop and output an error code (<0).
If the source stream is detected malformed, the function will stop decoding and return a negative result.
- This function is protected against buffer overflow exploits (never writes outside of output buffer, and never reads outside of input buffer). Therefore, it is protected against malicious data packets
+ This function is protected against buffer overflow exploits :
+ it never writes outside of output buffer, and never reads outside of input buffer.
+ Therefore, it is protected against malicious data packets.
*/
@@ -89,7 +98,6 @@ LZ4_decompress_safe() :
LZ4_compressBound() :
Provides the maximum size that LZ4 may output in a "worst case" scenario (input data not compressible)
primarily useful for memory allocation of output buffer.
- inline function is recommended for the general case,
macro is also provided when result needs to be evaluated at compilation (such as stack memory allocation).
isize : is the input size. Max supported value is LZ4_MAX_INPUT_SIZE
@@ -143,11 +151,11 @@ int LZ4_decompress_safe_partial (const char* source, char* dest, int compressedS
/*
-These functions are provided should you prefer to allocate memory for compression tables with your own allocation methods.
-To know how much memory must be allocated for the compression tables, use :
+The following functions are provided should you prefer to allocate table memory using your own allocation methods.
int LZ4_sizeofState();
+provides the size to allocate for compression tables.
-Note that tables must be aligned on 4-bytes boundaries, otherwise compression will fail (return code 0).
+Tables must be aligned on 4-bytes boundaries, otherwise compression will fail (return code 0).
The allocated memory can be provided to the compressions functions using 'void* state' parameter.
LZ4_compress_withState() and LZ4_compress_limitedOutput_withState() are equivalent to previously described functions.
@@ -159,67 +167,52 @@ int LZ4_compress_limitedOutput_withState (void* state, const char* source, char*
/**************************************
- Streaming Functions
+ Experimental Streaming Functions
**************************************/
-void* LZ4_create (const char* inputBuffer);
-int LZ4_compress_continue (void* LZ4_Data, const char* source, char* dest, int inputSize);
-int LZ4_compress_limitedOutput_continue (void* LZ4_Data, const char* source, char* dest, int inputSize, int maxOutputSize);
-char* LZ4_slideInputBuffer (void* LZ4_Data);
-int LZ4_free (void* LZ4_Data);
+#define LZ4_DICTSIZE_U32 ((1 << (LZ4_MEMORY_USAGE-2)) + 6)
+#define LZ4_DICTSIZE (LZ4_DICTSIZE_U32 * sizeof(unsigned int))
/*
-These functions allow the compression of chained blocks, where each block benefits from prior 64 KB within preceding blocks.
-In order to achieve this, it is necessary to start creating the LZ4 Data Structure, thanks to the function :
-
-void* LZ4_create (const char* inputBuffer);
-The result of the function is the (void*) pointer on the LZ4 Data Structure.
-This pointer will be needed in all other functions.
-If the pointer returned is NULL, then the allocation has failed, and compression must be aborted.
-The only parameter 'const char* inputBuffer' must, obviously, point at the beginning of input buffer.
-The input buffer must be already allocated, and size at least 192KB.
-'inputBuffer' will also be the 'const char* source' of the first block.
-
-All blocks are expected to lay next to each other within the input buffer, starting from 'inputBuffer'.
-To compress each block, use either LZ4_compress_continue() or LZ4_compress_limitedOutput_continue().
-Their behavior are identical to LZ4_compress() or LZ4_compress_limitedOutput(),
-but require the LZ4 Data Structure as their first argument, and check that each block starts right after the previous one.
-If next block does not begin immediately after the previous one, the compression will fail (return 0).
-
-When it's no longer possible to lay the next block after the previous one (not enough space left into input buffer), a call to :
-char* LZ4_slideInputBuffer(void* LZ4_Data);
-must be performed. It will typically copy the latest 64KB of input at the beginning of input buffer.
-Note that, for this function to work properly, minimum size of an input buffer must be 192KB.
-==> The memory position where the next input data block must start is provided as the result of the function.
-
-Compression can then resume, using LZ4_compress_continue() or LZ4_compress_limitedOutput_continue(), as usual.
-
-When compression is completed, a call to LZ4_free() will release the memory used by the LZ4 Data Structure.
-*/
-
+ * LZ4_dict_t
+ * information structure to track an LZ4 stream
+ * set it to zero (memset()) before first use/
+ */
+typedef struct { unsigned int table[LZ4_DICTSIZE_U32]; } LZ4_dict_t;
/*
-The following functions achieve the same result as :
-void* LZ4_create (const char* inputBuffer);
+ * LZ4_compress_usingDict
+ * Compress data block 'source', using blocks compressed before (with the same function) to improve compression ratio
+ * Previous data blocks are assumed to still be present at their previous location.
+ */
+int LZ4_compress_usingDict (LZ4_dict_t* LZ4_dict, const char* source, char* dest, int inputSize);
+//int LZ4_compress_limitedOutput_usingDict (LZ4_dict_t* LZ4_dict, const char* source, char* dest, int inputSize, int maxOutputSize);
-They are provided here to allow the user program to allocate memory using its own routines.
+/*
+ * LZ4_setDictPos
+ * If previous data blocks cannot be guaranteed to remain at their previous location in memory
+ * save them into a safe place, and
+ * use this function to indicate where to find them.
+ * Return : 1 if OK, 0 if error
+ */
+int LZ4_setDictPos (LZ4_dict_t* LZ4_dict, const char* dictionary, int dictSize);
-To know how much space must be allocated, use LZ4_sizeofStreamState();
-Note also that space must be 4-bytes aligned.
+/*
+ * LZ4_loadDict
+ * Use this function to load a static dictionary into LZ4_dict.
+ * It will be used to improve compression of next chained block.
+ * Return : 1 if OK, 0 if error
+ */
+int LZ4_loadDict (LZ4_dict_t* LZ4_dict, const char* dictionary, int dictSize);
-Once space is allocated, you must initialize it using : LZ4_resetStreamState(void* state, const char* inputBuffer);
-void* state is a pointer to the space allocated.
-It must be aligned on 4-bytes boundaries, and be large enough.
-The parameter 'const char* inputBuffer' must, obviously, point at the beginning of input buffer.
-The input buffer must be already allocated, and size at least 192KB.
-'inputBuffer' will also be the 'const char* source' of the first block.
-The same space can be re-used multiple times, just by initializing it each time with LZ4_resetStreamState().
-return value of LZ4_resetStreamState() must be 0 is OK.
-Any other value means there was an error (typically, pointer is not aligned on 4-bytes boundaries).
+/*
+*_usingDict() :
+ These decoding functions work the same as their "normal" versions,
+ but can also use up to 64KB of dictionary data (dictStart, dictSize)
+ to decode chained blocks.
*/
-int LZ4_sizeofStreamState(void);
-int LZ4_resetStreamState(void* state, const char* inputBuffer);
-
+int LZ4_decompress_safe_usingDict (const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize);
+int LZ4_decompress_fast_usingDict (const char* source, char* dest, int originalSize, const char* dictStart, int dictSize);
/*
*_withPrefix64k() :
@@ -233,28 +226,23 @@ int LZ4_decompress_fast_withPrefix64k (const char* source, char* dest, int origi
/**************************************
- Experimental Functions
-**************************************/
-/*
-*_withDict() :
- These decoding functions work the same as their "normal" versions,
- but can also use up to 64KB of dictionary data
- to decode chained blocks.
-*/
-int LZ4_decompress_safe_usingDict (const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize);
-int LZ4_decompress_fast_usingDict (const char* source, char* dest, int originalSize, const char* dictStart, int dictSize);
-
-
-
-/**************************************
Obsolete Functions
**************************************/
/*
These functions are deprecated and should no longer be used.
They are provided here for compatibility with existing user programs.
*/
-int LZ4_uncompress (const char* source, char* dest, int outputSize);
-int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize);
+int LZ4_uncompress (const char* source, char* dest, int outputSize);
+int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize);
+
+/* Obsolete streaming functions */
+void* LZ4_create (const char* inputBuffer);
+int LZ4_sizeofStreamState(void);
+int LZ4_resetStreamState(void* state, const char* inputBuffer);
+int LZ4_compress_continue (void* LZ4_Data, const char* source, char* dest, int inputSize);
+int LZ4_compress_limitedOutput_continue (void* LZ4_Data, const char* source, char* dest, int inputSize, int maxOutputSize);
+char* LZ4_slideInputBuffer (void* LZ4_Data);
+int LZ4_free (void* LZ4_Data);
#if defined (__cplusplus)
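The comments added to lz4.h above describe LZ4_loadDict() as a way to prime the tracking structure with a static dictionary before the first block. A hedged sketch of that use, again with caller-side names (dictBuffer, input, output, ...) invented for illustration:

    /* Sketch only : per the code in lz4.c, only the last 64 KB of the dictionary
     * are retained, and the buffer is presumed to stay accessible afterwards. */
    LZ4_dict_t dict;
    int cSize;
    memset(&dict, 0, sizeof(dict));
    if (LZ4_loadDict(&dict, dictBuffer, dictBufferSize) == 0)   /* 1 if OK, 0 if error */
        return 0;
    cSize = LZ4_compress_usingDict(&dict, input, output, inputSize);
    /* decoding then uses LZ4_decompress_safe_usingDict() with the same dictionary */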
diff --git a/programs/Makefile b/programs/Makefile
index c0d6d15..a81a701 100644
--- a/programs/Makefile
+++ b/programs/Makefile
@@ -42,15 +42,6 @@ MANDIR=$(PREFIX)/share/man/man1
LZ4DIR=..
TEST_FILES = COPYING
-TEST_TARGETS=test-32 test-64
-
-# Minimize test target for Travis CI's Build Matrix
-ifeq ($(LZ4_TRAVIS_CI_ENV),-m32)
-TEST_TARGETS=test-32
-else ifeq ($(LZ4_TRAVIS_CI_ENV),-m64)
-TEST_TARGETS=test-64
-endif
-
# Define *.exe as extension for Windows systems
ifneq (,$(filter Windows%,$(OS)))
@@ -115,11 +106,7 @@ uninstall:
[ -f $(DESTDIR)$(MANDIR)/lz4cat.1 ] && rm -f $(DESTDIR)$(MANDIR)/lz4cat.1
@echo lz4 successfully uninstalled
-test: $(TEST_TARGETS)
-
-test-32: test-lz4 test-lz4c32 test-fullbench32 test-fuzzer32
-
-test-64: test-lz4 test-lz4c test-fullbench test-fuzzer
+test: test-lz4 test-lz4c test-lz4c32 test-fullbench test-fullbench32 test-fuzzer test-fuzzer32
test-lz4:
@@ -128,10 +115,10 @@ test-lz4c:
test-lz4c32:
test-fullbench: fullbench
- ./fullbench --no-prompt $(TEST_FILES)
+ ./fullbench --no-prompt -i1 $(TEST_FILES)
test-fullbench32: fullbench32
- ./fullbench32 --no-prompt $(TEST_FILES)
+ ./fullbench32 --no-prompt -i1 $(TEST_FILES)
test-fuzzer: fuzzer
./fuzzer --no-prompt
diff --git a/programs/fullbench.c b/programs/fullbench.c
index 1200ca0..01b807c 100644
--- a/programs/fullbench.c
+++ b/programs/fullbench.c
@@ -281,6 +281,21 @@ static inline int local_LZ4_compress_limitedOutput_continue(const char* in, char
return LZ4_compress_limitedOutput_continue(ctx, in, out, inSize, LZ4_compressBound(inSize));
}
+
+LZ4_dict_t LZ4_dict;
+static inline void* local_LZ4_resetDictT(const char* fake)
+{
+ (void)fake;
+ memset(&LZ4_dict, 0, sizeof(LZ4_dict_t));
+ return NULL;
+}
+
+static inline int local_LZ4_compress_usingDict(const char* in, char* out, int inSize)
+{
+ return LZ4_compress_usingDict(&LZ4_dict, in, out, inSize);
+}
+
+
static void* stateLZ4HC;
static inline int local_LZ4_compressHC_withStateHC(const char* in, char* out, int inSize)
{
@@ -344,15 +359,9 @@ int fullSpeedBench(char** fileNamesTable, int nbFiles)
{
int fileIdx=0;
char* orig_buff;
-# define NB_COMPRESSION_ALGORITHMS 12
+# define NB_COMPRESSION_ALGORITHMS 13
# define MINCOMPRESSIONCHAR '0'
# define MAXCOMPRESSIONCHAR (MINCOMPRESSIONCHAR + NB_COMPRESSION_ALGORITHMS)
- static char* compressionNames[] = { "LZ4_compress", "LZ4_compress_limitedOutput",
- "LZ4_compress_withState", "LZ4_compress_limitedOutput_withState",
- "LZ4_compress_continue", "LZ4_compress_limitedOutput_continue",
- "LZ4_compressHC", "LZ4_compressHC_limitedOutput",
- "LZ4_compressHC_withStateHC", "LZ4_compressHC_limitedOutput_withStateHC",
- "LZ4_compressHC_continue", "LZ4_compressHC_limitedOutput_continue" };
double totalCTime[NB_COMPRESSION_ALGORITHMS] = {0};
double totalCSize[NB_COMPRESSION_ALGORITHMS] = {0};
# define NB_DECOMPRESSION_ALGORITHMS 7
@@ -465,29 +474,30 @@ int fullSpeedBench(char** fileNamesTable, int nbFiles)
DISPLAY(" %s : \n", inFileName);
// Compression Algorithms
- for (cAlgNb=0; (cAlgNb < NB_COMPRESSION_ALGORITHMS) && (compressionTest); cAlgNb++)
+ for (cAlgNb=1; (cAlgNb <= NB_COMPRESSION_ALGORITHMS) && (compressionTest); cAlgNb++)
{
- char* cName = compressionNames[cAlgNb];
+ char* compressorName;
int (*compressionFunction)(const char*, char*, int);
void* (*initFunction)(const char*) = NULL;
double bestTime = 100000000.;
- if ((compressionAlgo != ALL_COMPRESSORS) && (compressionAlgo != cAlgNb+1)) continue;
+ if ((compressionAlgo != ALL_COMPRESSORS) && (compressionAlgo != cAlgNb)) continue;
switch(cAlgNb)
{
- case 0 : compressionFunction = LZ4_compress; break;
- case 1 : compressionFunction = local_LZ4_compress_limitedOutput; break;
- case 2 : compressionFunction = local_LZ4_compress_withState; break;
- case 3 : compressionFunction = local_LZ4_compress_limitedOutput_withState; break;
- case 4 : compressionFunction = local_LZ4_compress_continue; initFunction = LZ4_create; break;
- case 5 : compressionFunction = local_LZ4_compress_limitedOutput_continue; initFunction = LZ4_create; break;
- case 6 : compressionFunction = LZ4_compressHC; break;
- case 7 : compressionFunction = local_LZ4_compressHC_limitedOutput; break;
- case 8 : compressionFunction = local_LZ4_compressHC_withStateHC; break;
- case 9 : compressionFunction = local_LZ4_compressHC_limitedOutput_withStateHC; break;
- case 10: compressionFunction = local_LZ4_compressHC_continue; initFunction = LZ4_createHC; break;
- case 11: compressionFunction = local_LZ4_compressHC_limitedOutput_continue; initFunction = LZ4_createHC; break;
+ case 1 : compressionFunction = LZ4_compress; compressorName = "LZ4_compress"; break;
+ case 2 : compressionFunction = local_LZ4_compress_limitedOutput; compressorName = "LZ4_compress_limitedOutput"; break;
+ case 3 : compressionFunction = local_LZ4_compress_withState; compressorName = "LZ4_compress_withState"; break;
+ case 4 : compressionFunction = local_LZ4_compress_limitedOutput_withState; compressorName = "LZ4_compress_limitedOutput_withState"; break;
+ case 5 : compressionFunction = local_LZ4_compress_continue; initFunction = LZ4_create; compressorName = "LZ4_compress_continue"; break;
+ case 6 : compressionFunction = local_LZ4_compress_limitedOutput_continue; initFunction = LZ4_create; compressorName = "LZ4_compress_limitedOutput_continue"; break;
+ case 7 : compressionFunction = LZ4_compressHC; compressorName = "LZ4_compressHC"; break;
+ case 8 : compressionFunction = local_LZ4_compressHC_limitedOutput; compressorName = "LZ4_compressHC_limitedOutput"; break;
+ case 9 : compressionFunction = local_LZ4_compressHC_withStateHC; compressorName = "LZ4_compressHC_withStateHC"; break;
+ case 10: compressionFunction = local_LZ4_compressHC_limitedOutput_withStateHC; compressorName = "LZ4_compressHC_limitedOutput_withStateHC"; break;
+ case 11: compressionFunction = local_LZ4_compressHC_continue; initFunction = LZ4_createHC; compressorName = "LZ4_compressHC_continue"; break;
+ case 12: compressionFunction = local_LZ4_compressHC_limitedOutput_continue; initFunction = LZ4_createHC; compressorName = "LZ4_compressHC_limitedOutput_continue"; break;
+ case 13: compressionFunction = local_LZ4_compress_usingDict; initFunction = local_LZ4_resetDictT; compressorName = "LZ4_compress_usingDict"; break;
default : DISPLAY("ERROR ! Bad algorithm Id !! \n"); free(chunkP); return 1;
}
@@ -496,7 +506,7 @@ int fullSpeedBench(char** fileNamesTable, int nbFiles)
double averageTime;
int milliTime;
- PROGRESS("%1i-%-21.21s : %9i ->\r", loopNb, cName, (int)benchedSize);
+ PROGRESS("%1i-%-25.25s : %9i ->\r", loopNb, compressorName, (int)benchedSize);
{ size_t i; for (i=0; i<benchedSize; i++) compressed_buff[i]=(char)i; } // warmimg up memory
nb_loops = 0;
@@ -509,7 +519,7 @@ int fullSpeedBench(char** fileNamesTable, int nbFiles)
for (chunkNb=0; chunkNb<nbChunks; chunkNb++)
{
chunkP[chunkNb].compressedSize = compressionFunction(chunkP[chunkNb].origBuffer, chunkP[chunkNb].compressedBuffer, chunkP[chunkNb].origSize);
- if (chunkP[chunkNb].compressedSize==0) DISPLAY("ERROR ! %s() = 0 !! \n", cName), exit(1);
+ if (chunkP[chunkNb].compressedSize==0) DISPLAY("ERROR ! %s() = 0 !! \n", compressorName), exit(1);
}
if (initFunction!=NULL) free(ctx);
nb_loops++;
@@ -520,13 +530,13 @@ int fullSpeedBench(char** fileNamesTable, int nbFiles)
if (averageTime < bestTime) bestTime = averageTime;
cSize=0; for (chunkNb=0; chunkNb<nbChunks; chunkNb++) cSize += chunkP[chunkNb].compressedSize;
ratio = (double)cSize/(double)benchedSize*100.;
- PROGRESS("%1i-%-21.21s : %9i -> %9i (%5.2f%%),%7.1f MB/s\r", loopNb, cName, (int)benchedSize, (int)cSize, ratio, (double)benchedSize / bestTime / 1000.);
+ PROGRESS("%1i-%-25.25s : %9i -> %9i (%5.2f%%),%7.1f MB/s\r", loopNb, compressorName, (int)benchedSize, (int)cSize, ratio, (double)benchedSize / bestTime / 1000.);
}
if (ratio<100.)
- DISPLAY("%-23.23s : %9i -> %9i (%5.2f%%),%7.1f MB/s\n", cName, (int)benchedSize, (int)cSize, ratio, (double)benchedSize / bestTime / 1000.);
+ DISPLAY("%-27.27s : %9i -> %9i (%5.2f%%),%7.1f MB/s\n", compressorName, (int)benchedSize, (int)cSize, ratio, (double)benchedSize / bestTime / 1000.);
else
- DISPLAY("%-23.23s : %9i -> %9i (%5.1f%%),%7.1f MB/s\n", cName, (int)benchedSize, (int)cSize, ratio, (double)benchedSize / bestTime / 1000.);
+ DISPLAY("%-27.27s : %9i -> %9i (%5.1f%%),%7.1f MB/s\n", compressorName, (int)benchedSize, (int)cSize, ratio, (double)benchedSize / bestTime / 1000.);
totalCTime[cAlgNb] += bestTime;
totalCSize[cAlgNb] += cSize;
@@ -536,7 +546,7 @@ int fullSpeedBench(char** fileNamesTable, int nbFiles)
for (chunkNb=0; chunkNb<nbChunks; chunkNb++)
{
chunkP[chunkNb].compressedSize = LZ4_compress(chunkP[chunkNb].origBuffer, chunkP[chunkNb].compressedBuffer, chunkP[chunkNb].origSize);
- if (chunkP[chunkNb].compressedSize==0) DISPLAY("ERROR ! %s() = 0 !! \n", compressionNames[0]), exit(1);
+ if (chunkP[chunkNb].compressedSize==0) DISPLAY("ERROR ! %s() = 0 !! \n", "LZ4_compress"), exit(1);
}
{ size_t i; for (i=0; i<benchedSize; i++) orig_buff[i]=0; } // zeroing source area, for CRC checking
@@ -607,6 +617,7 @@ int fullSpeedBench(char** fileNamesTable, int nbFiles)
free(chunkP);
}
+/*
if (nbFiles > 1)
{
int AlgNb;
@@ -625,6 +636,7 @@ int fullSpeedBench(char** fileNamesTable, int nbFiles)
DISPLAY("%-31.31s :%10llu -> %6.1f MB/s\n", dName, (long long unsigned int)totals, (double)totals/totalDTime[AlgNb]/1000.);
}
}
+*/
if (BMK_pause) { printf("press enter...\n"); getchar(); }
diff --git a/programs/fuzzer.c b/programs/fuzzer.c
index 1ea14f6..081f0df 100644
--- a/programs/fuzzer.c
+++ b/programs/fuzzer.c
@@ -187,7 +187,7 @@ int FUZ_SecurityTest()
#define FUZ_MAX(a,b) (a>b?a:b)
-int FUZ_test(U32 seed, int nbTests, double compressibility) {
+int FUZ_test(U32 seed, int nbCycles, int startCycle, double compressibility) {
unsigned long long bytes = 0;
unsigned long long cbytes = 0;
unsigned long long hcbytes = 0;
@@ -197,10 +197,10 @@ int FUZ_test(U32 seed, int nbTests, double compressibility) {
char* decodedBuffer;
# define FUZ_max LZ4_COMPRESSBOUND(LEN)
unsigned int randState=seed;
- int ret, attemptNb;
+ int ret, cycleNb;
# define FUZ_CHECKTEST(cond, ...) if (cond) { printf("Test %i : ", testNb); printf(__VA_ARGS__); \
- printf(" (seed %u, cycle %i) \n", seed, attemptNb); goto _output_error; }
-# define FUZ_DISPLAYTEST testNb++; no_prompt ? 0 : printf("%2i\b\b", testNb);
+ printf(" (seed %u, cycle %i) \n", seed, cycleNb); goto _output_error; }
+# define FUZ_DISPLAYTEST { testNb++; no_prompt ? 0 : printf("%2i\b\b", testNb); }
void* stateLZ4 = malloc(LZ4_sizeofState());
void* stateLZ4HC = malloc(LZ4_sizeofStateHC());
void* LZ4continue;
@@ -213,8 +213,16 @@ int FUZ_test(U32 seed, int nbTests, double compressibility) {
compressedBuffer = malloc(LZ4_compressBound(FUZ_MAX_BLOCK_SIZE));
decodedBuffer = malloc(FUZ_MAX_DICT_SIZE + FUZ_MAX_BLOCK_SIZE);
+ // move to startCycle
+ for (cycleNb = 0; cycleNb < startCycle; cycleNb++)
+ {
+ FUZ_rand(&randState);
+ FUZ_rand(&randState);
+ FUZ_rand(&randState);
+ }
+
// Test loop
- for (attemptNb = 0; attemptNb < nbTests; attemptNb++)
+ for (cycleNb = startCycle; cycleNb < nbCycles; cycleNb++)
{
int testNb = 0;
char* dict;
@@ -224,11 +232,11 @@ int FUZ_test(U32 seed, int nbTests, double compressibility) {
// note : promptThrottle is throtting stdout to prevent
// Travis-CI's output limit (10MB) and false hangup detection.
- const int step = FUZ_MAX(1, nbTests / 100);
- const int promptThrottle = ((attemptNb % step) == 0);
- if (!no_prompt || attemptNb == 0 || promptThrottle)
+ const int step = FUZ_MAX(1, nbCycles / 100);
+ const int promptThrottle = ((cycleNb % step) == 0);
+ if (!no_prompt || cycleNb == 0 || promptThrottle)
{
- printf("\r%7i /%7i - ", attemptNb, nbTests);
+ printf("\r%7i /%7i - ", cycleNb, nbCycles);
if (no_prompt) fflush(stdout);
}
@@ -305,7 +313,7 @@ int FUZ_test(U32 seed, int nbTests, double compressibility) {
ret = LZ4_decompress_safe(compressedBuffer, decodedBuffer, compressedSize, blockSize+1);
FUZ_CHECKTEST(ret<0, "LZ4_decompress_safe failed despite amply sufficient space");
FUZ_CHECKTEST(ret!=blockSize, "LZ4_decompress_safe did not regenerate original data");
- //FUZ_CHECKTEST(decodedBuffer[blockSize], "LZ4_decompress_safe wrote more than target size"); // well, is that an issue ?
+ //FUZ_CHECKTEST(decodedBuffer[blockSize], "LZ4_decompress_safe wrote more than (unknown) target size"); // well, is that an issue ?
FUZ_CHECKTEST(decodedBuffer[blockSize+1], "LZ4_decompress_safe overrun specified output buffer size");
crcCheck = XXH32(decodedBuffer, blockSize, 0);
FUZ_CHECKTEST(crcCheck!=crcOrig, "LZ4_decompress_safe corrupted decoded data");
@@ -409,6 +417,18 @@ int FUZ_test(U32 seed, int nbTests, double compressibility) {
crcCheck = XXH32(decodedBuffer+dictSize, blockSize, 0);
FUZ_CHECKTEST(crcCheck!=crcOrig, "LZ4_decompress_safe_withPrefix64k corrupted decoded data");
+ // Compress using dictionary
+ FUZ_DISPLAYTEST;
+ dict -= 9;
+ if (dict < (char*)CNBuffer) dict = (char*)CNBuffer;
+ {
+ LZ4_dict_t LZ4dict;
+ memset(&LZ4dict, 0, sizeof(LZ4_dict_t));
+ LZ4_loadDict(&LZ4dict, dict, dictSize);
+ blockContinueCompressedSize = LZ4_compress_usingDict(&LZ4dict, block, compressedBuffer, blockSize);
+ FUZ_CHECKTEST(blockContinueCompressedSize==0, "LZ4_compress_usingDict failed");
+ }
+
// Decompress with dictionary as external
FUZ_DISPLAYTEST;
decodedBuffer[blockSize] = 0;
@@ -416,6 +436,13 @@ int FUZ_test(U32 seed, int nbTests, double compressibility) {
FUZ_CHECKTEST(ret!=blockContinueCompressedSize, "LZ4_decompress_fast_usingDict did not read all compressed block input");
FUZ_CHECKTEST(decodedBuffer[blockSize], "LZ4_decompress_fast_usingDict overrun specified output buffer size")
crcCheck = XXH32(decodedBuffer, blockSize, 0);
+ if (crcCheck!=crcOrig)
+ {
+ int i=0;
+ while (block[i]==decodedBuffer[i]) i++;
+ printf("Wrong Byte at position %i/%i\n", i, blockSize);
+
+ }
FUZ_CHECKTEST(crcCheck!=crcOrig, "LZ4_decompress_fast_usingDict corrupted decoded data");
FUZ_DISPLAYTEST;
@@ -455,7 +482,7 @@ int FUZ_test(U32 seed, int nbTests, double compressibility) {
ccbytes += blockContinueCompressedSize;
}
- printf("\r%7i /%7i - ", attemptNb, nbTests);
+ printf("\r%7i /%7i - ", cycleNb, nbCycles);
printf("all tests completed successfully \n");
printf("compression ratio: %0.3f%%\n", (double)cbytes/bytes*100);
printf("HC compression ratio: %0.3f%%\n", (double)hcbytes/bytes*100);
@@ -489,6 +516,7 @@ int FUZ_usage()
DISPLAY( "Arguments :\n");
DISPLAY( " -i# : Nb of tests (default:%i) \n", NB_ATTEMPTS);
DISPLAY( " -s# : Select seed (default:prompt user)\n");
+ DISPLAY( " -t# : Select starting test number (default:0)\n");
DISPLAY( " -p# : Select compressibility in %% (default:%i%%)\n", FUZ_COMPRESSIBILITY_DEFAULT);
DISPLAY( " -h : display help and exit\n");
return 0;
@@ -502,6 +530,7 @@ int main(int argc, char** argv) {
int seedset=0;
int argNb;
int nbTests = NB_ATTEMPTS;
+ int testNb = 0;
int proba = FUZ_COMPRESSIBILITY_DEFAULT;
// Check command line
@@ -544,6 +573,16 @@ int main(int argc, char** argv) {
argument++;
}
break;
+ case 't':
+ argument++;
+ testNb=0;
+ while ((*argument>='0') && (*argument<='9'))
+ {
+ testNb *= 10;
+ testNb += *argument - '0';
+ argument++;
+ }
+ break;
case 'p':
argument++;
proba=0;
@@ -583,5 +622,5 @@ int main(int argc, char** argv) {
if (nbTests<=0) nbTests=1;
- return FUZ_test(seed, nbTests, ((double)proba) / 100);
+ return FUZ_test(seed, nbTests, testNb, ((double)proba) / 100);
}
diff --git a/programs/lz4.1 b/programs/lz4.1
index 298cbf6..6ae8d3c 100644
--- a/programs/lz4.1
+++ b/programs/lz4.1
@@ -64,6 +64,7 @@ following options
.TP
.B \-B#
block size [4-7](default : 7)
+ B4= 64KB ; B5= 256KB ; B6= 1MB ; B7= 4MB
.TP
.B \-BD
block dependency (improve compression ratio)
@@ -84,4 +85,4 @@ following options
Report bugs at:- https://code.google.com/p/lz4/
.SH AUTHOR
-Yann Collet \ No newline at end of file
+Yann Collet
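The new man-page line spells out the block-size IDs; for reference they match the formula the CLI previously computed inline (removed in the lz4cli.c hunk below), which now lives behind LZ4IO_setBlockSizeID():

    /* blockSize = 1 << (8 + 2*B) :
     *   B=4 -> 1 << 16 =  64 KB
     *   B=5 -> 1 << 18 = 256 KB
     *   B=6 -> 1 << 20 =   1 MB
     *   B=7 -> 1 << 22 =   4 MB   */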
diff --git a/programs/lz4cli.c b/programs/lz4cli.c
index 934c2bb..1c4e9de 100644
--- a/programs/lz4cli.c
+++ b/programs/lz4cli.c
@@ -405,9 +405,8 @@ int main(int argc, char** argv)
case '7':
{
int B = argument[1] - '0';
- int S = 1 << (8 + 2*B);
- BMK_SetBlocksize(S);
blockSize = LZ4IO_setBlockSizeID(B);
+ BMK_SetBlocksize(blockSize);
argument++;
break;
}