summary | refs | log | tree | commit | diff | stats
path: root/lz4hc.c
diff options
context:
space:
mode:
authoryann.collet.73@gmail.com <yann.collet.73@gmail.com@650e7d94-2a16-8b24-b05c-7c0b3f6821cd>2014-02-04 14:11:10 (GMT)
committeryann.collet.73@gmail.com <yann.collet.73@gmail.com@650e7d94-2a16-8b24-b05c-7c0b3f6821cd>2014-02-04 14:11:10 (GMT)
commit69dc85b8abe78246bea91a5ba1205e4c07b96a97 (patch)
tree0ed0250f8284861b90c2781aaa76172ebf97b6fd /lz4hc.c
parent96a60a52eb3e7d7a4785ad1dfc4e0abe3d0e10a1 (diff)
downloadlz4-69dc85b8abe78246bea91a5ba1205e4c07b96a97.zip
lz4-69dc85b8abe78246bea91a5ba1205e4c07b96a97.tar.gz
lz4-69dc85b8abe78246bea91a5ba1205e4c07b96a97.tar.bz2
Large decompression speed improvement for GCC 32-bits. Thanks to Valery Croizier !
LZ4HC : Compression Level is now a programmable parameter (CLI from 4 to 9)
Separated IO routines from command line (lz4io.c)
Version number into lz4.h (suggested by Francesc Alted)

git-svn-id: https://lz4.googlecode.com/svn/trunk@113 650e7d94-2a16-8b24-b05c-7c0b3f6821cd
Diffstat (limited to 'lz4hc.c')
-rw-r--r--lz4hc.c306
1 files changed, 170 insertions, 136 deletions
diff --git a/lz4hc.c b/lz4hc.c
index f28283f..e84de2b 100644
--- a/lz4hc.c
+++ b/lz4hc.c
@@ -1,6 +1,6 @@
/*
LZ4 HC - High Compression Mode of LZ4
- Copyright (C) 2011-2013, Yann Collet.
+ Copyright (C) 2011-2014, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
@@ -31,31 +31,41 @@
- LZ4 source repository : http://code.google.com/p/lz4/
*/
-//**************************************
-// Memory routines
-//**************************************
-#include <stdlib.h> // calloc, free
+
+
+/**************************************
+ Tuning Parameter
+**************************************/
+#define LZ4HC_DEFAULT_COMPRESSIONLEVEL 8
+
+
+/**************************************
+ Memory routines
+**************************************/
+#include <stdlib.h> /* calloc, free */
#define ALLOCATOR(s) calloc(1,s)
#define FREEMEM free
-#include <string.h> // memset, memcpy
+#include <string.h> /* memset, memcpy */
#define MEM_INIT memset
-//**************************************
-// CPU Feature Detection
-//**************************************
-// 32 or 64 bits ?
+/**************************************
+ CPU Feature Detection
+**************************************/
+/* 32 or 64 bits ? */
#if (defined(__x86_64__) || defined(_M_X64) || defined(_WIN64) \
|| defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__) \
|| defined(__64BIT__) || defined(_LP64) || defined(__LP64__) \
- || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) ) // Detects 64 bits mode
+ || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) ) /* Detects 64 bits mode */
# define LZ4_ARCH64 1
#else
# define LZ4_ARCH64 0
#endif
-// Little Endian or Big Endian ?
-// Overwrite the #define below if you know your architecture endianess
+/*
+ * Little Endian or Big Endian ?
+ * Overwrite the #define below if you know your architecture endianess
+ */
#if defined (__GLIBC__)
# include <endian.h>
# if (__BYTE_ORDER == __BIG_ENDIAN)
@@ -69,43 +79,45 @@
|| defined(_MIPSEB) || defined(__s390__)
# define LZ4_BIG_ENDIAN 1
#else
-// Little Endian assumed. PDP Endian and other very rare endian format are unsupported.
+/* Little Endian assumed. PDP Endian and other very rare endian format are unsupported. */
#endif
-// Unaligned memory access is automatically enabled for "common" CPU, such as x86.
-// For others CPU, the compiler will be more cautious, and insert extra code to ensure aligned access is respected
-// If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance
+/*
+ * Unaligned memory access is automatically enabled for "common" CPU, such as x86.
+ * For others CPU, the compiler will be more cautious, and insert extra code to ensure aligned access is respected
+ * If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance
+ */
#if defined(__ARM_FEATURE_UNALIGNED)
# define LZ4_FORCE_UNALIGNED_ACCESS 1
#endif
-// Define this parameter if your target system or compiler does not support hardware bit count
-#if defined(_MSC_VER) && defined(_WIN32_WCE) // Visual Studio for Windows CE does not support Hardware bit count
+/* Define this parameter if your target system or compiler does not support hardware bit count */
+#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for Windows CE does not support Hardware bit count */
# define LZ4_FORCE_SW_BITCOUNT
#endif
-//**************************************
-// Compiler Options
-//**************************************
-#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // C99
- /* "restrict" is a known keyword */
+/**************************************
+ Compiler Options
+**************************************/
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */
+/* "restrict" is a known keyword */
#else
-# define restrict // Disable restrict
+# define restrict /* Disable restrict */
#endif
-#ifdef _MSC_VER // Visual Studio
+#ifdef _MSC_VER /* Visual Studio */
# define FORCE_INLINE static __forceinline
-# include <intrin.h> // For Visual 2005
-# if LZ4_ARCH64 // 64-bits
-# pragma intrinsic(_BitScanForward64) // For Visual 2005
-# pragma intrinsic(_BitScanReverse64) // For Visual 2005
-# else // 32-bits
-# pragma intrinsic(_BitScanForward) // For Visual 2005
-# pragma intrinsic(_BitScanReverse) // For Visual 2005
+# include <intrin.h> /* For Visual 2005 */
+# if LZ4_ARCH64 /* 64-bits */
+# pragma intrinsic(_BitScanForward64) /* For Visual 2005 */
+# pragma intrinsic(_BitScanReverse64) /* For Visual 2005 */
+# else /* 32-bits */
+# pragma intrinsic(_BitScanForward) /* For Visual 2005 */
+# pragma intrinsic(_BitScanReverse) /* For Visual 2005 */
# endif
-# pragma warning(disable : 4127) // disable: C4127: conditional expression is constant
-# pragma warning(disable : 4701) // disable: C4701: potentially uninitialized local variable used
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4701) /* disable: C4701: potentially uninitialized local variable used */
#else
# ifdef __GNUC__
# define FORCE_INLINE static inline __attribute__((always_inline))
@@ -114,24 +126,24 @@
# endif
#endif
-#ifdef _MSC_VER // Visual Studio
+#ifdef _MSC_VER /* Visual Studio */
# define lz4_bswap16(x) _byteswap_ushort(x)
#else
# define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
#endif
-//**************************************
-// Includes
-//**************************************
+/**************************************
+ Includes
+**************************************/
#include "lz4hc.h"
#include "lz4.h"
-//**************************************
-// Basic Types
-//**************************************
-#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // C99
+/**************************************
+ Basic Types
+**************************************/
+#if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */
# include <stdint.h>
typedef uint8_t BYTE;
typedef uint16_t U16;
@@ -173,9 +185,9 @@ typedef struct _U64_S { U64 v; } _PACKED U64_S;
#define A16(x) (((U16_S *)(x))->v)
-//**************************************
-// Constants
-//**************************************
+/**************************************
+ Constants
+**************************************/
#define MINMATCH 4
#define DICTIONARY_LOGSIZE 16
@@ -187,8 +199,6 @@ typedef struct _U64_S { U64 v; } _PACKED U64_S;
#define HASHTABLESIZE (1 << HASH_LOG)
#define HASH_MASK (HASHTABLESIZE - 1)
-#define MAX_NB_ATTEMPTS 256
-
#define ML_BITS 4
#define ML_MASK (size_t)((1U<<ML_BITS)-1)
#define RUN_BITS (8-ML_BITS)
@@ -205,25 +215,21 @@ typedef struct _U64_S { U64 v; } _PACKED U64_S;
#define GB *(1U<<30)
-//**************************************
-// Architecture-specific macros
-//**************************************
-#if LZ4_ARCH64 // 64-bit
+/**************************************
+ Architecture-specific macros
+**************************************/
+#if LZ4_ARCH64 /* 64-bit */
# define STEPSIZE 8
# define LZ4_COPYSTEP(s,d) A64(d) = A64(s); d+=8; s+=8;
# define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d)
-# define UARCH U64
# define AARCH A64
# define HTYPE U32
# define INITBASE(b,s) const BYTE* const b = s
-#else // 32-bit
+#else /* 32-bit */
# define STEPSIZE 4
# define LZ4_COPYSTEP(s,d) A32(d) = A32(s); d+=4; s+=4;
# define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d); LZ4_COPYSTEP(s,d);
-# define UARCH U32
# define AARCH A32
-//# define HTYPE const BYTE*
-//# define INITBASE(b,s) const int b = 0
# define HTYPE U32
# define INITBASE(b,s) const BYTE* const b = s
#endif
@@ -231,15 +237,15 @@ typedef struct _U64_S { U64 v; } _PACKED U64_S;
#if defined(LZ4_BIG_ENDIAN)
# define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
# define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
-#else // Little Endian
+#else /* Little Endian */
# define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
# define LZ4_WRITE_LITTLEENDIAN_16(p,v) { A16(p) = v; p+=2; }
#endif
-//************************************************************
-// Local Types
-//************************************************************
+/**************************************
+ Local Types
+**************************************/
typedef struct
{
const BYTE* inputBuffer;
@@ -251,9 +257,9 @@ typedef struct
} LZ4HC_Data_Structure;
-//**************************************
-// Macros
-//**************************************
+/**************************************
+ Macros
+**************************************/
#define LZ4_WILDCOPY(s,d,e) do { LZ4_COPYPACKET(s,d) } while (d<e);
#define LZ4_BLINDCOPY(s,d,l) { BYTE* e=d+l; LZ4_WILDCOPY(s,d,e); d=e; }
#define HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-HASH_LOG))
@@ -263,9 +269,9 @@ typedef struct
#define GETNEXT(p) ((p) - (size_t)DELTANEXT(p))
-//**************************************
-// Private functions
-//**************************************
+/**************************************
+ Private functions
+**************************************/
#if LZ4_ARCH64
FORCE_INLINE int LZ4_NbCommonBytes (register U64 val)
@@ -349,7 +355,7 @@ FORCE_INLINE void LZ4_initHC (LZ4HC_Data_Structure* hc4, const BYTE* base)
int LZ4_resetStreamStateHC(void* state, const char* inputBuffer)
{
- if ((((size_t)state) & (sizeof(void*)-1)) != 0) return 1; // Error : pointer is not aligned for pointer (32 or 64 bits)
+ if ((((size_t)state) & (sizeof(void*)-1)) != 0) return 1; /* Error : pointer is not aligned for pointer (32 or 64 bits) */
LZ4_initHC((LZ4HC_Data_Structure*)state, (const BYTE*)inputBuffer);
return 0;
}
@@ -370,7 +376,7 @@ int LZ4_freeHC (void* LZ4HC_Data)
}
-// Update chains up to ip (excluded)
+/* Update chains up to ip (excluded) */
FORCE_INLINE void LZ4HC_Insert (LZ4HC_Data_Structure* hc4, const BYTE* ip)
{
U16* chainTable = hc4->chainTable;
@@ -393,12 +399,12 @@ char* LZ4_slideInputBufferHC(void* LZ4HC_Data)
{
LZ4HC_Data_Structure* hc4 = (LZ4HC_Data_Structure*)LZ4HC_Data;
U32 distance = (U32)(hc4->end - hc4->inputBuffer) - 64 KB;
- distance = (distance >> 16) << 16; // Must be a multiple of 64 KB
+ distance = (distance >> 16) << 16; /* Must be a multiple of 64 KB */
LZ4HC_Insert(hc4, hc4->end - MINMATCH);
memcpy((void*)(hc4->end - 64 KB - distance), (const void*)(hc4->end - 64 KB), 64 KB);
hc4->nextToUpdate -= distance;
hc4->base -= distance;
- if ((U32)(hc4->inputBuffer - hc4->base) > 1 GB + 64 KB) // Avoid overflow
+ if ((U32)(hc4->inputBuffer - hc4->base) > 1 GB + 64 KB) /* Avoid overflow */
{
int i;
hc4->base += 1 GB;
@@ -415,7 +421,7 @@ FORCE_INLINE size_t LZ4HC_CommonLength (const BYTE* p1, const BYTE* p2, const BY
while (p1t<matchlimit-(STEPSIZE-1))
{
- UARCH diff = AARCH(p2) ^ AARCH(p1t);
+ size_t diff = AARCH(p2) ^ AARCH(p1t);
if (!diff) { p1t+=STEPSIZE; p2+=STEPSIZE; continue; }
p1t += LZ4_NbCommonBytes(diff);
return (p1t - p1);
@@ -427,26 +433,26 @@ FORCE_INLINE size_t LZ4HC_CommonLength (const BYTE* p1, const BYTE* p2, const BY
}
-FORCE_INLINE int LZ4HC_InsertAndFindBestMatch (LZ4HC_Data_Structure* hc4, const BYTE* ip, const BYTE* const matchlimit, const BYTE** matchpos)
+FORCE_INLINE int LZ4HC_InsertAndFindBestMatch (LZ4HC_Data_Structure* hc4, const BYTE* ip, const BYTE* const matchlimit, const BYTE** matchpos, const int maxNbAttempts)
{
U16* const chainTable = hc4->chainTable;
HTYPE* const HashTable = hc4->hashTable;
const BYTE* ref;
INITBASE(base,hc4->base);
- int nbAttempts=MAX_NB_ATTEMPTS;
+ int nbAttempts=maxNbAttempts;
size_t repl=0, ml=0;
- U16 delta=0; // useless assignment, to remove an uninitialization warning
+ U16 delta=0; /* useless assignment, to remove an uninitialization warning */
- // HC4 match finder
+ /* HC4 match finder */
LZ4HC_Insert(hc4, ip);
ref = HASH_POINTER(ip);
#define REPEAT_OPTIMIZATION
#ifdef REPEAT_OPTIMIZATION
- // Detect repetitive sequences of length <= 4
- if ((U32)(ip-ref) <= 4) // potential repetition
+ /* Detect repetitive sequences of length <= 4 */
+ if ((U32)(ip-ref) <= 4) /* potential repetition */
{
- if (A32(ref) == A32(ip)) // confirmed
+ if (A32(ref) == A32(ip)) /* confirmed */
{
delta = (U16)(ip-ref);
repl = ml = LZ4HC_CommonLength(ip+MINMATCH, ref+MINMATCH, matchlimit) + MINMATCH;
@@ -469,7 +475,7 @@ FORCE_INLINE int LZ4HC_InsertAndFindBestMatch (LZ4HC_Data_Structure* hc4, const
}
#ifdef REPEAT_OPTIMIZATION
- // Complete table
+ /* Complete table */
if (repl)
{
const BYTE* ptr = ip;
@@ -478,13 +484,13 @@ FORCE_INLINE int LZ4HC_InsertAndFindBestMatch (LZ4HC_Data_Structure* hc4, const
end = ip + repl - (MINMATCH-1);
while(ptr < end-delta)
{
- DELTANEXT(ptr) = delta; // Pre-Load
+ DELTANEXT(ptr) = delta; /* Pre-Load */
ptr++;
}
do
{
DELTANEXT(ptr) = delta;
- HashTable[HASH_VALUE(ptr)] = (HTYPE)((ptr) - base); // Head of chain
+ HashTable[HASH_VALUE(ptr)] = (HTYPE)((ptr) - base); /* Head of chain */
ptr++;
} while(ptr < end);
hc4->nextToUpdate = end;
@@ -495,16 +501,16 @@ FORCE_INLINE int LZ4HC_InsertAndFindBestMatch (LZ4HC_Data_Structure* hc4, const
}
-FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch (LZ4HC_Data_Structure* hc4, const BYTE* ip, const BYTE* startLimit, const BYTE* matchlimit, int longest, const BYTE** matchpos, const BYTE** startpos)
+FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch (LZ4HC_Data_Structure* hc4, const BYTE* ip, const BYTE* startLimit, const BYTE* matchlimit, int longest, const BYTE** matchpos, const BYTE** startpos, const int maxNbAttempts)
{
U16* const chainTable = hc4->chainTable;
HTYPE* const HashTable = hc4->hashTable;
INITBASE(base,hc4->base);
const BYTE* ref;
- int nbAttempts = MAX_NB_ATTEMPTS;
+ int nbAttempts = maxNbAttempts;
int delta = (int)(ip-startLimit);
- // First Match
+ /* First Match */
LZ4HC_Insert(hc4, ip);
ref = HASH_POINTER(ip);
@@ -521,7 +527,7 @@ FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch (LZ4HC_Data_Structure* hc4, const
while (ipt<matchlimit-(STEPSIZE-1))
{
- UARCH diff = AARCH(reft) ^ AARCH(ipt);
+ size_t diff = AARCH(reft) ^ AARCH(ipt);
if (!diff) { ipt+=STEPSIZE; reft+=STEPSIZE; continue; }
ipt += LZ4_NbCommonBytes(diff);
goto _endCount;
@@ -532,7 +538,7 @@ FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch (LZ4HC_Data_Structure* hc4, const
_endCount:
reft = ref;
#else
- // Easier for code maintenance, but unfortunately slower too
+ /* Easier for code maintenance, but unfortunately slower too */
const BYTE* startt = ip;
const BYTE* reft = ref;
const BYTE* ipt = ip + MINMATCH + LZ4HC_CommonLength(ip+MINMATCH, ref+MINMATCH, matchlimit);
@@ -568,26 +574,26 @@ FORCE_INLINE int LZ4HC_encodeSequence (
int length;
BYTE* token;
- // Encode Literal length
+ /* Encode Literal length */
length = (int)(*ip - *anchor);
token = (*op)++;
- if ((limitedOutputBuffer) && ((*op + length + (2 + 1 + LASTLITERALS) + (length>>8)) > oend)) return 1; // Check output limit
+ if ((limitedOutputBuffer) && ((*op + length + (2 + 1 + LASTLITERALS) + (length>>8)) > oend)) return 1; /* Check output limit */
if (length>=(int)RUN_MASK) { int len; *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *(*op)++ = 255; *(*op)++ = (BYTE)len; }
else *token = (BYTE)(length<<ML_BITS);
- // Copy Literals
+ /* Copy Literals */
LZ4_BLINDCOPY(*anchor, *op, length);
- // Encode Offset
+ /* Encode Offset */
LZ4_WRITE_LITTLEENDIAN_16(*op,(U16)(*ip-ref));
- // Encode MatchLength
+ /* Encode MatchLength */
length = (int)(matchLength-MINMATCH);
- if ((limitedOutputBuffer) && (*op + (1 + LASTLITERALS) + (length>>8) > oend)) return 1; // Check output limit
+ if ((limitedOutputBuffer) && (*op + (1 + LASTLITERALS) + (length>>8) > oend)) return 1; /* Check output limit */
if (length>=(int)ML_MASK) { *token+=ML_MASK; length-=ML_MASK; for(; length > 509 ; length-=510) { *(*op)++ = 255; *(*op)++ = 255; } if (length > 254) { length-=255; *(*op)++ = 255; } *(*op)++ = (BYTE)length; }
else *token += (BYTE)(length);
- // Prepare next loop
+ /* Prepare next loop */
*ip += matchLength;
*anchor = *ip;
@@ -595,12 +601,14 @@ FORCE_INLINE int LZ4HC_encodeSequence (
}
+#define MAX_COMPRESSION_LEVEL 16
static int LZ4HC_compress_generic (
void* ctxvoid,
const char* source,
char* dest,
int inputSize,
int maxOutputSize,
+ int compressionLevel,
limitedOutput_directive limit
)
{
@@ -614,6 +622,7 @@ static int LZ4HC_compress_generic (
BYTE* op = (BYTE*) dest;
BYTE* const oend = op + maxOutputSize;
+ const int maxNbAttempts = compressionLevel > MAX_COMPRESSION_LEVEL ? 1 << MAX_COMPRESSION_LEVEL : compressionLevel ? 1<<(compressionLevel-1) : 1<<LZ4HC_DEFAULT_COMPRESSIONLEVEL;
int ml, ml2, ml3, ml0;
const BYTE* ref=NULL;
const BYTE* start2=NULL;
@@ -624,29 +633,29 @@ static int LZ4HC_compress_generic (
const BYTE* ref0;
- // Ensure blocks follow each other
+ /* Ensure blocks follow each other */
if (ip != ctx->end) return 0;
ctx->end += inputSize;
ip++;
- // Main Loop
+ /* Main Loop */
while (ip < mflimit)
{
- ml = LZ4HC_InsertAndFindBestMatch (ctx, ip, matchlimit, (&ref));
+ ml = LZ4HC_InsertAndFindBestMatch (ctx, ip, matchlimit, (&ref), maxNbAttempts);
if (!ml) { ip++; continue; }
- // saved, in case we would skip too much
+ /* saved, in case we would skip too much */
start0 = ip;
ref0 = ref;
ml0 = ml;
_Search2:
if (ip+ml < mflimit)
- ml2 = LZ4HC_InsertAndGetWiderMatch(ctx, ip + ml - 2, ip + 1, matchlimit, ml, &ref2, &start2);
+ ml2 = LZ4HC_InsertAndGetWiderMatch(ctx, ip + ml - 2, ip + 1, matchlimit, ml, &ref2, &start2, maxNbAttempts);
else ml2 = ml;
- if (ml2 == ml) // No better match
+ if (ml2 == ml) /* No better match */
{
if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) return 0;
continue;
@@ -654,7 +663,7 @@ _Search2:
if (start0 < ip)
{
- if (start2 < ip + ml0) // empirical
+ if (start2 < ip + ml0) /* empirical */
{
ip = start0;
ref = ref0;
@@ -662,8 +671,8 @@ _Search2:
}
}
- // Here, start0==ip
- if ((start2 - ip) < 3) // First Match too small : removed
+ /* Here, start0==ip */
+ if ((start2 - ip) < 3) /* First Match too small : removed */
{
ml = ml2;
ip = start2;
@@ -672,9 +681,11 @@ _Search2:
}
_Search3:
- // Currently we have :
- // ml2 > ml1, and
- // ip1+3 <= ip2 (usually < ip1+ml1)
+ /*
+ * Currently we have :
+ * ml2 > ml1, and
+ * ip1+3 <= ip2 (usually < ip1+ml1)
+ */
if ((start2 - ip) < OPTIMAL_ML)
{
int correction;
@@ -689,26 +700,26 @@ _Search3:
ml2 -= correction;
}
}
- // Now, we have start2 = ip+new_ml, with new_ml = min(ml, OPTIMAL_ML=18)
+ /* Now, we have start2 = ip+new_ml, with new_ml = min(ml, OPTIMAL_ML=18) */
if (start2 + ml2 < mflimit)
- ml3 = LZ4HC_InsertAndGetWiderMatch(ctx, start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3);
+ ml3 = LZ4HC_InsertAndGetWiderMatch(ctx, start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3, maxNbAttempts);
else ml3 = ml2;
- if (ml3 == ml2) // No better match : 2 sequences to encode
+ if (ml3 == ml2) /* No better match : 2 sequences to encode */
{
- // ip & ref are known; Now for ml
+ /* ip & ref are known; Now for ml */
if (start2 < ip+ml) ml = (int)(start2 - ip);
- // Now, encode 2 sequences
+ /* Now, encode 2 sequences */
if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) return 0;
ip = start2;
if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml2, ref2, limit, oend)) return 0;
continue;
}
- if (start3 < ip+ml+3) // Not enough space for match 2 : remove it
+ if (start3 < ip+ml+3) /* Not enough space for match 2 : remove it */
{
- if (start3 >= (ip+ml)) // can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1
+ if (start3 >= (ip+ml)) /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */
{
if (start2 < ip+ml)
{
@@ -741,8 +752,10 @@ _Search3:
goto _Search3;
}
- // OK, now we have 3 ascending matches; let's write at least the first one
- // ip & ref are known; Now for ml
+ /*
+ * OK, now we have 3 ascending matches; let's write at least the first one
+ * ip & ref are known; Now for ml
+ */
if (start2 < ip+ml)
{
if ((start2 - ip) < (int)ML_MASK)
@@ -777,80 +790,101 @@ _Search3:
}
- // Encode Last Literals
+ /* Encode Last Literals */
{
int lastRun = (int)(iend - anchor);
- if ((limit) && (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize)) return 0; // Check output limit
+ if ((limit) && (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize)) return 0; /* Check output limit */
if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
else *op++ = (BYTE)(lastRun<<ML_BITS);
memcpy(op, anchor, iend - anchor);
op += iend-anchor;
}
- // End
+ /* End */
return (int) (((char*)op)-dest);
}
-int LZ4_compressHC(const char* source, char* dest, int inputSize)
+int LZ4_compressHC2(const char* source, char* dest, int inputSize, int compressionLevel)
{
void* ctx = LZ4_createHC(source);
int result;
if (ctx==NULL) return 0;
- result = LZ4HC_compress_generic (ctx, source, dest, inputSize, 0, noLimit);
+ result = LZ4HC_compress_generic (ctx, source, dest, inputSize, 0, compressionLevel, noLimit);
LZ4_freeHC(ctx);
return result;
}
-int LZ4_compressHC_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
+int LZ4_compressHC(const char* source, char* dest, int inputSize) { return LZ4_compressHC2(source, dest, inputSize, 0); }
+
+int LZ4_compressHC2_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel)
{
void* ctx = LZ4_createHC(source);
int result;
if (ctx==NULL) return 0;
- result = LZ4HC_compress_generic (ctx, source, dest, inputSize, maxOutputSize, limitedOutput);
+ result = LZ4HC_compress_generic (ctx, source, dest, inputSize, maxOutputSize, compressionLevel, limitedOutput);
LZ4_freeHC(ctx);
return result;
}
+int LZ4_compressHC_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
+{
+ return LZ4_compressHC2_limitedOutput(source, dest, inputSize, maxOutputSize, 0);
+}
-//*****************************
-// Using an external allocation
-//*****************************
+/*****************************
+ Using external allocation
+*****************************/
int LZ4_sizeofStateHC() { return sizeof(LZ4HC_Data_Structure); }
-int LZ4_compressHC_withStateHC (void* state, const char* source, char* dest, int inputSize)
+int LZ4_compressHC2_withStateHC (void* state, const char* source, char* dest, int inputSize, int compressionLevel)
{
- if (((size_t)(state)&(sizeof(void*)-1)) != 0) return 0; // Error : state is not aligned for pointers (32 or 64 bits)
+ if (((size_t)(state)&(sizeof(void*)-1)) != 0) return 0; /* Error : state is not aligned for pointers (32 or 64 bits) */
LZ4_initHC ((LZ4HC_Data_Structure*)state, (const BYTE*)source);
- return LZ4HC_compress_generic (state, source, dest, inputSize, 0, noLimit);
+ return LZ4HC_compress_generic (state, source, dest, inputSize, 0, compressionLevel, noLimit);
}
+int LZ4_compressHC_withStateHC (void* state, const char* source, char* dest, int inputSize)
+{ return LZ4_compressHC2_withStateHC (state, source, dest, inputSize, 0); }
-int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* source, char* dest, int inputSize, int maxOutputSize)
+
+int LZ4_compressHC2_limitedOutput_withStateHC (void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel)
{
- if (((size_t)(state)&(sizeof(void*)-1)) != 0) return 0; // Error : state is not aligned for pointers (32 or 64 bits)
+ if (((size_t)(state)&(sizeof(void*)-1)) != 0) return 0; /* Error : state is not aligned for pointers (32 or 64 bits) */
LZ4_initHC ((LZ4HC_Data_Structure*)state, (const BYTE*)source);
- return LZ4HC_compress_generic (state, source, dest, inputSize, maxOutputSize, limitedOutput);
+ return LZ4HC_compress_generic (state, source, dest, inputSize, maxOutputSize, compressionLevel, limitedOutput);
}
+int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* source, char* dest, int inputSize, int maxOutputSize)
+{ return LZ4_compressHC2_limitedOutput_withStateHC (state, source, dest, inputSize, maxOutputSize, 0); }
+
-//****************************
-// Stream functions
-//****************************
+/****************************
+ Stream functions
+****************************/
int LZ4_compressHC_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize)
{
- return LZ4HC_compress_generic (LZ4HC_Data, source, dest, inputSize, 0, noLimit);
+ return LZ4HC_compress_generic (LZ4HC_Data, source, dest, inputSize, 0, 0, noLimit);
+}
+
+int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int compressionLevel)
+{
+ return LZ4HC_compress_generic (LZ4HC_Data, source, dest, inputSize, 0, compressionLevel, noLimit);
}
int LZ4_compressHC_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize)
{
- return LZ4HC_compress_generic (LZ4HC_Data, source, dest, inputSize, maxOutputSize, limitedOutput);
+ return LZ4HC_compress_generic (LZ4HC_Data, source, dest, inputSize, maxOutputSize, 0, limitedOutput);
}
+int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel)
+{
+ return LZ4HC_compress_generic (LZ4HC_Data, source, dest, inputSize, maxOutputSize, compressionLevel, limitedOutput);
+}