Diffstat (limited to 'lz4.c')
-rw-r--r--  lz4.c | 571
1 file changed, 321 insertions(+), 250 deletions(-)
diff --git a/lz4.c b/lz4.c
index 198b581..fb84955 100644
--- a/lz4.c
+++ b/lz4.c
@@ -31,82 +31,73 @@
- LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
*/
+
/**************************************
Tuning parameters
**************************************/
/*
* HEAPMODE :
* Select how default compression functions will allocate memory for their hash table,
- * in memory stack (0:default, fastest), or in memory heap (1:requires memory allocation (malloc)).
+ * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
*/
#define HEAPMODE 0
+/*
+ * CPU_HAS_EFFICIENT_UNALIGNED_MEMORY_ACCESS :
+ * By default, the source code expects the compiler to correctly optimize
+ * 4-byte and 8-byte reads on architectures able to handle them efficiently.
+ * This is not always the case. In some circumstances (notably ARM),
+ * the compiler will emit cautious code even when the target is able to handle unaligned memory accesses correctly.
+ *
+ * You can force the compiler to use unaligned memory access by uncommenting the line below.
+ * One of the following scenarios will happen :
+ * 1 - Your target CPU correctly handles unaligned access, but the compiler did not optimize for it (good case).
+ *     You will witness large performance improvements (+50% and up).
+ *     Keep the line uncommented, and report your configuration upstream (https://groups.google.com/forum/#!forum/lz4c) ;
+ *     the goal is to detect such situations automatically, by adding your target CPU to an exception list.
+ *     An automatic detection macro will be added within future versions of the library.
+ * 2 - Your target CPU correctly handles unaligned access, and the compiler already optimized for it.
+ *     No change will be experienced.
+ * 3 - Your target CPU handles unaligned access inefficiently.
+ *     You will experience a performance loss ; comment the line back out.
+ * 4 - Your target CPU does not handle unaligned access at all.
+ *     The program will crash.
+ */
+/* #define CPU_HAS_EFFICIENT_UNALIGNED_MEMORY_ACCESS 1 */
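Since the detection block below only tests defined(CPU_HAS_EFFICIENT_UNALIGNED_MEMORY_ACCESS), the same switch can also be flipped from the build line instead of editing the file; a minimal sketch (the cc invocation is illustrative, not part of the patch):

    cc -O3 -DCPU_HAS_EFFICIENT_UNALIGNED_MEMORY_ACCESS=1 -c lz4.c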
+
/**************************************
CPU Feature Detection
**************************************/
-/* 32 or 64 bits ? */
-#if (defined(__x86_64__) || defined(_M_X64) || defined(_WIN64) \
- || defined(__64BIT__) || defined(__mips64) \
- || defined(__powerpc64__) || defined(__powerpc64le__) \
- || defined(__ppc64__) || defined(__ppc64le__) \
- || defined(__PPC64__) || defined(__PPC64LE__) \
- || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) \
- || defined(__s390x__) ) /* Detects 64 bits mode */
-# define LZ4_ARCH64 1
-#else
-# define LZ4_ARCH64 0
-#endif
-#define LZ4_32BITS (sizeof(void*)==4)
-#define LZ4_64BITS (sizeof(void*)==8)
-
/*
- * Little Endian or Big Endian ?
- * Overwrite the #define below if you know your architecture endianess
+ * Automated efficient unaligned memory access detection
+ * Based on known hardware architectures
+ * This list will be updated thanks to feedback
*/
-#include <stdlib.h> /* Apparently required to detect endianess */
-#if defined (__GLIBC__)
-# include <endian.h>
-# if (__BYTE_ORDER == __BIG_ENDIAN)
-# define LZ4_BIG_ENDIAN 1
-# endif
-#elif (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined(_LITTLE_ENDIAN))
-# define LZ4_BIG_ENDIAN 1
-#elif defined(__sparc) || defined(__sparc__) \
- || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \
- || defined(__hpux) || defined(__hppa) \
- || defined(_MIPSEB) || defined(__s390__)
-# define LZ4_BIG_ENDIAN 1
+#if defined(CPU_HAS_EFFICIENT_UNALIGNED_MEMORY_ACCESS) \
+ || defined(__ARM_FEATURE_UNALIGNED) \
+ || defined(__i386__) || defined(__x86_64__) \
+ || defined(_M_IX86) || defined(_M_X64) \
+ || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_8__) \
+ || (defined(_M_ARM) && (_M_ARM >= 7))
+# define LZ4_UNALIGNED_ACCESS 1
#else
-/* Little Endian assumed. PDP Endian and other very rare endian format are unsupported. */
+# define LZ4_UNALIGNED_ACCESS 0
#endif
/*
- * Unaligned memory access is automatically enabled for "common" CPU, such as x86.
- * For others CPU, such as ARM, the compiler may be more cautious, inserting unnecessary extra code to ensure aligned access property
- * If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance
+ * LZ4_FORCE_SW_BITCOUNT
+ * Define this parameter if your target system or compiler does not support hardware bit count
*/
-#if defined(__ARM_FEATURE_UNALIGNED)
-# define LZ4_FORCE_UNALIGNED_ACCESS 1
-#endif
-
-/* Define this parameter if your target system or compiler does not support hardware bit count */
#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for Windows CE does not support Hardware bit count */
# define LZ4_FORCE_SW_BITCOUNT
#endif
-/*
- * BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE :
- * This option may provide a small boost to performance for some big endian cpu, although probably modest.
- * You may set this option to 1 if data will remain within closed environment.
- * This option is useless on Little_Endian CPU (such as x86)
- */
-
-/* #define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1 */
-
/**************************************
- Compiler Options
+ Compiler Options
**************************************/
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */
/* "restrict" is a known keyword */
@@ -116,14 +107,7 @@
#ifdef _MSC_VER /* Visual Studio */
# define FORCE_INLINE static __forceinline
-# include <intrin.h> /* For Visual 2005 */
-# if LZ4_ARCH64 /* 64-bits */
-# pragma intrinsic(_BitScanForward64) /* For Visual 2005 */
-# pragma intrinsic(_BitScanReverse64) /* For Visual 2005 */
-# else /* 32-bits */
-# pragma intrinsic(_BitScanForward) /* For Visual 2005 */
-# pragma intrinsic(_BitScanReverse) /* For Visual 2005 */
-# endif
+# include <intrin.h>
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#else
# ifdef __GNUC__
@@ -133,12 +117,6 @@
# endif
#endif
-#ifdef _MSC_VER /* Visual Studio */
-# define lz4_bswap16(x) _byteswap_ushort(x)
-#else
-# define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
-#endif
-
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
@@ -185,46 +163,130 @@
typedef unsigned long long U64;
#endif
-#if defined(__GNUC__) && !defined(LZ4_FORCE_UNALIGNED_ACCESS)
-# define _PACKED __attribute__ ((packed))
-#else
-# define _PACKED
-#endif
-#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
-# if defined(__IBMC__) || defined(__SUNPRO_C) || defined(__SUNPRO_CC)
-# pragma pack(1)
-# else
-# pragma pack(push, 1)
-# endif
-#endif
+/**************************************
+ Reading and writing into memory
+**************************************/
+#define STEPSIZE sizeof(size_t)
-typedef struct { U16 v; } _PACKED U16_S;
-typedef struct { U32 v; } _PACKED U32_S;
-typedef struct { U64 v; } _PACKED U64_S;
-typedef struct {size_t v;} _PACKED size_t_S;
+static unsigned LZ4_64bits(void) { return sizeof(void*)==8; }
+
+static unsigned LZ4_isLittleEndian(void)
+{
+ const union { U32 i; BYTE c[4]; } one = { 1 }; /* don't use static : detrimental to performance */
+ return one.c[0];
+}
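The union read above is the classic portable endianness probe: storing the U32 value 1 puts its least-significant byte first in memory only on little-endian hosts, so c[0] is 1 there and 0 on big-endian hosts. A standalone sketch of the same idea (illustrative, not part of the patch):

    #include <stdio.h>
    int main(void)
    {
        const union { unsigned int i; unsigned char c[4]; } one = { 1 };
        printf("little endian : %u\n", (unsigned)one.c[0]);  /* 1 on x86/ARM-LE, 0 on s390x */
        return 0;
    }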
+
+
+static U16 LZ4_readLE16(const void* memPtr)
+{
+ if ((LZ4_UNALIGNED_ACCESS) && (LZ4_isLittleEndian()))
+ return *(U16*)memPtr;
+ else
+ {
+ const BYTE* p = memPtr;
+ return (U16)((U16)p[0] + (p[1]<<8));
+ }
+}
+
+static void LZ4_writeLE16(void* memPtr, U16 value)
+{
+ if ((LZ4_UNALIGNED_ACCESS) && (LZ4_isLittleEndian()))
+ {
+ *(U16*)memPtr = value;
+ return;
+ }
+ else
+ {
+ BYTE* p = memPtr;
+ p[0] = (BYTE) value;
+ p[1] = (BYTE)(value>>8);
+ }
+}
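LZ4_readLE16/LZ4_writeLE16 pin the stored byte order to little-endian regardless of the host, which is what keeps the compressed format portable across machines. An illustrative round-trip (uses the two functions above; not part of the patch):

    BYTE buf[2];
    LZ4_writeLE16(buf, 0x1234);
    /* on any host : buf[0] == 0x34, buf[1] == 0x12, and LZ4_readLE16(buf) == 0x1234 */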
-#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
-# if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
-# pragma pack(0)
-# else
-# pragma pack(pop)
-# endif
-#endif
-#define A16(x) (((U16_S *)(x))->v)
-#define A32(x) (((U32_S *)(x))->v)
-#define A64(x) (((U64_S *)(x))->v)
-#define AARCH(x) (((size_t_S *)(x))->v)
+static U16 LZ4_read16(const void* memPtr)
+{
+ if (LZ4_UNALIGNED_ACCESS)
+ return *(U16*)memPtr;
+ else
+ {
+ U16 val16;
+ memcpy(&val16, memPtr, 2);
+ return val16;
+ }
+}
+
+static U32 LZ4_read32(const void* memPtr)
+{
+ if (LZ4_UNALIGNED_ACCESS)
+ return *(U32*)memPtr;
+ else
+ {
+ U32 val32;
+ memcpy(&val32, memPtr, 4);
+ return val32;
+ }
+}
+
+static U64 LZ4_read64(const void* memPtr)
+{
+ if (LZ4_UNALIGNED_ACCESS)
+ return *(U64*)memPtr;
+ else
+ {
+ U64 val64;
+ memcpy(&val64, memPtr, 8);
+ return val64;
+ }
+}
+
+static size_t LZ4_read_ARCH(const void* p)
+{
+ if (LZ4_64bits())
+ return (size_t)LZ4_read64(p);
+ else
+ return (size_t)LZ4_read32(p);
+}
+
+
+static void LZ4_copy4(void* dstPtr, const void* srcPtr)
+{
+ if (LZ4_UNALIGNED_ACCESS)
+ {
+ *(U32*)dstPtr = *(U32*)srcPtr;
+ return;
+ }
+ memcpy(dstPtr, srcPtr, 4);
+}
+
+static void LZ4_copy8(void* dstPtr, const void* srcPtr)
+{
+ if (LZ4_UNALIGNED_ACCESS)
+ {
+ if (LZ4_64bits())
+ *(U64*)dstPtr = *(U64*)srcPtr;
+ else
+ ((U32*)dstPtr)[0] = ((U32*)srcPtr)[0],
+ ((U32*)dstPtr)[1] = ((U32*)srcPtr)[1];
+ return;
+ }
+ memcpy(dstPtr, srcPtr, 8);
+}
+
+/* customized version of memcpy, which may overwrite up to 7 bytes beyond dstEnd */
+static void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
+{
+ BYTE* d = dstPtr;
+ const BYTE* s = srcPtr;
+ BYTE* e = dstEnd;
+ do { LZ4_copy8(d,s); d+=8; s+=8; } while (d<e);
+}
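Because LZ4_wildCopy always copies in whole 8-byte steps, it may write up to 7 bytes past dstEnd, so callers must reserve that much slack in the destination buffer. An illustrative sizing sketch (buffer sizes are hypothetical):

    BYTE src[16] = {0};
    BYTE dst[12 + 7];                 /* keep >= 7 bytes of slack after dstEnd */
    LZ4_wildCopy(dst, src, dst + 12); /* 12 bytes requested ; two 8-byte steps actually write 16 */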
/**************************************
- Constants
+ Common Constants
**************************************/
-#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
-#define HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
-#define HASH_SIZE_U32 (1 << LZ4_HASHLOG)
-
#define MINMATCH 4
#define COPYLENGTH 8
@@ -232,13 +294,10 @@ typedef struct {size_t v;} _PACKED size_t_S;
#define MFLIMIT (COPYLENGTH+MINMATCH)
static const int LZ4_minLength = (MFLIMIT+1);
-#define KB *(1U<<10)
-#define MB *(1U<<20)
+#define KB *(1 <<10)
+#define MB *(1 <<20)
#define GB *(1U<<30)
-#define LZ4_64KLIMIT ((64 KB) + (MFLIMIT-1))
-#define SKIPSTRENGTH 6 /* Increasing this value will make the compression run slower on incompressible data */
-
#define MAXD_LOG 16
#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
@@ -249,130 +308,150 @@ static const int LZ4_minLength = (MFLIMIT+1);
/**************************************
- Structures and local types
+ Common Utils
**************************************/
-typedef struct {
- U32 hashTable[HASH_SIZE_U32];
- U32 currentOffset;
- U32 initCheck;
- const BYTE* dictionary;
- const BYTE* bufferStart;
- U32 dictSize;
-} LZ4_stream_t_internal;
+#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
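LZ4_STATIC_ASSERT turns a false condition into a division by zero inside an enum initializer, which must be a constant expression, so a failing condition is rejected at compile time rather than at run time. For instance:

    LZ4_STATIC_ASSERT(sizeof(U32) == 4);    /* compiles : enum value is 1/1 */
    /* LZ4_STATIC_ASSERT(sizeof(U32) == 8);    would fail to compile : 1/0 */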
-typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
-typedef enum { byPtr, byU32, byU16 } tableType_t;
-typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
-typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
+/********************************
+ Common functions
+********************************/
+static unsigned LZ4_NbCommonBytes (register size_t val)
+{
+ if (LZ4_isLittleEndian())
+ {
+ if (LZ4_64bits())
+ {
+# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r = 0;
+ _BitScanForward64( &r, (U64)val );
+ return (int)(r>>3);
+# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (__builtin_ctzll((U64)val) >> 3);
+# else
+ static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
+ return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+# endif
+ }
+ else /* 32 bits */
+ {
+# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r;
+ _BitScanForward( &r, (U32)val );
+ return (int)(r>>3);
+# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (__builtin_ctz((U32)val) >> 3);
+# else
+ static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
+ return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+# endif
+ }
+ }
+ else /* Big Endian CPU */
+ {
+ if (LZ4_64bits())
+ {
+# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r = 0;
+ _BitScanReverse64( &r, val );
+ return (unsigned)(r>>3);
+# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (__builtin_clzll(val) >> 3);
+# else
+ unsigned r;
+ if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
+ if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+ r += (!val);
+ return r;
+# endif
+ }
+ else /* 32 bits */
+ {
+# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r = 0;
+ _BitScanReverse( &r, (unsigned long)val );
+ return (unsigned)(r>>3);
+# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (__builtin_clz(val) >> 3);
+# else
+ unsigned r;
+ if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
+ r += (!val);
+ return r;
+# endif
+ }
+ }
+}
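On a little-endian host the first mismatching byte of the XOR diff sits at its lowest set bit, so a trailing-zero count shifted right by 3 gives the number of equal leading bytes (on big-endian hosts, a leading-zero count plays the same role). A worked example (illustrative):

    /* little-endian, 64-bit : the inputs first differ in byte 2
       val = in ^ match = 0x0000000000FF0000ULL
       lowest set bit is bit 16 -> __builtin_ctzll(val) == 16
       16 >> 3 == 2 common bytes, as returned by LZ4_NbCommonBytes ;
       the De Bruijn table fallback computes the same byte index
       without hardware bit-scan support */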
-typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
-typedef enum { full = 0, partial = 1 } earlyEnd_directive;
+static unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
+{
+ const BYTE* const pStart = pIn;
+
+ while (likely(pIn<pInLimit-(STEPSIZE-1)))
+ {
+ size_t diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+ if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
+ pIn += LZ4_NbCommonBytes(diff);
+ return (unsigned)(pIn - pStart);
+ }
+ if (LZ4_64bits()) if ((pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
+ if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
+ if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
+
+ return (unsigned)(pIn - pStart);
+}
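LZ4_count returns the length of the common prefix of pIn and pMatch, comparing word-at-a-time while at least STEPSIZE bytes remain before pInLimit, then mopping up with 4-, 2- and 1-byte compares. A minimal illustrative call (inputs are hypothetical):

    const BYTE a[] = "abcdefXY";
    const BYTE b[] = "abcdefZW";
    unsigned n = LZ4_count(a, b, a + 8);  /* n == 6 : common prefix "abcdef" */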
+#ifndef LZ4_COMMONDEFS_ONLY
/**************************************
- Architecture-specific macros
+ Local Common Constants
**************************************/
-#define STEPSIZE sizeof(size_t)
-#define LZ4_COPYSTEP(d,s) { AARCH(d) = AARCH(s); d+=STEPSIZE; s+=STEPSIZE; }
-#define LZ4_COPY8(d,s) { LZ4_COPYSTEP(d,s); if (STEPSIZE<8) LZ4_COPYSTEP(d,s); }
-
-#if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
-# define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
-# define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
-#else /* Little Endian */
-# define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
-# define LZ4_WRITE_LITTLEENDIAN_16(p,v) { A16(p) = v; p+=2; }
-#endif
+#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
+#define HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
+#define HASH_SIZE_U32 (1 << LZ4_HASHLOG)
+
+#define LZ4_64KLIMIT ((64 KB) + (MFLIMIT-1))
+#define SKIPSTRENGTH 6 /* Increasing this value will make the compression run slower on incompressible data */
+
+#define MAXD_LOG 16
+#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
/**************************************
- Macros
+ Local Utils
**************************************/
-#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
-#if LZ4_ARCH64 || !defined(__GNUC__)
-# define LZ4_WILDCOPY(d,s,e) { do { LZ4_COPY8(d,s) } while (d<e); } /* at the end, d>=e; */
-#else
-# define LZ4_WILDCOPY(d,s,e) { if (likely(e-d <= 8)) LZ4_COPY8(d,s) else do { LZ4_COPY8(d,s) } while (d<e); }
-#endif
+int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
+int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
-/****************************
- Private local functions
-****************************/
-#if LZ4_ARCH64
+/**************************************
+ Local Structures and types
+**************************************/
+typedef struct {
+ U32 hashTable[HASH_SIZE_U32];
+ U32 currentOffset;
+ U32 initCheck;
+ const BYTE* dictionary;
+ const BYTE* bufferStart;
+ U32 dictSize;
+} LZ4_stream_t_internal;
-static int LZ4_NbCommonBytes (register U64 val)
-{
-# if defined(LZ4_BIG_ENDIAN)
-# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
- unsigned long r = 0;
- _BitScanReverse64( &r, val );
- return (int)(r>>3);
-# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
- return (__builtin_clzll(val) >> 3);
-# else
- int r;
- if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
- if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
- r += (!val);
- return r;
-# endif
-# else
-# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
- unsigned long r = 0;
- _BitScanForward64( &r, val );
- return (int)(r>>3);
-# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
- return (__builtin_ctzll(val) >> 3);
-# else
- static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
- return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
-# endif
-# endif
-}
+typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
+typedef enum { byPtr, byU32, byU16 } tableType_t;
-#else
+typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
+typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
-static int LZ4_NbCommonBytes (register U32 val)
-{
-# if defined(LZ4_BIG_ENDIAN)
-# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
- unsigned long r = 0;
- _BitScanReverse( &r, val );
- return (int)(r>>3);
-# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
- return (__builtin_clz(val) >> 3);
-# else
- int r;
- if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
- r += (!val);
- return r;
-# endif
-# else
-# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
- unsigned long r;
- _BitScanForward( &r, val );
- return (int)(r>>3);
-# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
- return (__builtin_ctz(val) >> 3);
-# else
- static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
- return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
-# endif
-# endif
-}
+typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
+typedef enum { full = 0, partial = 1 } earlyEnd_directive;
-#endif
/********************************
Compression functions
********************************/
-int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
-int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
-static int LZ4_hashSequence(U32 sequence, tableType_t tableType)
+static U32 LZ4_hashSequence(U32 sequence, tableType_t tableType)
{
if (tableType == byU16)
return (((sequence) * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
@@ -380,15 +459,15 @@ static int LZ4_hashSequence(U32 sequence, tableType_t tableType)
return (((sequence) * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
}
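The multiplier 2654435761U is Knuth's multiplicative hashing constant, a prime close to 2^32/phi, so the top bits of the product are well mixed; the shift then keeps just enough of them to index the hash table. An illustrative instantiation (assumes the usual default LZ4_MEMORY_USAGE of 14):

    /* LZ4_HASHLOG == 14-2 == 12, so for byU32 the hash is
       (sequence * 2654435761U) >> 20, a value in [0, 4095] */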
-static int LZ4_hashPosition(const BYTE* p, tableType_t tableType) { return LZ4_hashSequence(A32(p), tableType); }
+static U32 LZ4_hashPosition(const BYTE* p, tableType_t tableType) { return LZ4_hashSequence(LZ4_read32(p), tableType); }
static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
switch (tableType)
{
- case byPtr: { const BYTE** hashTable = (const BYTE**) tableBase; hashTable[h] = p; break; }
- case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); break; }
- case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); break; }
+ case byPtr: { const BYTE** hashTable = (const BYTE**) tableBase; hashTable[h] = p; return; }
+ case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
+ case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
}
}
@@ -411,32 +490,12 @@ static const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t t
return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
}
-static unsigned LZ4_count(const BYTE* pIn, const BYTE* pRef, const BYTE* pInLimit)
-{
- const BYTE* const pStart = pIn;
-
- while (likely(pIn<pInLimit-(STEPSIZE-1)))
- {
- size_t diff = AARCH(pRef) ^ AARCH(pIn);
- if (!diff) { pIn+=STEPSIZE; pRef+=STEPSIZE; continue; }
- pIn += LZ4_NbCommonBytes(diff);
- return (unsigned)(pIn - pStart);
- }
- if (LZ4_64BITS) if ((pIn<(pInLimit-3)) && (A32(pRef) == A32(pIn))) { pIn+=4; pRef+=4; }
- if ((pIn<(pInLimit-1)) && (A16(pRef) == A16(pIn))) { pIn+=2; pRef+=2; }
- if ((pIn<pInLimit) && (*pRef == *pIn)) pIn++;
-
- return (unsigned)(pIn - pStart);
-}
-
-
static int LZ4_compress_generic(
void* ctx,
const char* source,
char* dest,
int inputSize,
int maxOutputSize,
-
limitedOutput_directive outputLimited,
tableType_t tableType,
dict_directive dict,
@@ -526,7 +585,7 @@ static int LZ4_compress_generic(
} while ( ((dictIssue==dictSmall) ? (ref < lowRefLimit) : 0)
|| ((tableType==byU16) ? 0 : (ref + MAX_DISTANCE < ip))
- || (A32(ref+refDelta) != A32(ip)) );
+ || (LZ4_read32(ref+refDelta) != LZ4_read32(ip)) );
}
/* Catch up */
@@ -548,12 +607,13 @@ static int LZ4_compress_generic(
else *token = (BYTE)(litLength<<ML_BITS);
/* Copy Literals */
- { BYTE* end = op+litLength; LZ4_WILDCOPY(op,anchor,end); op=end; }
+ LZ4_wildCopy(op, anchor, op+litLength);
+ op+=litLength;
}
_next_match:
/* Encode Offset */
- LZ4_WRITE_LITTLEENDIAN_16(op, (U16)(ip-ref));
+ LZ4_writeLE16(op, (U16)(ip-ref)); op+=2;
/* Encode MatchLength */
{
@@ -619,7 +679,7 @@ _next_match:
LZ4_putPosition(ip, ctx, tableType, base);
if ( ((dictIssue==dictSmall) ? (ref>=lowRefLimit) : 1)
&& (ref+MAX_DISTANCE>=ip)
- && (A32(ref+refDelta)==A32(ip)) )
+ && (LZ4_read32(ref+refDelta)==LZ4_read32(ip)) )
{ token=op++; *token=0; goto _next_match; }
/* Prepare next loop */
@@ -646,16 +706,16 @@ _last_literals:
int LZ4_compress(const char* source, char* dest, int inputSize)
{
#if (HEAPMODE)
- void* ctx = ALLOCATOR(LZ4_STREAMSIZE_U32, 4); /* Aligned on 4-bytes boundaries */
+ void* ctx = ALLOCATOR(LZ4_STREAMSIZE_U64, 8); /* Aligned on 8-bytes boundaries */
#else
- U32 ctx[LZ4_STREAMSIZE_U32] = {0}; /* Ensure data is aligned on 4-bytes boundaries */
+ U64 ctx[LZ4_STREAMSIZE_U64] = {0}; /* Ensure data is aligned on 8-bytes boundaries */
#endif
int result;
if (inputSize < (int)LZ4_64KLIMIT)
result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue);
else
- result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, 0, notLimited, LZ4_64BITS ? byU32 : byPtr, noDict, noDictIssue);
+ result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, 0, notLimited, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue);
#if (HEAPMODE)
FREEMEM(ctx);
@@ -666,16 +726,16 @@ int LZ4_compress(const char* source, char* dest, int inputSize)
int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
{
#if (HEAPMODE)
- void* ctx = ALLOCATOR(LZ4_STREAMSIZE_U32, 4); /* Aligned on 4-bytes boundaries */
+ void* ctx = ALLOCATOR(LZ4_STREAMSIZE_U64, 8); /* Aligned on 8-bytes boundaries */
#else
- U32 ctx[LZ4_STREAMSIZE_U32] = {0}; /* Ensure data is aligned on 4-bytes boundaries */
+ U64 ctx[LZ4_STREAMSIZE_U64] = {0}; /* Ensure data is aligned on 8-bytes boundaries */
#endif
int result;
if (inputSize < (int)LZ4_64KLIMIT)
result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue);
else
- result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, maxOutputSize, limitedOutput, LZ4_64BITS ? byU32 : byPtr, noDict, noDictIssue);
+ result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, maxOutputSize, limitedOutput, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue);
#if (HEAPMODE)
FREEMEM(ctx);
@@ -700,7 +760,7 @@ void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
LZ4_stream_t* LZ4_createStream(void)
{
- LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOCATOR(4, LZ4_STREAMSIZE_U32);
+ LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOCATOR(8, LZ4_STREAMSIZE_U64);
LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal)); /* A compilation error here means LZ4_STREAMSIZE is not large enough */
LZ4_resetStream(lz4s);
return lz4s;
@@ -956,14 +1016,16 @@ FORCE_INLINE int LZ4_decompress_generic(
op += length;
break; /* Necessarily EOF, due to parsing restrictions */
}
- LZ4_WILDCOPY(op, ip, cpy); ip -= (op-cpy); op = cpy;
+ LZ4_wildCopy(op, ip, cpy);
+ ip += length; op = cpy;
/* get offset */
- LZ4_READ_LITTLEENDIAN_16(match,cpy,ip); ip+=2;
+ match = cpy - LZ4_readLE16(ip); ip+=2;
if ((checkOffset) && (unlikely(match < lowLimit))) goto _output_error; /* Error : offset outside destination buffer */
/* get matchlength */
- if ((length=(token&ML_MASK)) == ML_MASK)
+ length = token & ML_MASK;
+ if (length == ML_MASK)
{
unsigned s;
do
@@ -1012,7 +1074,7 @@ FORCE_INLINE int LZ4_decompress_generic(
/* copy repeated sequence */
cpy = op + length;
- if (unlikely((op-match)<(int)STEPSIZE))
+ if (unlikely((op-match)<8))
{
const size_t dec64 = dec64table[op-match];
op[0] = match[0];
@@ -1020,17 +1082,23 @@ FORCE_INLINE int LZ4_decompress_generic(
op[2] = match[2];
op[3] = match[3];
match += dec32table[op-match];
- A32(op+4) = A32(match);
+ LZ4_copy4(op+4, match);
op += 8; match -= dec64;
- } else { LZ4_COPY8(op,match); }
+ } else { LZ4_copy8(op, match); op+=8; match+=8; }
if (unlikely(cpy>oend-12))
{
- if (cpy > oend-LASTLITERALS) goto _output_error; /* Error : last 5 bytes must be literals */
- if (op<oend-COPYLENGTH) LZ4_WILDCOPY(op, match, (oend-COPYLENGTH));
- while(op<cpy) *op++=*match++;
+ if (cpy > oend-LASTLITERALS) goto _output_error; /* Error : last LASTLITERALS bytes must be literals */
+ if (op < oend-8)
+ {
+ LZ4_wildCopy(op, match, oend-8);
+ match += (oend-8) - op;
+ op = oend-8;
+ }
+ while (op<cpy) *op++ = *match++;
}
- else LZ4_WILDCOPY(op, match, cpy);
+ else
+ LZ4_wildCopy(op, match, cpy);
op=cpy; /* correction */
}
@@ -1079,7 +1147,7 @@ typedef struct
*/
LZ4_streamDecode_t* LZ4_createStreamDecode(void)
{
- LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOCATOR(sizeof(U32), LZ4_STREAMDECODESIZE_U32);
+ LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOCATOR(sizeof(U64), LZ4_STREAMDECODESIZE_U64);
return lz4s;
}
@@ -1241,7 +1309,7 @@ int LZ4_resetStreamState(void* state, const char* inputBuffer)
void* LZ4_create (const char* inputBuffer)
{
- void* lz4ds = ALLOCATOR(4, LZ4_STREAMSIZE_U32);
+ void* lz4ds = ALLOCATOR(8, LZ4_STREAMSIZE_U64);
LZ4_init ((LZ4_stream_t_internal*)lz4ds, (const BYTE*)inputBuffer);
return lz4ds;
}
@@ -1267,7 +1335,7 @@ int LZ4_compress_withState (void* state, const char* source, char* dest, int inp
if (inputSize < (int)LZ4_64KLIMIT)
return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue);
else
- return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, LZ4_64BITS ? byU32 : byPtr, noDict, noDictIssue);
+ return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue);
}
int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize)
@@ -1278,7 +1346,7 @@ int LZ4_compress_limitedOutput_withState (void* state, const char* source, char*
if (inputSize < (int)LZ4_64KLIMIT)
return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue);
else
- return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limitedOutput, LZ4_64BITS ? byU32 : byPtr, noDict, noDictIssue);
+ return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limitedOutput, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue);
}
/* Obsolete streaming decompression functions */
@@ -1292,3 +1360,6 @@ int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int origin
{
return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
}
+
+#endif /* LZ4_COMMONDEFS_ONLY */
+