path: root/lz4.c
author     yann.collet.73@gmail.com <yann.collet.73@gmail.com@650e7d94-2a16-8b24-b05c-7c0b3f6821cd>    2012-05-07 23:10:02 (GMT)
committer  yann.collet.73@gmail.com <yann.collet.73@gmail.com@650e7d94-2a16-8b24-b05c-7c0b3f6821cd>    2012-05-07 23:10:02 (GMT)
commit     ae9eead4aeef810acfc5ee768f72d92b5028fc9f (patch)
tree       7a73ede19e9da511d875c8ecf5aa6e6a05bdbe40 /lz4.c
parent     6cedd1f252cd091be19f6a56526a5106b5140943 (diff)
Corrected: linking stage of Visual 2005 in Debug mode (issue 16)
Minor changes in comments

git-svn-id: https://lz4.googlecode.com/svn/trunk@63 650e7d94-2a16-8b24-b05c-7c0b3f6821cd
Diffstat (limited to 'lz4.c')
-rw-r--r--  lz4.c  |  112
1 file changed, 60 insertions(+), 52 deletions(-)
diff --git a/lz4.c b/lz4.c
index ba0d3fd..826b785 100644
--- a/lz4.c
+++ b/lz4.c
@@ -65,14 +65,15 @@
//**************************************
// 32 or 64 bits ?
#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || defined(__LP64__) || defined(_LP64) ) // Detects 64-bit mode
-#define LZ4_ARCH64 1
+# define LZ4_ARCH64 1
#else
-#define LZ4_ARCH64 0
+# define LZ4_ARCH64 0
#endif
// Little Endian or Big Endian ?
+// Note : overwrite the #define below if you know your architecture's endianness
#if (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN) || defined(_ARCH_PPC) || defined(__PPC__) || defined(__PPC) || defined(PPC) || defined(__powerpc__) || defined(__powerpc) || defined(powerpc) || ((defined(__BYTE_ORDER__)&&(__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))) )
-#define LZ4_BIG_ENDIAN 1
+# define LZ4_BIG_ENDIAN 1
#else
// Little Endian assumed. PDP Endian and other very rare endian formats are unsupported.
#endif
@@ -81,7 +82,7 @@
// For other CPUs, the compiler will be more cautious and insert extra code to ensure aligned access is respected
// If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance
#if defined(__ARM_FEATURE_UNALIGNED)
-#define LZ4_FORCE_UNALIGNED_ACCESS 1
+# define LZ4_FORCE_UNALIGNED_ACCESS 1
#endif
// Uncomment this parameter if your target system or compiler does not support hardware bit count
@@ -95,26 +96,33 @@
#if __STDC_VERSION__ >= 199901L // C99
/* "restrict" is a known keyword */
#else
-#define restrict // Disable restrict
+# define restrict // Disable restrict
#endif
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#ifdef _MSC_VER // Visual Studio
-#define inline __forceinline // Visual is not C99, but supports some kind of inline
-#include <intrin.h> // _BitScanForward
+# define inline __forceinline // Visual is not C99, but supports some kind of inline
+# include <intrin.h> // _BitScanForward
+# if LZ4_ARCH64 // 64-bit
+# pragma intrinsic(_BitScanForward64) // For Visual 2005
+# pragma intrinsic(_BitScanReverse64) // For Visual 2005
+# else
+# pragma intrinsic(_BitScanForward) // For Visual 2005
+# pragma intrinsic(_BitScanReverse) // For Visual 2005
+# endif
#endif
#ifdef _MSC_VER
-#define lz4_bswap16(x) _byteswap_ushort(x)
+# define lz4_bswap16(x) _byteswap_ushort(x)
#else
-#define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
+# define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
#endif
#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
-# define expect(expr,value) (__builtin_expect ((expr),(value)) )
+# define expect(expr,value) (__builtin_expect ((expr),(value)) )
#else
-# define expect(expr,value) (expr)
+# define expect(expr,value) (expr)
#endif
#define likely(expr) expect((expr) != 0, 1)
@@ -133,22 +141,22 @@
// Basic Types
//**************************************
#if defined(_MSC_VER) // Visual Studio does not support 'stdint' natively
-#define BYTE unsigned __int8
-#define U16 unsigned __int16
-#define U32 unsigned __int32
-#define S32 __int32
-#define U64 unsigned __int64
+# define BYTE unsigned __int8
+# define U16 unsigned __int16
+# define U32 unsigned __int32
+# define S32 __int32
+# define U64 unsigned __int64
#else
-#include <stdint.h>
-#define BYTE uint8_t
-#define U16 uint16_t
-#define U32 uint32_t
-#define S32 int32_t
-#define U64 uint64_t
+# include <stdint.h>
+# define BYTE uint8_t
+# define U16 uint16_t
+# define U32 uint32_t
+# define S32 int32_t
+# define U64 uint64_t
#endif
#ifndef LZ4_FORCE_UNALIGNED_ACCESS
-#pragma pack(push, 1)
+# pragma pack(push, 1)
#endif
typedef struct _U16_S { U16 v; } U16_S;
@@ -156,7 +164,7 @@ typedef struct _U32_S { U32 v; } U32_S;
typedef struct _U64_S { U64 v; } U64_S;
#ifndef LZ4_FORCE_UNALIGNED_ACCESS
-#pragma pack(pop)
+# pragma pack(pop)
#endif
#define A64(x) (((U64_S *)(x))->v)
@@ -194,31 +202,31 @@ typedef struct _U64_S { U64 v; } U64_S;
// Architecture-specific macros
//**************************************
#if LZ4_ARCH64 // 64-bit
-#define STEPSIZE 8
-#define UARCH U64
-#define AARCH A64
-#define LZ4_COPYSTEP(s,d) A64(d) = A64(s); d+=8; s+=8;
-#define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d)
-#define LZ4_SECURECOPY(s,d,e) if (d<e) LZ4_WILDCOPY(s,d,e)
-#define HTYPE U32
-#define INITBASE(base) const BYTE* const base = ip
+# define STEPSIZE 8
+# define UARCH U64
+# define AARCH A64
+# define LZ4_COPYSTEP(s,d) A64(d) = A64(s); d+=8; s+=8;
+# define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d)
+# define LZ4_SECURECOPY(s,d,e) if (d<e) LZ4_WILDCOPY(s,d,e)
+# define HTYPE U32
+# define INITBASE(base) const BYTE* const base = ip
#else // 32-bit
-#define STEPSIZE 4
-#define UARCH U32
-#define AARCH A32
-#define LZ4_COPYSTEP(s,d) A32(d) = A32(s); d+=4; s+=4;
-#define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d); LZ4_COPYSTEP(s,d);
-#define LZ4_SECURECOPY LZ4_WILDCOPY
-#define HTYPE const BYTE*
-#define INITBASE(base) const int base = 0
+# define STEPSIZE 4
+# define UARCH U32
+# define AARCH A32
+# define LZ4_COPYSTEP(s,d) A32(d) = A32(s); d+=4; s+=4;
+# define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d); LZ4_COPYSTEP(s,d);
+# define LZ4_SECURECOPY LZ4_WILDCOPY
+# define HTYPE const BYTE*
+# define INITBASE(base) const int base = 0
#endif
#if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
-#define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
-#define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
+# define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
+# define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
#else // Little Endian
-#define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
-#define LZ4_WRITE_LITTLEENDIAN_16(p,v) { A16(p) = v; p+=2; }
+# define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
+# define LZ4_WRITE_LITTLEENDIAN_16(p,v) { A16(p) = v; p+=2; }
#endif
@@ -663,7 +671,7 @@ int LZ4_uncompress(const char* source,
cpy = op+length;
if unlikely(cpy>oend-COPYLENGTH)
{
- if (cpy > oend) goto _output_error;
+ if (cpy > oend) goto _output_error; // Error : request to write beyond destination buffer
memcpy(op, ip, length);
ip += length;
break; // Necessarily EOF
@@ -672,7 +680,7 @@ int LZ4_uncompress(const char* source,
// get offset
LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;
- if (ref < (BYTE* const)dest) goto _output_error;
+ if (ref < (BYTE* const)dest) goto _output_error; // Error : offset creates reference outside destination buffer
// get matchlength
if ((length=(token&ML_MASK)) == ML_MASK) { for (;*ip==255;length+=255) {ip++;} length += *ip++; }
@@ -697,7 +705,7 @@ int LZ4_uncompress(const char* source,
cpy = op + length - (STEPSIZE-4);
if (cpy>oend-COPYLENGTH)
{
- if (cpy > oend) goto _output_error;
+ if (cpy > oend) goto _output_error; // Error : request to write beyond destination buffer
LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH));
while(op<cpy) *op++=*ref++;
op=cpy;
@@ -749,19 +757,19 @@ int LZ4_uncompress_unknownOutputSize(
cpy = op+length;
if ((cpy>oend-COPYLENGTH) || (ip+length>iend-COPYLENGTH))
{
- if (cpy > oend) goto _output_error;
- if (ip+length > iend) goto _output_error;
+ if (cpy > oend) goto _output_error; // Error : request to write beyond destination buffer
+ if (ip+length > iend) goto _output_error; // Error : request to read beyond source buffer
memcpy(op, ip, length);
op += length;
ip += length;
- if (ip<iend) goto _output_error;
+ if (ip<iend) goto _output_error; // Error : LZ4 format violation
break; // Necessarily EOF, due to parsing restrictions
}
LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;
// get offset
LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;
- if (ref < (BYTE* const)dest) goto _output_error;
+ if (ref < (BYTE* const)dest) goto _output_error; // Error : offset creates reference outside of destination buffer
// get matchlength
if ((length=(token&ML_MASK)) == ML_MASK) { while (ip<iend) { int s = *ip++; length +=s; if (s==255) continue; break; } }
@@ -786,7 +794,7 @@ int LZ4_uncompress_unknownOutputSize(
cpy = op + length - (STEPSIZE-4);
if (cpy>oend-COPYLENGTH)
{
- if (cpy > oend) goto _output_error;
+ if (cpy > oend) goto _output_error; // Error : request to write outside of destination buffer
LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH));
while(op<cpy) *op++=*ref++;
op=cpy;