-rw-r--r--   .cirrus.yml                            2
-rw-r--r--   .github/ISSUE_TEMPLATE/bug_report.md  12
-rw-r--r--   contrib/meson/meson/meson.build        5
-rw-r--r--   lib/lz4.c                             99
-rw-r--r--   lib/lz4frame.c                         3
-rw-r--r--   lib/lz4frame.h                         2
-rw-r--r--   lib/lz4hc.c                            4
-rw-r--r--   tests/frametest.c                     18
-rwxr-xr-x   tests/test_install.sh                  2
9 files changed, 90 insertions, 57 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index cb4b843..0c0e7a7 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -1,5 +1,5 @@
freebsd_instance:
- image_family: freebsd-12-0
+ image_family: freebsd-12-1
task:
script: pkg install -y gmake && gmake test
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index 96d9476..86b7696 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -10,20 +10,18 @@ assignees: ''
**Describe the bug**
A clear and concise description of what the bug is.
+**Expected behavior**
+Please describe what you expected to happen.
+
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
-4. See error
-
-**Expected behavior**
-A clear and concise description of what you expected to happen.
-
-**Screenshots**
+4. See error '...'
If applicable, add screenshots to help explain your problem.
-**Desktop (please complete the following information):**
+**System (please complete the following information):**
- OS: [e.g. Mac]
- Version [e.g. 22]
- Compiler [e.g. gcc]
diff --git a/contrib/meson/meson/meson.build b/contrib/meson/meson/meson.build
index 387e7bd..b278b7c 100644
--- a/contrib/meson/meson/meson.build
+++ b/contrib/meson/meson/meson.build
@@ -9,7 +9,6 @@
cc = meson.get_compiler('c')
pkgconfig = import('pkgconfig')
-python3 = import('python').find_installation()
c_std = get_option('c_std')
default_library = get_option('default_library')
@@ -28,8 +27,8 @@ compiler_msvc = 'msvc'
lz4_version = meson.project_version()
lz4_h_file = join_paths(meson.current_source_dir(), '../../../lib/lz4.h')
-GetLz4LibraryVersion_py = files('GetLz4LibraryVersion.py')
-r = run_command(python3, GetLz4LibraryVersion_py, lz4_h_file)
+GetLz4LibraryVersion_py = find_program('GetLz4LibraryVersion.py', native : true)
+r = run_command(GetLz4LibraryVersion_py, lz4_h_file)
if r.returncode() == 0
lz4_version = r.stdout().strip()
message('Project version is now: @0@'.format(lz4_version))
diff --git a/lib/lz4.c b/lib/lz4.c
index 82ab490..ae50d54 100644
--- a/lib/lz4.c
+++ b/lib/lz4.c
@@ -277,6 +277,21 @@ typedef enum {
/*-************************************
* Reading and writing into memory
**************************************/
+
+/**
+ * LZ4 relies on memcpy with a constant size being inlined. In freestanding
+ * environments, the compiler can't assume the implementation of memcpy() is
+ * standard compliant, so it can't apply its specialized memcpy() inlining
+ * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze
+ * memcpy() as if it were standard compliant, so it can inline it in freestanding
+ * environments. This is needed when decompressing the Linux Kernel, for example.
+ */
+#if defined(__GNUC__) && (__GNUC__ >= 4)
+#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
+#else
+#define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)
+#endif
+
static unsigned LZ4_isLittleEndian(void)
{
const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
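
The comment block added above explains the motivation for LZ4_memcpy. As a standalone illustration of the same pattern (the macro is copied from the hunk above; the demo_read32 helper and its name are illustrative, not part of the patch), a constant-size __builtin_memcpy lets the compiler collapse an unaligned load into a single instruction even in freestanding builds:

/* Standalone sketch of the LZ4_memcpy pattern; not part of the patch. */
#include <stdint.h>
#include <string.h>

#if defined(__GNUC__) && (__GNUC__ >= 4)
#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
#else
#define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)
#endif

/* Illustrative helper: a safe unaligned 32-bit load. With a constant size,
 * GCC/Clang typically lower this to a single load instruction. */
static uint32_t demo_read32(const void* p)
{
    uint32_t v;
    LZ4_memcpy(&v, p, sizeof(v));
    return v;
}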
@@ -311,27 +326,27 @@ static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = val
static U16 LZ4_read16(const void* memPtr)
{
- U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+ U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
}
static U32 LZ4_read32(const void* memPtr)
{
- U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+ U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
}
static reg_t LZ4_read_ARCH(const void* memPtr)
{
- reg_t val; memcpy(&val, memPtr, sizeof(val)); return val;
+ reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
}
static void LZ4_write16(void* memPtr, U16 value)
{
- memcpy(memPtr, &value, sizeof(value));
+ LZ4_memcpy(memPtr, &value, sizeof(value));
}
static void LZ4_write32(void* memPtr, U32 value)
{
- memcpy(memPtr, &value, sizeof(value));
+ LZ4_memcpy(memPtr, &value, sizeof(value));
}
#endif /* LZ4_FORCE_MEMORY_ACCESS */
@@ -366,7 +381,7 @@ void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)
const BYTE* s = (const BYTE*)srcPtr;
BYTE* const e = (BYTE*)dstEnd;
- do { memcpy(d,s,8); d+=8; s+=8; } while (d<e);
+ do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d<e);
}
static const unsigned inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
@@ -397,11 +412,11 @@ LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, con
dstPtr[2] = srcPtr[2];
dstPtr[3] = srcPtr[3];
srcPtr += inc32table[offset];
- memcpy(dstPtr+4, srcPtr, 4);
+ LZ4_memcpy(dstPtr+4, srcPtr, 4);
srcPtr -= dec64table[offset];
dstPtr += 8;
} else {
- memcpy(dstPtr, srcPtr, 8);
+ LZ4_memcpy(dstPtr, srcPtr, 8);
dstPtr += 8;
srcPtr += 8;
}
@@ -419,7 +434,7 @@ LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
const BYTE* s = (const BYTE*)srcPtr;
BYTE* const e = (BYTE*)dstEnd;
- do { memcpy(d,s,16); memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
+ do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
}
/* LZ4_memcpy_using_offset() presumes :
@@ -438,23 +453,23 @@ LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const si
memset(v, *srcPtr, 8);
break;
case 2:
- memcpy(v, srcPtr, 2);
- memcpy(&v[2], srcPtr, 2);
- memcpy(&v[4], &v[0], 4);
+ LZ4_memcpy(v, srcPtr, 2);
+ LZ4_memcpy(&v[2], srcPtr, 2);
+ LZ4_memcpy(&v[4], &v[0], 4);
break;
case 4:
- memcpy(v, srcPtr, 4);
- memcpy(&v[4], srcPtr, 4);
+ LZ4_memcpy(v, srcPtr, 4);
+ LZ4_memcpy(&v[4], srcPtr, 4);
break;
default:
LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);
return;
}
- memcpy(dstPtr, v, 8);
+ LZ4_memcpy(dstPtr, v, 8);
dstPtr += 8;
while (dstPtr < dstEnd) {
- memcpy(dstPtr, v, 8);
+ LZ4_memcpy(dstPtr, v, 8);
dstPtr += 8;
}
}
@@ -609,7 +624,7 @@ typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
-int LZ4_sizeofState() { return LZ4_STREAMSIZE; }
+int LZ4_sizeofState(void) { return LZ4_STREAMSIZE; }
/*-************************************
@@ -819,7 +834,7 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */
- const BYTE* const dictEnd = dictionary + dictSize;
+ const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary;
const BYTE* anchor = (const BYTE*) source;
const BYTE* const iend = ip + inputSize;
const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
@@ -827,7 +842,7 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
/* the dictCtx currentOffset is indexed on the start of the dictionary,
* while a dictionary in the current context precedes the currentOffset */
- const BYTE* dictBase = (dictDirective == usingDictCtx) ?
+ const BYTE* dictBase = !dictionary ? NULL : (dictDirective == usingDictCtx) ?
dictionary + dictSize - dictCtx->currentOffset :
dictionary + dictSize - startIndex;
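
The two hunks above stop forming dictionary + dictSize when dictionary is NULL: pointer arithmetic on a null pointer is undefined behavior in C and is reported by UBSan. A minimal sketch of the guard pattern, with illustrative names not taken from lz4.c:

/* Sketch: never add an offset to a possibly-NULL pointer. */
#include <stddef.h>

static const unsigned char* demo_dict_end(const unsigned char* dict, size_t dictSize)
{
    /* If dict is NULL, return NULL instead of computing NULL + dictSize (UB). */
    return dict ? dict + dictSize : NULL;
}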
@@ -1166,7 +1181,7 @@ _last_literals:
} else {
*op++ = (BYTE)(lastRun<<ML_BITS);
}
- memcpy(op, anchor, lastRun);
+ LZ4_memcpy(op, anchor, lastRun);
ip = anchor + lastRun;
op += lastRun;
}
@@ -1349,8 +1364,8 @@ LZ4_stream_t* LZ4_createStream(void)
while actually aligning LZ4_stream_t on 4 bytes. */
static size_t LZ4_stream_t_alignment(void)
{
- struct { char c; LZ4_stream_t t; } t_a;
- return sizeof(t_a) - sizeof(t_a.t);
+ typedef struct { char c; LZ4_stream_t t; } t_a;
+ return sizeof(t_a) - sizeof(LZ4_stream_t);
}
#endif
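
The struct-in-struct idiom above measures the alignment requirement of LZ4_stream_t without relying on C11 _Alignof: the leading char member forces padding equal to the alignment of the following member, so the size difference equals that alignment. A standalone sketch of the idiom, with illustrative type names:

/* Standalone sketch of the alignment-measurement idiom; demo_t stands in for LZ4_stream_t. */
#include <stdio.h>
#include <stddef.h>

typedef struct { double d; char pad[24]; } demo_t;

static size_t demo_alignment(void)
{
    typedef struct { char c; demo_t t; } padded;  /* 'c' forces alignment padding before 't' */
    return sizeof(padded) - sizeof(demo_t);       /* equals the alignment of demo_t */
}

int main(void)
{
    printf("alignment of demo_t: %u bytes\n", (unsigned)demo_alignment());  /* typically 8 */
    return 0;
}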
@@ -1542,7 +1557,7 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
* cost to copy the dictionary's tables into the active context,
* so that the compression loop is only looking into one table.
*/
- memcpy(streamPtr, streamPtr->dictCtx, sizeof(LZ4_stream_t));
+ LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(LZ4_stream_t));
result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
} else {
result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);
@@ -1750,12 +1765,12 @@ LZ4_decompress_generic(
/* We don't need to check oend, since we check it once for each loop below */
if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; }
/* Literals can only be 14, but hope compilers optimize if we copy by a register size */
- memcpy(op, ip, 16);
+ LZ4_memcpy(op, ip, 16);
} else { /* LZ4_decompress_fast() */
/* LZ4_decompress_fast() cannot copy more than 8 bytes at a time :
* it doesn't know input length, and relies on end-of-block properties */
- memcpy(op, ip, 8);
- if (length > 8) { memcpy(op+8, ip+8, 8); }
+ LZ4_memcpy(op, ip, 8);
+ if (length > 8) { LZ4_memcpy(op+8, ip+8, 8); }
}
ip += length; op = cpy;
}
@@ -1791,9 +1806,9 @@ LZ4_decompress_generic(
assert(match <= op);
assert(op + 18 <= oend);
- memcpy(op, match, 8);
- memcpy(op+8, match+8, 8);
- memcpy(op+16, match+16, 2);
+ LZ4_memcpy(op, match, 8);
+ LZ4_memcpy(op+8, match+8, 8);
+ LZ4_memcpy(op+16, match+16, 2);
op += length;
continue;
} } }
@@ -1816,14 +1831,14 @@ LZ4_decompress_generic(
/* match stretches into both external dictionary and current block */
size_t const copySize = (size_t)(lowPrefix - match);
size_t const restSize = length - copySize;
- memcpy(op, dictEnd - copySize, copySize);
+ LZ4_memcpy(op, dictEnd - copySize, copySize);
op += copySize;
if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
BYTE* const endOfMatch = op + restSize;
const BYTE* copyFrom = lowPrefix;
while (op < endOfMatch) { *op++ = *copyFrom++; }
} else {
- memcpy(op, lowPrefix, restSize);
+ LZ4_memcpy(op, lowPrefix, restSize);
op += restSize;
} }
continue;
@@ -1864,7 +1879,7 @@ LZ4_decompress_generic(
/* strictly "less than" on input, to re-enter the loop with at least one byte */
&& likely((endOnInput ? ip < shortiend : 1) & (op <= shortoend)) ) {
/* Copy the literals */
- memcpy(op, ip, endOnInput ? 16 : 8);
+ LZ4_memcpy(op, ip, endOnInput ? 16 : 8);
op += length; ip += length;
/* The second stage: prepare for match copying, decode full info.
@@ -1879,9 +1894,9 @@ LZ4_decompress_generic(
&& (offset >= 8)
&& (dict==withPrefix64k || match >= lowPrefix) ) {
/* Copy the match. */
- memcpy(op + 0, match + 0, 8);
- memcpy(op + 8, match + 8, 8);
- memcpy(op +16, match +16, 2);
+ LZ4_memcpy(op + 0, match + 0, 8);
+ LZ4_memcpy(op + 8, match + 8, 8);
+ LZ4_memcpy(op +16, match +16, 2);
op += length + MINMATCH;
/* Both stages worked, load the next token. */
continue;
@@ -1994,14 +2009,14 @@ LZ4_decompress_generic(
/* match stretches into both external dictionary and current block */
size_t const copySize = (size_t)(lowPrefix - match);
size_t const restSize = length - copySize;
- memcpy(op, dictEnd - copySize, copySize);
+ LZ4_memcpy(op, dictEnd - copySize, copySize);
op += copySize;
if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
BYTE* const endOfMatch = op + restSize;
const BYTE* copyFrom = lowPrefix;
while (op < endOfMatch) *op++ = *copyFrom++;
} else {
- memcpy(op, lowPrefix, restSize);
+ LZ4_memcpy(op, lowPrefix, restSize);
op += restSize;
} }
continue;
@@ -2020,7 +2035,7 @@ LZ4_decompress_generic(
if (matchEnd > op) { /* overlap copy */
while (op < copyEnd) { *op++ = *match++; }
} else {
- memcpy(op, match, mlen);
+ LZ4_memcpy(op, match, mlen);
}
op = copyEnd;
if (op == oend) { break; }
@@ -2034,10 +2049,10 @@ LZ4_decompress_generic(
op[2] = match[2];
op[3] = match[3];
match += inc32table[offset];
- memcpy(op+4, match, 4);
+ LZ4_memcpy(op+4, match, 4);
match -= dec64table[offset];
} else {
- memcpy(op, match, 8);
+ LZ4_memcpy(op, match, 8);
match += 8;
}
op += 8;
@@ -2052,7 +2067,7 @@ LZ4_decompress_generic(
}
while (op < cpy) { *op++ = *match++; }
} else {
- memcpy(op, match, 8);
+ LZ4_memcpy(op, match, 8);
if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
}
op = cpy; /* wildcopy correction */
@@ -2378,7 +2393,7 @@ int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize,
/* Obsolete Streaming functions */
-int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; }
+int LZ4_sizeofStreamState(void) { return LZ4_STREAMSIZE; }
int LZ4_resetStreamState(void* state, char* inputBuffer)
{
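
Among the changes above, LZ4_sizeofState() and LZ4_sizeofStreamState() now spell out (void). In pre-C23 C, an empty parameter list in a declaration means "unspecified parameters" rather than "no parameters", which defeats prototype checking and triggers -Wstrict-prototypes. A tiny illustration with hypothetical function names:

/* Illustration only: why (void) matters in C (in C++ both forms are equivalent). */
int old_style_size();          /* unspecified parameters: old_style_size(42) still compiles */
int prototyped_size(void);     /* true prototype: passing any argument is a constraint violation */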
diff --git a/lib/lz4frame.c b/lib/lz4frame.c
index c9f630d..5d716ea 100644
--- a/lib/lz4frame.c
+++ b/lib/lz4frame.c
@@ -725,6 +725,9 @@ size_t LZ4F_compressBegin(LZ4F_cctx* cctxPtr,
*/
size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
{
+ if (preferencesPtr && preferencesPtr->autoFlush) {
+ return LZ4F_compressBound_internal(srcSize, preferencesPtr, 0);
+ }
return LZ4F_compressBound_internal(srcSize, preferencesPtr, (size_t)-1);
}
diff --git a/lib/lz4frame.h b/lib/lz4frame.h
index eba9c9e..77d682b 100644
--- a/lib/lz4frame.h
+++ b/lib/lz4frame.h
@@ -289,7 +289,7 @@ LZ4FLIB_API size_t LZ4F_compressBegin(LZ4F_cctx* cctx,
* @return is always the same for a srcSize and prefsPtr.
* prefsPtr is optional : when NULL is provided, preferences will be set to cover worst case scenario.
* tech details :
- * @return includes the possibility that internal buffer might already be filled by up to (blockSize-1) bytes.
+ * @return if automatic flushing is not enabled, includes the possibility that internal buffer might already be filled by up to (blockSize-1) bytes.
* It also includes frame footer (ending + checksum), since it might be generated by LZ4F_compressEnd().
* @return doesn't include frame header, as it was already generated by LZ4F_compressBegin().
*/
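
With the documentation change above, callers that enable autoFlush can size a per-call output buffer from LZ4F_compressBound() without reserving room for block buffering that never happens. A hedged usage sketch (chunk size and names are illustrative; error handling omitted):

/* Sketch: dstCapacity needed per LZ4F_compressUpdate() call on an autoFlush stream. */
#include <string.h>
#include "lz4frame.h"

static size_t demo_autoflush_bound(size_t chunkSize)
{
    LZ4F_preferences_t prefs;
    memset(&prefs, 0, sizeof(prefs));
    prefs.autoFlush = 1;   /* every LZ4F_compressUpdate() flushes its input completely */
    /* Large enough for one update of up to chunkSize bytes, and for the
     * frame footer written by LZ4F_compressEnd(). */
    return LZ4F_compressBound(chunkSize, &prefs);
}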
diff --git a/lib/lz4hc.c b/lib/lz4hc.c
index b75514f..687f87e 100644
--- a/lib/lz4hc.c
+++ b/lib/lz4hc.c
@@ -886,8 +886,8 @@ int LZ4_sizeofStateHC(void) { return (int)sizeof(LZ4_streamHC_t); }
* while actually aligning LZ4_streamHC_t on 4 bytes. */
static size_t LZ4_streamHC_t_alignment(void)
{
- struct { char c; LZ4_streamHC_t t; } t_a;
- return sizeof(t_a) - sizeof(t_a.t);
+ typedef struct { char c; LZ4_streamHC_t t; } t_a;
+ return sizeof(t_a) - sizeof(LZ4_streamHC_t);
}
#endif
diff --git a/tests/frametest.c b/tests/frametest.c
index 1b932e4..f891530 100644
--- a/tests/frametest.c
+++ b/tests/frametest.c
@@ -200,6 +200,24 @@ int basicTests(U32 seed, double compressibility)
DISPLAYLEVEL(3, " %u \n", (U32)cBound);
}
+ /* LZ4F_compressBound() : special case : automatic flushing enabled */
+ DISPLAYLEVEL(3, "LZ4F_compressBound(1 KB, autoFlush=1) = ");
+ { size_t cBound;
+ LZ4F_preferences_t autoFlushPrefs;
+ memset(&autoFlushPrefs, 0, sizeof(autoFlushPrefs));
+ autoFlushPrefs.autoFlush = 1;
+ cBound = LZ4F_compressBound(1 KB, &autoFlushPrefs);
+ if (cBound > 64 KB) goto _output_error;
+ DISPLAYLEVEL(3, " %u \n", (U32)cBound);
+ }
+
+ /* LZ4F_compressBound() : special case : automatic flushing disabled */
+ DISPLAYLEVEL(3, "LZ4F_compressBound(1 KB, autoFlush=0) = ");
+ { size_t const cBound = LZ4F_compressBound(1 KB, &prefs);
+ if (cBound < 64 KB) goto _output_error;
+ DISPLAYLEVEL(3, " %u \n", (U32)cBound);
+ }
+
/* Special case : null-content frame */
testSize = 0;
DISPLAYLEVEL(3, "LZ4F_compressFrame, compress null content : ");
diff --git a/tests/test_install.sh b/tests/test_install.sh
index ba87934..122bac5 100755
--- a/tests/test_install.sh
+++ b/tests/test_install.sh
@@ -6,7 +6,7 @@ make="make -C $lz4_root"
unamestr=$(uname)
if [ "$unamestr" = 'Linux' ]; then
make="make -C $lz4_root"
-elif [ "$unamestr" = 'FreeBSD' ]; then
+elif [ "$unamestr" = 'FreeBSD' -o "$unamestr" = 'OpenBSD' ]; then
make="gmake -C $lz4_root"
fi