-rw-r--r--  .github/workflows/ci.yml    |  14
-rw-r--r--  Makefile                    |  16
-rw-r--r--  build/cmake/.gitignore      |   3
-rw-r--r--  build/cmake/CMakeLists.txt  |   4
-rw-r--r--  doc/lz4_Block_format.md     |  71
-rw-r--r--  lib/lz4.c                   |   2
-rw-r--r--  lib/lz4.h                   |  19
-rw-r--r--  lib/lz4frame.c              |   4
-rw-r--r--  lib/lz4frame.h              |   8
-rw-r--r--  lib/lz4hc.c                 |   2
-rw-r--r--  programs/lz4io.c            |  87
-rw-r--r--  programs/lz4io.h            |   2
-rw-r--r--  tests/Makefile              |  10
-rw-r--r--  tests/frametest.c           |  19
-rw-r--r--  tests/test-lz4-list.py      |  91
15 files changed, 212 insertions, 140 deletions
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index a458a19..54ea2b6 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -236,6 +236,13 @@ jobs:
- name: LZ4 frame test (32-bit)
run: make V=1 -C tests test-frametest32
+ lz4-memory-usage:
+ name: test different values of LZ4_MEMORY_USAGE
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2 # https://github.com/actions/checkout
+ - name: LZ4_MEMORY_USAGE
+ run: make V=1 -C tests test-compile-with-lz4-memory-usage
# Custom LZ4_DISTANCE_MAX ; lz4-wlib (CLI linked to dynamic library); LZ4_USER_MEMORY_FUNCTIONS
lz4-custom-distance:
@@ -244,7 +251,7 @@ jobs:
steps:
- uses: actions/checkout@v2 # https://github.com/actions/checkout
- - name: custom LZ4_DISTANCE_MAX
+ - name: custom LZ4_DISTANCE_MAX; test LZ4_USER_MEMORY_FUNCTIONS
run: |
MOREFLAGS='-DLZ4_DISTANCE_MAX=8000' make V=1 check
make V=1 clean
@@ -392,7 +399,6 @@ jobs:
- name: unicode lint
run: bash ./tests/unicode_lint.sh
-
lz4-examples:
name: make examples
runs-on: ubuntu-latest
@@ -660,8 +666,8 @@ jobs:
matrix:
include: [
{ os: ubuntu-latest, }, # https://github.com/actions/virtual-environments/
- { os: ubuntu-20.04, }, # https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-README.md
- { os: ubuntu-18.04, }, # https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu1804-README.md
+ { os: ubuntu-20.04, }, # https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md
+ { os: ubuntu-18.04, }, # https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu1804-Readme.md
]
runs-on: ${{ matrix.os }}
diff --git a/Makefile b/Makefile
index 7a5c1ef..fd62945 100644
--- a/Makefile
+++ b/Makefile
@@ -84,11 +84,12 @@ clean:
$(MAKE) -C $(FUZZDIR) $@ > $(VOID)
$(MAKE) -C contrib/gen_manual $@ > $(VOID)
$(RM) lz4$(EXT)
+ $(RM) -r $(CMAKE_BUILD_DIR)
@echo Cleaning completed
#-----------------------------------------------------------------------------
-# make install is validated only for Linux, OSX, BSD, Hurd and Solaris targets
+# make install is validated only for Posix environments
#-----------------------------------------------------------------------------
ifeq ($(POSIX_ENV),Yes)
HOST_OS = POSIX
@@ -102,21 +103,24 @@ install uninstall:
travis-install:
$(MAKE) -j1 install DESTDIR=~/install_test_dir
-.PHONY: cmake
-cmake:
- cd build/cmake; cmake $(CMAKE_PARAMS) CMakeLists.txt; $(MAKE)
-
endif # POSIX_ENV
+CMAKE ?= cmake
+CMAKE_BUILD_DIR ?= build/cmake/build
ifneq (,$(filter MSYS%,$(shell $(UNAME))))
HOST_OS = MSYS
CMAKE_PARAMS = -G"MSYS Makefiles"
endif
+.PHONY: cmake
+cmake:
+ mkdir -p $(CMAKE_BUILD_DIR)
+ cd $(CMAKE_BUILD_DIR); $(CMAKE) $(CMAKE_PARAMS) ..; $(CMAKE) --build .
+
#------------------------------------------------------------------------
-#make tests validated only for MSYS, Linux, OSX, kFreeBSD and Hurd targets
+# make tests validated only for MSYS and Posix environments
#------------------------------------------------------------------------
ifneq (,$(filter $(HOST_OS),MSYS POSIX))
diff --git a/build/cmake/.gitignore b/build/cmake/.gitignore
index d39505d..0ad8240 100644
--- a/build/cmake/.gitignore
+++ b/build/cmake/.gitignore
@@ -1,4 +1,4 @@
-# cmake artefact
+# cmake build artefact
CMakeCache.txt
CMakeFiles
@@ -7,3 +7,4 @@ Makefile
liblz4.pc
lz4c
install_manifest.txt
+build
diff --git a/build/cmake/CMakeLists.txt b/build/cmake/CMakeLists.txt
index 0c15ab5..e92115b 100644
--- a/build/cmake/CMakeLists.txt
+++ b/build/cmake/CMakeLists.txt
@@ -104,7 +104,7 @@ set(LZ4_LIBRARIES_BUILT)
if(BUILD_SHARED_LIBS)
add_library(lz4_shared SHARED ${LZ4_SOURCES})
target_include_directories(lz4_shared
- PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
+ PUBLIC $<BUILD_INTERFACE:${LZ4_LIB_SOURCE_DIR}>
INTERFACE $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)
set_target_properties(lz4_shared PROPERTIES
OUTPUT_NAME lz4
@@ -123,7 +123,7 @@ if(BUILD_STATIC_LIBS)
endif()
add_library(lz4_static STATIC ${LZ4_SOURCES})
target_include_directories(lz4_static
- PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
+ PUBLIC $<BUILD_INTERFACE:${LZ4_LIB_SOURCE_DIR}>
INTERFACE $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)
set_target_properties(lz4_static PROPERTIES
OUTPUT_NAME ${STATIC_LIB_NAME}
diff --git a/doc/lz4_Block_format.md b/doc/lz4_Block_format.md
index 8f3d4b9..a0017b9 100644
--- a/doc/lz4_Block_format.md
+++ b/doc/lz4_Block_format.md
@@ -1,6 +1,6 @@
LZ4 Block Format Description
============================
-Last revised: 2019-03-30.
+Last revised: 2022-02-02.
Author : Yann Collet
@@ -42,8 +42,9 @@ If the field value is 0, then there is no literal.
If it is 15, then we need to add some more bytes to indicate the full length.
Each additional byte then represent a value from 0 to 255,
which is added to the previous value to produce a total length.
-When the byte value is 255, another byte is output.
-There can be any number of bytes following `token`. There is no "size limit".
+When the byte value is 255, another byte must be read and added, and so on.
+There can be any number of bytes of value "255" following `token`.
+There is no "size limit".
(Side note : this is why a not-compressible input block is expanded by 0.4%).
Example 1 : A literal length of 48 will be represented as :
@@ -74,22 +75,23 @@ This is a 2 bytes value, in little endian format
(the 1st byte is the "low" byte, the 2nd one is the "high" byte).
The `offset` represents the position of the match to be copied from.
-1 means "current position - 1 byte".
-The maximum `offset` value is 65535, 65536 cannot be coded.
-Note that 0 is an invalid value, not used.
+For example, 1 means "current position - 1 byte".
+The maximum `offset` value is 65535, 65536 and beyond cannot be coded.
+Note that 0 is an invalid offset value.
+The presence of such a value denotes an invalid (corrupted) block.
-Then we need to extract the `matchlength`.
+Then the `matchlength` can be extracted.
For this, we use the second token field, the low 4-bits.
-Value, obviously, ranges from 0 to 15.
+Such a value, obviously, ranges from 0 to 15.
However here, 0 means that the copy operation will be minimal.
The minimum length of a match, called `minmatch`, is 4.
As a consequence, a 0 value means 4 bytes, and a value of 15 means 19+ bytes.
Similar to literal length, on reaching the highest possible value (15),
-we output additional bytes, one at a time, with values ranging from 0 to 255.
+one must read additional bytes, one at a time, with values ranging from 0 to 255.
They are added to total to provide the final match length.
A 255 value means there is another byte to read and add.
-There is no limit to the number of optional bytes that can be output this way.
-(This points towards a maximum achievable compression ratio of about 250).
+There is no limit to the number of optional "255" bytes that can be present.
+(Note: this points towards a maximum achievable compression ratio of about 250).
Decoding the `matchlength` reaches the end of current sequence.
Next byte will be the start of another sequence.
@@ -97,9 +99,9 @@ But before moving to next sequence,
it's time to use the decoded match position and length.
The decoder copies `matchlength` bytes from match position to current position.
-In some cases, `matchlength` is larger than `offset`.
-Therefore, `match_pos + matchlength > current_pos`,
-which means that later bytes to copy are not yet decoded.
+In some cases, `matchlength` can be larger than `offset`.
+Therefore, since `match_pos + matchlength > current_pos`,
+later bytes to copy are not decoded yet.
This is called an "overlap match", and must be handled with special care.
A common case is an offset of 1,
meaning the last byte is repeated `matchlength` times.
@@ -107,7 +109,7 @@ meaning the last byte is repeated `matchlength` times.
End of block restrictions
-----------------------
-There are specific rules required to terminate a block.
+There are specific restrictions required to terminate an LZ4 block.
1. The last sequence contains only literals.
The block ends right after them.
@@ -124,33 +126,42 @@ There are specific rules required to terminate a block.
an independent block < 13 bytes cannot be compressed,
because the match must copy "something",
so it needs at least one prior byte.
- - When a block can reference data from another block,
+ - However, when a block can reference data from another block,
it can start immediately with a match and no literal,
- so a block of 12 bytes can be compressed.
+ therefore a block of exactly 12 bytes can be compressed.
When a block does not respect these end conditions,
a conformant decoder is allowed to reject the block as incorrect.
-These rules are in place to ensure that a conformant decoder
-can be designed for speed, issuing speculatively instructions,
-while never reading nor writing beyond provided I/O buffers.
-
+These rules are in place to ensure compatibility with
+a wide range of historical decoders
+which rely on these conditions in their speed-oriented design.
Additional notes
-----------------------
-If the decoder will decompress data from an external source,
-it is recommended to ensure that the decoder will not be vulnerable to
-buffer overflow manipulations.
+If the decoder will decompress data from any external source,
+it is recommended to ensure that the decoder is resilient to corrupted data,
+and typically not vulnerable to buffer overflow manipulations.
Always ensure that read and write operations
remain within the limits of provided buffers.
Test the decoder with fuzzers
-to ensure it's resilient to improbable combinations.
-
-The format makes no assumption nor limits to the way the compressor
+to ensure it's resilient to improbable sequences of conditions.
+Combine them with sanitizers, in order to catch overflows (asan)
+or initialization issues (msan).
+Pay some attention to offset 0 scenario, which is invalid,
+and therefore must not be blindly decoded
+(a naive implementation could preserve destination buffer content,
+which could then result in information disclosure
+if such buffer was uninitialized and still containing private data).
+For reference, in such a scenario, the reference LZ4 decoder
+clears the match segment with `0` bytes,
+though other solutions are certainly possible.
+
+The format makes no assumption nor limits to the way a compressor
searches and selects matches within the source data block.
Multiple techniques can be considered,
featuring distinct time / performance trade offs.
-As long as the format is respected,
+For example, an upper compression limit can be reached,
+using a technique called "full optimal parsing", at very high cpu cost.
+As long as the specified format is respected,
the result will be compatible and decodable by any compliant decoder.
-An upper compression limit can be reached,
-using a technique called "full optimal parsing", at high cpu cost.
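
The revised wording above describes the variable-length scheme shared by literal lengths and match lengths: a token field of 15 means "keep reading bytes, adding each one, until a byte below 255 appears". A minimal decoder-side sketch of that accumulation loop, written against a hypothetical `pSrc`/`srcEnd` cursor pair rather than the reference decoder's internals:

```c
#include <stddef.h>
#include <stdint.h>

/* Illustrative only: extend a 4-bit token field (literal length or match
 * length) by reading extra bytes, as described by the block format.
 * Returns the accumulated length, or (size_t)-1 on truncated input. */
static size_t read_variable_length(const uint8_t** pSrc, const uint8_t* srcEnd,
                                   unsigned tokenField /* 0..15 */)
{
    size_t length = tokenField;
    if (tokenField == 15) {              /* 15 signals "more bytes follow" */
        uint8_t b;
        do {
            if (*pSrc >= srcEnd) return (size_t)-1;  /* corrupted block */
            b = *(*pSrc)++;
            length += b;                 /* each extra byte adds 0..255 */
        } while (b == 255);              /* 255 means yet another byte */
    }
    return length;
}
```

For match lengths the decoder then adds `minmatch` (4) to the result. An overlap match (offset smaller than `matchlength`, e.g. offset 1) must afterwards be copied with care, for instance byte by byte, since part of the data being copied is produced by the copy itself.
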
diff --git a/lib/lz4.c b/lib/lz4.c
index 95bd349..a2272cf 100644
--- a/lib/lz4.c
+++ b/lib/lz4.c
@@ -867,7 +867,7 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
const char* const source,
char* const dest,
const int inputSize,
- int *inputConsumed, /* only written when outputDirective == fillOutput */
+ int* inputConsumed, /* only written when outputDirective == fillOutput */
const int maxOutputSize,
const limitedOutput_directive outputDirective,
const tableType_t tableType,
diff --git a/lib/lz4.h b/lib/lz4.h
index a520adc..7c401f6 100644
--- a/lib/lz4.h
+++ b/lib/lz4.h
@@ -116,17 +116,28 @@ LZ4LIB_API const char* LZ4_versionString (void); /**< library version string;
/*-************************************
* Tuning parameter
**************************************/
+#define LZ4_MEMORY_USAGE_MIN 10
+#define LZ4_MEMORY_USAGE_DEFAULT 14
+#define LZ4_MEMORY_USAGE_MAX 20
+
/*!
* LZ4_MEMORY_USAGE :
- * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
- * Increasing memory usage improves compression ratio.
- * Reduced memory usage may improve speed, thanks to better cache locality.
+ * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; )
+ * Increasing memory usage improves compression ratio, at the cost of speed.
+ * Reduced memory usage may improve speed at the cost of ratio, thanks to better cache locality.
* Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
*/
#ifndef LZ4_MEMORY_USAGE
-# define LZ4_MEMORY_USAGE 14
+# define LZ4_MEMORY_USAGE LZ4_MEMORY_USAGE_DEFAULT
+#endif
+
+#if (LZ4_MEMORY_USAGE < LZ4_MEMORY_USAGE_MIN)
+# error "LZ4_MEMORY_USAGE is too small !"
#endif
+#if (LZ4_MEMORY_USAGE > LZ4_MEMORY_USAGE_MAX)
+# error "LZ4_MEMORY_USAGE is too large !"
+#endif
/*-************************************
* Simple Functions
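
The new MIN/DEFAULT/MAX bounds make the tuning explicit: the hash table scales as 2^LZ4_MEMORY_USAGE bytes, and out-of-range overrides now abort compilation via the `#error` checks. A small, hedged sketch of inspecting the effect of a build-time override (exact state sizes depend on the library version):

```c
/* Build sketch, e.g.: cc -DLZ4_MEMORY_USAGE=10 example.c lz4.c
 * Values outside [LZ4_MEMORY_USAGE_MIN, LZ4_MEMORY_USAGE_MAX] fail to compile. */
#include <stdio.h>
#include "lz4.h"

int main(void)
{
    /* Hash table footprint is 2^LZ4_MEMORY_USAGE bytes; the stream state
     * that embeds it follows suit in recent releases. */
    printf("LZ4_MEMORY_USAGE=%d -> %u-byte table, sizeof(LZ4_stream_t)=%u\n",
           LZ4_MEMORY_USAGE, 1u << LZ4_MEMORY_USAGE,
           (unsigned)sizeof(LZ4_stream_t));
    return 0;
}
```
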
diff --git a/lib/lz4frame.c b/lib/lz4frame.c
index 73f21fc..f4ea02a 100644
--- a/lib/lz4frame.c
+++ b/lib/lz4frame.c
@@ -485,12 +485,12 @@ struct LZ4F_CDict_s {
* When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
* LZ4F_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
* LZ4F_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
- * `dictBuffer` can be released after LZ4F_CDict creation, since its content is copied within CDict
+ * @dictBuffer can be released after LZ4F_CDict creation, since its content is copied within CDict
* @return : digested dictionary for compression, or NULL if failed */
LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize)
{
const char* dictStart = (const char*)dictBuffer;
- LZ4F_CDict* cdict = (LZ4F_CDict*) ALLOC(sizeof(*cdict));
+ LZ4F_CDict* const cdict = (LZ4F_CDict*) ALLOC(sizeof(*cdict));
DEBUGLOG(4, "LZ4F_createCDict");
if (!cdict) return NULL;
if (dictSize > 64 KB) {
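
The touched comment spells out the CDict contract: the dictionary buffer is copied into the `LZ4F_CDict`, so it can be released right after creation, and the digested dictionary can then be shared read-only across compressions. A hedged usage sketch (the `_usingCDict` entry point may require `LZ4F_STATIC_LINKING_ONLY`, depending on the library version; buffers here are caller-provided assumptions):

```c
#define LZ4F_STATIC_LINKING_ONLY   /* CDict API may sit behind this gate */
#include "lz4frame.h"

/* Illustrative sketch: compress one frame with a digested dictionary. */
size_t compress_with_dict(void* dst, size_t dstCapacity,
                          const void* src, size_t srcSize,
                          const void* dictBuf, size_t dictSize)
{
    size_t cSize = (size_t)-1;   /* reads as an error via LZ4F_isError() */
    LZ4F_cctx* cctx = NULL;
    LZ4F_CDict* const cdict = LZ4F_createCDict(dictBuf, dictSize);
    if (cdict == NULL) return cSize;
    /* dictBuf may be released right here: its content was copied into cdict */
    if (!LZ4F_isError(LZ4F_createCompressionContext(&cctx, LZ4F_VERSION))) {
        cSize = LZ4F_compressFrame_usingCDict(cctx, dst, dstCapacity,
                                              src, srcSize,
                                              cdict, NULL /* default prefs */);
        LZ4F_freeCompressionContext(cctx);
    }
    LZ4F_freeCDict(cdict);
    return cSize;   /* caller checks with LZ4F_isError() */
}
```
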
diff --git a/lib/lz4frame.h b/lib/lz4frame.h
index 05b8d1d..8b18c8a 100644
--- a/lib/lz4frame.h
+++ b/lib/lz4frame.h
@@ -1,5 +1,5 @@
/*
- LZ4 auto-framing library
+ LZ4F - LZ4-Frame library
Header File
Copyright (C) 2011-2020, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
@@ -39,7 +39,7 @@
* LZ4F also offers streaming capabilities.
*
* lz4.h is not required when using lz4frame.h,
- * except to extract common constant such as LZ4_VERSION_NUMBER.
+ * except to extract common constants such as LZ4_VERSION_NUMBER.
* */
#ifndef LZ4F_H_09782039843
@@ -210,7 +210,7 @@ LZ4FLIB_API int LZ4F_compressionLevel_max(void); /* v1.8.0+ */
* Returns the maximum possible compressed size with LZ4F_compressFrame() given srcSize and preferences.
* `preferencesPtr` is optional. It can be replaced by NULL, in which case, the function will assume default preferences.
* Note : this result is only usable with LZ4F_compressFrame().
- * It may also be used with LZ4F_compressUpdate() _if no flush() operation_ is performed.
+ * It may also be relevant to LZ4F_compressUpdate() _only if_ no flush() operation is ever performed.
*/
LZ4FLIB_API size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr);
@@ -230,7 +230,7 @@ LZ4FLIB_API size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
* Advanced compression functions
*************************************/
typedef struct LZ4F_cctx_s LZ4F_cctx; /* incomplete type */
-typedef LZ4F_cctx* LZ4F_compressionContext_t; /* for compatibility with previous API version */
+typedef LZ4F_cctx* LZ4F_compressionContext_t; /* for compatibility with older APIs, prefer using LZ4F_cctx */
typedef struct {
unsigned stableSrc; /* 1 == src content will remain present on future calls to LZ4F_compress(); skip copying src content within tmp buffer */
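
The clarified `LZ4F_compressFrameBound()` comment narrows its guarantee: the bound covers a single `LZ4F_compressFrame()` call, or `LZ4F_compressUpdate()` only when no flush is ever issued. A minimal sketch of the intended single-shot pattern, with caller-supplied `src`/`srcSize` as assumptions:

```c
#include <stdlib.h>
#include "lz4frame.h"

/* Single-shot frame compression sized with LZ4F_compressFrameBound().
 * Returns a malloc'd buffer (caller frees) or NULL; *cSizePtr gets the size. */
void* compress_one_frame(const void* src, size_t srcSize, size_t* cSizePtr)
{
    size_t const bound = LZ4F_compressFrameBound(srcSize, NULL /* default prefs */);
    void* const dst = malloc(bound);
    if (dst == NULL) return NULL;
    {   size_t const cSize = LZ4F_compressFrame(dst, bound, src, srcSize, NULL);
        if (LZ4F_isError(cSize)) { free(dst); return NULL; }
        *cSizePtr = cSize;
    }
    return dst;
}
```
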
diff --git a/lib/lz4hc.c b/lib/lz4hc.c
index ee6fc41..99650a6 100644
--- a/lib/lz4hc.c
+++ b/lib/lz4hc.c
@@ -277,6 +277,8 @@ LZ4HC_InsertAndGetWiderMatch (
/* do nothing */
} else if (matchIndex >= dictLimit) { /* within current Prefix */
const BYTE* const matchPtr = base + matchIndex;
+ DEBUGLOG(2, "matchPtr = %p", matchPtr);
+ DEBUGLOG(2, "lowPrefixPtr = %p", lowPrefixPtr);
assert(matchPtr >= lowPrefixPtr);
assert(matchPtr < ip);
assert(longest >= 1);
diff --git a/programs/lz4io.c b/programs/lz4io.c
index a4c21ee..4d8a624 100644
--- a/programs/lz4io.c
+++ b/programs/lz4io.c
@@ -147,9 +147,6 @@ struct LZ4IO_prefs_s {
/**************************************
* Version modifiers
**************************************/
-#define EXTENDED_ARGUMENTS
-#define EXTENDED_HELP
-#define EXTENDED_FORMAT
#define DEFAULT_DECOMPRESSOR LZ4IO_decompressLZ4F
@@ -377,6 +374,10 @@ static FILE* LZ4IO_openDstFile(const char* dstFileName, const LZ4IO_prefs_t* con
* Legacy Compression
***************************************/
+/* Size in bytes of a legacy block header in little-endian format */
+#define LZ4IO_LEGACY_BLOCK_HEADER_SIZE 4
+#define LZ4IO_LEGACY_BLOCK_SIZE_MAX (8 MB)
+
/* unoptimized version; solves endianness & alignment issues */
static void LZ4IO_writeLE32 (void* p, unsigned value32)
{
@@ -944,7 +945,9 @@ static void LZ4IO_fwriteSparseEnd(FILE* file, unsigned storedSkips)
static unsigned g_magicRead = 0; /* out-parameter of LZ4IO_decodeLegacyStream() */
-static unsigned long long LZ4IO_decodeLegacyStream(FILE* finput, FILE* foutput, const LZ4IO_prefs_t* prefs)
+
+static unsigned long long
+LZ4IO_decodeLegacyStream(FILE* finput, FILE* foutput, const LZ4IO_prefs_t* prefs)
{
unsigned long long streamSize = 0;
unsigned storedSkips = 0;
@@ -959,11 +962,12 @@ static unsigned long long LZ4IO_decodeLegacyStream(FILE* finput, FILE* foutput,
unsigned int blockSize;
/* Block Size */
- { size_t const sizeCheck = fread(in_buff, 1, 4, finput);
+ { size_t const sizeCheck = fread(in_buff, 1, LZ4IO_LEGACY_BLOCK_HEADER_SIZE, finput);
if (sizeCheck == 0) break; /* Nothing to read : file read is completed */
- if (sizeCheck != 4) EXM_THROW(52, "Read error : cannot access block size "); }
- blockSize = LZ4IO_readLE32(in_buff); /* Convert to Little Endian */
- if (blockSize > LZ4_COMPRESSBOUND(LEGACY_BLOCKSIZE)) {
+ if (sizeCheck != LZ4IO_LEGACY_BLOCK_HEADER_SIZE) EXM_THROW(52, "Read error : cannot access block size ");
+ }
+ blockSize = LZ4IO_readLE32(in_buff); /* Convert to Little Endian */
+ if (blockSize > LZ4_COMPRESSBOUND(LEGACY_BLOCKSIZE)) {
/* Cannot read next block : maybe new stream ? */
g_magicRead = blockSize;
break;
@@ -971,7 +975,7 @@ static unsigned long long LZ4IO_decodeLegacyStream(FILE* finput, FILE* foutput,
/* Read Block */
{ size_t const sizeCheck = fread(in_buff, 1, blockSize, finput);
- if (sizeCheck!=blockSize) EXM_THROW(52, "Read error : cannot access compressed block !"); }
+ if (sizeCheck != blockSize) EXM_THROW(52, "Read error : cannot access compressed block !"); }
/* Decode Block */
{ int const decodeSize = LZ4_decompress_safe(in_buff, out_buff, (int)blockSize, LEGACY_BLOCKSIZE);
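
The legacy-stream changes above hinge on the 4-byte little-endian block header now named `LZ4IO_LEGACY_BLOCK_HEADER_SIZE`, and on `LZ4IO_readLE32()` being a byte-wise read that sidesteps endianness and alignment concerns. A hedged sketch of such a portable read/write pair, mirroring the "unoptimized version" approach mentioned in the comment rather than the file's exact code:

```c
/* Byte-wise little-endian accessors: portable across endianness and
 * alignment, at the cost of not being the fastest possible path. */
static unsigned readLE32(const void* p)
{
    const unsigned char* const b = (const unsigned char*)p;
    return (unsigned)b[0]
         | ((unsigned)b[1] << 8)
         | ((unsigned)b[2] << 16)
         | ((unsigned)b[3] << 24);
}

static void writeLE32(void* p, unsigned value32)
{
    unsigned char* const b = (unsigned char*)p;
    b[0] = (unsigned char) value32;
    b[1] = (unsigned char)(value32 >> 8);
    b[2] = (unsigned char)(value32 >> 16);
    b[3] = (unsigned char)(value32 >> 24);
}
```
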
@@ -1155,6 +1159,7 @@ static int fseek_u32(FILE *fp, unsigned offset, int where)
}
#define ENDOFSTREAM ((unsigned long long)-1)
+#define DECODING_ERROR ((unsigned long long)-2)
static unsigned long long
selectDecoder(dRess_t ress,
FILE* finput, FILE* foutput,
@@ -1200,7 +1205,6 @@ selectDecoder(dRess_t ress,
EXM_THROW(43, "Stream error : cannot skip skippable area");
}
return 0;
- EXTENDED_FORMAT; /* macro extension for custom formats */
default:
if (nbFrames == 1) { /* just started */
/* Wrong magic number at the beginning of 1st stream */
@@ -1216,7 +1220,7 @@ selectDecoder(dRess_t ress,
DISPLAYLEVEL(2, "at position %i ", (int)position);
DISPLAYLEVEL(2, "\n");
}
- return ENDOFSTREAM;
+ return DECODING_ERROR;
}
}
@@ -1228,6 +1232,7 @@ LZ4IO_decompressSrcFile(dRess_t ress,
{
FILE* const foutput = ress.dstFile;
unsigned long long filesize = 0;
+ int result = 0;
/* Init */
FILE* const finput = LZ4IO_openSrcFile(input_filename);
@@ -1239,6 +1244,7 @@ LZ4IO_decompressSrcFile(dRess_t ress,
unsigned long long const decodedSize =
selectDecoder(ress, finput, foutput, prefs);
if (decodedSize == ENDOFSTREAM) break;
+ if (decodedSize == DECODING_ERROR) { result=1; break; }
filesize += decodedSize;
}
@@ -1254,7 +1260,7 @@ LZ4IO_decompressSrcFile(dRess_t ress,
DISPLAYLEVEL(2, "%-20.20s : decoded %llu bytes \n", input_filename, filesize);
(void)output_filename;
- return 0;
+ return result;
}
@@ -1263,6 +1269,7 @@ LZ4IO_decompressDstFile(dRess_t ress,
const char* input_filename, const char* output_filename,
const LZ4IO_prefs_t* const prefs)
{
+ int result;
stat_t statbuf;
int stat_result = 0;
FILE* const foutput = LZ4IO_openDstFile(output_filename, prefs);
@@ -1273,7 +1280,7 @@ LZ4IO_decompressDstFile(dRess_t ress,
stat_result = 1;
ress.dstFile = foutput;
- LZ4IO_decompressSrcFile(ress, input_filename, output_filename, prefs);
+ result = LZ4IO_decompressSrcFile(ress, input_filename, output_filename, prefs);
fclose(foutput);
@@ -1285,7 +1292,7 @@ LZ4IO_decompressDstFile(dRess_t ress,
/* should return value be read ? or is silent fail good enough ? */
}
- return 0;
+ return result;
}
@@ -1294,14 +1301,14 @@ int LZ4IO_decompressFilename(const char* input_filename, const char* output_file
dRess_t const ress = LZ4IO_createDResources(prefs);
clock_t const start = clock();
- int const missingFiles = LZ4IO_decompressDstFile(ress, input_filename, output_filename, prefs);
+ int const status = LZ4IO_decompressDstFile(ress, input_filename, output_filename, prefs);
clock_t const end = clock();
double const seconds = (double)(end - start) / CLOCKS_PER_SEC;
DISPLAYLEVEL(4, "Done in %.2f sec \n", seconds);
LZ4IO_freeDResources(ress);
- return missingFiles;
+ return status;
}
@@ -1424,37 +1431,47 @@ LZ4IO_skipBlocksData(FILE* finput,
return totalBlocksSize;
}
+static const unsigned long long legacyFrameUndecodable = (0ULL-1);
/* For legacy frames only.
Read block headers and skip block data.
Return total blocks size for this frame including block headers.
- or 0 in case it can't successfully skip block data.
+ or legacyFrameUndecodable in case it can't successfully skip block data.
This works as long as legacy block header size = magic number size.
Assumes SEEK_CUR after frame header.
*/
static unsigned long long LZ4IO_skipLegacyBlocksData(FILE* finput)
{
- unsigned char blockInfo[LZIO_LEGACY_BLOCK_HEADER_SIZE];
+ unsigned char blockInfo[LZ4IO_LEGACY_BLOCK_HEADER_SIZE];
unsigned long long totalBlocksSize = 0;
- LZ4IO_STATIC_ASSERT(LZIO_LEGACY_BLOCK_HEADER_SIZE == MAGICNUMBER_SIZE);
+ LZ4IO_STATIC_ASSERT(LZ4IO_LEGACY_BLOCK_HEADER_SIZE == MAGICNUMBER_SIZE);
for (;;) {
- if (!fread(blockInfo, 1, LZIO_LEGACY_BLOCK_HEADER_SIZE, finput)) {
+ size_t const bhs = fread(blockInfo, 1, LZ4IO_LEGACY_BLOCK_HEADER_SIZE, finput);
+ if (bhs == 0) {
if (feof(finput)) return totalBlocksSize;
- return 0;
+ return legacyFrameUndecodable;
+ }
+ if (bhs != 4) {
+ return legacyFrameUndecodable;
}
{ const unsigned int nextCBlockSize = LZ4IO_readLE32(&blockInfo);
- if ( nextCBlockSize == LEGACY_MAGICNUMBER ||
- nextCBlockSize == LZ4IO_MAGICNUMBER ||
- LZ4IO_isSkippableMagicNumber(nextCBlockSize)) {
- /* Rewind back. we want cursor at the beginning of next frame.*/
- if (fseek(finput, -LZIO_LEGACY_BLOCK_HEADER_SIZE, SEEK_CUR) != 0) {
- return 0;
+ if ( nextCBlockSize == LEGACY_MAGICNUMBER
+ || nextCBlockSize == LZ4IO_MAGICNUMBER
+ || LZ4IO_isSkippableMagicNumber(nextCBlockSize) ) {
+ /* Rewind back. we want cursor at the beginning of next frame */
+ if (UTIL_fseek(finput, -LZ4IO_LEGACY_BLOCK_HEADER_SIZE, SEEK_CUR) != 0) {
+ EXM_THROW(37, "impossible to skip backward");
}
break;
}
- totalBlocksSize += LZIO_LEGACY_BLOCK_HEADER_SIZE + nextCBlockSize;
- /* skip to the next block */
+ if (nextCBlockSize > LZ4IO_LEGACY_BLOCK_SIZE_MAX) {
+ DISPLAYLEVEL(4, "Error : block in legacy frame is too large \n");
+ return legacyFrameUndecodable;
+ }
+ totalBlocksSize += LZ4IO_LEGACY_BLOCK_HEADER_SIZE + nextCBlockSize;
+ /* skip to the next block
+ * note : this won't fail if nextCBlockSize is too large, skipping past the end of finput */
if (UTIL_fseek(finput, nextCBlockSize, SEEK_CUR) != 0) {
- return 0;
+ return legacyFrameUndecodable;
} } }
return totalBlocksSize;
}
@@ -1578,6 +1595,11 @@ LZ4IO_getCompressedFileInfo(LZ4IO_cFileInfo_t* cfinfo, const char* input_filenam
cfinfo->eqBlockTypes = 0;
cfinfo->allContentSize = 0;
{ const unsigned long long totalBlocksSize = LZ4IO_skipLegacyBlocksData(finput);
+ if (totalBlocksSize == legacyFrameUndecodable) {
+ DISPLAYLEVEL(1, "Corrupted legacy frame \n");
+ result = LZ4IO_format_not_known;
+ break;
+ }
if (totalBlocksSize) {
DISPLAYLEVEL(3, " %6llu %14s %5s %8s %20llu %20s %9s\n",
cfinfo->frameCount + 1,
@@ -1614,6 +1636,7 @@ LZ4IO_getCompressedFileInfo(LZ4IO_cFileInfo_t* cfinfo, const char* input_filenam
DISPLAYLEVEL(3, "Stream followed by undecodable data ");
if (position != -1L)
DISPLAYLEVEL(3, "at position %i ", (int)position);
+ result = LZ4IO_format_not_known;
DISPLAYLEVEL(3, "\n");
}
break;
@@ -1641,7 +1664,7 @@ int LZ4IO_displayCompressedFilesInfo(const char** inFileNames, size_t ifnIdx)
cfinfo.fileName = LZ4IO_baseName(inFileNames[idx]);
if ((strcmp(inFileNames[idx], stdinmark) == 0) ? !UTIL_isRegFD(0) : !UTIL_isRegFile(inFileNames[idx])) {
DISPLAYLEVEL(1, "lz4: %s is not a regular file \n", inFileNames[idx]);
- return 0;
+ return 1;
}
DISPLAYLEVEL(3, "%s(%llu/%llu)\n", cfinfo.fileName, (unsigned long long)idx + 1, (unsigned long long)ifnIdx);
DISPLAYLEVEL(3, " %6s %14s %5s %8s %20s %20s %9s\n",
@@ -1650,7 +1673,7 @@ int LZ4IO_displayCompressedFilesInfo(const char** inFileNames, size_t ifnIdx)
if (op_result != LZ4IO_LZ4F_OK) {
assert(op_result == LZ4IO_format_not_known);
DISPLAYLEVEL(1, "lz4: %s: File format not recognized \n", inFileNames[idx]);
- return 0;
+ return 1;
} }
DISPLAYLEVEL(3, "\n");
if (g_displayLevel < 3) {
diff --git a/programs/lz4io.h b/programs/lz4io.h
index b68f085..0cfb1d2 100644
--- a/programs/lz4io.h
+++ b/programs/lz4io.h
@@ -57,8 +57,6 @@ typedef struct LZ4IO_prefs_s LZ4IO_prefs_t;
LZ4IO_prefs_t* LZ4IO_defaultPreferences(void);
void LZ4IO_freePreferences(LZ4IO_prefs_t* prefs);
-/* Size in bytes of a legacy block header in little-endian format */
-#define LZIO_LEGACY_BLOCK_HEADER_SIZE 4
/* ************************************************** */
/* ****************** Functions ********************* */
diff --git a/tests/Makefile b/tests/Makefile
index b4df3e3..b4d40ca 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -174,14 +174,22 @@ test: test-lz4 test-lz4c test-frametest test-fullbench test-fuzzer test-install
test32: CFLAGS+=-m32
test32: test
+.PHONY: test-amalgamation
test-amalgamation: lz4_all.o
lz4_all.c: $(LZ4DIR)/lz4.c $(LZ4DIR)/lz4hc.c $(LZ4DIR)/lz4frame.c
$(CAT) $^ > $@
+.PHONY: test-install
test-install: lz4 lib liblz4.pc
lz4_root=.. ./test_install.sh
+.PHONY: test-compile-with-lz4-memory-usage
+test-compile-with-lz4-memory-usage:
+ $(MAKE) clean; CFLAGS=-O0 CPPFLAGS=-D'LZ4_MEMORY_USAGE=LZ4_MEMORY_USAGE_MIN' $(MAKE) all
+ $(MAKE) clean; CFLAGS=-O0 CPPFLAGS=-D'LZ4_MEMORY_USAGE=LZ4_MEMORY_USAGE_MAX' $(MAKE) all
+
+.PHONY: test-lz4-sparse
test-lz4-sparse: lz4 datagen
@echo "\n ---- test sparse file support ----"
$(DATAGEN) -g5M -P100 > tmplsdg5M
@@ -362,7 +370,7 @@ test-lz4-basic: lz4 datagen unlz4 lz4cat
$(LZ4) --list tmp-tlb-hw.lz4 # test --list on valid single-frame file
$(LZ4) --list < tmp-tlb-hw.lz4 # test --list from stdin (file only)
$(CAT) tmp-tlb-hw >> tmp-tlb-hw.lz4
- $(LZ4) -f tmp-tlb-hw.lz4 # uncompress valid frame followed by invalid data
+ ! $(LZ4) -f tmp-tlb-hw.lz4 # uncompress valid frame followed by invalid data (must fail now)
$(LZ4) -BX tmp-tlb-hw -c -q | $(LZ4) -tv # test block checksum
# $(DATAGEN) -g20KB generates the same file every single time
# cannot save output of $(DATAGEN) -g20KB as input file to lz4 because the following shell commands are run before $(DATAGEN) -g20KB
diff --git a/tests/frametest.c b/tests/frametest.c
index e0fff0e..09def51 100644
--- a/tests/frametest.c
+++ b/tests/frametest.c
@@ -535,8 +535,9 @@ int basicTests(U32 seed, double compressibility)
}
/* Dictionary compression test */
- { size_t const dictSize = 63 KB;
- size_t const dstCapacity = LZ4F_compressFrameBound(dictSize, NULL);
+ { size_t const dictSize = 7 KB; /* small enough for LZ4_MEMORY_USAGE == 10 */
+ size_t const srcSize = 65 KB; /* must be > 64 KB to avoid short-size optimizations */
+ size_t const dstCapacity = LZ4F_compressFrameBound(srcSize, NULL);
size_t cSizeNoDict, cSizeWithDict;
LZ4F_CDict* const cdict = LZ4F_createCDict(CNBuffer, dictSize);
if (cdict == NULL) goto _output_error;
@@ -545,7 +546,7 @@ int basicTests(U32 seed, double compressibility)
DISPLAYLEVEL(3, "LZ4F_compressFrame_usingCDict, with NULL dict : ");
CHECK_V(cSizeNoDict,
LZ4F_compressFrame_usingCDict(cctx, compressedBuffer, dstCapacity,
- CNBuffer, dictSize,
+ CNBuffer, srcSize,
NULL, NULL) );
DISPLAYLEVEL(3, "%u bytes \n", (unsigned)cSizeNoDict);
@@ -554,19 +555,19 @@ int basicTests(U32 seed, double compressibility)
DISPLAYLEVEL(3, "LZ4F_compressFrame_usingCDict, with dict : ");
CHECK_V(cSizeWithDict,
LZ4F_compressFrame_usingCDict(cctx, compressedBuffer, dstCapacity,
- CNBuffer, dictSize,
+ CNBuffer, srcSize,
cdict, NULL) );
DISPLAYLEVEL(3, "compressed %u bytes into %u bytes \n",
- (unsigned)dictSize, (unsigned)cSizeWithDict);
- if ((LZ4_DISTANCE_MAX > dictSize) && (cSizeWithDict >= cSizeNoDict)) {
+ (unsigned)srcSize, (unsigned)cSizeWithDict);
+ if (cSizeWithDict > cSizeNoDict) {
DISPLAYLEVEL(3, "cSizeWithDict (%zu) should have been more compact than cSizeNoDict(%zu) \n", cSizeWithDict, cSizeNoDict);
goto _output_error; /* must be more efficient */
}
- crcOrig = XXH64(CNBuffer, dictSize, 0);
+ crcOrig = XXH64(CNBuffer, srcSize, 0);
DISPLAYLEVEL(3, "LZ4F_decompress_usingDict : ");
{ LZ4F_dctx* dctx;
- size_t decodedSize = COMPRESSIBLE_NOISE_LENGTH;
+ size_t decodedSize = srcSize;
size_t compressedSize = cSizeWithDict;
CHECK( LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION) );
CHECK( LZ4F_decompress_usingDict(dctx,
@@ -575,7 +576,7 @@ int basicTests(U32 seed, double compressibility)
CNBuffer, dictSize,
NULL) );
if (compressedSize != cSizeWithDict) goto _output_error;
- if (decodedSize != dictSize) goto _output_error;
+ if (decodedSize != srcSize) goto _output_error;
{ U64 const crcDest = XXH64(decodedBuffer, decodedSize, 0);
if (crcDest != crcOrig) goto _output_error; }
DISPLAYLEVEL(3, "Regenerated %u bytes \n", (U32)decodedSize);
diff --git a/tests/test-lz4-list.py b/tests/test-lz4-list.py
index b8d844a..fe11682 100644
--- a/tests/test-lz4-list.py
+++ b/tests/test-lz4-list.py
@@ -5,12 +5,13 @@ import glob
import os
import tempfile
import unittest
+import sys
SIZES = [3, 11] # Always 2 sizes
MIB = 1048576
-LZ4 = os.path.dirname(os.path.realpath(__file__)) + "/../lz4"
+LZ4 = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../lz4")
if not os.path.exists(LZ4):
- LZ4 = os.path.dirname(os.path.realpath(__file__)) + "/../programs/lz4"
+ LZ4 = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../programs/lz4")
TEMP = tempfile.gettempdir()
@@ -19,26 +20,29 @@ class NVerboseFileInfo(object):
self.line = line_in
splitlines = line_in.split()
if len(splitlines) != 7:
- errout("Unexpected line: {}".format(line_in))
+ errout(f"Unexpected line: {line_in}")
self.frames, self.type, self.block, self.compressed, self.uncompressed, self.ratio, self.filename = splitlines
self.exp_unc_size = 0
# Get real file sizes
if "concat-all" in self.filename or "2f--content-size" in self.filename:
for i in SIZES:
- self.exp_unc_size += os.path.getsize("{}/test_list_{}M".format(TEMP, i))
+ self.exp_unc_size += os.path.getsize(f"{TEMP}/test_list_{i}M")
else:
uncompressed_filename = self.filename.split("-")[0]
- self.exp_unc_size += os.path.getsize("{}/{}".format(TEMP, uncompressed_filename))
- self.exp_comp_size = os.path.getsize("{}/{}".format(TEMP, self.filename))
+ self.exp_unc_size += os.path.getsize(f"{TEMP}/{uncompressed_filename}")
+ self.exp_comp_size = os.path.getsize(f"{TEMP}/{self.filename}")
class TestNonVerbose(unittest.TestCase):
@classmethod
def setUpClass(self):
self.nvinfo_list = []
- for i, line in enumerate(execute("{} --list -m {}/test_list_*.lz4".format(LZ4, TEMP), print_output=True)):
- if i > 0:
- self.nvinfo_list.append(NVerboseFileInfo(line))
+ test_list_files = glob.glob(f"{TEMP}/test_list_*.lz4")
+ # One of the files has 2 frames so duplicate it in this list to map each frame 1 to a single file
+ for i, filename in enumerate(test_list_files):
+ for i, line in enumerate(execute(f"{LZ4} --list -m {filename}", print_output=True)):
+ if i > 0:
+ self.nvinfo_list.append(NVerboseFileInfo(line))
def test_frames(self):
all_concat_frames = 0
@@ -80,7 +84,7 @@ class TestNonVerbose(unittest.TestCase):
def test_ratio(self):
for nvinfo in self.nvinfo_list:
if "--content-size" in nvinfo.filename:
- self.assertEqual(nvinfo.ratio, "{:.2f}%".format(float(nvinfo.exp_comp_size) / float(nvinfo.exp_unc_size) * 100), nvinfo.line)
+ self.assertEqual(nvinfo.ratio, f"{float(nvinfo.exp_comp_size) / float(nvinfo.exp_unc_size) * 100:.2f}%", nvinfo.line)
def test_uncompressed_size(self):
for nvinfo in self.nvinfo_list:
@@ -112,17 +116,19 @@ class TestVerbose(unittest.TestCase):
# we're only really interested in testing the output of the concat-all file.
self.vinfo_list = []
start = end = 0
- output = execute("{} --list -m -v {}/test_list_concat-all.lz4 {}/test_list_*M-lz4f-2f--content-size.lz4".format(LZ4, TEMP, TEMP), print_output=True)
- for i, line in enumerate(output):
- if line.startswith("test_list"):
- if start != 0 and end != 0:
- self.vinfo_list.append(VerboseFileInfo(output[start:end]))
- start = i
- if not line:
- end = i
+ test_list_SM_lz4f = glob.glob(f"{TEMP}/test_list_*M-lz4f-2f--content-size.lz4")
+ for i, filename in enumerate(test_list_SM_lz4f):
+ output = execute(f"{LZ4} --list -m -v {TEMP}/test_list_concat-all.lz4 {filename}", print_output=True)
+ for i, line in enumerate(output):
+ if line.startswith("test_list"):
+ if start != 0 and end != 0:
+ self.vinfo_list.append(VerboseFileInfo(output[start:end]))
+ start = i
+ if not line:
+ end = i
self.vinfo_list.append(VerboseFileInfo(output[start:end]))
# Populate file_frame_map as a reference of the expected info
- concat_file_list = glob.glob("/tmp/test_list_[!concat]*.lz4")
+ concat_file_list = glob.glob(f"{TEMP}/test_list_[!concat]*.lz4")
# One of the files has 2 frames so duplicate it in this list to map each frame 1 to a single file
for i, filename in enumerate(concat_file_list):
if "2f--content-size" in filename:
@@ -130,11 +136,11 @@ class TestVerbose(unittest.TestCase):
break
self.cvinfo = self.vinfo_list[0]
self.cvinfo.file_frame_map = concat_file_list
- self.cvinfo.compressed_size = os.path.getsize("{}/test_list_concat-all.lz4".format(TEMP))
+ self.cvinfo.compressed_size = os.path.getsize(f"{TEMP}/test_list_concat-all.lz4")
def test_filename(self):
for i, vinfo in enumerate(self.vinfo_list):
- self.assertRegex(vinfo.filename, "^test_list_.*({}/{})".format(i + 1, len(self.vinfo_list)))
+ self.assertRegex(vinfo.filename, f"^test_list_.*({i + 1}/{len(self.vinfo_list)})".format(i + 1, len(self.vinfo_list)))
def test_frame_number(self):
for vinfo in self.vinfo_list:
@@ -169,7 +175,7 @@ class TestVerbose(unittest.TestCase):
expected_size = os.path.getsize(self.cvinfo.file_frame_map[i])
self.assertEqual(self.cvinfo.frame_list[i]["compressed"], str(expected_size), self.cvinfo.frame_list[i]["line"])
total += int(self.cvinfo.frame_list[i]["compressed"])
- self.assertEqual(total, self.cvinfo.compressed_size, "Expected total sum ({}) to match {} filesize".format(total, self.cvinfo.filename))
+ self.assertEqual(total, self.cvinfo.compressed_size, f"Expected total sum ({total}) to match {self.cvinfo.filename} filesize")
def test_uncompressed(self):
for i, frame_info in enumerate(self.cvinfo.frame_list):
@@ -182,7 +188,7 @@ class TestVerbose(unittest.TestCase):
for i, frame_info in enumerate(self.cvinfo.frame_list):
if "--content-size" in self.cvinfo.file_frame_map[i]:
self.assertEqual(self.cvinfo.frame_list[i]['ratio'],
- "{:.2f}%".format(float(self.cvinfo.frame_list[i]['compressed']) / float(self.cvinfo.frame_list[i]['uncompressed']) * 100),
+ f"{float(self.cvinfo.frame_list[i]['compressed']) / float(self.cvinfo.frame_list[i]['uncompressed']) * 100:.2f}%",
self.cvinfo.frame_list[i]["line"])
@@ -191,7 +197,7 @@ def to_human(size):
if size < 1024.0:
break
size /= 1024.0
- return "{:.2f}{}".format(size, unit)
+ return f"{size:.2f}{unit}"
def log(text):
@@ -203,12 +209,12 @@ def errout(text, err=1):
exit(err)
-def execute(command, print_command=True, print_output=False, print_error=True, param_shell=True):
+def execute(command, print_command=True, print_output=False, print_error=True):
if os.environ.get('QEMU_SYS'):
- command = "{} {}".format(os.environ['QEMU_SYS'], command)
+ command = f"{os.environ['QEMU_SYS']} {command}"
if print_command:
log("> " + command)
- popen = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=param_shell)
+ popen = subprocess.Popen(command.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_lines, stderr_lines = popen.communicate()
stderr_lines = stderr_lines.decode("utf-8")
stdout_lines = stdout_lines.decode("utf-8")
@@ -220,14 +226,14 @@ def execute(command, print_command=True, print_output=False, print_error=True, p
if popen.returncode is not None and popen.returncode != 0:
if stderr_lines and not print_output and print_error:
print(stderr_lines)
- errout("Failed to run: {}\n".format(command, stdout_lines + stderr_lines))
+ errout(f"Failed to run: {command}, {stdout_lines + stderr_lines}\n")
return (stdout_lines + stderr_lines).splitlines()
def cleanup(silent=False):
- for f in glob.glob("{}/test_list*".format(TEMP)):
+ for f in glob.glob(f"{TEMP}/test_list*"):
if not silent:
- log("Deleting {}".format(f))
+ log(f"Deleting {f}")
os.unlink(f)
@@ -243,33 +249,33 @@ def generate_files():
# file format ~ test_list<frametype>-<no_frames>f<create-args>.lz4 ~
# Generate LZ4Frames
for i in SIZES:
- filename = "{}/test_list_{}M".format(TEMP, i)
- log("Generating {}".format(filename))
+ filename = f"{TEMP}/test_list_{i}M"
+ log(f"Generating {filename}")
datagen(filename, i * MIB)
for j in ["--content-size", "-BI", "-BD", "-BX", "--no-frame-crc"]:
- lz4file = "{}-lz4f-1f{}.lz4".format(filename, j)
- execute("{} {} {} {}".format(LZ4, j, filename, lz4file))
+ lz4file = f"{filename}-lz4f-1f{j}.lz4"
+ execute(f"{LZ4} {j} {filename} {lz4file}")
# Generate skippable frames
- lz4file = "{}-skip-1f.lz4".format(filename)
+ lz4file = f"{filename}-skip-1f.lz4"
skipsize = i * 1024
skipbytes = bytes([80, 42, 77, 24]) + skipsize.to_bytes(4, byteorder='little', signed=False)
with open(lz4file, 'wb') as f:
f.write(skipbytes)
f.write(os.urandom(skipsize))
# Generate legacy frames
- lz4file = "{}-legc-1f.lz4".format(filename)
- execute("{} -l {} {}".format(LZ4, filename, lz4file))
+ lz4file = f"{filename}-legc-1f.lz4"
+ execute(f"{LZ4} -l {filename} {lz4file}")
# Concatenate --content-size files
- file_list = glob.glob("{}/test_list_*-lz4f-1f--content-size.lz4".format(TEMP))
- with open("{}/test_list_{}M-lz4f-2f--content-size.lz4".format(TEMP, sum(SIZES)), 'ab') as outfile:
+ file_list = glob.glob(f"{TEMP}/test_list_*-lz4f-1f--content-size.lz4")
+ with open(f"{TEMP}/test_list_{sum(SIZES)}M-lz4f-2f--content-size.lz4", 'ab') as outfile:
for fname in file_list:
with open(fname, 'rb') as infile:
outfile.write(infile.read())
# Concatenate all files
- file_list = glob.glob("{}/test_list_*.lz4".format(TEMP))
- with open("{}/test_list_concat-all.lz4".format(TEMP), 'ab') as outfile:
+ file_list = glob.glob(f"{TEMP}/test_list_*.lz4")
+ with open(f"{TEMP}/test_list_concat-all.lz4", 'ab') as outfile:
for fname in file_list:
with open(fname, 'rb') as infile:
outfile.write(infile.read())
@@ -278,5 +284,6 @@ def generate_files():
if __name__ == '__main__':
cleanup()
generate_files()
- unittest.main(verbosity=2, exit=False)
+ ret = unittest.main(verbosity=2, exit=False)
cleanup(silent=True)
+ sys.exit(not ret.result.wasSuccessful())