author     Yann Collet <yann.collet.73@gmail.com>   2016-06-29 12:44:05 (GMT)
committer  Yann Collet <yann.collet.73@gmail.com>   2016-06-29 12:54:07 (GMT)
commit     e27e7316dd94ed429bfe764408353cc2d958ad71 (patch)
tree       071a241defb7e78293061a5908627811d54c7ce1
parent     2d7df8b306753bd68a5597327da06c44c8bd80ca (diff)
minor code refactoring
-rw-r--r--  lib/lz4.c        | 70
-rw-r--r--  programs/bench.c | 25
2 files changed, 35 insertions(+), 60 deletions(-)
diff --git a/lib/lz4.c b/lib/lz4.c
index f162fe1..cf889d5 100644
--- a/lib/lz4.c
+++ b/lib/lz4.c
@@ -933,7 +933,7 @@ int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targe
-/********************************
+/*-******************************
* Streaming functions
********************************/
@@ -1103,14 +1103,14 @@ int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
-/*******************************
+/*-*****************************
* Decompression functions
*******************************/
-/*
- * This generic decompression function cover all use cases.
- * It shall be instantiated several times, using different sets of directives
- * Note that it is essential this generic function is really inlined,
- * in order to remove useless branches during compilation optimization.
+/*! LZ4_decompress_generic() :
+ * This generic decompression function cover all use cases.
+ * It shall be instantiated several times, using different sets of directives
+ * Note that it is important this generic function is really inlined,
+ * in order to remove useless branches during compilation optimization.
*/
FORCE_INLINE int LZ4_decompress_generic(
const char* const source,
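
The comment above describes a classic specialization-by-inlining pattern: one force-inlined decoder takes its mode switches as parameters that are compile-time constants at every call site, so each public wrapper compiles down to a branch-free specialized body. A minimal sketch of the pattern follows; the enum and wrapper names are illustrative, not lz4's actual directives.

#include <string.h>

/* Illustrative sketch only: a generic routine whose `mode` argument is a
 * literal at every call site, so the optimizer removes the dead branch
 * after inlining and emits one specialized body per wrapper. */
typedef enum { mode_fast = 0, mode_safe = 1 } decode_mode;

static inline int decode_generic(const char* src, char* dst,
                                 int srcSize, int dstCapacity, decode_mode mode)
{
    if (mode == mode_safe && dstCapacity < srcSize) return -1;
    memcpy(dst, src, (size_t)srcSize);   /* stand-in for the real decode loop */
    return srcSize;
}

int decode_fast(const char* src, char* dst, int srcSize)
{   return decode_generic(src, dst, srcSize, 0, mode_fast); }

int decode_safe(const char* src, char* dst, int srcSize, int dstCapacity)
{   return decode_generic(src, dst, srcSize, dstCapacity, mode_safe); }
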
@@ -1152,8 +1152,7 @@ FORCE_INLINE int LZ4_decompress_generic(
/* Main Loop */
- while (1)
- {
+ while (1) {
unsigned token;
size_t length;
const BYTE* match;
@@ -1161,15 +1160,12 @@ FORCE_INLINE int LZ4_decompress_generic(
/* get literal length */
token = *ip++;
- if ((length=(token>>ML_BITS)) == RUN_MASK)
- {
+ if ((length=(token>>ML_BITS)) == RUN_MASK) {
unsigned s;
- do
- {
+ do {
s = *ip++;
length += s;
- }
- while ( likely(endOnInput ? ip<iend-RUN_MASK : 1) && (s==255) );
+ } while ( likely(endOnInput ? ip<iend-RUN_MASK : 1) && (s==255) );
if ((safeDecode) && unlikely((size_t)(op+length)<(size_t)(op))) goto _output_error; /* overflow detection */
if ((safeDecode) && unlikely((size_t)(ip+length)<(size_t)(ip))) goto _output_error; /* overflow detection */
}
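
This loop is LZ4's variable-length length encoding: the token's 4-bit field holds lengths up to 14, the value 15 (RUN_MASK) means the length continues in the following bytes, and every 255 byte adds 255 until a smaller byte ends the sequence. A stand-alone sketch of just that decoding step, without the overflow and end-of-input checks the real decoder performs:

#include <stddef.h>

/* Hypothetical helper, not part of lz4.c: decode one variable-length
 * length field.  `p` points at the continuation bytes; bounds checks
 * are intentionally omitted to keep the idea visible. */
static size_t read_length(unsigned token_field, const unsigned char** p)
{
    size_t length = token_field;          /* 4-bit value taken from the token */
    if (token_field == 15) {              /* RUN_MASK: length continues */
        unsigned s;
        do {
            s = *(*p)++;
            length += s;                  /* each 255 adds 255 and continues */
        } while (s == 255);
    }
    return length;
}
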
@@ -1179,13 +1175,10 @@ FORCE_INLINE int LZ4_decompress_generic(
if (((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
|| ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)))
{
- if (partialDecoding)
- {
+ if (partialDecoding) {
if (cpy > oend) goto _output_error; /* Error : write attempt beyond end of output buffer */
if ((endOnInput) && (ip+length > iend)) goto _output_error; /* Error : read attempt beyond end of input buffer */
- }
- else
- {
+ } else {
if ((!endOnInput) && (cpy != oend)) goto _output_error; /* Error : block decoding must stop exactly there */
if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error; /* Error : input must be consumed */
}
@@ -1204,11 +1197,9 @@ FORCE_INLINE int LZ4_decompress_generic(
/* get matchlength */
length = token & ML_MASK;
- if (length == ML_MASK)
- {
+ if (length == ML_MASK) {
unsigned s;
- do
- {
+ do {
if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error;
s = *ip++;
length += s;
@@ -1218,31 +1209,24 @@ FORCE_INLINE int LZ4_decompress_generic(
length += MINMATCH;
/* check external dictionary */
- if ((dict==usingExtDict) && (match < lowPrefix))
- {
+ if ((dict==usingExtDict) && (match < lowPrefix)) {
if (unlikely(op+length > oend-LASTLITERALS)) goto _output_error; /* doesn't respect parsing restriction */
- if (length <= (size_t)(lowPrefix-match))
- {
+ if (length <= (size_t)(lowPrefix-match)) {
/* match can be copied as a single segment from external dictionary */
match = dictEnd - (lowPrefix-match);
memmove(op, match, length); op += length;
- }
- else
- {
+ } else {
/* match encompass external dictionary and current block */
size_t copySize = (size_t)(lowPrefix-match);
memcpy(op, dictEnd - copySize, copySize);
op += copySize;
copySize = length - copySize;
- if (copySize > (size_t)(op-lowPrefix)) /* overlap copy */
- {
+ if (copySize > (size_t)(op-lowPrefix)) { /* overlap copy */
BYTE* const endOfMatch = op + copySize;
const BYTE* copyFrom = lowPrefix;
while (op < endOfMatch) *op++ = *copyFrom++;
- }
- else
- {
+ } else {
memcpy(op, lowPrefix, copySize);
op += copySize;
}
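
A match that starts before lowPrefix has to be assembled from two segments: the tail of the external dictionary and, if it runs past the dictionary end, the beginning of the current block, where it may even overlap the bytes just written. A simplified, self-contained version of that logic; the helper name and signature are illustrative.

#include <string.h>

/* Illustrative helper: copy a match whose source begins `dist` bytes
 * before the current block (dist = lowPrefix - match in the code above). */
static void copy_cross_dict(unsigned char* op, const unsigned char* dictEnd,
                            const unsigned char* lowPrefix,
                            size_t dist, size_t length)
{
    if (length <= dist) {                    /* entirely inside the dictionary */
        memmove(op, dictEnd - dist, length);
        return;
    }
    memcpy(op, dictEnd - dist, dist);        /* first segment: dictionary tail */
    op += dist;
    length -= dist;
    if (length > (size_t)(op - lowPrefix)) { /* second segment overlaps output */
        const unsigned char* src = lowPrefix;
        while (length--) *op++ = *src++;
    } else {
        memcpy(op, lowPrefix, length);       /* second segment: block start */
    }
}
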
@@ -1252,8 +1236,7 @@ FORCE_INLINE int LZ4_decompress_generic(
/* copy match within block */
cpy = op + length;
- if (unlikely(offset<8))
- {
+ if (unlikely(offset<8)) {
const int dec64 = dec64table[offset];
op[0] = match[0];
op[1] = match[1];
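
Offsets below 8 mean the match source overlaps the bytes being produced, so a straight 8-byte copy would read data it has not written yet; the code above copies the first bytes individually and then patches `match` via the dec64table adjustment so it can return to wide copies. For comparison, a naive but correct version of the same operation (names are illustrative):

#include <string.h>

/* Illustrative baseline: copy a back-reference of `length` bytes located
 * `offset` bytes behind `op`.  The real decoder avoids the byte loop for
 * small offsets by adjusting `match` with lookup tables. */
static void copy_match_naive(unsigned char* op, size_t offset, size_t length)
{
    const unsigned char* match = op - offset;
    if (offset >= 8) {
        while (length >= 8) { memcpy(op, match, 8); op += 8; match += 8; length -= 8; }
        memcpy(op, match, length);           /* non-overlapping: block copies OK */
    } else {
        while (length--) *op++ = *match++;   /* overlapping: replicate byte by byte */
    }
}
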
@@ -1265,12 +1248,10 @@ FORCE_INLINE int LZ4_decompress_generic(
} else { LZ4_copy8(op, match); match+=8; }
op += 8;
- if (unlikely(cpy>oend-12))
- {
+ if (unlikely(cpy>oend-12)) {
BYTE* const oCopyLimit = oend-(WILDCOPYLENGTH-1);
if (cpy > oend-LASTLITERALS) goto _output_error; /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
- if (op < oCopyLimit)
- {
+ if (op < oCopyLimit) {
LZ4_wildCopy(op, match, oCopyLimit);
match += oCopyLimit - op;
op = oCopyLimit;
@@ -1310,10 +1291,9 @@ int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
}
-/* streaming decompression functions */
+/*===== streaming decompression functions =====*/
-typedef struct
-{
+typedef struct {
const BYTE* externalDict;
size_t extDictSize;
const BYTE* prefixEnd;
diff --git a/programs/bench.c b/programs/bench.c
index d11f487..52b70fe 100644
--- a/programs/bench.c
+++ b/programs/bench.c
@@ -29,7 +29,6 @@
#if defined(_MSC_VER) || defined(_WIN32)
# define _CRT_SECURE_NO_WARNINGS
# define _CRT_SECURE_NO_DEPRECATE /* VS2005 */
-# define BMK_LEGACY_TIMER 1 /* S_ISREG & gettimeofday() are not supported by MSVC */
#endif
/* Unix Large Files support (>4GB) */
@@ -131,15 +130,15 @@ struct compressionParameters
* Benchmark Parameters
***************************************/
static int g_chunkSize = DEFAULT_CHUNKSIZE;
-static int nbIterations = NBLOOPS;
+static int g_nbIterations = NBLOOPS;
static int BMK_pause = 0;
void BMK_setBlocksize(int bsize) { g_chunkSize = bsize; }
void BMK_setNbIterations(int nbLoops)
{
- nbIterations = nbLoops;
- DISPLAY("- %i iterations -\n", nbIterations);
+ g_nbIterations = nbLoops;
+ DISPLAY("- %i iterations -\n", g_nbIterations);
}
void BMK_setPause(void) { BMK_pause = 1; }
@@ -156,11 +155,10 @@ static clock_t BMK_getClockSpan (clock_t clockStart)
return clock() - clockStart;
}
-
static size_t BMK_findMaxMem(U64 requiredMem)
{
- size_t step = 64 MB;
- BYTE* testmem=NULL;
+ size_t const step = 64 MB;
+ void* testmem = NULL;
requiredMem = (((requiredMem >> 26) + 1) << 26);
requiredMem += 2*step;
@@ -169,7 +167,7 @@ static size_t BMK_findMaxMem(U64 requiredMem)
while (!testmem) {
if (requiredMem > step) requiredMem -= step;
else requiredMem >>= 1;
- testmem = (BYTE*) malloc ((size_t)requiredMem);
+ testmem = malloc ((size_t)requiredMem);
}
free (testmem);
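
The hunks above belong to BMK_findMaxMem, which probes how much memory the benchmark can actually allocate: round the requirement up to the next 64 MB boundary, add head-room, then back off in 64 MB steps until a malloc() succeeds, free the probe and report that size. Assembled into one self-contained approximation; anything hidden by the hunk boundaries, such as an upper clamp, is left out.

#include <stdlib.h>

/* Approximation of BMK_findMaxMem as shown in the hunks above. */
static size_t find_max_mem(unsigned long long requiredMem)
{
    size_t const step = 64u << 20;                    /* 64 MB, as in bench.c */
    void* testmem = NULL;

    requiredMem = (((requiredMem >> 26) + 1) << 26);  /* round up to 64 MB */
    requiredMem += 2 * step;                          /* head-room for the probe */

    while (!testmem) {                                /* shrink until malloc works */
        if (requiredMem > step) requiredMem -= step;
        else requiredMem >>= 1;
        testmem = malloc((size_t)requiredMem);
    }
    free(testmem);
    return (size_t)requiredMem;
}
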
@@ -180,7 +178,6 @@ static size_t BMK_findMaxMem(U64 requiredMem)
return (size_t)requiredMem;
}
-
static U64 BMK_GetFileSize(const char* infilename)
{
int r;
@@ -212,7 +209,6 @@ int BMK_benchFiles(const char** fileNamesTable, int nbFiles, int cLevel)
double totalc = 0.;
double totald = 0.;
-
/* Init */
if (cLevel <= 2) cfunctionId = 0; else cfunctionId = 1;
switch (cfunctionId)
@@ -249,7 +245,7 @@ int BMK_benchFiles(const char** fileNamesTable, int nbFiles, int cLevel)
DISPLAY("Not enough memory for '%s' full size; testing %u MB only...\n", inFileName, (unsigned)(benchedSize>>20));
}
- /* Alloc */
+ /* Allocation */
nbChunks = (unsigned)(benchedSize / g_chunkSize) + 1;
chunkP = (struct chunkParameters*) malloc(nbChunks * sizeof(struct chunkParameters));
orig_buff = (char*)malloc(benchedSize);
@@ -303,8 +299,8 @@ int BMK_benchFiles(const char** fileNamesTable, int nbFiles, int cLevel)
U32 crcCheck = 0;
DISPLAY("\r%79s\r", "");
- for (loopNb = 1; loopNb <= nbIterations; loopNb++) {
- int nbLoops;
+ for (loopNb = 1; loopNb <= g_nbIterations; loopNb++) {
+ int nbLoops = 0;
clock_t clockStart, clockEnd;
unsigned chunkNb;
@@ -312,7 +308,6 @@ int BMK_benchFiles(const char** fileNamesTable, int nbFiles, int cLevel)
DISPLAY("%1i-%-14.14s : %9i ->\r", loopNb, inFileName, (int)benchedSize);
{ size_t i; for (i=0; i<benchedSize; i++) compressedBuffer[i]=(char)i; } /* warmimg up memory */
- nbLoops = 0;
clockStart = clock();
while (clock() == clockStart);
clockStart = clock();
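
The measurement idiom in this hunk is worth noting: the buffer is first written once to fault its pages in ("warming up"), then the loop spins until clock() ticks over so that timing starts exactly on a tick boundary and short runs do not lose up to a full clock granule. A stripped-down harness showing the same idea; the workload callback and round count are placeholders, not bench.c's API.

#include <time.h>

/* Sketch of the tick-aligned timing loop used above.  `workload` stands in
 * for the per-chunk compression/decompression calls. */
static double time_workload(void (*workload)(void), int nbRounds)
{
    clock_t start;
    int n;

    if (nbRounds <= 0) nbRounds = 1;

    start = clock();
    while (clock() == start) ;      /* wait for the next clock tick */
    start = clock();                /* measurement begins on a fresh tick */

    for (n = 0; n < nbRounds; n++)
        workload();

    return (double)(clock() - start) / CLOCKS_PER_SEC / nbRounds;
}
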
@@ -357,7 +352,7 @@ int BMK_benchFiles(const char** fileNamesTable, int nbFiles, int cLevel)
}
if (crcOrig==crcCheck) {
- if (ratio<100.)
+ if (ratio < 100.)
DISPLAY("%-16.16s : %9i -> %9i (%5.2f%%),%7.1f MB/s ,%7.1f MB/s \n",
inFileName, (int)benchedSize, (int)cSize, ratio,
(double)benchedSize / (fastestC / CLOCKS_PER_SEC) / 1000000, (double)benchedSize / (fastestD / CLOCKS_PER_SEC) / 1000000 );