author    | Yann Collet <Cyan4973@users.noreply.github.com> | 2017-11-09 22:13:30 (GMT)
committer | GitHub <noreply@github.com>                     | 2017-11-09 22:13:30 (GMT)
commit    | dac26084a902657f9b530b09c3a21e000249109b (patch)
tree      | 39a25ffc21da8f40705677873f984d6e8428a2ef /lib
parent    | e6365b1854eddb9772693db50b07e9057d0fe4f6 (diff)
parent    | dc3ed5b6a7d9cc7d251a1942558e37793d5575b8 (diff)
download  | lz4-dac26084a902657f9b530b09c3a21e000249109b.zip
          | lz4-dac26084a902657f9b530b09c3a21e000249109b.tar.gz
          | lz4-dac26084a902657f9b530b09c3a21e000249109b.tar.bz2
Merge pull request #416 from lz4/newopt
Improve Optimal parser
Diffstat (limited to 'lib')
-rw-r--r-- | lib/README.md  |   2
-rw-r--r-- | lib/lz4.c      |  20
-rw-r--r-- | lib/lz4frame.c |   8
-rw-r--r-- | lib/lz4hc.c    | 261
-rw-r--r-- | lib/lz4hc.h    |  50
-rw-r--r-- | lib/lz4opt.h   | 465
6 files changed, 415 insertions, 391 deletions
diff --git a/lib/README.md b/lib/README.md
index 7082fe3..fc5d4e9 100644
--- a/lib/README.md
+++ b/lib/README.md
@@ -15,7 +15,7 @@ They generate and decode data using [LZ4 block format].
 For more compression ratio at the cost of compression speed,
 the High Compression variant called **lz4hc** is available.
-Add files **`lz4hc.c`** and **`lz4hc.h`**.
+Add files **`lz4hc.c`**, **`lz4hc.h`** and **`lz4opt.h`**.
 The variant still depends on regular `lib/lz4.*` source files.
diff --git a/lib/lz4.c b/lib/lz4.c
--- a/lib/lz4.c
+++ b/lib/lz4.c
@@ -323,15 +323,24 @@ static const int LZ4_minLength = (MFLIMIT+1);
 /*-************************************
 *  Error detection
 **************************************/
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
+#  include <assert.h>
+#else
+#  ifndef assert
+#    define assert(condition) ((void)0)
+#  endif
+#endif
+
 #define LZ4_STATIC_ASSERT(c)   { enum { LZ4_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
 
 #if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
 #  include <stdio.h>
-#  define DEBUGLOG(l, ...) {                          \
-                if (l<=LZ4_DEBUG) {                   \
-                    fprintf(stderr, __FILE__ ": ");   \
-                    fprintf(stderr, __VA_ARGS__);     \
-                    fprintf(stderr, " \n");           \
+static int g_debuglog_enable = 1;
+#  define DEBUGLOG(l, ...) {                                  \
+                if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) {  \
+                    fprintf(stderr, __FILE__ ": ");           \
+                    fprintf(stderr, __VA_ARGS__);             \
+                    fprintf(stderr, " \n");                   \
             }   }
 #else
 #  define DEBUGLOG(l, ...)      {}    /* disabled */
@@ -990,6 +999,7 @@ LZ4_stream_t* LZ4_createStream(void)
 
 void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
 {
+    DEBUGLOG(4, "LZ4_resetStream");
     MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
 }
diff --git a/lib/lz4frame.c b/lib/lz4frame.c
index 3adbdd9..ebd1089 100644
--- a/lib/lz4frame.c
+++ b/lib/lz4frame.c
@@ -322,7 +322,7 @@ size_t LZ4F_compressFrame_usingCDict(void* dstBuffer, size_t dstCapacity,
                                      const LZ4F_preferences_t* preferencesPtr)
 {
     LZ4F_cctx_t cctxI;
-    LZ4_stream_t lz4ctx;
+    LZ4_stream_t lz4ctx;   /* pretty large on stack */
     LZ4F_preferences_t prefs;
     LZ4F_compressOptions_t options;
     BYTE* const dstStart = (BYTE*) dstBuffer;
@@ -504,15 +504,15 @@ size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
     cctxPtr->prefs = *preferencesPtr;
 
     /* Ctx Management */
-    {   U32 const tableID = (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2;   /* 0:nothing ; 1:LZ4 table ; 2:HC tables */
-        if (cctxPtr->lz4CtxLevel < tableID) {
+    {   U32 const ctxTypeID = (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2;   /* 0:nothing ; 1:LZ4 table ; 2:HC tables */
+        if (cctxPtr->lz4CtxLevel < ctxTypeID) {
             FREEMEM(cctxPtr->lz4CtxPtr);
             if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN)
                 cctxPtr->lz4CtxPtr = (void*)LZ4_createStream();
             else
                 cctxPtr->lz4CtxPtr = (void*)LZ4_createStreamHC();
             if (cctxPtr->lz4CtxPtr == NULL) return err0r(LZ4F_ERROR_allocation_failed);
-            cctxPtr->lz4CtxLevel = tableID;
+            cctxPtr->lz4CtxLevel = ctxTypeID;
         }
     }
 
     /* Buffer Management */
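The `LZ4_DEBUG` machinery added to `lz4.c` compiles away completely when the macro is undefined, and layers a runtime switch (`g_debuglog_enable`) on top of the compile-time level. A standalone sketch of the same gating pattern (the `main` body and its messages are illustrative, not lz4 code):

```c
#include <stdio.h>

#define LZ4_DEBUG 2   /* 0/undefined: off, >=1: assert(), >=2: DEBUGLOG() */

#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
#  include <assert.h>
#else
#  ifndef assert
#    define assert(condition) ((void)0)   /* compiles to nothing */
#  endif
#endif

#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
static int g_debuglog_enable = 1;         /* runtime switch on top of compile-time gate */
#  define DEBUGLOG(l, ...) {                              \
        if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) {      \
            fprintf(stderr, __FILE__ ": ");               \
            fprintf(stderr, __VA_ARGS__);                 \
            fprintf(stderr, " \n");                       \
    }   }
#else
#  define DEBUGLOG(l, ...) {}   /* disabled */
#endif

int main(void)
{
    DEBUGLOG(2, "visible: level %d <= LZ4_DEBUG", 2);
    DEBUGLOG(5, "compiled in but filtered out at runtime (5 > LZ4_DEBUG)");
    assert(1 + 1 == 2);   /* active because LZ4_DEBUG >= 1 */
    return 0;
}
```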
diff --git a/lib/lz4hc.c b/lib/lz4hc.c
index d7f8d23..60690a0 100644
--- a/lib/lz4hc.c
+++ b/lib/lz4hc.c
@@ -49,6 +49,7 @@
 
 /*===   Dependency   ===*/
+#define LZ4_HC_STATIC_LINKING_ONLY
 #include "lz4hc.h"
 
@@ -116,54 +117,71 @@ LZ4_FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip)
     hc4->nextToUpdate = target;
 }
 
-
-LZ4_FORCE_INLINE int LZ4HC_InsertAndFindBestMatch (LZ4HC_CCtx_internal* const hc4,   /* Index table will be updated */
-                                                   const BYTE* const ip, const BYTE* const iLimit,
-                                                   const BYTE** matchpos,
-                                                   const int maxNbAttempts)
+/** LZ4HC_countBack() :
+ * @return : negative value, nb of common bytes before ip/match */
+LZ4_FORCE_INLINE
+int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match,
+                    const BYTE* const iMin, const BYTE* const mMin)
 {
-    U16* const chainTable = hc4->chainTable;
-    U32* const HashTable = hc4->hashTable;
-    const BYTE* const base = hc4->base;
-    const BYTE* const dictBase = hc4->dictBase;
-    const U32 dictLimit = hc4->dictLimit;
-    const U32 lowLimit = (hc4->lowLimit + 64 KB > (U32)(ip-base)) ? hc4->lowLimit : (U32)(ip - base) - (64 KB - 1);
-    U32 matchIndex;
-    int nbAttempts = maxNbAttempts;
-    size_t ml = 0;
+    int back=0;
+    while ( (ip+back > iMin)
+         && (match+back > mMin)
+         && (ip[back-1] == match[back-1]))
+            back--;
+    return back;
+}
 
-    /* HC4 match finder */
-    LZ4HC_Insert(hc4, ip);
-    matchIndex = HashTable[LZ4HC_hashPtr(ip)];
+/* LZ4HC_countPattern() :
+ * pattern32 must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) */
+static unsigned LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
+{
+    const BYTE* const iStart = ip;
+    reg_t const pattern = (sizeof(pattern)==8) ? (reg_t)pattern32 + (((reg_t)pattern32) << 32) : pattern32;
+
+    while (likely(ip < iEnd-(sizeof(pattern)-1))) {
+        reg_t const diff = LZ4_read_ARCH(ip) ^ pattern;
+        if (!diff) { ip+=sizeof(pattern); continue; }
+        ip += LZ4_NbCommonBytes(diff);
+        return (unsigned)(ip - iStart);
+    }
 
-    while ((matchIndex>=lowLimit) && (nbAttempts)) {
-        nbAttempts--;
-        if (matchIndex >= dictLimit) {
-            const BYTE* const match = base + matchIndex;
-            if ( (*(match+ml) == *(ip+ml))   /* can be longer */
-              && (LZ4_read32(match) == LZ4_read32(ip)) )
-            {
-                size_t const mlt = LZ4_count(ip+MINMATCH, match+MINMATCH, iLimit) + MINMATCH;
-                if (mlt > ml) { ml = mlt; *matchpos = match; }
-            }
-        } else {
-            const BYTE* const match = dictBase + matchIndex;
-            if (LZ4_read32(match) == LZ4_read32(ip)) {
-                size_t mlt;
-                const BYTE* vLimit = ip + (dictLimit - matchIndex);
-                if (vLimit > iLimit) vLimit = iLimit;
-                mlt = LZ4_count(ip+MINMATCH, match+MINMATCH, vLimit) + MINMATCH;
-                if ((ip+mlt == vLimit) && (vLimit < iLimit))
-                    mlt += LZ4_count(ip+mlt, base+dictLimit, iLimit);
-                if (mlt > ml) { ml = mlt; *matchpos = base + matchIndex; }   /* virtual matchpos */
-            }
+    if (LZ4_isLittleEndian()) {
+        reg_t patternByte = pattern;
+        while ((ip<iEnd) && (*ip == (BYTE)patternByte)) {
+            ip++; patternByte >>= 8;
+        }
+    } else {  /* big endian */
+        U32 bitOffset = (sizeof(pattern)*8) - 8;
+        while (ip < iEnd) {
+            BYTE const byte = (BYTE)(pattern >> bitOffset);
+            if (*ip != byte) break;
+            ip ++; bitOffset -= 8;
         }
-        matchIndex -= DELTANEXTU16(chainTable, matchIndex);
     }
-    return (int)ml;
+
+    return (unsigned)(ip - iStart);
+}
+
+/* LZ4HC_reverseCountPattern() :
+ * pattern must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!)
+ * read using natural platform endianess */
+static unsigned LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern)
+{
+    const BYTE* const iStart = ip;
+
+    while (likely(ip >= iLow+4)) {
+        if (LZ4_read32(ip-4) != pattern) break;
+        ip -= 4;
+    }
+    {   const BYTE* bytePtr = (const BYTE*)(&pattern) + 3;  /* works for any endianess */
+        while (likely(ip>iLow)) {
+            if (ip[-1] != *bytePtr) break;
+            ip--; bytePtr--;
+    }   }
+    return (unsigned)(iStart - ip);
 }
 
+typedef enum { rep_untested, rep_not, rep_confirmed } repeat_state_e;
 
 LZ4_FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch (
     LZ4HC_CCtx_internal* hc4,
@@ -180,60 +198,117 @@ LZ4_FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch (
     const BYTE* const base = hc4->base;
     const U32 dictLimit = hc4->dictLimit;
     const BYTE* const lowPrefixPtr = base + dictLimit;
-    const U32 lowLimit = (hc4->lowLimit + 64 KB > (U32)(ip-base)) ? hc4->lowLimit : (U32)(ip - base) - (64 KB - 1);
+    const U32 lowLimit = (hc4->lowLimit + 64 KB > (U32)(ip-base)) ? hc4->lowLimit : (U32)(ip - base) - MAX_DISTANCE;
     const BYTE* const dictBase = hc4->dictBase;
     int const delta = (int)(ip-iLowLimit);
     int nbAttempts = maxNbAttempts;
+    U32 const pattern = LZ4_read32(ip);
     U32 matchIndex;
+    repeat_state_e repeat = rep_untested;
+    size_t srcPatternLength = 0;
 
-
+    DEBUGLOG(7, "LZ4HC_InsertAndGetWiderMatch");
     /* First Match */
     LZ4HC_Insert(hc4, ip);
     matchIndex = HashTable[LZ4HC_hashPtr(ip)];
+    DEBUGLOG(7, "First match at index %u / %u (lowLimit)",
+                matchIndex, lowLimit);
 
     while ((matchIndex>=lowLimit) && (nbAttempts)) {
+        DEBUGLOG(7, "remaining attempts : %i", nbAttempts);
         nbAttempts--;
         if (matchIndex >= dictLimit) {
            const BYTE* const matchPtr = base + matchIndex;
            if (*(iLowLimit + longest) == *(matchPtr - delta + longest)) {
-                if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
+                if (LZ4_read32(matchPtr) == pattern) {
                    int mlt = MINMATCH + LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, iHighLimit);
+                    #if 0
+                    /* more generic but unfortunately slower on clang */
+                    int const back = LZ4HC_countBack(ip, matchPtr, iLowLimit, lowPrefixPtr);
+                    #else
                    int back = 0;
-
                    while (   (ip+back > iLowLimit)
                           && (matchPtr+back > lowPrefixPtr)
                           && (ip[back-1] == matchPtr[back-1])) {
                            back--;
                    }
-
+                    #endif
                    mlt -= back;
 
                    if (mlt > longest) {
                        longest = mlt;
                        *matchpos = matchPtr+back;
                        *startpos = ip+back;
-                }   }   }
-        } else {
+                }   }
+            }
+        } else {   /* matchIndex < dictLimit */
             const BYTE* const matchPtr = dictBase + matchIndex;
-            if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
+            if (LZ4_read32(matchPtr) == pattern) {
                 int mlt;
-                int back=0;
+                int back = 0;
                 const BYTE* vLimit = ip + (dictLimit - matchIndex);
                 if (vLimit > iHighLimit) vLimit = iHighLimit;
                 mlt = LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
                 if ((ip+mlt == vLimit) && (vLimit < iHighLimit))
                     mlt += LZ4_count(ip+mlt, base+dictLimit, iHighLimit);
-                while ((ip+back > iLowLimit) && (matchIndex+back > lowLimit) && (ip[back-1] == matchPtr[back-1])) back--;
+                while ( (ip+back > iLowLimit)
+                     && (matchIndex+back > lowLimit)
+                     && (ip[back-1] == matchPtr[back-1]))
+                        back--;
                 mlt -= back;
-                if (mlt > longest) { longest = mlt; *matchpos = base + matchIndex + back; *startpos = ip+back; }
-            }
-        }
-        matchIndex -= DELTANEXTU16(chainTable, matchIndex);
-    }
+                if (mlt > longest) {
+                    longest = mlt;
+                    *matchpos = base + matchIndex + back;
+                    *startpos = ip + back;
+        }   }   }
+
+        {   U32 const nextOffset = DELTANEXTU16(chainTable, matchIndex);
+            matchIndex -= nextOffset;
+            if (nextOffset==1) {
+                /* may be a repeated pattern */
+                if (repeat == rep_untested) {
+                    if ( ((pattern & 0xFFFF) == (pattern >> 16))
+                      &  ((pattern & 0xFF)   == (pattern >> 24)) ) {
+                        repeat = rep_confirmed;
+                        srcPatternLength = LZ4HC_countPattern(ip+4, iHighLimit, pattern) + 4;
+                    } else {
+                        repeat = rep_not;
+                }   }
+                if ( (repeat == rep_confirmed)
+                  && (matchIndex >= dictLimit) ) {   /* same segment only */
+                    const BYTE* const matchPtr = base + matchIndex;
+                    if (LZ4_read32(matchPtr) == pattern) {  /* good candidate */
+                        size_t const forwardPatternLength = LZ4HC_countPattern(matchPtr+sizeof(pattern), iHighLimit, pattern) + sizeof(pattern);
+                        const BYTE* const maxLowPtr = (lowPrefixPtr + MAX_DISTANCE >= ip) ? lowPrefixPtr : ip - MAX_DISTANCE;
+                        size_t const backLength = LZ4HC_reverseCountPattern(matchPtr, maxLowPtr, pattern);
+                        size_t const currentSegmentLength = backLength + forwardPatternLength;
+
+                        if ( (currentSegmentLength >= srcPatternLength)   /* current pattern segment large enough to contain full srcPatternLength */
+                          && (forwardPatternLength <= srcPatternLength) ) {  /* haven't reached this position yet */
+                            matchIndex += (U32)forwardPatternLength - (U32)srcPatternLength;  /* best position, full pattern, might be followed by more match */
+                        } else {
+                            matchIndex -= (U32)backLength;   /* let's go to farthest segment position, will find a match of length currentSegmentLength + maybe some back */
+                        }
+        }   }   }   }
+    }  /* while ((matchIndex>=lowLimit) && (nbAttempts)) */
 
     return longest;
 }
 
+LZ4_FORCE_INLINE
+int LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4,   /* Index table will be updated */
+                                 const BYTE* const ip, const BYTE* const iLimit,
+                                 const BYTE** matchpos,
+                                 const int maxNbAttempts)
+{
+    const BYTE* uselessPtr = ip;
+    /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
+     * but this won't be the case here, as we define iLowLimit==ip,
+     * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
+    return LZ4HC_InsertAndGetWiderMatch(hc4, ip, ip, iLimit, MINMATCH-1, matchpos, &uselessPtr, maxNbAttempts);
+}
+
+
 typedef enum {
     noLimit = 0,
@@ -241,10 +316,6 @@ typedef enum {
     limitedDestSize = 2,
 } limitedOutput_directive;
 
-#ifndef LZ4HC_DEBUG
-#  define LZ4HC_DEBUG 0
-#endif
-
 
 /* LZ4HC_encodeSequence() :
  * @return : 0 if ok,
 *           1 if buffer issue detected */
@@ -260,9 +331,21 @@ LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
     size_t length;
     BYTE* const token = (*op)++;
 
-#if LZ4HC_DEBUG
-    printf("literal : %u  --  match : %u  --  offset : %u\n",
-           (U32)(*ip - *anchor), (U32)matchLength, (U32)(*ip-match));
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 2)
+    static const BYTE* start = NULL;
+    static U32 totalCost = 0;
+    U32 const pos = (start==NULL) ? 0 : (U32)(*anchor - start);
+    U32 const ll = (U32)(*ip - *anchor);
+    U32 const llAdd = (ll>=15) ? ((ll-15) / 255) + 1 : 0;
+    U32 const mlAdd = (matchLength>=19) ? ((matchLength-19) / 255) + 1 : 0;
+    U32 const cost = 1 + llAdd + ll + 2 + mlAdd;
+    if (start==NULL) start = *anchor;  /* only works for single segment */
+    //g_debuglog_enable = (pos >= 2228) & (pos <= 2262);
+    DEBUGLOG(2, "pos:%7u -- literals:%3u, match:%4i, offset:%5u, cost:%3u + %u",
+                pos,
+                (U32)(*ip - *anchor), matchLength, (U32)(*ip-match),
+                cost, totalCost);
+    totalCost += cost;
 #endif
 
     /* Encode Literal length */
@@ -285,6 +368,7 @@ LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
     LZ4_writeLE16(*op, (U16)(*ip-match)); *op += 2;
 
     /* Encode MatchLength */
+    assert(matchLength >= MINMATCH);
     length = (size_t)(matchLength - MINMATCH);
     if ((limit) && (*op + (length >> 8) + (1 + LASTLITERALS) > oend)) return 1;   /* Check output limit */
     if (length >= ML_MASK) {
@@ -344,16 +428,13 @@ static int LZ4HC_compress_hashChain (
     if (limit == limitedDestSize && maxOutputSize < 1) return 0;   /* Impossible to store anything */
     if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0;        /* Unsupported input size, too large (or negative) */
 
-    ctx->end += inputSize;
     if (limit == limitedDestSize) oend -= LASTLITERALS;            /* Hack for support limitations LZ4 decompressor */
     if (inputSize < LZ4_minLength) goto _last_literals;            /* Input too small, no compression (all literals) */
 
-    ip++;
-
     /* Main Loop */
     while (ip < mflimit) {
-        ml = LZ4HC_InsertAndFindBestMatch (ctx, ip, matchlimit, (&ref), maxNbAttempts);
-        if (!ml) { ip++; continue; }
+        ml = LZ4HC_InsertAndFindBestMatch (ctx, ip, matchlimit, &ref, maxNbAttempts);
+        if (ml<MINMATCH) { ip++; continue; }
 
         /* saved, in case we would skip too much */
         start0 = ip;
@@ -527,14 +608,6 @@ _dest_overflow:
     return 0;
 }
 
-static int LZ4HC_getSearchNum(int compressionLevel)
-{
-    switch (compressionLevel) {
-        default: return 0; /* unused */
-        case 11: return 128;
-        case 12: return 1<<10;
-    }
-}
 
 static int LZ4HC_compress_generic (
     LZ4HC_CCtx_internal* const ctx,
@@ -546,21 +619,19 @@ static int LZ4HC_compress_generic (
     limitedOutput_directive limit
     )
 {
-    if (cLevel < 1) cLevel = LZ4HC_CLEVEL_DEFAULT;   /* note : convention is different from lz4frame, maybe to reconsider */
+    ctx->end += *srcSizePtr;
+    if (cLevel < 1) cLevel = LZ4HC_CLEVEL_DEFAULT;   /* note : convention is different from lz4frame, maybe something to review */
     if (cLevel > 9) {
         if (limit == limitedDestSize) cLevel = 10;
         switch (cLevel) {
             case 10:
-                return LZ4HC_compress_hashChain(ctx, src, dst, srcSizePtr, dstCapacity, 1 << 12, limit);
+                return LZ4HC_compress_hashChain(ctx, src, dst, srcSizePtr, dstCapacity, 1<<12, limit);
             case 11:
-                ctx->searchNum = LZ4HC_getSearchNum(cLevel);
-                return LZ4HC_compress_optimal(ctx, src, dst, *srcSizePtr, dstCapacity, limit, 128, 0);
+                return LZ4HC_compress_optimal(ctx, src, dst, *srcSizePtr, dstCapacity, limit, 512, 128, 0);
             default:
-                cLevel = 12;
                 /* fall-through */
             case 12:
-                ctx->searchNum = LZ4HC_getSearchNum(cLevel);
-                return LZ4HC_compress_optimal(ctx, src, dst, *srcSizePtr, dstCapacity, limit, LZ4_OPT_NUM, 1);
+                return LZ4HC_compress_optimal(ctx, src, dst, *srcSizePtr, dstCapacity, limit, 1<<13, LZ4_OPT_NUM, 1);
         }
     }
     return LZ4HC_compress_hashChain(ctx, src, dst, srcSizePtr, dstCapacity, 1 << (cLevel-1), limit);   /* levels 1-9 */
@@ -596,8 +667,7 @@ int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, in
 }
 
 /* LZ4_compress_HC_destSize() :
- * currently, only compatible with Hash Chain implementation,
- * hence limit compression level to LZ4HC_CLEVEL_OPT_MIN-1*/
+ * only compatible with Hash Chain match finder */
 int LZ4_compress_HC_destSize(void* LZ4HC_Data, const char* source, char* dest, int* sourceSizePtr, int targetDestSize, int cLevel)
 {
     LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse;
@@ -624,18 +694,13 @@ void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
 {
     LZ4_STATIC_ASSERT(sizeof(LZ4HC_CCtx_internal) <= sizeof(size_t) * LZ4_STREAMHCSIZE_SIZET);   /* if compilation fails here, LZ4_STREAMHCSIZE must be increased */
     LZ4_streamHCPtr->internal_donotuse.base = NULL;
-    if (compressionLevel > LZ4HC_CLEVEL_MAX) compressionLevel = LZ4HC_CLEVEL_MAX;   /* cap compression level */
-    LZ4_streamHCPtr->internal_donotuse.compressionLevel = compressionLevel;
-    LZ4_streamHCPtr->internal_donotuse.searchNum = LZ4HC_getSearchNum(compressionLevel);
+    LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
 }
 
 void LZ4_setCompressionLevel(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
 {
-    int const currentCLevel = LZ4_streamHCPtr->internal_donotuse.compressionLevel;
-    int const minCLevel = currentCLevel < LZ4HC_CLEVEL_OPT_MIN ? 1 : LZ4HC_CLEVEL_OPT_MIN;
-    int const maxCLevel = currentCLevel < LZ4HC_CLEVEL_OPT_MIN ? LZ4HC_CLEVEL_OPT_MIN-1 : LZ4HC_CLEVEL_MAX;
-    compressionLevel = MIN(compressionLevel, minCLevel);
-    compressionLevel = MAX(compressionLevel, maxCLevel);
+    if (compressionLevel < 1) compressionLevel = 1;
+    if (compressionLevel > LZ4HC_CLEVEL_MAX) compressionLevel = LZ4HC_CLEVEL_MAX;
     LZ4_streamHCPtr->internal_donotuse.compressionLevel = compressionLevel;
 }
 
@@ -648,10 +713,7 @@ int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, const char* dictionary, int
     }
     LZ4HC_init (ctxPtr, (const BYTE*)dictionary);
     ctxPtr->end = (const BYTE*)dictionary + dictSize;
-    if (ctxPtr->compressionLevel >= LZ4HC_CLEVEL_OPT_MIN)
-        LZ4HC_updateBinTree(ctxPtr, ctxPtr->end - MFLIMIT, ctxPtr->end - LASTLITERALS);
-    else
-        if (dictSize >= 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3);
+    if (dictSize >= 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3);
     return dictSize;
 }
 
@@ -660,10 +722,7 @@ int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, const char* dictionary, int
 
 static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock)
 {
-    if (ctxPtr->compressionLevel >= LZ4HC_CLEVEL_OPT_MIN)
-        LZ4HC_updateBinTree(ctxPtr, ctxPtr->end - MFLIMIT, ctxPtr->end - LASTLITERALS);
-    else
-        if (ctxPtr->end >= ctxPtr->base + 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3);   /* Referencing remaining dictionary content */
+    if (ctxPtr->end >= ctxPtr->base + 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3);   /* Referencing remaining dictionary content */
 
     /* Only one memory segment for extDict, so any previous extDict is lost at this stage */
     ctxPtr->lowLimit  = ctxPtr->dictLimit;
@@ -717,8 +776,6 @@ int LZ4_compress_HC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src,
 
 int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int* srcSizePtr, int targetDestSize)
 {
-    LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
-    if (ctxPtr->compressionLevel >= LZ4HC_CLEVEL_OPT_MIN) LZ4HC_init(ctxPtr, (const BYTE*)src);   /* not compatible with btopt implementation */
     return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, srcSizePtr, targetDestSize, limitedDestSize);
 }
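`LZ4HC_countBack()` captures the backward-extension step used throughout the new match finder: once a forward match is found, walk backwards while the preceding bytes agree, without crossing the current anchor or the window start. A self-contained toy of the same logic (the buffer and `countBack` here are illustrative, not lz4 internals):

```c
#include <stdio.h>

typedef unsigned char BYTE;

static int countBack(const BYTE* ip, const BYTE* match,
                     const BYTE* iMin, const BYTE* mMin)
{
    int back = 0;
    while ( (ip+back > iMin)             /* don't cross the anchor */
         && (match+back > mMin)          /* don't cross the window start */
         && (ip[back-1] == match[back-1]))
        back--;
    return back;                         /* negative: nb of common bytes before ip */
}

int main(void)
{
    const BYTE buf[] = "abcXYZ....abcXYZ";
    const BYTE* match = buf + 3;    /* "XYZ" of the first copy */
    const BYTE* ip    = buf + 13;   /* "XYZ" of the second copy */
    /* forward search found "XYZ"; backward extension recovers "abc" too */
    printf("back = %d\n", countBack(ip, match, buf, buf));   /* -> -3 */
    return 0;
}
```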
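The repeat-detection guard on the `nextOffset==1` path only confirms a repeat when all four sampled bytes are identical, even though `LZ4HC_countPattern()` itself accepts any period-1/2/4 sample. A minimal check of why that bit test implies a single repeated byte (standalone, hypothetical helper name):

```c
#include <stdio.h>
#include <stdint.h>

/* mirrors the guard above: halves equal (b0==b2, b1==b3) AND b0==b3
 * together force b0==b1==b2==b3, i.e. a run of one repeated byte */
static int isByteRepeat(uint32_t pattern)
{
    return ((pattern & 0xFFFF) == (pattern >> 16))   /* b0==b2 and b1==b3 */
         & ((pattern & 0xFF)   == (pattern >> 24));  /* b0==b3 => all equal */
}

int main(void)
{
    printf("%d\n", isByteRepeat(0x61616161));  /* "aaaa" -> 1 */
    printf("%d\n", isByteRepeat(0x62616261));  /* "abab" -> 0 (period 2) */
    printf("%d\n", isByteRepeat(0x64636261));  /* "abcd" -> 0 */
    return 0;
}
```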
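The cost instrumentation added to `LZ4HC_encodeSequence()` tallies the exact encoded size of each sequence: one token byte, extra literal-length bytes past 15, the literals themselves, a 2-byte offset, and extra match-length bytes past 19 (15 in the token plus MINMATCH of 4). Re-derived as a standalone program:

```c
#include <stdio.h>

/* exact byte cost of one LZ4 sequence, matching the DEBUGLOG formula above */
static unsigned sequenceCost(unsigned ll, unsigned matchLength)
{
    unsigned const llAdd = (ll >= 15) ? ((ll - 15) / 255) + 1 : 0;
    unsigned const mlAdd = (matchLength >= 19) ? ((matchLength - 19) / 255) + 1 : 0;
    return 1 /*token*/ + llAdd + ll /*literals*/ + 2 /*offset*/ + mlAdd;
}

int main(void)
{
    printf("%u\n", sequenceCost(0, 4));    /* minimal sequence: 3 bytes        */
    printf("%u\n", sequenceCost(20, 4));   /* 1 + 1 + 20 + 2 + 0 = 24 bytes    */
    printf("%u\n", sequenceCost(2, 300));  /* 1 + 0 + 2 + 2 + 2  = 7 bytes     */
    return 0;
}
```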
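With `LZ4HC_getSearchNum()` gone, `LZ4_setCompressionLevel()` now simply clamps to [1, LZ4HC_CLEVEL_MAX], and, per the updated lz4hc.h comment, may be called between two `LZ4_compress_HC_continue*()` invocations since both parsers share the hash-chain tables. A sketch of switching levels mid-stream (at this version the prototype lives in the static-linking-only section; `compress_two_blocks` and the buffer handling are hypothetical simplifications):

```c
#define LZ4_HC_STATIC_LINKING_ONLY   /* LZ4_setCompressionLevel() is experimental here */
#include "lz4hc.h"

/* compresses two dependent blocks, raising the level for the second one */
int compress_two_blocks(const char* in1, int len1, const char* in2, int len2,
                        char* out, int outCapacity)
{
    LZ4_streamHC_t ctx;
    int written, r;
    LZ4_resetStreamHC(&ctx, 9);        /* start on the hash-chain parser */
    r = LZ4_compress_HC_continue(&ctx, in1, out, len1, outCapacity);
    if (r <= 0) return -1;
    written = r;
    LZ4_setCompressionLevel(&ctx, 12); /* switch to the optimal parser */
    r = LZ4_compress_HC_continue(&ctx, in2, out + written, len2, outCapacity - written);
    if (r <= 0) return -1;
    return written + r;
}
```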
diff --git a/lib/lz4hc.h b/lib/lz4hc.h
index 66d5636..d791062 100644
--- a/lib/lz4hc.h
+++ b/lib/lz4hc.h
@@ -39,7 +39,7 @@ extern "C" {
 #endif
 
 /* --- Dependency --- */
-/* note : lz4hc is not an independent module, it requires lz4.h/lz4.c for proper compilation */
+/* note : lz4hc requires lz4.h/lz4.c for compilation */
 #include "lz4.h"   /* stddef, LZ4LIB_API, LZ4_DEPRECATED */
 
@@ -54,12 +54,12 @@ extern "C" {
 *  Block Compression
 **************************************/
 /*! LZ4_compress_HC() :
- * Compress data from `src` into `dst`, using the more powerful but slower "HC" algorithm.
+ *  Compress data from `src` into `dst`, using the more powerful but slower "HC" algorithm.
  * `dst` must be already allocated.
- * Compression is guaranteed to succeed if `dstCapacity >= LZ4_compressBound(srcSize)` (see "lz4.h")
- * Max supported `srcSize` value is LZ4_MAX_INPUT_SIZE (see "lz4.h")
- * `compressionLevel` : Recommended values are between 4 and 9, although any value between 1 and LZ4HC_CLEVEL_MAX will work.
- *                      Values >LZ4HC_CLEVEL_MAX behave the same as LZ4HC_CLEVEL_MAX.
+ *  Compression is guaranteed to succeed if `dstCapacity >= LZ4_compressBound(srcSize)` (see "lz4.h")
+ *  Max supported `srcSize` value is LZ4_MAX_INPUT_SIZE (see "lz4.h")
+ *  `compressionLevel` : any value between 1 and LZ4HC_CLEVEL_MAX will work.
+ *                       Values > LZ4HC_CLEVEL_MAX behave the same as LZ4HC_CLEVEL_MAX.
  * @return : the number of bytes written into 'dst'
  *           or 0 if compression fails.
  */
@@ -72,12 +72,12 @@ LZ4LIB_API int LZ4_compress_HC (const char* src, char* dst, int srcSize, int dst
 
 /*! LZ4_compress_HC_extStateHC() :
- * Same as LZ4_compress_HC(), but using an externally allocated memory segment for `state`.
+ *  Same as LZ4_compress_HC(), but using an externally allocated memory segment for `state`.
  * `state` size is provided by LZ4_sizeofStateHC().
- * Memory segment must be aligned on 8-bytes boundaries (which a normal malloc() will do properly).
+ *  Memory segment must be aligned on 8-bytes boundaries (which a normal malloc() should do properly).
  */
-LZ4LIB_API int LZ4_compress_HC_extStateHC(void* state, const char* src, char* dst, int srcSize, int maxDstSize, int compressionLevel);
 LZ4LIB_API int LZ4_sizeofStateHC(void);
+LZ4LIB_API int LZ4_compress_HC_extStateHC(void* state, const char* src, char* dst, int srcSize, int maxDstSize, int compressionLevel);
 
 
 /*-************************************
@@ -87,10 +87,10 @@ LZ4LIB_API int LZ4_sizeofStateHC(void);
 typedef union LZ4_streamHC_u LZ4_streamHC_t;   /* incomplete type (defined later) */
 
 /*! LZ4_createStreamHC() and LZ4_freeStreamHC() :
- * These functions create and release memory for LZ4 HC streaming state.
- * Newly created states are automatically initialized.
- * Existing states can be re-used several times, using LZ4_resetStreamHC().
- * These methods are API and ABI stable, they can be used in combination with a DLL.
+ *  These functions create and release memory for LZ4 HC streaming state.
+ *  Newly created states are automatically initialized.
+ *  Existing states can be re-used several times, using LZ4_resetStreamHC().
+ *  These methods are API and ABI stable, they can be used in combination with a DLL.
  */
 LZ4LIB_API LZ4_streamHC_t* LZ4_createStreamHC(void);
 LZ4LIB_API int             LZ4_freeStreamHC (LZ4_streamHC_t* streamHCPtr);
@@ -123,13 +123,13 @@ LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, in
 */
 
- /*-*************************************
+/*-**************************************************************
  * PRIVATE DEFINITIONS :
  * Do not use these definitions.
  * They are exposed to allow static allocation of `LZ4_streamHC_t`.
  * Using these definitions makes the code vulnerable to potential API break when upgrading LZ4
- **************************************/
-#define LZ4HC_DICTIONARY_LOGSIZE 17   /* because of btopt, hc would only need 16 */
+ ****************************************************************/
+#define LZ4HC_DICTIONARY_LOGSIZE 16
 #define LZ4HC_MAXD (1<<LZ4HC_DICTIONARY_LOGSIZE)
 #define LZ4HC_MAXD_MASK (LZ4HC_MAXD - 1)
@@ -152,8 +152,7 @@ typedef struct
     uint32_t   dictLimit;       /* below that point, need extDict */
     uint32_t   lowLimit;        /* below that point, no more dict */
     uint32_t   nextToUpdate;    /* index from which to continue dictionary update */
-    uint32_t   searchNum;       /* only for optimal parser */
-    uint32_t   compressionLevel;
+    int        compressionLevel;
 } LZ4HC_CCtx_internal;
 
 #else
@@ -169,13 +168,12 @@ typedef struct
     unsigned int   dictLimit;       /* below that point, need extDict */
     unsigned int   lowLimit;        /* below that point, no more dict */
     unsigned int   nextToUpdate;    /* index from which to continue dictionary update */
-    unsigned int   searchNum;       /* only for optimal parser */
     int            compressionLevel;
 } LZ4HC_CCtx_internal;
 
 #endif
 
-#define LZ4_STREAMHCSIZE        (4*LZ4HC_HASHTABLESIZE + 2*LZ4HC_MAXD + 56)   /* 393268 */
+#define LZ4_STREAMHCSIZE        (4*LZ4HC_HASHTABLESIZE + 2*LZ4HC_MAXD + 56)   /* 262200 */
 #define LZ4_STREAMHCSIZE_SIZET (LZ4_STREAMHCSIZE / sizeof(size_t))
 union LZ4_streamHC_u {
     size_t table[LZ4_STREAMHCSIZE_SIZET];
@@ -197,7 +195,6 @@ union LZ4_streamHC_u {
 /* see lz4.h LZ4_DISABLE_DEPRECATE_WARNINGS to turn off deprecation warnings */
 
 /* deprecated compression functions */
-/* these functions will trigger warning messages in future releases */
 LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC() instead") int LZ4_compressHC               (const char* source, char* dest, int inputSize);
 LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC() instead") int LZ4_compressHC_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize);
 LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC() instead") int LZ4_compressHC2              (const char* source, char* dest, int inputSize, int compressionLevel);
@@ -225,14 +222,15 @@ LZ4LIB_API LZ4_DEPRECATED("use LZ4_resetStreamHC() instead") int LZ4_resetStr
 
 #endif /* LZ4_HC_H_19834876238432 */
 
-/*-************************************************
+
+/*-**************************************************
  * !!!!!  STATIC LINKING ONLY  !!!!!
  * Following definitions are considered experimental.
  * They should not be linked from DLL,
 * as there is no guarantee of API stability yet.
 * Prototypes will be promoted to "stable" status
 * after successfull usage in real-life scenarios.
- *************************************************/
+ ***************************************************/
 #ifdef LZ4_HC_STATIC_LINKING_ONLY   /* protection macro */
 #ifndef LZ4_HC_SLO_098092834
 #define LZ4_HC_SLO_098092834
@@ -258,17 +256,13 @@ int LZ4_compress_HC_destSize(void* LZ4HC_Data,
 * @return : the number of bytes written into 'dst'
 *           or 0 if compression fails.
 * `srcSizePtr` : value will be updated to indicate how much bytes were read from `src`.
- * Important : due to limitations, this prototype only works well up to cLevel < LZ4HC_CLEVEL_OPT_MIN
- *             beyond that level, compression performance will be much reduced due to internal incompatibilities
 */
int LZ4_compress_HC_continue_destSize(LZ4_streamHC_t* LZ4_streamHCPtr,
                                      const char* src, char* dst,
                                      int* srcSizePtr, int targetDstSize);

/*! LZ4_setCompressionLevel() : v1.8.0 (experimental)
- * It's possible to change compression level after LZ4_resetStreamHC(), between 2 invocations of LZ4_compress_HC_continue*(),
- * but that requires to stay in the same mode (aka 1-10 or 11-12).
- * This function ensures this condition.
+ *  It's possible to change compression level between 2 invocations of LZ4_compress_HC_continue*()
 */
void LZ4_setCompressionLevel(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel);
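The documented contract above: compression cannot fail when `dstCapacity >= LZ4_compressBound(srcSize)`. Minimal one-shot usage (the sample string is illustrative):

```c
#include <stdio.h>
#include <stdlib.h>
#include "lz4.h"     /* LZ4_compressBound */
#include "lz4hc.h"   /* LZ4_compress_HC, LZ4HC_CLEVEL_MAX */

int main(void)
{
    const char src[] = "yada yada yada yada yada yada yada yada";
    int const srcSize = (int)(sizeof(src) - 1);
    int const bound = LZ4_compressBound(srcSize);    /* worst-case dst size */
    char* const dst = (char*)malloc((size_t)bound);
    if (dst == NULL) return 1;
    {   int const cSize = LZ4_compress_HC(src, dst, srcSize, bound, LZ4HC_CLEVEL_MAX);
        if (cSize == 0) { free(dst); return 1; }     /* cannot happen with bound */
        printf("%d -> %d bytes\n", srcSize, cSize);
    }
    free(dst);
    return 0;
}
```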
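The PRIVATE DEFINITIONS block exists so `LZ4_streamHC_t` can be allocated statically (or on the stack) instead of through `LZ4_createStreamHC()`; with `LZ4HC_DICTIONARY_LOGSIZE` shrunk to 16, the state drops from 393268 to 262200 bytes. A sketch under that assumption (explicit reset before use is still mandatory):

```c
#include <stdio.h>
#include "lz4hc.h"

static LZ4_streamHC_t g_hcState;   /* static storage: no malloc, no free */

int main(void)
{
    LZ4_resetStreamHC(&g_hcState, LZ4HC_CLEVEL_DEFAULT);   /* required init */
    /* state is now ready for LZ4_compress_HC_continue() calls */
    printf("state size: %d bytes\n", LZ4_sizeofStateHC());  /* == LZ4_STREAMHCSIZE */
    return 0;
}
```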
diff --git a/lib/lz4opt.h b/lib/lz4opt.h
index 584dc97..9917851 100644
--- a/lib/lz4opt.h
+++ b/lib/lz4opt.h
@@ -35,12 +35,6 @@
 
 #define LZ4_OPT_NUM   (1<<12)
 
-
-typedef struct {
-    int off;
-    int len;
-} LZ4HC_match_t;
-
 typedef struct {
     int price;
     int off;
@@ -50,317 +44,286 @@ typedef struct {
 
 /* price in bytes */
-LZ4_FORCE_INLINE size_t LZ4HC_literalsPrice(size_t litlen)
+LZ4_FORCE_INLINE int LZ4HC_literalsPrice(int const litlen)
 {
-    size_t price = litlen;
-    if (litlen >= (size_t)RUN_MASK)
+    int price = litlen;
+    if (litlen >= (int)RUN_MASK)
         price += 1 + (litlen-RUN_MASK)/255;
     return price;
 }
 
 
 /* requires mlen >= MINMATCH */
-LZ4_FORCE_INLINE size_t LZ4HC_sequencePrice(size_t litlen, size_t mlen)
+LZ4_FORCE_INLINE int LZ4HC_sequencePrice(int litlen, int mlen)
 {
-    size_t price = 2 + 1; /* 16-bit offset + token */
+    int price = 1 + 2 ; /* token + 16-bit offset */
     price += LZ4HC_literalsPrice(litlen);
-    if (mlen >= (size_t)(ML_MASK+MINMATCH))
-        price+= 1 + (mlen-(ML_MASK+MINMATCH))/255;
+    if (mlen >= (int)(ML_MASK+MINMATCH))
+        price += 1 + (mlen-(ML_MASK+MINMATCH))/255;
     return price;
 }
 
 
 /*-*************************************
-*  Binary Tree search
+*  Match finder
 ***************************************/
-LZ4_FORCE_INLINE int LZ4HC_BinTree_InsertAndGetAllMatches (
-    LZ4HC_CCtx_internal* ctx,
-    const BYTE* const ip,
-    const BYTE* const iHighLimit,
-    size_t best_mlen,
-    LZ4HC_match_t* matches,
-    int* matchNum)
-{
-    U16* const chainTable = ctx->chainTable;
-    U32* const HashTable = ctx->hashTable;
-    const BYTE* const base = ctx->base;
-    const U32 dictLimit = ctx->dictLimit;
-    const U32 current = (U32)(ip - base);
-    const U32 lowLimit = (ctx->lowLimit + MAX_DISTANCE > current) ? ctx->lowLimit : current - (MAX_DISTANCE - 1);
-    const BYTE* const dictBase = ctx->dictBase;
-    const BYTE* match;
-    int nbAttempts = ctx->searchNum;
-    int mnum = 0;
-    U16 *ptr0, *ptr1, delta0, delta1;
-    U32 matchIndex;
-    size_t matchLength = 0;
-    U32* HashPos;
-
-    if (ip + MINMATCH > iHighLimit) return 1;
-
-    /* HC4 match finder */
-    HashPos = &HashTable[LZ4HC_hashPtr(ip)];
-    matchIndex = *HashPos;
-    *HashPos = current;
-
-    ptr0 = &DELTANEXTMAXD(current*2+1);
-    ptr1 = &DELTANEXTMAXD(current*2);
-    delta0 = delta1 = (U16)(current - matchIndex);
-
-    while ((matchIndex < current) && (matchIndex>=lowLimit) && (nbAttempts)) {
-        nbAttempts--;
-        if (matchIndex >= dictLimit) {
-            match = base + matchIndex;
-            matchLength = LZ4_count(ip, match, iHighLimit);
-        } else {
-            const BYTE* vLimit = ip + (dictLimit - matchIndex);
-            match = dictBase + matchIndex;
-            if (vLimit > iHighLimit) vLimit = iHighLimit;
-            matchLength = LZ4_count(ip, match, vLimit);
-            if ((ip+matchLength == vLimit) && (vLimit < iHighLimit))
-                matchLength += LZ4_count(ip+matchLength, base+dictLimit, iHighLimit);
-            if (matchIndex+matchLength >= dictLimit)
-                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
-        }
-
-        if (matchLength > best_mlen) {
-            best_mlen = matchLength;
-            if (matches) {
-                if (matchIndex >= dictLimit)
-                    matches[mnum].off = (int)(ip - match);
-                else
-                    matches[mnum].off = (int)(ip - (base + matchIndex));   /* virtual matchpos */
-                matches[mnum].len = (int)matchLength;
-                mnum++;
-            }
-            if (best_mlen > LZ4_OPT_NUM) break;
-        }
-
-        if (ip+matchLength >= iHighLimit)   /* equal : no way to know if inf or sup */
-            break;   /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt the tree */
-
-        DEBUGLOG(6, "ip   :%016llX", (U64)ip);
-        DEBUGLOG(6, "match:%016llX", (U64)match);
-        if (*(ip+matchLength) < *(match+matchLength)) {
-            *ptr0 = delta0;
-            ptr0 = &DELTANEXTMAXD(matchIndex*2);
-            if (*ptr0 == (U16)-1) break;
-            delta0 = *ptr0;
-            delta1 += delta0;
-            matchIndex -= delta0;
-        } else {
-            *ptr1 = delta1;
-            ptr1 = &DELTANEXTMAXD(matchIndex*2+1);
-            if (*ptr1 == (U16)-1) break;
-            delta1 = *ptr1;
-            delta0 += delta1;
-            matchIndex -= delta1;
-        }
-    }
-
-    *ptr0 = (U16)-1;
-    *ptr1 = (U16)-1;
-    if (matchNum) *matchNum = mnum;
-    /* if (best_mlen > 8) return best_mlen-8; */
-    if (!matchNum) return 1;
-    return 1;
-}
-
-
-LZ4_FORCE_INLINE void LZ4HC_updateBinTree(LZ4HC_CCtx_internal* ctx, const BYTE* const ip, const BYTE* const iHighLimit)
-{
-    const BYTE* const base = ctx->base;
-    const U32 target = (U32)(ip - base);
-    U32 idx = ctx->nextToUpdate;
-    while(idx < target)
-        idx += LZ4HC_BinTree_InsertAndGetAllMatches(ctx, base+idx, iHighLimit, 8, NULL, NULL);
-}
-
+typedef struct {
+    int off;
+    int len;
+} LZ4HC_match_t;
 
-/** Tree updater, providing best match */
-LZ4_FORCE_INLINE int LZ4HC_BinTree_GetAllMatches (
-    LZ4HC_CCtx_internal* ctx,
-    const BYTE* const ip, const BYTE* const iHighLimit,
-    size_t best_mlen, LZ4HC_match_t* matches, const int fullUpdate)
+LZ4_FORCE_INLINE
+LZ4HC_match_t LZ4HC_FindLongerMatch(LZ4HC_CCtx_internal* const ctx,
+                                    const BYTE* ip, const BYTE* const iHighLimit,
+                                    int minLen, int nbSearches)
 {
-    int mnum = 0;
-    if (ip < ctx->base + ctx->nextToUpdate) return 0;   /* skipped area */
-    if (fullUpdate) LZ4HC_updateBinTree(ctx, ip, iHighLimit);
-    best_mlen = LZ4HC_BinTree_InsertAndGetAllMatches(ctx, ip, iHighLimit, best_mlen, matches, &mnum);
-    ctx->nextToUpdate = (U32)(ip - ctx->base + best_mlen);
-    return mnum;
-}
-
-
-#define SET_PRICE(pos, ml, offset, ll, cost)           \
-{                                                      \
-    while (last_pos < pos)  { opt[last_pos+1].price = 1<<30; last_pos++; } \
-    opt[pos].mlen = (int)ml;                           \
-    opt[pos].off = (int)offset;                        \
-    opt[pos].litlen = (int)ll;                         \
-    opt[pos].price = (int)cost;                        \
+    LZ4HC_match_t match = { 0 , 0 };
+    const BYTE* matchPtr = NULL;
+    /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
+     * but this won't be the case here, as we define iLowLimit==ip,
+     * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
+    int const matchLength = LZ4HC_InsertAndGetWiderMatch(ctx, ip, ip, iHighLimit, minLen, &matchPtr, &ip, nbSearches);
+    if (matchLength <= minLen) return match;
+    match.len = matchLength;
+    match.off = (int)(ip-matchPtr);
+    return match;
 }
 
 
 static int LZ4HC_compress_optimal (
     LZ4HC_CCtx_internal* ctx,
     const char* const source,
-    char* dest,
+    char* dst,
     int inputSize,
-    int maxOutputSize,
+    int dstCapacity,
     limitedOutput_directive limit,
+    int const nbSearches,
     size_t sufficient_len,
-    const int fullUpdate
+    int const fullUpdate
     )
 {
-    LZ4HC_optimal_t opt[LZ4_OPT_NUM + 1];   /* this uses a bit too much stack memory to my taste ... */
-    LZ4HC_match_t matches[LZ4_OPT_NUM + 1];
+#define TRAILING_LITERALS 3
+    LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS];   /* this uses a bit too much stack memory to my taste ... */
 
     const BYTE* ip = (const BYTE*) source;
     const BYTE* anchor = ip;
     const BYTE* const iend = ip + inputSize;
     const BYTE* const mflimit = iend - MFLIMIT;
-    const BYTE* const matchlimit = (iend - LASTLITERALS);
-    BYTE* op = (BYTE*) dest;
-    BYTE* const oend = op + maxOutputSize;
+    const BYTE* const matchlimit = iend - LASTLITERALS;
+    BYTE* op = (BYTE*) dst;
+    BYTE* const oend = op + dstCapacity;
 
     /* init */
     DEBUGLOG(5, "LZ4HC_compress_optimal");
     if (sufficient_len >= LZ4_OPT_NUM) sufficient_len = LZ4_OPT_NUM-1;
-    ctx->end += inputSize;
-    ip++;
 
     /* Main Loop */
+    assert(ip - anchor < LZ4_MAX_INPUT_SIZE);
     while (ip < mflimit) {
-        size_t const llen = ip - anchor;
-        size_t last_pos = 0;
-        size_t match_num, cur, best_mlen, best_off;
-        memset(opt, 0, sizeof(LZ4HC_optimal_t));   /* memset only the first one */
+        int const llen = (int)(ip - anchor);
+        int best_mlen, best_off;
+        int cur, last_match_pos = 0;
 
-        match_num = LZ4HC_BinTree_GetAllMatches(ctx, ip, matchlimit, MINMATCH-1, matches, fullUpdate);
-        if (!match_num) { ip++; continue; }
+        LZ4HC_match_t const firstMatch = LZ4HC_FindLongerMatch(ctx, ip, matchlimit, MINMATCH-1, nbSearches);
+        if (firstMatch.len==0) { ip++; continue; }
 
-        if ((size_t)matches[match_num-1].len > sufficient_len) {
+        if ((size_t)firstMatch.len > sufficient_len) {
             /* good enough solution : immediate encoding */
-            best_mlen = matches[match_num-1].len;
-            best_off = matches[match_num-1].off;
-            cur = 0;
-            last_pos = 1;
-            goto encode;
+            int const firstML = firstMatch.len;
+            const BYTE* const matchPos = ip - firstMatch.off;
+            if ( LZ4HC_encodeSequence(&ip, &op, &anchor, firstML, matchPos, limit, oend) )   /* updates ip, op and anchor */
+                return 0;  /* error */
+            continue;
         }
 
-        /* set prices using matches at position = 0 */
-        {   size_t matchNb;
-            for (matchNb = 0; matchNb < match_num; matchNb++) {
-                size_t mlen = (matchNb>0) ? (size_t)matches[matchNb-1].len+1 : MINMATCH;
-                best_mlen = matches[matchNb].len;   /* necessarily < sufficient_len < LZ4_OPT_NUM */
-                for ( ; mlen <= best_mlen ; mlen++) {
-                    size_t const cost = LZ4HC_sequencePrice(llen, mlen) - LZ4HC_literalsPrice(llen);
-                    SET_PRICE(mlen, mlen, matches[matchNb].off, 0, cost);   /* updates last_pos and opt[pos] */
-        }   }   }
-
-        if (last_pos < MINMATCH) { ip++; continue; }   /* note : on clang at least, this test improves performance */
+        /* set prices for first positions (literals) */
+        {   int rPos;
+            for (rPos = 0 ; rPos < MINMATCH ; rPos++) {
+                int const cost = LZ4HC_literalsPrice(llen + rPos);
+                opt[rPos].mlen = 1;
+                opt[rPos].off = 0;
+                opt[rPos].litlen = llen + rPos;
+                opt[rPos].price = cost;
+                DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup",
+                            rPos, cost, opt[rPos].litlen);
+        }   }
+        /* set prices using initial match */
+        {   int mlen = MINMATCH;
+            int const matchML = firstMatch.len;   /* necessarily < sufficient_len < LZ4_OPT_NUM */
+            int const offset = firstMatch.off;
+            assert(matchML < LZ4_OPT_NUM);
+            for ( ; mlen <= matchML ; mlen++) {
+                int const cost = LZ4HC_sequencePrice(llen, mlen);
+                opt[mlen].mlen = mlen;
+                opt[mlen].off = offset;
+                opt[mlen].litlen = llen;
+                opt[mlen].price = cost;
+                DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i) -- initial setup",
+                            mlen, cost, mlen);
+        }   }
+        last_match_pos = firstMatch.len;
+        {   int addLit;
+            for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) {
+                opt[last_match_pos+addLit].mlen = 1;   /* literal */
+                opt[last_match_pos+addLit].off = 0;
+                opt[last_match_pos+addLit].litlen = addLit;
+                opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit);
+                DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup",
+                            last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit);
+        }   }
 
         /* check further positions */
-        opt[0].mlen = opt[1].mlen = 1;
-        for (cur = 1; cur <= last_pos; cur++) {
+        for (cur = 1; cur < last_match_pos; cur++) {
             const BYTE* const curPtr = ip + cur;
+            LZ4HC_match_t newMatch;
 
-            /* establish baseline price if cur is literal */
-            {   size_t price, litlen;
-                if (opt[cur-1].mlen == 1) {
-                    /* no match at previous position */
-                    litlen = opt[cur-1].litlen + 1;
-                    if (cur > litlen) {
-                        price = opt[cur - litlen].price + LZ4HC_literalsPrice(litlen);
-                    } else {
-                        price = LZ4HC_literalsPrice(llen + litlen) - LZ4HC_literalsPrice(llen);
-                    }
-                } else {
-                    litlen = 1;
-                    price = opt[cur - 1].price + LZ4HC_literalsPrice(1);
-                }
-
-                if (price < (size_t)opt[cur].price)
-                    SET_PRICE(cur, 1 /*mlen*/, 0 /*off*/, litlen, price);   /* note : increases last_pos */
+            if (curPtr >= mflimit) break;
+            DEBUGLOG(7, "rPos:%u[%u] vs [%u]%u",
+                        cur, opt[cur].price, opt[cur+1].price, cur+1);
+            if (fullUpdate) {
+                /* not useful to search here if next position has same (or lower) cost */
+                if ( (opt[cur+1].price <= opt[cur].price)
+                  /* in some cases, next position has same cost, but cost rises sharply after, so a small match would still be beneficial */
+                  && (opt[cur+MINMATCH].price < opt[cur].price + 3/*min seq price*/) )
+                    continue;
+            } else {
+                /* not useful to search here if next position has same (or lower) cost */
+                if (opt[cur+1].price <= opt[cur].price) continue;
             }
 
-            if (cur == last_pos || curPtr >= mflimit) break;
+            DEBUGLOG(7, "search at rPos:%u", cur);
+            if (fullUpdate)
+                newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, MINMATCH-1, nbSearches);
+            else
+                /* only test matches of minimum length; slightly faster, but misses a few bytes */
+                newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, last_match_pos - cur, nbSearches);
+            if (!newMatch.len) continue;
 
-            match_num = LZ4HC_BinTree_GetAllMatches(ctx, curPtr, matchlimit, MINMATCH-1, matches, fullUpdate);
-            if ((match_num > 0) && (size_t)matches[match_num-1].len > sufficient_len) {
+            if ( ((size_t)newMatch.len > sufficient_len)
+              || (newMatch.len + cur >= LZ4_OPT_NUM) ) {
                 /* immediate encoding */
-                best_mlen = matches[match_num-1].len;
-                best_off = matches[match_num-1].off;
-                last_pos = cur + 1;
+                best_mlen = newMatch.len;
+                best_off = newMatch.off;
+                last_match_pos = cur + 1;
                 goto encode;
             }
 
-            /* set prices using matches at position = cur */
-            {   size_t matchNb;
-                for (matchNb = 0; matchNb < match_num; matchNb++) {
-                    size_t ml = (matchNb>0) ? (size_t)matches[matchNb-1].len+1 : MINMATCH;
-                    best_mlen = (cur + matches[matchNb].len < LZ4_OPT_NUM) ?
-                                (size_t)matches[matchNb].len : LZ4_OPT_NUM - cur;
-
-                    for ( ; ml <= best_mlen ; ml++) {
-                        size_t ll, price;
-                        if (opt[cur].mlen == 1) {
-                            ll = opt[cur].litlen;
-                            if (cur > ll)
-                                price = opt[cur - ll].price + LZ4HC_sequencePrice(ll, ml);
-                            else
-                                price = LZ4HC_sequencePrice(llen + ll, ml) - LZ4HC_literalsPrice(llen);
-                        } else {
-                            ll = 0;
-                            price = opt[cur].price + LZ4HC_sequencePrice(0, ml);
-                        }
-
-                        if (cur + ml > last_pos || price < (size_t)opt[cur + ml].price) {
-                            SET_PRICE(cur + ml, ml, matches[matchNb].off, ll, price);
-            }   }   }   }
-        } /* for (cur = 1; cur <= last_pos; cur++) */
-
-        best_mlen = opt[last_pos].mlen;
-        best_off = opt[last_pos].off;
-        cur = last_pos - best_mlen;
+            /* before match : set price with literals at beginning */
+            {   int const baseLitlen = opt[cur].litlen;
+                int litlen;
+                for (litlen = 1; litlen < MINMATCH; litlen++) {
+                    int const price = opt[cur].price - LZ4HC_literalsPrice(baseLitlen) + LZ4HC_literalsPrice(baseLitlen+litlen);
+                    int const pos = cur + litlen;
+                    if (price < opt[pos].price) {
+                        opt[pos].mlen = 1;   /* literal */
+                        opt[pos].off = 0;
+                        opt[pos].litlen = baseLitlen+litlen;
+                        opt[pos].price = price;
+                        DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)",
+                                    pos, price, opt[pos].litlen);
+            }   }   }
+
+            /* set prices using match at position = cur */
+            {   int const matchML = newMatch.len;
+                int ml = MINMATCH;
+
+                assert(cur + newMatch.len < LZ4_OPT_NUM);
+                for ( ; ml <= matchML ; ml++) {
+                    int const pos = cur + ml;
+                    int const offset = newMatch.off;
+                    int price;
+                    int ll;
+                    DEBUGLOG(7, "testing price rPos %i (last_match_pos=%i)",
+                                pos, last_match_pos);
+                    if (opt[cur].mlen == 1) {
+                        ll = opt[cur].litlen;
+                        price = ((cur > ll) ? opt[cur - ll].price : 0)
+                              + LZ4HC_sequencePrice(ll, ml);
+                    } else {
+                        ll = 0;
+                        price = opt[cur].price + LZ4HC_sequencePrice(0, ml);
+                    }
+
+                    if (pos > last_match_pos+TRAILING_LITERALS || price <= opt[pos].price) {
+                        DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i)",
+                                    pos, price, ml);
+                        assert(pos < LZ4_OPT_NUM);
+                        if ( (ml == matchML)   /* last pos of last match */
+                          && (last_match_pos < pos) )
+                            last_match_pos = pos;
+                        opt[pos].mlen = ml;
+                        opt[pos].off = offset;
+                        opt[pos].litlen = ll;
+                        opt[pos].price = price;
+            }   }   }
+            /* complete following positions with literals */
+            {   int addLit;
+                for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) {
+                    opt[last_match_pos+addLit].mlen = 1;   /* literal */
+                    opt[last_match_pos+addLit].off = 0;
+                    opt[last_match_pos+addLit].litlen = addLit;
+                    opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit);
+                    DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit);
+            }   }
+        }  /* for (cur = 1; cur <= last_match_pos; cur++) */
+
+        best_mlen = opt[last_match_pos].mlen;
+        best_off = opt[last_match_pos].off;
+        cur = last_match_pos - best_mlen;
 
-encode: /* cur, last_pos, best_mlen, best_off must be set */
-        opt[0].mlen = 1;
-        while (1) {   /* from end to beginning */
-            size_t const ml = opt[cur].mlen;
-            int const offset = opt[cur].off;
-            opt[cur].mlen = (int)best_mlen;
-            opt[cur].off = (int)best_off;
-            best_mlen = ml;
-            best_off = offset;
-            if (ml > cur) break;   /* can this happen ? */
-            cur -= ml;
-        }
+encode: /* cur, last_match_pos, best_mlen, best_off must be set */
+        assert(cur < LZ4_OPT_NUM);
+        assert(last_match_pos >= 1);   /* == 1 when only one candidate */
+        DEBUGLOG(6, "reverse traversal, looking for shortest path")
+        DEBUGLOG(6, "last_match_pos = %i", last_match_pos);
+        {   int candidate_pos = cur;
+            int selected_matchLength = best_mlen;
+            int selected_offset = best_off;
+            while (1) {   /* from end to beginning */
+                int const next_matchLength = opt[candidate_pos].mlen;   /* can be 1, means literal */
+                int const next_offset = opt[candidate_pos].off;
+                DEBUGLOG(6, "pos %i: sequence length %i", candidate_pos, selected_matchLength);
+                opt[candidate_pos].mlen = selected_matchLength;
+                opt[candidate_pos].off = selected_offset;
+                selected_matchLength = next_matchLength;
+                selected_offset = next_offset;
+                if (next_matchLength > candidate_pos) break;   /* last match elected, first match to encode */
+                assert(next_matchLength > 0);   /* can be 1, means literal */
+                candidate_pos -= next_matchLength;
+        }   }
 
-        /* encode all recorded sequences */
-        cur = 0;
-        while (cur < last_pos) {
-            int const ml = opt[cur].mlen;
-            int const offset = opt[cur].off;
-            if (ml == 1) { ip++; cur++; continue; }
-            cur += ml;
-            if ( LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ip - offset, limit, oend) ) return 0;
-        }
+        /* encode all recorded sequences in order */
+        {   int rPos = 0;   /* relative position (to ip) */
+            while (rPos < last_match_pos) {
+                int const ml = opt[rPos].mlen;
+                int const offset = opt[rPos].off;
+                if (ml == 1) { ip++; rPos++; continue; }   /* literal; note: can end up with several literals, in which case, skip them */
+                rPos += ml;
+                assert(ml >= MINMATCH);
+                assert((offset >= 1) && (offset <= MAX_DISTANCE));
+                if ( LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ip - offset, limit, oend) )   /* updates ip, op and anchor */
+                    return 0;  /* error */
+        }   }
     }  /* while (ip < mflimit) */
 
     /* Encode Last Literals */
     {   int lastRun = (int)(iend - anchor);
-        if ((limit) && (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize)) return 0;   /* Check output limit */
-        if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
-        else *op++ = (BYTE)(lastRun<<ML_BITS);
+        if ( (limit)
+          && (((char*)op - dst) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)dstCapacity))
+            return 0;   /* Check output limit */
+        if (lastRun >= (int)RUN_MASK) {
+            *op++=(RUN_MASK<<ML_BITS);
+            lastRun-=RUN_MASK;
+            for (; lastRun > 254 ; lastRun-=255) *op++ = 255;
+            *op++ = (BYTE) lastRun;
+        } else *op++ = (BYTE)(lastRun<<ML_BITS);
         memcpy(op, anchor, iend - anchor);
         op += iend-anchor;
     }
 
     /* End */
-    return (int) ((char*)op-dest);
+    return (int) ((char*)op-dst);
 }
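`LZ4HC_literalsPrice()` and `LZ4HC_sequencePrice()` are the heart of the optimal parser: they return exact encoded sizes in bytes, so `opt[]` holds a true shortest-path cost. Re-derived as a standalone program (constants restated from lz4.c):

```c
#include <stdio.h>

#define RUN_MASK 15   /* 4-bit literal-length field in the token */
#define ML_MASK  15   /* 4-bit match-length field in the token   */
#define MINMATCH 4

static int literalsPrice(int litlen)
{
    int price = litlen;                          /* the literal bytes themselves */
    if (litlen >= RUN_MASK)
        price += 1 + (litlen - RUN_MASK) / 255;  /* extra length bytes */
    return price;
}

static int sequencePrice(int litlen, int mlen)   /* requires mlen >= MINMATCH */
{
    int price = 1 + 2;                           /* token + 16-bit offset */
    price += literalsPrice(litlen);
    if (mlen >= (ML_MASK + MINMATCH))
        price += 1 + (mlen - (ML_MASK + MINMATCH)) / 255;
    return price;
}

int main(void)
{
    printf("%d\n", sequencePrice(0, MINMATCH));  /* 3 : cheapest possible sequence */
    printf("%d\n", sequencePrice(16, 100));      /* 3 + (16+1) + 1 = 21 bytes      */
    return 0;
}
```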
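The `encode:` label performs a reverse traversal: each `opt[]` cell records the step that reached it, so walking from the end and rewriting each step's *start* cell flips the chain into forward order for the encoder. The same idea on a toy table (hypothetical `step_t`, not lz4 code):

```c
#include <stdio.h>

#define N 10

typedef struct { int mlen; } step_t;   /* mlen==1 means "literal step" */

int main(void)
{
    /* opt[pos].mlen = length of the best step *ending* at pos.
     * Suppose the DP chose: 0 --(3)--> 3 --(1)--> 4 --(6)--> 10 */
    step_t opt[N+1] = {{1},{1},{1},{3},{1},{1},{1},{1},{1},{1},{6}};
    int pos, selected;

    /* reverse traversal: rewrite each step's start cell to hold its length */
    selected = opt[N].mlen;
    pos = N - selected;                     /* start of the final step */
    while (1) {
        int const incoming = opt[pos].mlen; /* step that *ends* here */
        opt[pos].mlen = selected;           /* cell now points forward */
        selected = incoming;
        if (incoming > pos) break;          /* reached the very first step */
        pos -= incoming;
    }

    /* forward pass now reads the chain directly, like the encoder does */
    for (pos = 0; pos < N; pos += opt[pos].mlen)
        printf("step at %d, length %d\n", pos, opt[pos].mlen);
    return 0;   /* prints (0,3), (3,1), (4,6) */
}
```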
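The "Encode Last Literals" block writes an extended run length: 15 in the 4-bit token field, then as many 255-valued bytes as needed, then the remainder. Standalone sketch of just that length encoding (hypothetical `encodeLastRun`; the literal copy itself is omitted):

```c
#include <stdio.h>

#define RUN_MASK 15
#define ML_BITS  4

/* returns the number of header bytes written before the literals */
static unsigned encodeLastRun(unsigned char* op, int lastRun)
{
    unsigned char* const start = op;
    if (lastRun >= RUN_MASK) {
        *op++ = (RUN_MASK << ML_BITS);            /* token: literal field saturated */
        lastRun -= RUN_MASK;
        for (; lastRun > 254; lastRun -= 255) *op++ = 255;
        *op++ = (unsigned char)lastRun;           /* final remainder byte */
    } else {
        *op++ = (unsigned char)(lastRun << ML_BITS);
    }
    return (unsigned)(op - start);
}

int main(void)
{
    unsigned char buf[8];
    printf("%u\n", encodeLastRun(buf, 7));    /* 1 header byte             */
    printf("%u\n", encodeLastRun(buf, 300));  /* 15 + 255 + 30 -> 3 bytes  */
    return 0;
}
```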