Diffstat (limited to 'lib/lz4.c')
-rw-r--r-- | lib/lz4.c | 299
1 file changed, 203 insertions, 96 deletions
@@ -521,6 +521,18 @@ LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tab
     return LZ4_hash4(LZ4_read32(p), tableType);
 }
 
+static void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
+{
+    switch (tableType)
+    {
+    default: /* fallthrough */
+    case clearedTable: /* fallthrough */
+    case byPtr: { /* illegal! */ assert(0); return; }
+    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }
+    case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }
+    }
+}
+
 static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t const tableType, const BYTE* srcBase)
 {
     switch (tableType)
@@ -538,6 +550,20 @@ LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_
     LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
 }
 
+/* LZ4_getIndexOnHash() :
+ * Index of match position registered in hash table.
+ * hash position must be calculated by using base+index, or dictBase+index.
+ * Assumption 1 : only valid if tableType == byU32 or byU16.
+ * Assumption 2 : h is presumed valid (within limits of hash table)
+ */
+static U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
+{
+    LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
+    if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; assert(h < (1U << (LZ4_MEMORY_USAGE-2))); return hashTable[h]; }
+    if (tableType == byU16) { const U16* const hashTable = (const U16*) tableBase; assert(h < (1U << (LZ4_MEMORY_USAGE-1))); return hashTable[h]; }
+    assert(0); return 0;   /* forbidden case */
+}
+
 static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
 {
     if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
@@ -566,10 +592,12 @@ LZ4_FORCE_INLINE void LZ4_prepareTable(
           || tableType == byPtr
           || inputSize >= 4 KB)
         {
-            DEBUGLOG(4, "Resetting table in %p", cctx);
+            DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
             MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
             cctx->currentOffset = 0;
             cctx->tableType = clearedTable;
+        } else {
+            DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
         }
     }
@@ -578,6 +606,7 @@ LZ4_FORCE_INLINE void LZ4_prepareTable(
      * currentOffset == 0 is faster still, so we preserve that case.
      */
     if (cctx->currentOffset != 0 && tableType == byU32) {
+        DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
         cctx->currentOffset += 64 KB;
     }
@@ -603,8 +632,8 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
 {
     const BYTE* ip = (const BYTE*) source;
 
-    size_t currentOffset = cctx->currentOffset;
-    const BYTE* base = (const BYTE*) source - currentOffset;
+    U32 const startIndex = cctx->currentOffset;
+    const BYTE* base = (const BYTE*) source - startIndex;
     const BYTE* lowLimit;
 
     const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
@@ -612,8 +641,10 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
         dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
     const U32 dictSize =
         dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
+    const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0;   /* make indexes in dictCtx comparable with index in current context */
 
-    const BYTE* const lowRefLimit = (const BYTE*) source - dictSize;
+    int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
+    U32 const prefixIdxLimit = startIndex - dictSize;   /* used when dictDirective == dictSmall */
     const BYTE* const dictEnd = dictionary + dictSize;
     const BYTE* anchor = (const BYTE*) source;
     const BYTE* const iend = ip + inputSize;
@@ -623,21 +654,22 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
     /* the dictCtx currentOffset is indexed on the start of the dictionary,
      * while a dictionary in the current context precedes the currentOffset */
     const BYTE* dictBase = dictDirective == usingDictCtx ?
-                           (const BYTE*) source - dictCtx->currentOffset :
-                           (const BYTE*) source - dictSize - currentOffset;
-    const ptrdiff_t dictDelta = dictionary ? dictEnd - (const BYTE*) source : 0;
-    const BYTE* dictLowLimit;
+                           dictionary + dictSize - dictCtx->currentOffset :   /* is it possible that dictCtx->currentOffset != dictCtx->dictSize ? Yes if the dictionary context is not reset */
+                           dictionary + dictSize - startIndex;
 
     BYTE* op = (BYTE*) dest;
     BYTE* const olimit = op + maxOutputSize;
 
+    U32 offset = 0;
     U32 forwardH;
 
+    DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, tableType=%u", inputSize, tableType);
     /* Init conditions */
     if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0;   /* Unsupported inputSize, too large (or negative) */
+    if (tableType==byPtr) assert(dictDirective==noDict);      /* only supported use case with byPtr */
+    assert(acceleration >= 1);
 
     lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);
-    dictLowLimit = dictionary ? dictionary : lowLimit;
     if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0;   /* Size too large (not within 64K limit) */
@@ -661,12 +693,12 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
 
     /* Main Loop */
     for ( ; ; ) {
-        ptrdiff_t refDelta = 0;
         const BYTE* match;
         BYTE* token;
 
         /* Find a match */
-        {   const BYTE* forwardIp = ip;
+        if (tableType == byPtr) {
+            const BYTE* forwardIp = ip;
             unsigned step = 1;
             unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
             do {
@@ -679,34 +711,71 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
                 assert(ip < mflimitPlusOne);
 
                 match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
+                forwardH = LZ4_hashPosition(forwardIp, tableType);
+                LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
+
+            } while ( (match+MAX_DISTANCE < ip)
+                   || (LZ4_read32(match) != LZ4_read32(ip)) );
+
+        } else {   /* byU32, byU16 */
+
+            const BYTE* forwardIp = ip;
+            unsigned step = 1;
+            unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
+            do {
+                U32 const h = forwardH;
+                U32 const current = (U32)(forwardIp - base);
+                U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
+                assert(matchIndex <= current);
+                assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));
+                ip = forwardIp;
+                forwardIp += step;
+                step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+                if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
+                assert(ip < mflimitPlusOne);
+
                 if (dictDirective == usingDictCtx) {
-                    if (match < (const BYTE*)source) {
+                    if (matchIndex < startIndex) {
                         /* there was no match, try the dictionary */
-                        match = LZ4_getPosition(ip, dictCtx->hashTable, byU32, dictBase);
-                        refDelta = dictDelta;
-                        lowLimit = dictLowLimit;
+                        assert(tableType == byU32);
+                        matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
+                        match = dictBase + matchIndex;
+                        matchIndex += dictDelta;   /* make dictCtx index comparable with current context */
+                        lowLimit = dictionary;
                     } else {
-                        refDelta = 0;
+                        match = base + matchIndex;
                         lowLimit = (const BYTE*)source;
                     }
                 } else if (dictDirective==usingExtDict) {
-                    if (match < (const BYTE*)source) {
-                        refDelta = dictDelta;
-                        lowLimit = dictLowLimit;
+                    if (matchIndex < startIndex) {
+                        DEBUGLOG(7, "extDict candidate: matchIndex=%5u < startIndex=%5u", matchIndex, startIndex);
+                        match = dictBase + matchIndex;
+                        lowLimit = dictionary;
                     } else {
-                        refDelta = 0;
+                        match = base + matchIndex;
                         lowLimit = (const BYTE*)source;
-                }   }
+                    }
+                } else {   /* single continuous memory segment */
+                    match = base + matchIndex;
+                }
 
                 forwardH = LZ4_hashPosition(forwardIp, tableType);
-                LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
+                LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
+
+                if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) continue;    /* match outside of valid area */
+                if ((tableType != byU16) && (current - matchIndex > MAX_DISTANCE)) continue;   /* too far - note: works even if matchIndex overflows */
+                if (tableType == byU16) assert((current - matchIndex) <= MAX_DISTANCE);   /* too_far presumed impossible with byU16 */
+
+                if (LZ4_read32(match) == LZ4_read32(ip)) {
+                    if (maybe_extMem) offset = current - matchIndex;
+                    break;   /* match found */
+                }
 
-            } while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0)
-                || ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
-                || (LZ4_read32(match+refDelta) != LZ4_read32(ip)) );
+            } while(1);
         }
 
         /* Catch up */
-        while (((ip>anchor) & (match+refDelta > lowLimit)) && (unlikely(ip[-1]==match[refDelta-1]))) { ip--; match--; }
+        while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
 
         /* Encode Literals */
         {   unsigned const litLength = (unsigned)(ip - anchor);
@@ -725,30 +794,49 @@
             /* Copy Literals */
             LZ4_wildCopy(op, anchor, op+litLength);
             op+=litLength;
+            DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i", (int)(anchor-(const BYTE*)source), litLength, (int)(ip-(const BYTE*)source));
         }
 
 _next_match:
+        /* at this stage, the following variables must be correctly set :
+         * - ip : at start of LZ operation
+         * - match : at start of previous pattern occurence; can be within current prefix, or within extDict
+         * - offset : if maybe_ext_memSegment==1 (constant)
+         * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
+         * - token and *token : position to write 4-bits for match length; higher 4-bits for literal length supposed already written
+         */
+
         /* Encode Offset */
-        LZ4_writeLE16(op, (U16)(ip-match)); op+=2;
+        if (maybe_extMem) {   /* static test */
+            DEBUGLOG(6, " with offset=%u (ext if > %i)", offset, (int)(ip - (const BYTE*)source));
+            assert(offset <= MAX_DISTANCE && offset > 0);
+            LZ4_writeLE16(op, (U16)offset); op+=2;
+        } else {
+            DEBUGLOG(6, " with offset=%u (same segment)", (U32)(ip - match));
+            assert(ip-match <= MAX_DISTANCE);
+            LZ4_writeLE16(op, (U16)(ip - match)); op+=2;
+        }
 
         /* Encode MatchLength */
         {   unsigned matchCode;
 
-            if ((dictDirective==usingExtDict || dictDirective==usingDictCtx) && lowLimit==dictionary) {
-                const BYTE* limit;
-                match += refDelta;
-                limit = ip + (dictEnd-match);
+            if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)
+              && (lowLimit==dictionary) /* match within extDict */ ) {
+                const BYTE* limit = ip + (dictEnd-match);
+                assert(dictEnd > match);
                 if (limit > matchlimit) limit = matchlimit;
                 matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
                 ip += MINMATCH + matchCode;
                 if (ip==limit) {
-                    unsigned const more = LZ4_count(ip, (const BYTE*)source, matchlimit);
+                    unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);
                     matchCode += more;
                     ip += more;
                 }
+                DEBUGLOG(6, " with matchLength=%u starting in extDict", matchCode+MINMATCH);
             } else {
                 matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
                 ip += MINMATCH + matchCode;
+                DEBUGLOG(6, " with matchLength=%u", matchCode+MINMATCH);
             }
 
             if ( outputLimited &&  /* Check output buffer overflow */
@@ -778,33 +866,57 @@ _next_match:
         LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);
 
         /* Test next position */
-        match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
-        if (dictDirective == usingDictCtx) {
-            if (match < (const BYTE*)source) {
-                /* there was no match, try the dictionary */
-                match = LZ4_getPosition(ip, dictCtx->hashTable, byU32, dictBase);
-                refDelta = dictDelta;
-                lowLimit = dictLowLimit;
-            } else {
-                refDelta = 0;
-                lowLimit = (const BYTE*)source;
+        if (tableType == byPtr) {
+
+            match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
+            LZ4_putPosition(ip, cctx->hashTable, tableType, base);
+            if ( (match+MAX_DISTANCE >= ip)
+              && (LZ4_read32(match) == LZ4_read32(ip)) )
+            { token=op++; *token=0; goto _next_match; }
+
+        } else {   /* byU32, byU16 */
+
+            U32 const h = LZ4_hashPosition(ip, tableType);
+            U32 const current = (U32)(ip-base);
+            U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
+            assert(matchIndex < current);
+            if (dictDirective == usingDictCtx) {
+                if (matchIndex < startIndex) {
+                    /* there was no match, try the dictionary */
+                    matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
+                    match = dictBase + matchIndex;
+                    lowLimit = dictionary;   /* required for match length counter */
+                    matchIndex += dictDelta;
+                } else {
+                    match = base + matchIndex;
+                    lowLimit = (const BYTE*)source;   /* required for match length counter */
+                }
+            } else if (dictDirective==usingExtDict) {
+                if (matchIndex < startIndex) {
+                    match = dictBase + matchIndex;
+                    lowLimit = dictionary;   /* required for match length counter */
+                } else {
+                    match = base + matchIndex;
+                    lowLimit = (const BYTE*)source;   /* required for match length counter */
+                }
+            } else {   /* single memory segment */
+                match = base + matchIndex;
             }
-        } else if (dictDirective==usingExtDict) {
-            if (match < (const BYTE*)source) {
-                refDelta = dictDelta;
-                lowLimit = dictLowLimit;
-            } else {
-                refDelta = 0;
-                lowLimit = (const BYTE*)source;
-        }   }
-        LZ4_putPosition(ip, cctx->hashTable, tableType, base);
-        if ( ((dictIssue==dictSmall) ? (match>=lowRefLimit) : 1)
-            && (match+MAX_DISTANCE>=ip)
-            && (LZ4_read32(match+refDelta)==LZ4_read32(ip)) )
-        { token=op++; *token=0; goto _next_match; }
+            LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
+            if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1)
+              && ((tableType==byU16) ? 1 : (current - matchIndex <= MAX_DISTANCE))
+              && (LZ4_read32(match) == LZ4_read32(ip)) ) {
+                token=op++;
+                *token=0;
+                if (maybe_extMem) offset = current - matchIndex;
+                DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i", (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));
+                goto _next_match;
+            }
+        }
 
         /* Prepare next loop */
         forwardH = LZ4_hashPosition(++ip, tableType);
+    }
 
 _last_literals:
@@ -838,14 +950,14 @@ int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int
         if (inputSize < LZ4_64Klimit) {
             return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
         } else {
-            const tableType_t tableType = (sizeof(void*)==8) ? byU32 : byPtr;
+            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > MAX_DISTANCE)) ? byPtr : byU32;
             return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
         }
     } else {
         if (inputSize < LZ4_64Klimit) {;
             return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
         } else {
-            const tableType_t tableType = (sizeof(void*)==8) ? byU32 : byPtr;
+            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > MAX_DISTANCE)) ? byPtr : byU32;
            return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
         }
     }
@@ -860,38 +972,38 @@ int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int
  * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
  * "correctly initialized").
  */
-int LZ4_compress_fast_extState_fastReset(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
+int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
 {
     LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
     if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
 
-    if (maxOutputSize >= LZ4_compressBound(inputSize)) {
-        if (inputSize < LZ4_64Klimit) {
+    if (dstCapacity >= LZ4_compressBound(srcSize)) {
+        if (srcSize < LZ4_64Klimit) {
             const tableType_t tableType = byU16;
-            LZ4_prepareTable(ctx, inputSize, tableType);
+            LZ4_prepareTable(ctx, srcSize, tableType);
             if (ctx->currentOffset) {
-                return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, tableType, noDict, dictSmall, acceleration);
+                return LZ4_compress_generic(ctx, src, dst, srcSize, 0, notLimited, tableType, noDict, dictSmall, acceleration);
            } else {
-                return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+                return LZ4_compress_generic(ctx, src, dst, srcSize, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
            }
         } else {
-            const tableType_t tableType = (sizeof(void*)==8) ? byU32 : byPtr;
-            LZ4_prepareTable(ctx, inputSize, tableType);
-            return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > MAX_DISTANCE)) ? byPtr : byU32;
+            LZ4_prepareTable(ctx, srcSize, tableType);
+            return LZ4_compress_generic(ctx, src, dst, srcSize, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
        }
     } else {
-        if (inputSize < LZ4_64Klimit) {
+        if (srcSize < LZ4_64Klimit) {
             const tableType_t tableType = byU16;
-            LZ4_prepareTable(ctx, inputSize, tableType);
+            LZ4_prepareTable(ctx, srcSize, tableType);
            if (ctx->currentOffset) {
-                return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, noDict, dictSmall, acceleration);
+                return LZ4_compress_generic(ctx, src, dst, srcSize, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration);
            } else {
-                return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+                return LZ4_compress_generic(ctx, src, dst, srcSize, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
            }
         } else {
-            const tableType_t tableType = (sizeof(void*)==8) ? byU32 : byPtr;
-            LZ4_prepareTable(ctx, inputSize, tableType);
-            return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > MAX_DISTANCE)) ? byPtr : byU32;
+            LZ4_prepareTable(ctx, srcSize, tableType);
+            return LZ4_compress_generic(ctx, src, dst, srcSize, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
        }
     }
 }
@@ -1103,11 +1215,12 @@ static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src,
     if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) {  /* compression success is guaranteed */
         return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
     } else {
-        if (*srcSizePtr < LZ4_64Klimit)
+        if (*srcSizePtr < LZ4_64Klimit) {
             return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, byU16);
-        else
-            return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, sizeof(void*)==8 ? byU32 : byPtr);
-    }
+        } else {
+            tableType_t const tableType = ((sizeof(void*)==4) && ((uptrval)src > MAX_DISTANCE)) ? byPtr : byU32;
+            return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, tableType);
+        }
     }
 }
@@ -1147,7 +1260,7 @@ LZ4_stream_t* LZ4_createStream(void)
 
 void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
 {
-    DEBUGLOG(5, "LZ4_resetStream %p", LZ4_stream);
+    DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
     MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
 }
@@ -1173,7 +1286,7 @@ int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
     const BYTE* const dictEnd = p + dictSize;
     const BYTE* base;
 
-    DEBUGLOG(4, "LZ4_loadDict %p", LZ4_dict);
+    DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);
 
     LZ4_prepareTable(dict, 0, tableType);
@@ -1220,15 +1333,14 @@ void LZ4_attach_dictionary(LZ4_stream_t *working_stream, const LZ4_stream_t *dic
 }
 
 
-static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
+static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
 {
-    if ((LZ4_dict->currentOffset > 0x80000000) ||
-        ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) {   /* address space overflow */
+    if (LZ4_dict->currentOffset + nextSize > 0x80000000) {   /* potential ptrdiff_t overflow (32-bits mode) */
         /* rescale hash table */
         U32 const delta = LZ4_dict->currentOffset - 64 KB;
         const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
         int i;
-        DEBUGLOG(4, "LZ4_renormDictT %p", LZ4_dict);
+        DEBUGLOG(4, "LZ4_renormDictT");
         for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
             if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
             else LZ4_dict->hashTable[i] -= delta;
@@ -1246,10 +1358,8 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, ch
     LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
     const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
 
-    const BYTE* smallest = (const BYTE*) source;
     if (streamPtr->initCheck) return 0;   /* Uninitialized structure detected */
-    if ((streamPtr->dictSize>0) && (smallest>dictEnd)) smallest = dictEnd;
-    LZ4_renormDictT(streamPtr, smallest);
+    LZ4_renormDictT(streamPtr, inputSize);   /* avoid index overflow */
     if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
 
     /* Check overlapping input/dictionary space */
@@ -1282,7 +1392,7 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, ch
         if (inputSize > 4 KB) {
             /* For compressing large blobs, it is faster to pay the setup
              * cost to copy the dictionary's tables into the active context,
-             * so that the compression loop is only looking in one table.
+             * so that the compression loop is only looking into one table.
              */
             memcpy(streamPtr, streamPtr->dictCtx, sizeof(LZ4_stream_t));
             result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
@@ -1303,25 +1413,22 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, ch
 }
 
 
-/* Hidden debug function, to force external dictionary mode */
-int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int inputSize)
+/* Hidden debug function, to force-test external dictionary mode */
+int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)
 {
     LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
     int result;
-    const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
 
-    const BYTE* smallest = dictEnd;
-    if (smallest > (const BYTE*) source) smallest = (const BYTE*) source;
-    LZ4_renormDictT(streamPtr, smallest);
+    LZ4_renormDictT(streamPtr, srcSize);
 
     if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
-        result = LZ4_compress_generic(streamPtr, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, dictSmall, 1);
+        result = LZ4_compress_generic(streamPtr, source, dest, srcSize, 0, notLimited, byU32, usingExtDict, dictSmall, 1);
     } else {
-        result = LZ4_compress_generic(streamPtr, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
+        result = LZ4_compress_generic(streamPtr, source, dest, srcSize, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
     }
 
     streamPtr->dictionary = (const BYTE*)source;
-    streamPtr->dictSize = (U32)inputSize;
+    streamPtr->dictSize = (U32)srcSize;
 
     return result;
 }
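
Note on the refactoring above: the hash table no longer stores match positions as pointers (byPtr) but as U32 indexes relative to base = source - startIndex. An index keeps its meaning when the next block lives at a different address, which is what lets LZ4_prepareTable() re-use a table across calls and lets compression borrow a dictCtx table by shifting its candidates with dictDelta. The standalone sketch below illustrates the put/get-index pattern in isolation; it is a toy model written for this page, not lz4 code, and every toy_* name is invented for the illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_HASH_LOG     12
#define TOY_MAX_DISTANCE 65535

static uint32_t toy_table[1 << TOY_HASH_LOG];   /* zero == "no entry", like clearedTable */

static uint32_t toy_hash(uint32_t v) { return (v * 2654435761U) >> (32 - TOY_HASH_LOG); }

/* mirrors LZ4_putIndexOnHash() / LZ4_getIndexOnHash() restricted to byU32 */
static void     toy_put(uint32_t idx, uint32_t h) { toy_table[h] = idx; }
static uint32_t toy_get(uint32_t h)               { return toy_table[h]; }

int main(void)
{
    const char block[] = "abcdabcdabcd";
    uint32_t const startIndex = 64 * 1024;        /* pretend 64 KB were indexed before this block */
    const char* const base = block - startIndex;  /* same trick as `source - startIndex` in the patch
                                                   * (formally out-of-bounds arithmetic, tolerated here
                                                   * exactly as lz4 tolerates it) */
    uint32_t i;
    for (i = 0; i + 4 <= sizeof(block) - 1; i++) {
        uint32_t v; memcpy(&v, block + i, 4);
        uint32_t const h = toy_hash(v);
        uint32_t const current = (uint32_t)((block + i) - base);   /* ip - base == index */
        uint32_t const matchIndex = toy_get(h);
        if (matchIndex >= startIndex && current - matchIndex <= TOY_MAX_DISTANCE) {
            uint32_t w; memcpy(&w, base + matchIndex, 4);          /* candidate is inside this block */
            if (w == v)
                printf("match at index %u, offset %u\n", current, current - matchIndex);
        }
        toy_put(current, h);   /* an index stays valid across blocks; a pointer would not */
    }
    return 0;
}

Running it prints a match at each repetition of "abcd", with the distance computed purely from index arithmetic (current - matchIndex), which is the quantity the patch stores in `offset` whenever maybe_extMem is set.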
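The renormalization change follows the same logic: LZ4_renormDictT() no longer compares currentOffset against source addresses, it fires once currentOffset + nextSize risks crossing 2^31, then shifts every stored index down so that subsequent index arithmetic cannot overflow. A hypothetical standalone rendering of that rescale is sketched below; the reset of currentOffset to 64 KB is an assumption taken from the surrounding lz4 code, not shown in the hunk above:

#include <stdint.h>

#define KB *(1u << 10)            /* lz4-style unit macro */
#define TABLE_ENTRIES (1 << 12)   /* assumed table size for this sketch */

static void renorm(uint32_t* hashTable, uint32_t* currentOffset)
{
    uint32_t const delta = *currentOffset - (64 KB);   /* keep the last 64 KB of history */
    int i;
    for (i = 0; i < TABLE_ENTRIES; i++) {
        /* entries older than the kept window become "empty" (0);
         * every other index is shifted down by delta */
        hashTable[i] = (hashTable[i] < delta) ? 0 : hashTable[i] - delta;
    }
    *currentOffset = 64 KB;   /* new indexes resume just above the kept window */
}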