| author | Daniel Pfeifer <daniel@pfeifer-mail.de> | 2014-07-13 20:21:58 (GMT) |
|---|---|---|
| committer | Brad King <brad.king@kitware.com> | 2014-07-29 12:44:36 (GMT) |
| commit | 7a92eddbcb2b2e6419062538e346908f0e502586 (patch) | |
| tree | b1985ad4a8904211f12cb07742a9d776fa28152f /Utilities | |
| parent | b2a07ca49c66665f5b51b592f44ecc4f66c7556b (diff) | |
| download | CMake-7a92eddbcb2b2e6419062538e346908f0e502586.zip CMake-7a92eddbcb2b2e6419062538e346908f0e502586.tar.gz CMake-7a92eddbcb2b2e6419062538e346908f0e502586.tar.bz2 | |
liblzma: Port from C99 to C89/90
Remove use of designated initializers and declarations of variables
after statements. Leave "//" comments as-is for now.
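Both changes are mechanical. As a rough illustration (the `struct point` / `sum_xy` code below is a made-up sketch, not code from liblzma or from this patch), the same rewrite applied to a small function looks like this:

```c
#include <stdint.h>

struct point { uint32_t x; uint32_t y; };

uint32_t
sum_xy(uint32_t n)
{
	/* C89/90 requires every declaration to come before the first
	 * statement in the block, so counters and temporaries are hoisted
	 * to the top instead of being declared where they are first used. */
	uint32_t total = 0;
	uint32_t i;

	/* The C99 form would be:  struct point p = { .x = 1, .y = 2 };
	 * C89/90 only allows positional initializers in member order. */
	struct point p = { 1, 2 };

	for (i = 0; i < n; ++i)   /* C99 would also allow: for (uint32_t i = 0; ...) */
		total += p.x + p.y;

	return total;
}
```

Designated initializers and mixed declarations and statements are C99 features; hoisting the declarations and switching to positional initializers keeps the behavior identical while letting the bundled sources compile as C89/90.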
Diffstat (limited to 'Utilities')
61 files changed, 1080 insertions, 700 deletions
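Beyond those two patterns, the diff below also reworks the helper macros in `common.h`: the variadic `lzma_next_strm_init` macro becomes the fixed-arity `lzma_next_strm_init1/2/3`, since `__VA_ARGS__` is likewise a C99 feature, and the compound-literal `LZMA_NEXT_CODER_INIT` macro becomes a `static const` object. A minimal, self-contained sketch of the fixed-arity idea (the `next_initN` and `set2` names are simplified placeholders, not liblzma's real macros):

```c
#include <stdio.h>

/* C99 version (for comparison only):
 *   #define next_init(func, obj, ...)  do { func((obj), __VA_ARGS__); } while (0)
 * C89/90 has no variadic macros, so one macro is provided per argument count. */
#define next_init1(func, obj, a)        do { func((obj), (a)); } while (0)
#define next_init2(func, obj, a, b)     do { func((obj), (a), (b)); } while (0)
#define next_init3(func, obj, a, b, c)  do { func((obj), (a), (b), (c)); } while (0)

static void set2(int *obj, int a, int b) { obj[0] = a; obj[1] = b; }

int main(void)
{
	int coder[2];

	next_init2(set2, coder, 7, 9);   /* expands to set2((coder), (7), (9)); */
	printf("%d %d\n", coder[0], coder[1]);
	return 0;
}
```

The fixed-arity macros trade a little duplication for portability to pre-C99 compilers; each call site simply picks the variant matching its argument count, which is what the `lzma_alone_decoder`, `lzma_raw_encoder`, and similar wrappers do in the hunks below.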
diff --git a/Utilities/cmliblzma/liblzma/check/check.c b/Utilities/cmliblzma/liblzma/check/check.c index 428ddae..979b0a8 100644 --- a/Utilities/cmliblzma/liblzma/check/check.c +++ b/Utilities/cmliblzma/liblzma/check/check.c @@ -16,9 +16,6 @@ extern LZMA_API(lzma_bool) lzma_check_is_supported(lzma_check type) { - if ((unsigned int)(type) > LZMA_CHECK_ID_MAX) - return false; - static const lzma_bool available_checks[LZMA_CHECK_ID_MAX + 1] = { true, // LZMA_CHECK_NONE @@ -56,6 +53,9 @@ lzma_check_is_supported(lzma_check type) false, // Reserved }; + if ((unsigned int)(type) > LZMA_CHECK_ID_MAX) + return false; + return available_checks[(unsigned int)(type)]; } @@ -63,9 +63,6 @@ lzma_check_is_supported(lzma_check type) extern LZMA_API(uint32_t) lzma_check_size(lzma_check type) { - if ((unsigned int)(type) > LZMA_CHECK_ID_MAX) - return UINT32_MAX; - // See file-format.txt section 2.1.1.2. static const uint8_t check_sizes[LZMA_CHECK_ID_MAX + 1] = { 0, @@ -76,6 +73,9 @@ lzma_check_size(lzma_check type) 64, 64, 64 }; + if ((unsigned int)(type) > LZMA_CHECK_ID_MAX) + return UINT32_MAX; + return check_sizes[(unsigned int)(type)]; } diff --git a/Utilities/cmliblzma/liblzma/check/crc32_fast.c b/Utilities/cmliblzma/liblzma/check/crc32_fast.c index 94da855..13f65b4 100644 --- a/Utilities/cmliblzma/liblzma/check/crc32_fast.c +++ b/Utilities/cmliblzma/liblzma/check/crc32_fast.c @@ -33,6 +33,8 @@ lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc) #endif if (size > 8) { + const uint8_t * limit; + // Fix the alignment, if needed. The if statement above // ensures that this won't read past the end of buf[]. while ((uintptr_t)(buf) & 7) { @@ -41,7 +43,7 @@ lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc) } // Calculate the position where to stop. - const uint8_t *const limit = buf + (size & ~(size_t)(7)); + limit = buf + (size & ~(size_t)(7)); // Calculate how many bytes must be calculated separately // before returning the result. @@ -49,6 +51,8 @@ lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc) // Calculate the CRC32 using the slice-by-eight algorithm. 
while (buf < limit) { + uint32_t tmp; + crc ^= *(const uint32_t *)(buf); buf += 4; @@ -57,7 +61,7 @@ lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc) ^ lzma_crc32_table[5][C(crc)] ^ lzma_crc32_table[4][D(crc)]; - const uint32_t tmp = *(const uint32_t *)(buf); + tmp = *(const uint32_t *)(buf); buf += 4; // At least with some compilers, it is critical for diff --git a/Utilities/cmliblzma/liblzma/check/crc64_fast.c b/Utilities/cmliblzma/liblzma/check/crc64_fast.c index 52af29e..1436557 100644 --- a/Utilities/cmliblzma/liblzma/check/crc64_fast.c +++ b/Utilities/cmliblzma/liblzma/check/crc64_fast.c @@ -36,12 +36,14 @@ lzma_crc64(const uint8_t *buf, size_t size, uint64_t crc) #endif if (size > 4) { + const uint8_t *limit; + while ((uintptr_t)(buf) & 3) { crc = lzma_crc64_table[0][*buf++ ^ A1(crc)] ^ S8(crc); --size; } - const uint8_t *const limit = buf + (size & ~(size_t)(3)); + limit = buf + (size & ~(size_t)(3)); size &= (size_t)(3); while (buf < limit) { diff --git a/Utilities/cmliblzma/liblzma/check/sha256.c b/Utilities/cmliblzma/liblzma/check/sha256.c index 23bda92..b09ccbf 100644 --- a/Utilities/cmliblzma/liblzma/check/sha256.c +++ b/Utilities/cmliblzma/liblzma/check/sha256.c @@ -80,16 +80,21 @@ static const uint32_t SHA256_K[64] = { static void +#ifndef _MSC_VER transform(uint32_t state[static 8], const uint32_t data[static 16]) +#else +transform(uint32_t state[], const uint32_t data[]) +#endif { uint32_t W[16]; uint32_t T[8]; + unsigned int j; // Copy state[] to working vars. memcpy(T, state, sizeof(T)); // 64 operations, partially loop unrolled - for (unsigned int j = 0; j < 64; j += 16) { + for (j = 0; j < 64; j += 16) { R( 0); R( 1); R( 2); R( 3); R( 4); R( 5); R( 6); R( 7); R( 8); R( 9); R(10); R(11); @@ -116,8 +121,9 @@ process(lzma_check_state *check) #else uint32_t data[16]; + size_t i; - for (size_t i = 0; i < 16; ++i) + for (i = 0; i < 16; ++i) data[i] = bswap32(check->buffer.u32[i]); transform(check->state.sha256.state, data); @@ -172,6 +178,8 @@ lzma_sha256_update(const uint8_t *buf, size_t size, lzma_check_state *check) extern void lzma_sha256_finish(lzma_check_state *check) { + size_t i; + // Add padding as described in RFC 3174 (it describes SHA-1 but // the same padding style is used for SHA-256 too). 
size_t pos = check->state.sha256.size & 0x3F; @@ -193,7 +201,7 @@ lzma_sha256_finish(lzma_check_state *check) process(check); - for (size_t i = 0; i < 8; ++i) + for (i = 0; i < 8; ++i) check->buffer.u32[i] = conv32be(check->state.sha256.state[i]); return; diff --git a/Utilities/cmliblzma/liblzma/common/alone_decoder.c b/Utilities/cmliblzma/liblzma/common/alone_decoder.c index c25112e..a20cf49 100644 --- a/Utilities/cmliblzma/liblzma/common/alone_decoder.c +++ b/Utilities/cmliblzma/liblzma/common/alone_decoder.c @@ -126,19 +126,17 @@ alone_decode(lzma_coder *coder, // Fall through case SEQ_CODER_INIT: { - if (coder->memusage > coder->memlimit) - return LZMA_MEMLIMIT_ERROR; + lzma_ret ret; lzma_filter_info filters[2] = { - { - .init = &lzma_lzma_decoder_init, - .options = &coder->options, - }, { - .init = NULL, - } + { 0, &lzma_lzma_decoder_init, &coder->options }, + { 0, NULL, NULL } }; - const lzma_ret ret = lzma_next_filter_init(&coder->next, + if (coder->memusage > coder->memlimit) + return LZMA_MEMLIMIT_ERROR; + + ret = lzma_next_filter_init(&coder->next, allocator, filters); if (ret != LZMA_OK) return ret; @@ -229,7 +227,7 @@ lzma_alone_decoder_init(lzma_next_coder *next, lzma_allocator *allocator, extern LZMA_API(lzma_ret) lzma_alone_decoder(lzma_stream *strm, uint64_t memlimit) { - lzma_next_strm_init(lzma_alone_decoder_init, strm, memlimit, false); + lzma_next_strm_init2(lzma_alone_decoder_init, strm, memlimit, false); strm->internal->supported_actions[LZMA_RUN] = true; strm->internal->supported_actions[LZMA_FINISH] = true; diff --git a/Utilities/cmliblzma/liblzma/common/alone_encoder.c b/Utilities/cmliblzma/liblzma/common/alone_encoder.c index eb1697e..62df126 100644 --- a/Utilities/cmliblzma/liblzma/common/alone_encoder.c +++ b/Utilities/cmliblzma/liblzma/common/alone_encoder.c @@ -78,6 +78,14 @@ static lzma_ret alone_encoder_init(lzma_next_coder *next, lzma_allocator *allocator, const lzma_options_lzma *options) { + uint32_t d; + + // Initialize the LZMA encoder. + const lzma_filter_info filters[2] = { + { 0, &lzma_lzma_encoder_init, (void *)(options) }, + { 0, NULL, NULL } + }; + lzma_next_coder_init(&alone_encoder_init, next, allocator); if (next->coder == NULL) { @@ -107,7 +115,7 @@ alone_encoder_init(lzma_next_coder *next, lzma_allocator *allocator, // one is the next unless it is UINT32_MAX. While the header would // allow any 32-bit integer, we do this to keep the decoder of liblzma // accepting the resulting files. - uint32_t d = options->dict_size - 1; + d = options->dict_size - 1; d |= d >> 2; d |= d >> 3; d |= d >> 4; @@ -121,16 +129,6 @@ alone_encoder_init(lzma_next_coder *next, lzma_allocator *allocator, // - Uncompressed size (always unknown and using EOPM) memset(next->coder->header + 1 + 4, 0xFF, 8); - // Initialize the LZMA encoder. 
- const lzma_filter_info filters[2] = { - { - .init = &lzma_lzma_encoder_init, - .options = (void *)(options), - }, { - .init = NULL, - } - }; - return lzma_next_filter_init(&next->coder->next, allocator, filters); } @@ -148,7 +146,7 @@ lzma_alone_encoder_init(lzma_next_coder *next, lzma_allocator *allocator, extern LZMA_API(lzma_ret) lzma_alone_encoder(lzma_stream *strm, const lzma_options_lzma *options) { - lzma_next_strm_init(alone_encoder_init, strm, options); + lzma_next_strm_init1(alone_encoder_init, strm, options); strm->internal->supported_actions[LZMA_RUN] = true; strm->internal->supported_actions[LZMA_FINISH] = true; diff --git a/Utilities/cmliblzma/liblzma/common/auto_decoder.c b/Utilities/cmliblzma/liblzma/common/auto_decoder.c index 35c895f..6f3c862 100644 --- a/Utilities/cmliblzma/liblzma/common/auto_decoder.c +++ b/Utilities/cmliblzma/liblzma/common/auto_decoder.c @@ -177,7 +177,7 @@ auto_decoder_init(lzma_next_coder *next, lzma_allocator *allocator, extern LZMA_API(lzma_ret) lzma_auto_decoder(lzma_stream *strm, uint64_t memlimit, uint32_t flags) { - lzma_next_strm_init(auto_decoder_init, strm, memlimit, flags); + lzma_next_strm_init2(auto_decoder_init, strm, memlimit, flags); strm->internal->supported_actions[LZMA_RUN] = true; strm->internal->supported_actions[LZMA_FINISH] = true; diff --git a/Utilities/cmliblzma/liblzma/common/block_buffer_decoder.c b/Utilities/cmliblzma/liblzma/common/block_buffer_decoder.c index ff27a11..b4bd388 100644 --- a/Utilities/cmliblzma/liblzma/common/block_buffer_decoder.c +++ b/Utilities/cmliblzma/liblzma/common/block_buffer_decoder.c @@ -18,6 +18,9 @@ lzma_block_buffer_decode(lzma_block *block, lzma_allocator *allocator, const uint8_t *in, size_t *in_pos, size_t in_size, uint8_t *out, size_t *out_pos, size_t out_size) { + lzma_next_coder block_decoder; + lzma_ret ret; + if (in_pos == NULL || (in == NULL && *in_pos != in_size) || *in_pos > in_size || out_pos == NULL || (out == NULL && *out_pos != out_size) @@ -25,9 +28,8 @@ lzma_block_buffer_decode(lzma_block *block, lzma_allocator *allocator, return LZMA_PROG_ERROR; // Initialize the Block decoder. - lzma_next_coder block_decoder = LZMA_NEXT_CODER_INIT; - lzma_ret ret = lzma_block_decoder_init( - &block_decoder, allocator, block); + block_decoder = LZMA_NEXT_CODER_INIT; + ret = lzma_block_decoder_init(&block_decoder, allocator, block); if (ret == LZMA_OK) { // Save the positions so that we can restore them in case diff --git a/Utilities/cmliblzma/liblzma/common/block_buffer_encoder.c b/Utilities/cmliblzma/liblzma/common/block_buffer_encoder.c index 519c6a6..136f7f5 100644 --- a/Utilities/cmliblzma/liblzma/common/block_buffer_encoder.c +++ b/Utilities/cmliblzma/liblzma/common/block_buffer_encoder.c @@ -31,6 +31,8 @@ static lzma_vli lzma2_bound(lzma_vli uncompressed_size) { + lzma_vli overhead; + // Prevent integer overflow in overhead calculation. if (uncompressed_size > COMPRESSED_SIZE_MAX) return 0; @@ -39,7 +41,7 @@ lzma2_bound(lzma_vli uncompressed_size) // uncompressed_size up to the next multiple of LZMA2_CHUNK_MAX, // multiply by the size of per-chunk header, and add one byte for // the end marker. 
- const lzma_vli overhead = ((uncompressed_size + LZMA2_CHUNK_MAX - 1) + overhead = ((uncompressed_size + LZMA2_CHUNK_MAX - 1) / LZMA2_CHUNK_MAX) * LZMA2_HEADER_UNCOMPRESSED + 1; @@ -82,15 +84,17 @@ static lzma_ret block_encode_uncompressed(lzma_block *block, const uint8_t *in, size_t in_size, uint8_t *out, size_t *out_pos, size_t out_size) { + size_t in_pos = 0; + uint8_t control = 0x01; // Dictionary reset + lzma_filter *filters_orig; + // TODO: Figure out if the last filter is LZMA2 or Subblock and use // that filter to encode the uncompressed chunks. // Use LZMA2 uncompressed chunks. We wouldn't need a dictionary at // all, but LZMA2 always requires a dictionary, so use the minimum // value to minimize memory usage of the decoder. - lzma_options_lzma lzma2 = { - .dict_size = LZMA_DICT_SIZE_MIN, - }; + lzma_options_lzma lzma2 = { LZMA_DICT_SIZE_MIN }; lzma_filter filters[2]; filters[0].id = LZMA_FILTER_LZMA2; @@ -99,7 +103,7 @@ block_encode_uncompressed(lzma_block *block, const uint8_t *in, size_t in_size, // Set the above filter options to *block temporarily so that we can // encode the Block Header. - lzma_filter *filters_orig = block->filters; + filters_orig = block->filters; block->filters = filters; if (lzma_block_header_size(block) != LZMA_OK) { @@ -128,18 +132,17 @@ block_encode_uncompressed(lzma_block *block, const uint8_t *in, size_t in_size, *out_pos += block->header_size; // Encode the data using LZMA2 uncompressed chunks. - size_t in_pos = 0; - uint8_t control = 0x01; // Dictionary reset while (in_pos < in_size) { + size_t copy_size; + // Control byte: Indicate uncompressed chunk, of which // the first resets the dictionary. out[(*out_pos)++] = control; control = 0x02; // No dictionary reset // Size of the uncompressed chunk - const size_t copy_size - = my_min(in_size - in_pos, LZMA2_CHUNK_MAX); + copy_size = my_min(in_size - in_pos, LZMA2_CHUNK_MAX); out[(*out_pos)++] = (copy_size - 1) >> 8; out[(*out_pos)++] = (copy_size - 1) & 0xFF; @@ -164,6 +167,10 @@ block_encode_normal(lzma_block *block, lzma_allocator *allocator, const uint8_t *in, size_t in_size, uint8_t *out, size_t *out_pos, size_t out_size) { + size_t out_start; + lzma_next_coder raw_encoder = LZMA_NEXT_CODER_INIT; + lzma_ret ret; + // Find out the size of the Block Header. block->compressed_size = lzma2_bound(in_size); if (block->compressed_size == 0) @@ -176,7 +183,7 @@ block_encode_normal(lzma_block *block, lzma_allocator *allocator, if (out_size - *out_pos <= block->header_size) return LZMA_BUF_ERROR; - const size_t out_start = *out_pos; + out_start = *out_pos; *out_pos += block->header_size; // Limit out_size so that we stop encoding if the output would grow @@ -186,8 +193,7 @@ block_encode_normal(lzma_block *block, lzma_allocator *allocator, // TODO: In many common cases this could be optimized to use // significantly less memory. - lzma_next_coder raw_encoder = LZMA_NEXT_CODER_INIT; - lzma_ret ret = lzma_raw_encoder_init( + ret = lzma_raw_encoder_init( &raw_encoder, allocator, block->filters); if (ret == LZMA_OK) { @@ -226,6 +232,10 @@ lzma_block_buffer_encode(lzma_block *block, lzma_allocator *allocator, const uint8_t *in, size_t in_size, uint8_t *out, size_t *out_pos, size_t out_size) { + size_t check_size; + lzma_ret ret; + size_t i; + // Validate the arguments. 
if (block == NULL || (in == NULL && in_size != 0) || out == NULL || out_pos == NULL || *out_pos > out_size) @@ -249,7 +259,7 @@ lzma_block_buffer_encode(lzma_block *block, lzma_allocator *allocator, out_size -= (out_size - *out_pos) & 3; // Get the size of the Check field. - const size_t check_size = lzma_check_size(block->check); + check_size = lzma_check_size(block->check); assert(check_size != UINT32_MAX); // Reserve space for the Check field. @@ -259,7 +269,7 @@ lzma_block_buffer_encode(lzma_block *block, lzma_allocator *allocator, out_size -= check_size; // Do the actual compression. - const lzma_ret ret = block_encode_normal(block, allocator, + ret = block_encode_normal(block, allocator, in, in_size, out, out_pos, out_size); if (ret != LZMA_OK) { // If the error was something else than output buffer @@ -281,7 +291,7 @@ lzma_block_buffer_encode(lzma_block *block, lzma_allocator *allocator, // Block Padding. No buffer overflow here, because we already adjusted // out_size so that (out_size - out_start) is a multiple of four. // Thus, if the buffer is full, the loop body can never run. - for (size_t i = (size_t)(block->compressed_size); i & 3; ++i) { + for (i = (size_t)(block->compressed_size); i & 3; ++i) { assert(*out_pos < out_size); out[(*out_pos)++] = 0x00; } diff --git a/Utilities/cmliblzma/liblzma/common/block_decoder.c b/Utilities/cmliblzma/liblzma/common/block_decoder.c index a3ce6f4..3de3851 100644 --- a/Utilities/cmliblzma/liblzma/common/block_decoder.c +++ b/Utilities/cmliblzma/liblzma/common/block_decoder.c @@ -233,7 +233,7 @@ lzma_block_decoder_init(lzma_next_coder *next, lzma_allocator *allocator, extern LZMA_API(lzma_ret) lzma_block_decoder(lzma_stream *strm, lzma_block *block) { - lzma_next_strm_init(lzma_block_decoder_init, strm, block); + lzma_next_strm_init1(lzma_block_decoder_init, strm, block); strm->internal->supported_actions[LZMA_RUN] = true; strm->internal->supported_actions[LZMA_FINISH] = true; diff --git a/Utilities/cmliblzma/liblzma/common/block_encoder.c b/Utilities/cmliblzma/liblzma/common/block_encoder.c index 1eeb502..63e2687 100644 --- a/Utilities/cmliblzma/liblzma/common/block_encoder.c +++ b/Utilities/cmliblzma/liblzma/common/block_encoder.c @@ -208,7 +208,7 @@ lzma_block_encoder_init(lzma_next_coder *next, lzma_allocator *allocator, extern LZMA_API(lzma_ret) lzma_block_encoder(lzma_stream *strm, lzma_block *block) { - lzma_next_strm_init(lzma_block_encoder_init, strm, block); + lzma_next_strm_init1(lzma_block_encoder_init, strm, block); strm->internal->supported_actions[LZMA_RUN] = true; strm->internal->supported_actions[LZMA_FINISH] = true; diff --git a/Utilities/cmliblzma/liblzma/common/block_header_decoder.c b/Utilities/cmliblzma/liblzma/common/block_header_decoder.c index 2c9573e..f6e470e 100644 --- a/Utilities/cmliblzma/liblzma/common/block_header_decoder.c +++ b/Utilities/cmliblzma/liblzma/common/block_header_decoder.c @@ -17,10 +17,12 @@ static void free_properties(lzma_block *block, lzma_allocator *allocator) { + size_t i; + // Free allocated filter options. The last array member is not // touched after the initialization in the beginning of // lzma_block_header_decode(), so we don't need to touch that here. 
- for (size_t i = 0; i < LZMA_FILTERS_MAX; ++i) { + for (i = 0; i < LZMA_FILTERS_MAX; ++i) { lzma_free(block->filters[i].options, allocator); block->filters[i].id = LZMA_VLI_UNKNOWN; block->filters[i].options = NULL; @@ -34,6 +36,13 @@ extern LZMA_API(lzma_ret) lzma_block_header_decode(lzma_block *block, lzma_allocator *allocator, const uint8_t *in) { + const size_t filter_count = (in[1] & 3) + 1; + size_t in_size; + size_t i; + + // Start after the Block Header Size and Block Flags fields. + size_t in_pos = 2; + // NOTE: We consider the header to be corrupt not only when the // CRC32 doesn't match, but also when variable-length integers // are invalid or over 63 bits, or if the header is too small @@ -41,7 +50,7 @@ lzma_block_header_decode(lzma_block *block, // Initialize the filter options array. This way the caller can // safely free() the options even if an error occurs in this function. - for (size_t i = 0; i <= LZMA_FILTERS_MAX; ++i) { + for (i = 0; i <= LZMA_FILTERS_MAX; ++i) { block->filters[i].id = LZMA_VLI_UNKNOWN; block->filters[i].options = NULL; } @@ -56,7 +65,7 @@ lzma_block_header_decode(lzma_block *block, return LZMA_PROG_ERROR; // Exclude the CRC32 field. - const size_t in_size = block->header_size - 4; + in_size = block->header_size - 4; // Verify CRC32 if (lzma_crc32(in, in_size, 0) != unaligned_read32le(in + in_size)) @@ -66,9 +75,6 @@ lzma_block_header_decode(lzma_block *block, if (in[1] & 0x3C) return LZMA_OPTIONS_ERROR; - // Start after the Block Header Size and Block Flags fields. - size_t in_pos = 2; - // Compressed Size if (in[1] & 0x40) { return_if_error(lzma_vli_decode(&block->compressed_size, @@ -90,8 +96,7 @@ lzma_block_header_decode(lzma_block *block, block->uncompressed_size = LZMA_VLI_UNKNOWN; // Filter Flags - const size_t filter_count = (in[1] & 3) + 1; - for (size_t i = 0; i < filter_count; ++i) { + for (i = 0; i < filter_count; ++i) { const lzma_ret ret = lzma_filter_flags_decode( &block->filters[i], allocator, in, &in_pos, in_size); diff --git a/Utilities/cmliblzma/liblzma/common/block_header_encoder.c b/Utilities/cmliblzma/liblzma/common/block_header_encoder.c index 707dd0c..650295c 100644 --- a/Utilities/cmliblzma/liblzma/common/block_header_encoder.c +++ b/Utilities/cmliblzma/liblzma/common/block_header_encoder.c @@ -17,12 +17,14 @@ extern LZMA_API(lzma_ret) lzma_block_header_size(lzma_block *block) { - if (block->version != 0) - return LZMA_OPTIONS_ERROR; + size_t i; // Block Header Size + Block Flags + CRC32. uint32_t size = 1 + 1 + 4; + if (block->version != 0) + return LZMA_OPTIONS_ERROR; + // Compressed Size if (block->compressed_size != LZMA_VLI_UNKNOWN) { const uint32_t add = lzma_vli_size(block->compressed_size); @@ -45,12 +47,13 @@ lzma_block_header_size(lzma_block *block) if (block->filters == NULL || block->filters[0].id == LZMA_VLI_UNKNOWN) return LZMA_PROG_ERROR; - for (size_t i = 0; block->filters[i].id != LZMA_VLI_UNKNOWN; ++i) { + for (i = 0; block->filters[i].id != LZMA_VLI_UNKNOWN; ++i) { + uint32_t add; + // Don't allow too many filters. if (i == LZMA_FILTERS_MAX) return LZMA_PROG_ERROR; - uint32_t add; return_if_error(lzma_filter_flags_size(&add, block->filters + i)); @@ -73,20 +76,23 @@ lzma_block_header_size(lzma_block *block) extern LZMA_API(lzma_ret) lzma_block_header_encode(const lzma_block *block, uint8_t *out) { + size_t out_size; + size_t out_pos = 2; + size_t filter_count = 0; + // Validate everything but filters. 
if (lzma_block_unpadded_size(block) == 0 || !lzma_vli_is_valid(block->uncompressed_size)) return LZMA_PROG_ERROR; // Indicate the size of the buffer _excluding_ the CRC32 field. - const size_t out_size = block->header_size - 4; + out_size = block->header_size - 4; // Store the Block Header Size. out[0] = out_size / 4; // We write Block Flags in pieces. out[1] = 0x00; - size_t out_pos = 2; // Compressed Size if (block->compressed_size != LZMA_VLI_UNKNOWN) { @@ -108,7 +114,6 @@ lzma_block_header_encode(const lzma_block *block, uint8_t *out) if (block->filters == NULL || block->filters[0].id == LZMA_VLI_UNKNOWN) return LZMA_PROG_ERROR; - size_t filter_count = 0; do { // There can be a maximum of four filters. if (filter_count == LZMA_FILTERS_MAX) diff --git a/Utilities/cmliblzma/liblzma/common/block_util.c b/Utilities/cmliblzma/liblzma/common/block_util.c index 62c9345..4cd34d1 100644 --- a/Utilities/cmliblzma/liblzma/common/block_util.c +++ b/Utilities/cmliblzma/liblzma/common/block_util.c @@ -17,11 +17,14 @@ extern LZMA_API(lzma_ret) lzma_block_compressed_size(lzma_block *block, lzma_vli unpadded_size) { + uint32_t container_size; + lzma_vli compressed_size; + // Validate everything but Uncompressed Size and filters. if (lzma_block_unpadded_size(block) == 0) return LZMA_PROG_ERROR; - const uint32_t container_size = block->header_size + container_size = block->header_size + lzma_check_size(block->check); // Validate that Compressed Size will be greater than zero. @@ -31,7 +34,7 @@ lzma_block_compressed_size(lzma_block *block, lzma_vli unpadded_size) // Calculate what Compressed Size is supposed to be. // If Compressed Size was present in Block Header, // compare that the new value matches it. - const lzma_vli compressed_size = unpadded_size - container_size; + compressed_size = unpadded_size - container_size; if (block->compressed_size != LZMA_VLI_UNKNOWN && block->compressed_size != compressed_size) return LZMA_DATA_ERROR; @@ -45,6 +48,8 @@ lzma_block_compressed_size(lzma_block *block, lzma_vli unpadded_size) extern LZMA_API(lzma_vli) lzma_block_unpadded_size(const lzma_block *block) { + lzma_vli unpadded_size; + // Validate the values that we are interested in i.e. all but // Uncompressed Size and the filters. // @@ -66,7 +71,7 @@ lzma_block_unpadded_size(const lzma_block *block) return LZMA_VLI_UNKNOWN; // Calculate Unpadded Size and validate it. - const lzma_vli unpadded_size = block->compressed_size + unpadded_size = block->compressed_size + block->header_size + lzma_check_size(block->check); diff --git a/Utilities/cmliblzma/liblzma/common/common.c b/Utilities/cmliblzma/liblzma/common/common.c index b9e3860..d0105e1 100644 --- a/Utilities/cmliblzma/liblzma/common/common.c +++ b/Utilities/cmliblzma/liblzma/common/common.c @@ -38,12 +38,12 @@ lzma_version_string(void) extern void * lzma_attribute((__malloc__)) lzma_attr_alloc_size(1) lzma_alloc(size_t size, lzma_allocator *allocator) { + void *ptr; + // Some malloc() variants return NULL if called with size == 0. 
if (size == 0) size = 1; - void *ptr; - if (allocator != NULL && allocator->alloc != NULL) ptr = allocator->alloc(allocator->opaque, 1, size); else @@ -173,6 +173,10 @@ lzma_strm_init(lzma_stream *strm) extern LZMA_API(lzma_ret) lzma_code(lzma_stream *strm, lzma_action action) { + size_t in_pos = 0; + size_t out_pos = 0; + lzma_ret ret; + // Sanity checks if ((strm->next_in == NULL && strm->avail_in != 0) || (strm->next_out == NULL && strm->avail_out != 0) @@ -248,9 +252,7 @@ lzma_code(lzma_stream *strm, lzma_action action) return LZMA_PROG_ERROR; } - size_t in_pos = 0; - size_t out_pos = 0; - lzma_ret ret = strm->internal->next.code( + ret = strm->internal->next.code( strm->internal->next.coder, strm->allocator, strm->next_in, &in_pos, strm->avail_in, strm->next_out, &out_pos, strm->avail_out, action); diff --git a/Utilities/cmliblzma/liblzma/common/common.h b/Utilities/cmliblzma/liblzma/common/common.h index 6d7412f..a1a1591 100644 --- a/Utilities/cmliblzma/liblzma/common/common.h +++ b/Utilities/cmliblzma/liblzma/common/common.h @@ -155,18 +155,18 @@ struct lzma_next_coder_s { }; -/// Macro to initialize lzma_next_coder structure -#define LZMA_NEXT_CODER_INIT \ - (lzma_next_coder){ \ - .coder = NULL, \ - .init = (uintptr_t)(NULL), \ - .id = LZMA_VLI_UNKNOWN, \ - .code = NULL, \ - .end = NULL, \ - .get_check = NULL, \ - .memconfig = NULL, \ - .update = NULL, \ - } +/// Constant to initialize lzma_next_coder structure +static const lzma_next_coder LZMA_NEXT_CODER_INIT = + { + NULL, + LZMA_VLI_UNKNOWN, + (uintptr_t)(NULL), + NULL, + NULL, + NULL, + NULL, + NULL, + }; /// Internal data for lzma_strm_init, lzma_code, and lzma_end. A pointer to @@ -211,7 +211,7 @@ extern void lzma_free(void *ptr, lzma_allocator *allocator); /// Allocates strm->internal if it is NULL, and initializes *strm and -/// strm->internal. This function is only called via lzma_next_strm_init macro. +/// strm->internal. This function is only called via lzma_next_strm_init2 macro. extern lzma_ret lzma_strm_init(lzma_stream *strm); /// Initializes the next filter in the chain, if any. This takes care of @@ -269,15 +269,37 @@ do { \ /// (The function being called will use lzma_next_coder_init()). If /// initialization fails, memory that wasn't freed by func() is freed /// along strm->internal. -#define lzma_next_strm_init(func, strm, ...) 
\ +#define lzma_next_strm_init1(func, strm, arg1) \ do { \ - return_if_error(lzma_strm_init(strm)); \ - const lzma_ret ret_ = func(&(strm)->internal->next, \ - (strm)->allocator, __VA_ARGS__); \ - if (ret_ != LZMA_OK) { \ - lzma_end(strm); \ - return ret_; \ - } \ + lzma_ret ret_; \ + return_if_error(lzma_strm_init(strm)); \ + ret_ = func(&(strm)->internal->next, (strm)->allocator, arg1); \ + if (ret_ != LZMA_OK) { \ + lzma_end(strm); \ + return ret_; \ + } \ +} while (0) + +#define lzma_next_strm_init2(func, strm, arg1, arg2) \ +do { \ + lzma_ret ret_; \ + return_if_error(lzma_strm_init(strm)); \ + ret_ = func(&(strm)->internal->next, (strm)->allocator, arg1, arg2); \ + if (ret_ != LZMA_OK) { \ + lzma_end(strm); \ + return ret_; \ + } \ +} while (0) + +#define lzma_next_strm_init3(func, strm, arg1, arg2, arg3) \ +do { \ + lzma_ret ret_; \ + return_if_error(lzma_strm_init(strm)); \ + ret_ = func(&(strm)->internal->next, (strm)->allocator, arg1, arg2, arg3); \ + if (ret_ != LZMA_OK) { \ + lzma_end(strm); \ + return ret_; \ + } \ } while (0) #endif diff --git a/Utilities/cmliblzma/liblzma/common/filter_buffer_decoder.c b/Utilities/cmliblzma/liblzma/common/filter_buffer_decoder.c index 2d35ef8..65665c1 100644 --- a/Utilities/cmliblzma/liblzma/common/filter_buffer_decoder.c +++ b/Utilities/cmliblzma/liblzma/common/filter_buffer_decoder.c @@ -18,22 +18,26 @@ lzma_raw_buffer_decode(const lzma_filter *filters, lzma_allocator *allocator, const uint8_t *in, size_t *in_pos, size_t in_size, uint8_t *out, size_t *out_pos, size_t out_size) { + lzma_next_coder next = LZMA_NEXT_CODER_INIT; + size_t in_start; + size_t out_start; + lzma_ret ret; + // Validate what isn't validated later in filter_common.c. if (in == NULL || in_pos == NULL || *in_pos > in_size || out == NULL || out_pos == NULL || *out_pos > out_size) return LZMA_PROG_ERROR; // Initialize the decoer. - lzma_next_coder next = LZMA_NEXT_CODER_INIT; return_if_error(lzma_raw_decoder_init(&next, allocator, filters)); // Store the positions so that we can restore them if something // goes wrong. - const size_t in_start = *in_pos; - const size_t out_start = *out_pos; + in_start = *in_pos; + out_start = *out_pos; // Do the actual decoding and free decoder's memory. - lzma_ret ret = next.code(next.coder, allocator, in, in_pos, in_size, + ret = next.code(next.coder, allocator, in, in_pos, in_size, out, out_pos, out_size, LZMA_FINISH); if (ret == LZMA_STREAM_END) { diff --git a/Utilities/cmliblzma/liblzma/common/filter_buffer_encoder.c b/Utilities/cmliblzma/liblzma/common/filter_buffer_encoder.c index 646e1b3..b23329f 100644 --- a/Utilities/cmliblzma/liblzma/common/filter_buffer_encoder.c +++ b/Utilities/cmliblzma/liblzma/common/filter_buffer_encoder.c @@ -18,22 +18,25 @@ lzma_raw_buffer_encode(const lzma_filter *filters, lzma_allocator *allocator, const uint8_t *in, size_t in_size, uint8_t *out, size_t *out_pos, size_t out_size) { + lzma_next_coder next = LZMA_NEXT_CODER_INIT; + size_t out_start; + size_t in_pos = 0; + lzma_ret ret; + // Validate what isn't validated later in filter_common.c. if ((in == NULL && in_size != 0) || out == NULL || out_pos == NULL || *out_pos > out_size) return LZMA_PROG_ERROR; // Initialize the encoder - lzma_next_coder next = LZMA_NEXT_CODER_INIT; return_if_error(lzma_raw_encoder_init(&next, allocator, filters)); // Store the output position so that we can restore it if // something goes wrong. - const size_t out_start = *out_pos; + out_start = *out_pos; // Do the actual encoding and free coder's memory. 
- size_t in_pos = 0; - lzma_ret ret = next.code(next.coder, allocator, in, &in_pos, in_size, + ret = next.code(next.coder, allocator, in, &in_pos, in_size, out, out_pos, out_size, LZMA_FINISH); lzma_next_end(&next, allocator); diff --git a/Utilities/cmliblzma/liblzma/common/filter_common.c b/Utilities/cmliblzma/liblzma/common/filter_common.c index 7c95b05..d2b9e08 100644 --- a/Utilities/cmliblzma/liblzma/common/filter_common.c +++ b/Utilities/cmliblzma/liblzma/common/filter_common.c @@ -36,87 +36,87 @@ static const struct { } features[] = { #if defined (HAVE_ENCODER_LZMA1) || defined(HAVE_DECODER_LZMA1) { - .id = LZMA_FILTER_LZMA1, - .options_size = sizeof(lzma_options_lzma), - .non_last_ok = false, - .last_ok = true, - .changes_size = true, + LZMA_FILTER_LZMA1, + sizeof(lzma_options_lzma), + false, + true, + true, }, #endif #if defined(HAVE_ENCODER_LZMA2) || defined(HAVE_DECODER_LZMA2) { - .id = LZMA_FILTER_LZMA2, - .options_size = sizeof(lzma_options_lzma), - .non_last_ok = false, - .last_ok = true, - .changes_size = true, + LZMA_FILTER_LZMA2, + sizeof(lzma_options_lzma), + false, + true, + true, }, #endif #if defined(HAVE_ENCODER_X86) || defined(HAVE_DECODER_X86) { - .id = LZMA_FILTER_X86, - .options_size = sizeof(lzma_options_bcj), - .non_last_ok = true, - .last_ok = false, - .changes_size = false, + LZMA_FILTER_X86, + sizeof(lzma_options_bcj), + true, + false, + false, }, #endif #if defined(HAVE_ENCODER_POWERPC) || defined(HAVE_DECODER_POWERPC) { - .id = LZMA_FILTER_POWERPC, - .options_size = sizeof(lzma_options_bcj), - .non_last_ok = true, - .last_ok = false, - .changes_size = false, + LZMA_FILTER_POWERPC, + sizeof(lzma_options_bcj), + true, + false, + false, }, #endif #if defined(HAVE_ENCODER_IA64) || defined(HAVE_DECODER_IA64) { - .id = LZMA_FILTER_IA64, - .options_size = sizeof(lzma_options_bcj), - .non_last_ok = true, - .last_ok = false, - .changes_size = false, + LZMA_FILTER_IA64, + sizeof(lzma_options_bcj), + true, + false, + false, }, #endif #if defined(HAVE_ENCODER_ARM) || defined(HAVE_DECODER_ARM) { - .id = LZMA_FILTER_ARM, - .options_size = sizeof(lzma_options_bcj), - .non_last_ok = true, - .last_ok = false, - .changes_size = false, + LZMA_FILTER_ARM, + sizeof(lzma_options_bcj), + true, + false, + false, }, #endif #if defined(HAVE_ENCODER_ARMTHUMB) || defined(HAVE_DECODER_ARMTHUMB) { - .id = LZMA_FILTER_ARMTHUMB, - .options_size = sizeof(lzma_options_bcj), - .non_last_ok = true, - .last_ok = false, - .changes_size = false, + LZMA_FILTER_ARMTHUMB, + sizeof(lzma_options_bcj), + true, + false, + false, }, #endif #if defined(HAVE_ENCODER_SPARC) || defined(HAVE_DECODER_SPARC) { - .id = LZMA_FILTER_SPARC, - .options_size = sizeof(lzma_options_bcj), - .non_last_ok = true, - .last_ok = false, - .changes_size = false, + LZMA_FILTER_SPARC, + sizeof(lzma_options_bcj), + true, + false, + false, }, #endif #if defined(HAVE_ENCODER_DELTA) || defined(HAVE_DECODER_DELTA) { - .id = LZMA_FILTER_DELTA, - .options_size = sizeof(lzma_options_delta), - .non_last_ok = true, - .last_ok = false, - .changes_size = false, + LZMA_FILTER_DELTA, + sizeof(lzma_options_delta), + true, + false, + false, }, #endif { - .id = LZMA_VLI_UNKNOWN + LZMA_VLI_UNKNOWN } }; @@ -125,11 +125,12 @@ extern LZMA_API(lzma_ret) lzma_filters_copy(const lzma_filter *src, lzma_filter *dest, lzma_allocator *allocator) { + size_t i; + lzma_ret ret; + if (src == NULL || dest == NULL) return LZMA_PROG_ERROR; - lzma_ret ret; - size_t i; for (i = 0; src[i].id != LZMA_VLI_UNKNOWN; ++i) { // There must be a maximum of four filters plus 
// the array terminator. @@ -193,10 +194,6 @@ error: static lzma_ret validate_chain(const lzma_filter *filters, size_t *count) { - // There must be at least one filter. - if (filters == NULL || filters[0].id == LZMA_VLI_UNKNOWN) - return LZMA_PROG_ERROR; - // Number of non-last filters that may change the size of the data // significantly (that is, more than 1-2 % or so). size_t changes_size_count = 0; @@ -210,6 +207,11 @@ validate_chain(const lzma_filter *filters, size_t *count) bool last_ok = false; size_t i = 0; + + // There must be at least one filter. + if (filters == NULL || filters[0].id == LZMA_VLI_UNKNOWN) + return LZMA_PROG_ERROR; + do { size_t j; for (j = 0; filters[i].id != features[j].id; ++j) @@ -243,14 +245,17 @@ lzma_raw_coder_init(lzma_next_coder *next, lzma_allocator *allocator, const lzma_filter *options, lzma_filter_find coder_find, bool is_encoder) { - // Do some basic validation and get the number of filters. + lzma_filter_info filters[LZMA_FILTERS_MAX + 1]; size_t count; + size_t i; + lzma_ret ret; + + // Do some basic validation and get the number of filters. return_if_error(validate_chain(options, &count)); // Set the filter functions and copy the options pointer. - lzma_filter_info filters[LZMA_FILTERS_MAX + 1]; if (is_encoder) { - for (size_t i = 0; i < count; ++i) { + for (i = 0; i < count; ++i) { // The order of the filters is reversed in the // encoder. It allows more efficient handling // of the uncompressed data. @@ -266,7 +271,7 @@ lzma_raw_coder_init(lzma_next_coder *next, lzma_allocator *allocator, filters[j].options = options[i].options; } } else { - for (size_t i = 0; i < count; ++i) { + for (i = 0; i < count; ++i) { const lzma_filter_coder *const fc = coder_find(options[i].id); if (fc == NULL || fc->init == NULL) @@ -283,7 +288,7 @@ lzma_raw_coder_init(lzma_next_coder *next, lzma_allocator *allocator, filters[count].init = NULL; // Initialize the filters. - const lzma_ret ret = lzma_next_filter_init(next, allocator, filters); + ret = lzma_next_filter_init(next, allocator, filters); if (ret != LZMA_OK) lzma_next_end(next, allocator); @@ -295,6 +300,9 @@ extern uint64_t lzma_raw_coder_memusage(lzma_filter_find coder_find, const lzma_filter *filters) { + uint64_t total = 0; + size_t i = 0; + // The chain has to have at least one filter. 
{ size_t tmp; @@ -302,9 +310,6 @@ lzma_raw_coder_memusage(lzma_filter_find coder_find, return UINT64_MAX; } - uint64_t total = 0; - size_t i = 0; - do { const lzma_filter_coder *const fc = coder_find(filters[i].id); diff --git a/Utilities/cmliblzma/liblzma/common/filter_decoder.c b/Utilities/cmliblzma/liblzma/common/filter_decoder.c index 1ebbe2a..cce2b30 100644 --- a/Utilities/cmliblzma/liblzma/common/filter_decoder.c +++ b/Utilities/cmliblzma/liblzma/common/filter_decoder.c @@ -44,74 +44,74 @@ typedef struct { static const lzma_filter_decoder decoders[] = { #ifdef HAVE_DECODER_LZMA1 { - .id = LZMA_FILTER_LZMA1, - .init = &lzma_lzma_decoder_init, - .memusage = &lzma_lzma_decoder_memusage, - .props_decode = &lzma_lzma_props_decode, + LZMA_FILTER_LZMA1, + &lzma_lzma_decoder_init, + &lzma_lzma_decoder_memusage, + &lzma_lzma_props_decode, }, #endif #ifdef HAVE_DECODER_LZMA2 { - .id = LZMA_FILTER_LZMA2, - .init = &lzma_lzma2_decoder_init, - .memusage = &lzma_lzma2_decoder_memusage, - .props_decode = &lzma_lzma2_props_decode, + LZMA_FILTER_LZMA2, + &lzma_lzma2_decoder_init, + &lzma_lzma2_decoder_memusage, + &lzma_lzma2_props_decode, }, #endif #ifdef HAVE_DECODER_X86 { - .id = LZMA_FILTER_X86, - .init = &lzma_simple_x86_decoder_init, - .memusage = NULL, - .props_decode = &lzma_simple_props_decode, + LZMA_FILTER_X86, + &lzma_simple_x86_decoder_init, + NULL, + &lzma_simple_props_decode, }, #endif #ifdef HAVE_DECODER_POWERPC { - .id = LZMA_FILTER_POWERPC, - .init = &lzma_simple_powerpc_decoder_init, - .memusage = NULL, - .props_decode = &lzma_simple_props_decode, + LZMA_FILTER_POWERPC, + &lzma_simple_powerpc_decoder_init, + NULL, + &lzma_simple_props_decode, }, #endif #ifdef HAVE_DECODER_IA64 { - .id = LZMA_FILTER_IA64, - .init = &lzma_simple_ia64_decoder_init, - .memusage = NULL, - .props_decode = &lzma_simple_props_decode, + LZMA_FILTER_IA64, + &lzma_simple_ia64_decoder_init, + NULL, + &lzma_simple_props_decode, }, #endif #ifdef HAVE_DECODER_ARM { - .id = LZMA_FILTER_ARM, - .init = &lzma_simple_arm_decoder_init, - .memusage = NULL, - .props_decode = &lzma_simple_props_decode, + LZMA_FILTER_ARM, + &lzma_simple_arm_decoder_init, + NULL, + &lzma_simple_props_decode, }, #endif #ifdef HAVE_DECODER_ARMTHUMB { - .id = LZMA_FILTER_ARMTHUMB, - .init = &lzma_simple_armthumb_decoder_init, - .memusage = NULL, - .props_decode = &lzma_simple_props_decode, + LZMA_FILTER_ARMTHUMB, + &lzma_simple_armthumb_decoder_init, + NULL, + &lzma_simple_props_decode, }, #endif #ifdef HAVE_DECODER_SPARC { - .id = LZMA_FILTER_SPARC, - .init = &lzma_simple_sparc_decoder_init, - .memusage = NULL, - .props_decode = &lzma_simple_props_decode, + LZMA_FILTER_SPARC, + &lzma_simple_sparc_decoder_init, + NULL, + &lzma_simple_props_decode, }, #endif #ifdef HAVE_DECODER_DELTA { - .id = LZMA_FILTER_DELTA, - .init = &lzma_delta_decoder_init, - .memusage = &lzma_delta_coder_memusage, - .props_decode = &lzma_delta_props_decode, + LZMA_FILTER_DELTA, + &lzma_delta_decoder_init, + &lzma_delta_coder_memusage, + &lzma_delta_props_decode, }, #endif }; @@ -120,7 +120,8 @@ static const lzma_filter_decoder decoders[] = { static const lzma_filter_decoder * decoder_find(lzma_vli id) { - for (size_t i = 0; i < ARRAY_SIZE(decoders); ++i) + size_t i; + for (i = 0; i < ARRAY_SIZE(decoders); ++i) if (decoders[i].id == id) return decoders + i; @@ -147,7 +148,7 @@ lzma_raw_decoder_init(lzma_next_coder *next, lzma_allocator *allocator, extern LZMA_API(lzma_ret) lzma_raw_decoder(lzma_stream *strm, const lzma_filter *options) { - 
lzma_next_strm_init(lzma_raw_decoder_init, strm, options); + lzma_next_strm_init1(lzma_raw_decoder_init, strm, options); strm->internal->supported_actions[LZMA_RUN] = true; strm->internal->supported_actions[LZMA_FINISH] = true; @@ -168,10 +169,11 @@ extern LZMA_API(lzma_ret) lzma_properties_decode(lzma_filter *filter, lzma_allocator *allocator, const uint8_t *props, size_t props_size) { + const lzma_filter_decoder *const fd = decoder_find(filter->id); + // Make it always NULL so that the caller can always safely free() it. filter->options = NULL; - const lzma_filter_decoder *const fd = decoder_find(filter->id); if (fd == NULL) return LZMA_OPTIONS_ERROR; diff --git a/Utilities/cmliblzma/liblzma/common/filter_encoder.c b/Utilities/cmliblzma/liblzma/common/filter_encoder.c index 635d812..9fdb100 100644 --- a/Utilities/cmliblzma/liblzma/common/filter_encoder.c +++ b/Utilities/cmliblzma/liblzma/common/filter_encoder.c @@ -56,95 +56,101 @@ typedef struct { static const lzma_filter_encoder encoders[] = { #ifdef HAVE_ENCODER_LZMA1 { - .id = LZMA_FILTER_LZMA1, - .init = &lzma_lzma_encoder_init, - .memusage = &lzma_lzma_encoder_memusage, - .chunk_size = NULL, // FIXME - .props_size_get = NULL, - .props_size_fixed = 5, - .props_encode = &lzma_lzma_props_encode, + LZMA_FILTER_LZMA1, + &lzma_lzma_encoder_init, + &lzma_lzma_encoder_memusage, + NULL, // FIXME + NULL, + 5, + &lzma_lzma_props_encode, }, #endif #ifdef HAVE_ENCODER_LZMA2 { - .id = LZMA_FILTER_LZMA2, - .init = &lzma_lzma2_encoder_init, - .memusage = &lzma_lzma2_encoder_memusage, - .chunk_size = NULL, // FIXME - .props_size_get = NULL, - .props_size_fixed = 1, - .props_encode = &lzma_lzma2_props_encode, + LZMA_FILTER_LZMA2, + &lzma_lzma2_encoder_init, + &lzma_lzma2_encoder_memusage, + NULL, // FIXME + NULL, + 1, + &lzma_lzma2_props_encode, }, #endif #ifdef HAVE_ENCODER_X86 { - .id = LZMA_FILTER_X86, - .init = &lzma_simple_x86_encoder_init, - .memusage = NULL, - .chunk_size = NULL, - .props_size_get = &lzma_simple_props_size, - .props_encode = &lzma_simple_props_encode, + LZMA_FILTER_X86, + &lzma_simple_x86_encoder_init, + NULL, + NULL, + &lzma_simple_props_size, + 0, + &lzma_simple_props_encode, }, #endif #ifdef HAVE_ENCODER_POWERPC { - .id = LZMA_FILTER_POWERPC, - .init = &lzma_simple_powerpc_encoder_init, - .memusage = NULL, - .chunk_size = NULL, - .props_size_get = &lzma_simple_props_size, - .props_encode = &lzma_simple_props_encode, + LZMA_FILTER_POWERPC, + &lzma_simple_powerpc_encoder_init, + NULL, + NULL, + &lzma_simple_props_size, + 0, + &lzma_simple_props_encode, }, #endif #ifdef HAVE_ENCODER_IA64 { - .id = LZMA_FILTER_IA64, - .init = &lzma_simple_ia64_encoder_init, - .memusage = NULL, - .chunk_size = NULL, - .props_size_get = &lzma_simple_props_size, - .props_encode = &lzma_simple_props_encode, + LZMA_FILTER_IA64, + &lzma_simple_ia64_encoder_init, + NULL, + NULL, + &lzma_simple_props_size, + 0, + &lzma_simple_props_encode, }, #endif #ifdef HAVE_ENCODER_ARM { - .id = LZMA_FILTER_ARM, - .init = &lzma_simple_arm_encoder_init, - .memusage = NULL, - .chunk_size = NULL, - .props_size_get = &lzma_simple_props_size, - .props_encode = &lzma_simple_props_encode, + LZMA_FILTER_ARM, + &lzma_simple_arm_encoder_init, + NULL, + NULL, + &lzma_simple_props_size, + 0, + &lzma_simple_props_encode, }, #endif #ifdef HAVE_ENCODER_ARMTHUMB { - .id = LZMA_FILTER_ARMTHUMB, - .init = &lzma_simple_armthumb_encoder_init, - .memusage = NULL, - .chunk_size = NULL, - .props_size_get = &lzma_simple_props_size, - .props_encode = &lzma_simple_props_encode, + 
LZMA_FILTER_ARMTHUMB, + &lzma_simple_armthumb_encoder_init, + NULL, + NULL, + &lzma_simple_props_size, + 0, + &lzma_simple_props_encode, }, #endif #ifdef HAVE_ENCODER_SPARC { - .id = LZMA_FILTER_SPARC, - .init = &lzma_simple_sparc_encoder_init, - .memusage = NULL, - .chunk_size = NULL, - .props_size_get = &lzma_simple_props_size, - .props_encode = &lzma_simple_props_encode, + LZMA_FILTER_SPARC, + &lzma_simple_sparc_encoder_init, + NULL, + NULL, + &lzma_simple_props_size, + 0, + &lzma_simple_props_encode, }, #endif #ifdef HAVE_ENCODER_DELTA { - .id = LZMA_FILTER_DELTA, - .init = &lzma_delta_encoder_init, - .memusage = &lzma_delta_coder_memusage, - .chunk_size = NULL, - .props_size_get = NULL, - .props_size_fixed = 1, - .props_encode = &lzma_delta_props_encode, + LZMA_FILTER_DELTA, + &lzma_delta_encoder_init, + &lzma_delta_coder_memusage, + NULL, + NULL, + 1, + &lzma_delta_props_encode, }, #endif }; @@ -153,7 +159,8 @@ static const lzma_filter_encoder encoders[] = { static const lzma_filter_encoder * encoder_find(lzma_vli id) { - for (size_t i = 0; i < ARRAY_SIZE(encoders); ++i) + size_t i; + for (i = 0; i < ARRAY_SIZE(encoders); ++i) if (encoders[i].id == id) return encoders + i; @@ -171,6 +178,10 @@ lzma_filter_encoder_is_supported(lzma_vli id) extern LZMA_API(lzma_ret) lzma_filters_update(lzma_stream *strm, const lzma_filter *filters) { + size_t i; + size_t count = 1; + lzma_filter reversed_filters[LZMA_FILTERS_MAX + 1]; + if (strm->internal->next.update == NULL) return LZMA_PROG_ERROR; @@ -180,12 +191,10 @@ lzma_filters_update(lzma_stream *strm, const lzma_filter *filters) // The actual filter chain in the encoder is reversed. Some things // still want the normal order chain, so we provide both. - size_t count = 1; while (filters[count].id != LZMA_VLI_UNKNOWN) ++count; - lzma_filter reversed_filters[LZMA_FILTERS_MAX + 1]; - for (size_t i = 0; i < count; ++i) + for (i = 0; i < count; ++i) reversed_filters[count - i - 1] = filters[i]; reversed_filters[count].id = LZMA_VLI_UNKNOWN; @@ -207,7 +216,7 @@ lzma_raw_encoder_init(lzma_next_coder *next, lzma_allocator *allocator, extern LZMA_API(lzma_ret) lzma_raw_encoder(lzma_stream *strm, const lzma_filter *options) { - lzma_next_strm_init(lzma_raw_coder_init, strm, options, + lzma_next_strm_init3(lzma_raw_coder_init, strm, options, (lzma_filter_find)(&encoder_find), true); strm->internal->supported_actions[LZMA_RUN] = true; diff --git a/Utilities/cmliblzma/liblzma/common/filter_flags_decoder.c b/Utilities/cmliblzma/liblzma/common/filter_flags_decoder.c index caae10c..aa2dbd5 100644 --- a/Utilities/cmliblzma/liblzma/common/filter_flags_decoder.c +++ b/Utilities/cmliblzma/liblzma/common/filter_flags_decoder.c @@ -18,6 +18,9 @@ lzma_filter_flags_decode( lzma_filter *filter, lzma_allocator *allocator, const uint8_t *in, size_t *in_pos, size_t in_size) { + lzma_vli props_size; + lzma_ret ret; + // Set the pointer to NULL so the caller can always safely free it. 
filter->options = NULL; @@ -29,7 +32,6 @@ lzma_filter_flags_decode( return LZMA_DATA_ERROR; // Size of Properties - lzma_vli props_size; return_if_error(lzma_vli_decode(&props_size, NULL, in, in_pos, in_size)); @@ -37,7 +39,7 @@ lzma_filter_flags_decode( if (in_size - *in_pos < props_size) return LZMA_DATA_ERROR; - const lzma_ret ret = lzma_properties_decode( + ret = lzma_properties_decode( filter, allocator, in + *in_pos, props_size); *in_pos += props_size; diff --git a/Utilities/cmliblzma/liblzma/common/filter_flags_encoder.c b/Utilities/cmliblzma/liblzma/common/filter_flags_encoder.c index d110566..755c407 100644 --- a/Utilities/cmliblzma/liblzma/common/filter_flags_encoder.c +++ b/Utilities/cmliblzma/liblzma/common/filter_flags_encoder.c @@ -31,6 +31,8 @@ extern LZMA_API(lzma_ret) lzma_filter_flags_encode(const lzma_filter *filter, uint8_t *out, size_t *out_pos, size_t out_size) { + uint32_t props_size; + // Filter ID if (filter->id >= LZMA_FILTER_RESERVED_START) return LZMA_PROG_ERROR; @@ -39,7 +41,6 @@ lzma_filter_flags_encode(const lzma_filter *filter, out, out_pos, out_size)); // Size of Properties - uint32_t props_size; return_if_error(lzma_properties_size(&props_size, filter)); return_if_error(lzma_vli_encode(props_size, NULL, out, out_pos, out_size)); diff --git a/Utilities/cmliblzma/liblzma/common/index.c b/Utilities/cmliblzma/liblzma/common/index.c index 9af4bc1..f0f67ca 100644 --- a/Utilities/cmliblzma/liblzma/common/index.c +++ b/Utilities/cmliblzma/liblzma/common/index.c @@ -230,6 +230,7 @@ index_tree_end(index_tree *tree, lzma_allocator *allocator, static void index_tree_append(index_tree *tree, index_tree_node *node) { + uint32_t up; node->parent = tree->rightmost; node->left = NULL; node->right = NULL; @@ -258,8 +259,10 @@ index_tree_append(index_tree *tree, index_tree_node *node) // and thus know the state of the tree just by looking at the node // count. From the node count we can calculate how many steps to go // up in the tree to find the rotation root. - uint32_t up = tree->count ^ (UINT32_C(1) << bsr32(tree->count)); + up = tree->count ^ (UINT32_C(1) << bsr32(tree->count)); if (up != 0) { + index_tree_node *pivot; + // Locate the root node for the rotation. up = ctz32(tree->count) + 2; do { @@ -267,7 +270,7 @@ index_tree_append(index_tree *tree, index_tree_node *node) } while (--up > 0); // Rotate left using node as the rotation root. 
- index_tree_node *pivot = node->right; + pivot = node->right; if (node->parent == NULL) { tree->root = pivot; @@ -397,11 +400,13 @@ index_init_plain(lzma_allocator *allocator) extern LZMA_API(lzma_index *) lzma_index_init(lzma_allocator *allocator) { + index_stream *s; + lzma_index *i = index_init_plain(allocator); if (i == NULL) return NULL; - index_stream *s = index_stream_init(0, 0, 1, 0, allocator); + s = index_stream_init(0, 0, 1, 0, allocator); if (s == NULL) { lzma_free(i, allocator); return NULL; @@ -600,6 +605,8 @@ lzma_index_padding_size(const lzma_index *i) extern LZMA_API(lzma_ret) lzma_index_stream_flags(lzma_index *i, const lzma_stream_flags *stream_flags) { + index_stream *s; + if (i == NULL || stream_flags == NULL) return LZMA_PROG_ERROR; @@ -607,7 +614,7 @@ lzma_index_stream_flags(lzma_index *i, const lzma_stream_flags *stream_flags) return_if_error(lzma_stream_flags_compare( stream_flags, stream_flags)); - index_stream *s = (index_stream *)(i->streams.rightmost); + s = (index_stream *)(i->streams.rightmost); s->stream_flags = *stream_flags; return LZMA_OK; @@ -617,14 +624,17 @@ lzma_index_stream_flags(lzma_index *i, const lzma_stream_flags *stream_flags) extern LZMA_API(lzma_ret) lzma_index_stream_padding(lzma_index *i, lzma_vli stream_padding) { + index_stream *s; + lzma_vli old_stream_padding; + if (i == NULL || stream_padding > LZMA_VLI_MAX || (stream_padding & 3) != 0) return LZMA_PROG_ERROR; - index_stream *s = (index_stream *)(i->streams.rightmost); + s = (index_stream *)(i->streams.rightmost); // Check that the new value won't make the file grow too big. - const lzma_vli old_stream_padding = s->stream_padding; + old_stream_padding = s->stream_padding; s->stream_padding = 0; if (lzma_index_file_size(i) + stream_padding > LZMA_VLI_MAX) { s->stream_padding = old_stream_padding; @@ -640,20 +650,26 @@ extern LZMA_API(lzma_ret) lzma_index_append(lzma_index *i, lzma_allocator *allocator, lzma_vli unpadded_size, lzma_vli uncompressed_size) { + index_stream *s; + index_group *g; + lzma_vli compressed_base; + lzma_vli uncompressed_base; + uint32_t index_list_size_add; + // Validate. if (i == NULL || unpadded_size < UNPADDED_SIZE_MIN || unpadded_size > UNPADDED_SIZE_MAX || uncompressed_size > LZMA_VLI_MAX) return LZMA_PROG_ERROR; - index_stream *s = (index_stream *)(i->streams.rightmost); - index_group *g = (index_group *)(s->groups.rightmost); + s = (index_stream *)(i->streams.rightmost); + g = (index_group *)(s->groups.rightmost); - const lzma_vli compressed_base = g == NULL ? 0 + compressed_base = g == NULL ? 0 : vli_ceil4(g->records[g->last].unpadded_sum); - const lzma_vli uncompressed_base = g == NULL ? 0 + uncompressed_base = g == NULL ? 0 : g->records[g->last].uncompressed_sum; - const uint32_t index_list_size_add = lzma_vli_size(unpadded_size) + index_list_size_add = lzma_vli_size(unpadded_size) + lzma_vli_size(uncompressed_size); // Check that the file size will stay within limits. @@ -767,6 +783,7 @@ extern LZMA_API(lzma_ret) lzma_index_cat(lzma_index *restrict dest, lzma_index *restrict src, lzma_allocator *allocator) { + index_cat_info info; const lzma_vli dest_file_size = lzma_index_file_size(dest); // Check that we don't exceed the file size limits. 
@@ -796,10 +813,12 @@ lzma_index_cat(lzma_index *restrict dest, lzma_index *restrict src, index_stream *s = (index_stream *)(dest->streams.rightmost); index_group *g = (index_group *)(s->groups.rightmost); if (g != NULL && g->last + 1 < g->allocated) { + index_group *newg; + assert(g->node.left == NULL); assert(g->node.right == NULL); - index_group *newg = lzma_alloc(sizeof(index_group) + newg = lzma_alloc(sizeof(index_group) + (g->last + 1) * sizeof(index_record), allocator); @@ -834,13 +853,12 @@ lzma_index_cat(lzma_index *restrict dest, lzma_index *restrict src, // Add all the Streams from src to dest. Update the base offsets // of each Stream from src. - const index_cat_info info = { - .uncompressed_size = dest->uncompressed_size, - .file_size = dest_file_size, - .stream_number_add = dest->streams.count, - .block_number_add = dest->record_count, - .streams = &dest->streams, - }; + info.uncompressed_size = dest->uncompressed_size; + info.file_size = dest_file_size; + info.stream_number_add = dest->streams.count; + info.block_number_add = dest->record_count; + info.streams = &dest->streams; + index_cat_helper(&info, (index_stream *)(src->streams.root)); // Update info about all the combined Streams. @@ -861,12 +879,17 @@ lzma_index_cat(lzma_index *restrict dest, lzma_index *restrict src, static index_stream * index_dup_stream(const index_stream *src, lzma_allocator *allocator) { + index_stream *dest; + index_group *destg; + index_group *srcg; + size_t i = 0; + // Catch a somewhat theoretical integer overflow. if (src->record_count > PREALLOC_MAX) return NULL; // Allocate and initialize a new Stream. - index_stream *dest = index_stream_init(src->node.compressed_base, + dest = index_stream_init(src->node.compressed_base, src->node.uncompressed_base, src->number, src->block_number_base, allocator); @@ -884,7 +907,7 @@ index_dup_stream(const index_stream *src, lzma_allocator *allocator) // Allocate memory for the Records. We put all the Records into // a single group. It's simplest and also tends to make // lzma_index_locate() a little bit faster with very big Indexes. - index_group *destg = lzma_alloc(sizeof(index_group) + destg = lzma_alloc(sizeof(index_group) + src->record_count * sizeof(index_record), allocator); if (destg == NULL) { @@ -900,8 +923,7 @@ index_dup_stream(const index_stream *src, lzma_allocator *allocator) destg->last = src->record_count - 1; // Go through all the groups in src and copy the Records into destg. - const index_group *srcg = (const index_group *)(src->groups.leftmost); - size_t i = 0; + srcg = (index_group *)(src->groups.leftmost); do { memcpy(destg->records + i, srcg->records, (srcg->last + 1) * sizeof(index_record)); @@ -921,6 +943,9 @@ index_dup_stream(const index_stream *src, lzma_allocator *allocator) extern LZMA_API(lzma_index *) lzma_index_dup(const lzma_index *src, lzma_allocator *allocator) { + index_stream *srcstream; + index_stream *deststream; + // Allocate the base structure (no initial Stream). lzma_index *dest = index_init_plain(allocator); if (dest == NULL) @@ -933,11 +958,9 @@ lzma_index_dup(const lzma_index *src, lzma_allocator *allocator) dest->index_list_size = src->index_list_size; // Copy the Streams and the groups in them. 
- const index_stream *srcstream - = (const index_stream *)(src->streams.leftmost); + srcstream = (index_stream *)(src->streams.leftmost); do { - index_stream *deststream = index_dup_stream( - srcstream, allocator); + deststream = index_dup_stream(srcstream, allocator); if (deststream == NULL) { lzma_index_end(dest, allocator); return NULL; @@ -1096,14 +1119,19 @@ lzma_index_iter_rewind(lzma_index_iter *iter) extern LZMA_API(lzma_bool) lzma_index_iter_next(lzma_index_iter *iter, lzma_index_iter_mode mode) { + const lzma_index *i; + const index_stream *stream; + const index_group *group; + size_t record; + // Catch unsupported mode values. if ((unsigned int)(mode) > LZMA_INDEX_ITER_NONEMPTY_BLOCK) return true; - const lzma_index *i = iter->internal[ITER_INDEX].p; - const index_stream *stream = iter->internal[ITER_STREAM].p; - const index_group *group = NULL; - size_t record = iter->internal[ITER_RECORD].s; + i = iter->internal[ITER_INDEX].p; + stream = iter->internal[ITER_STREAM].p; + group = NULL; + record = iter->internal[ITER_RECORD].s; // If we are being asked for the next Stream, leave group to NULL // so that the rest of the this function thinks that this Stream @@ -1203,6 +1231,10 @@ again: extern LZMA_API(lzma_bool) lzma_index_iter_locate(lzma_index_iter *iter, lzma_vli target) { + const index_stream *stream; + const index_group *group; + size_t left, right; + const lzma_index *i = iter->internal[ITER_INDEX].p; // If the target is past the end of the file, return immediately. @@ -1210,12 +1242,12 @@ lzma_index_iter_locate(lzma_index_iter *iter, lzma_vli target) return true; // Locate the Stream containing the target offset. - const index_stream *stream = index_tree_locate(&i->streams, target); + stream = index_tree_locate(&i->streams, target); assert(stream != NULL); target -= stream->node.uncompressed_base; // Locate the group containing the target offset. - const index_group *group = index_tree_locate(&stream->groups, target); + group = index_tree_locate(&stream->groups, target); assert(group != NULL); // Use binary search to locate the exact Record. It is the first @@ -1223,8 +1255,8 @@ lzma_index_iter_locate(lzma_index_iter *iter, lzma_vli target) // This is because we want the rightmost Record that fullfills the // search criterion. It is possible that there are empty Blocks; // we don't want to return them. - size_t left = 0; - size_t right = group->last; + left = 0; + right = group->last; while (left < right) { const size_t pos = left + (right - left) / 2; diff --git a/Utilities/cmliblzma/liblzma/common/index_decoder.c b/Utilities/cmliblzma/liblzma/common/index_decoder.c index 83c8a3a..6c91f10 100644 --- a/Utilities/cmliblzma/liblzma/common/index_decoder.c +++ b/Utilities/cmliblzma/liblzma/common/index_decoder.c @@ -289,7 +289,7 @@ index_decoder_init(lzma_next_coder *next, lzma_allocator *allocator, extern LZMA_API(lzma_ret) lzma_index_decoder(lzma_stream *strm, lzma_index **i, uint64_t memlimit) { - lzma_next_strm_init(index_decoder_init, strm, i, memlimit); + lzma_next_strm_init2(index_decoder_init, strm, i, memlimit); strm->internal->supported_actions[LZMA_RUN] = true; strm->internal->supported_actions[LZMA_FINISH] = true; @@ -303,21 +303,23 @@ lzma_index_buffer_decode( lzma_index **i, uint64_t *memlimit, lzma_allocator *allocator, const uint8_t *in, size_t *in_pos, size_t in_size) { + lzma_coder coder; + lzma_ret ret; + + // Store the input start position so that we can restore it in case + // of an error. 
+ const size_t in_start = *in_pos; + // Sanity checks if (i == NULL || memlimit == NULL || in == NULL || in_pos == NULL || *in_pos > in_size) return LZMA_PROG_ERROR; // Initialize the decoder. - lzma_coder coder; return_if_error(index_decoder_reset(&coder, allocator, i, *memlimit)); - // Store the input start position so that we can restore it in case - // of an error. - const size_t in_start = *in_pos; - // Do the actual decoding. - lzma_ret ret = index_decode(&coder, allocator, in, in_pos, in_size, + ret = index_decode(&coder, allocator, in, in_pos, in_size, NULL, NULL, 0, LZMA_RUN); if (ret == LZMA_STREAM_END) { diff --git a/Utilities/cmliblzma/liblzma/common/index_encoder.c b/Utilities/cmliblzma/liblzma/common/index_encoder.c index 45919f0..a6f8598 100644 --- a/Utilities/cmliblzma/liblzma/common/index_encoder.c +++ b/Utilities/cmliblzma/liblzma/common/index_encoder.c @@ -207,7 +207,7 @@ lzma_index_encoder_init(lzma_next_coder *next, lzma_allocator *allocator, extern LZMA_API(lzma_ret) lzma_index_encoder(lzma_stream *strm, const lzma_index *i) { - lzma_next_strm_init(lzma_index_encoder_init, strm, i); + lzma_next_strm_init1(lzma_index_encoder_init, strm, i); strm->internal->supported_actions[LZMA_RUN] = true; strm->internal->supported_actions[LZMA_FINISH] = true; @@ -220,6 +220,10 @@ extern LZMA_API(lzma_ret) lzma_index_buffer_encode(const lzma_index *i, uint8_t *out, size_t *out_pos, size_t out_size) { + lzma_coder coder; + size_t out_start; + lzma_ret ret; + // Validate the arguments. if (i == NULL || out == NULL || out_pos == NULL || *out_pos > out_size) return LZMA_PROG_ERROR; @@ -230,13 +234,12 @@ lzma_index_buffer_encode(const lzma_index *i, // The Index encoder needs just one small data structure so we can // allocate it on stack. - lzma_coder coder; index_encoder_reset(&coder, i); // Do the actual encoding. This should never fail, but store // the original *out_pos just in case. - const size_t out_start = *out_pos; - lzma_ret ret = index_encode(&coder, NULL, NULL, NULL, 0, + out_start = *out_pos; + ret = index_encode(&coder, NULL, NULL, NULL, 0, out, out_pos, out_size, LZMA_RUN); if (ret == LZMA_STREAM_END) { diff --git a/Utilities/cmliblzma/liblzma/common/index_hash.c b/Utilities/cmliblzma/liblzma/common/index_hash.c index e3e9386..0cf86b3 100644 --- a/Utilities/cmliblzma/liblzma/common/index_hash.c +++ b/Utilities/cmliblzma/liblzma/common/index_hash.c @@ -124,13 +124,14 @@ static lzma_ret hash_append(lzma_index_hash_info *info, lzma_vli unpadded_size, lzma_vli uncompressed_size) { + const lzma_vli sizes[2] = { unpadded_size, uncompressed_size }; + info->blocks_size += vli_ceil4(unpadded_size); info->uncompressed_size += uncompressed_size; info->index_list_size += lzma_vli_size(unpadded_size) + lzma_vli_size(uncompressed_size); ++info->count; - const lzma_vli sizes[2] = { unpadded_size, uncompressed_size }; lzma_check_update(&info->check, LZMA_CHECK_BEST, (const uint8_t *)(sizes), sizeof(sizes)); @@ -173,6 +174,9 @@ extern LZMA_API(lzma_ret) lzma_index_hash_decode(lzma_index_hash *index_hash, const uint8_t *in, size_t *in_pos, size_t in_size) { + size_t in_start; + lzma_ret ret; + // Catch zero input buffer here, because in contrast to Index encoder // and decoder functions, applications call this function directly // instead of via lzma_code(), which does the buffer checking. 
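The index_decoder.c and index_encoder.c hunks above switch from lzma_next_strm_init to the numbered lzma_next_strm_init2 and lzma_next_strm_init1, whose suffix matches the number of extra arguments passed. The likely reason is that C89/90 has no variadic macros, so a single __VA_ARGS__ helper has to be split into fixed-arity variants. A hedged sketch of that idea; the macro bodies and helper functions below are simplified placeholders, not the liblzma internals:

#include <stdio.h>

/* One macro per argument count replaces the single variadic helper. */
#define strm_init1(func, strm, a)    func((strm), (a))
#define strm_init2(func, strm, a, b) func((strm), (a), (b))

static void init_one(const char *strm, int a)
{
    printf("%s: %d\n", strm, a);
}

static void init_two(const char *strm, int a, int b)
{
    printf("%s: %d %d\n", strm, a, b);
}

int main(void)
{
    strm_init1(init_one, "index encoder", 7);
    strm_init2(init_two, "index decoder", 7, 9);
    return 0;
}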
@@ -182,8 +186,8 @@ lzma_index_hash_decode(lzma_index_hash *index_hash, const uint8_t *in, // NOTE: This function has many similarities to index_encode() and // index_decode() functions found from index_encoder.c and // index_decoder.c. See the comments especially in index_encoder.c. - const size_t in_start = *in_pos; - lzma_ret ret = LZMA_OK; + in_start = *in_pos; + ret = LZMA_OK; while (*in_pos < in_size) switch (index_hash->sequence) { diff --git a/Utilities/cmliblzma/liblzma/common/stream_buffer_decoder.c b/Utilities/cmliblzma/liblzma/common/stream_buffer_decoder.c index ae75315..9e2e1da 100644 --- a/Utilities/cmliblzma/liblzma/common/stream_buffer_decoder.c +++ b/Utilities/cmliblzma/liblzma/common/stream_buffer_decoder.c @@ -19,6 +19,9 @@ lzma_stream_buffer_decode(uint64_t *memlimit, uint32_t flags, const uint8_t *in, size_t *in_pos, size_t in_size, uint8_t *out, size_t *out_pos, size_t out_size) { + lzma_next_coder stream_decoder = LZMA_NEXT_CODER_INIT; + lzma_ret ret; + // Sanity checks if (in_pos == NULL || (in == NULL && *in_pos != in_size) || *in_pos > in_size || out_pos == NULL @@ -33,8 +36,7 @@ lzma_stream_buffer_decode(uint64_t *memlimit, uint32_t flags, // Initialize the Stream decoder. // TODO: We need something to tell the decoder that it can use the // output buffer as workspace, and thus save significant amount of RAM. - lzma_next_coder stream_decoder = LZMA_NEXT_CODER_INIT; - lzma_ret ret = lzma_stream_decoder_init( + ret = lzma_stream_decoder_init( &stream_decoder, allocator, *memlimit, flags); if (ret == LZMA_OK) { diff --git a/Utilities/cmliblzma/liblzma/common/stream_buffer_encoder.c b/Utilities/cmliblzma/liblzma/common/stream_buffer_encoder.c index 2450ee2..8bca87f 100644 --- a/Utilities/cmliblzma/liblzma/common/stream_buffer_encoder.c +++ b/Utilities/cmliblzma/liblzma/common/stream_buffer_encoder.c @@ -45,6 +45,10 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check, lzma_allocator *allocator, const uint8_t *in, size_t in_size, uint8_t *out, size_t *out_pos_ptr, size_t out_size) { + lzma_stream_flags stream_flags = { 0 }; + lzma_block block = { 0 }; + size_t out_pos; + // Sanity checks if (filters == NULL || (unsigned int)(check) > LZMA_CHECK_ID_MAX || (in == NULL && in_size != 0) || out == NULL @@ -61,7 +65,7 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check, // Use a local copy. We update *out_pos_ptr only if everything // succeeds. - size_t out_pos = *out_pos_ptr; + out_pos = *out_pos_ptr; // Check that there's enough space for both Stream Header and // Stream Footer. @@ -73,10 +77,7 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check, out_size -= LZMA_STREAM_HEADER_SIZE; // Encode the Stream Header. - lzma_stream_flags stream_flags = { - .version = 0, - .check = check, - }; + stream_flags.check = check; if (lzma_stream_header_encode(&stream_flags, out + out_pos) != LZMA_OK) @@ -85,11 +86,8 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check, out_pos += LZMA_STREAM_HEADER_SIZE; // Encode a Block but only if there is at least one byte of input. - lzma_block block = { - .version = 0, - .check = check, - .filters = filters, - }; + block.check = check; + block.filters = filters; if (in_size > 0) return_if_error(lzma_block_buffer_encode(&block, allocator, @@ -97,6 +95,8 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check, // Index { + lzma_ret ret; + // Create an Index. It will have one Record if there was // at least one byte of input to encode. Otherwise the // Index will be empty. 
@@ -104,7 +104,7 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check, if (i == NULL) return LZMA_MEM_ERROR; - lzma_ret ret = LZMA_OK; + ret = LZMA_OK; if (in_size > 0) ret = lzma_index_append(i, allocator, diff --git a/Utilities/cmliblzma/liblzma/common/stream_decoder.c b/Utilities/cmliblzma/liblzma/common/stream_decoder.c index 37ea71e..56a009b 100644 --- a/Utilities/cmliblzma/liblzma/common/stream_decoder.c +++ b/Utilities/cmliblzma/liblzma/common/stream_decoder.c @@ -106,6 +106,8 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator, while (true) switch (coder->sequence) { case SEQ_STREAM_HEADER: { + lzma_ret ret; + // Copy the Stream Header to the internal buffer. lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos, LZMA_STREAM_HEADER_SIZE); @@ -117,7 +119,7 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator, coder->pos = 0; // Decode the Stream Header. - const lzma_ret ret = lzma_stream_header_decode( + ret = lzma_stream_header_decode( &coder->stream_flags, coder->buffer); if (ret != LZMA_OK) return ret == LZMA_FORMAT_ERROR && !coder->first_stream @@ -154,6 +156,11 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator, // Fall through case SEQ_BLOCK_HEADER: { + lzma_filter filters[LZMA_FILTERS_MAX + 1]; + uint64_t memusage; + lzma_ret ret; + size_t i; + if (*in_pos >= in_size) return LZMA_OK; @@ -188,7 +195,6 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator, // Set up a buffer to hold the filter chain. Block Header // decoder will initialize all members of this array so // we don't need to do it here. - lzma_filter filters[LZMA_FILTERS_MAX + 1]; coder->block_options.filters = filters; // Decode the Block Header. @@ -196,9 +202,7 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator, allocator, coder->buffer)); // Check the memory usage limit. - const uint64_t memusage = lzma_raw_decoder_memusage(filters); - lzma_ret ret; - + memusage = lzma_raw_decoder_memusage(filters); if (memusage == UINT64_MAX) { // One or more unknown Filter IDs. ret = LZMA_OPTIONS_ERROR; @@ -224,7 +228,7 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator, // Free the allocated filter options since they are needed // only to initialize the Block decoder. - for (size_t i = 0; i < LZMA_FILTERS_MAX; ++i) + for (i = 0; i < LZMA_FILTERS_MAX; ++i) lzma_free(filters[i].options, allocator); coder->block_options.filters = NULL; @@ -260,6 +264,8 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator, } case SEQ_INDEX: { + lzma_ret ret; + // If we don't have any input, don't call // lzma_index_hash_decode() since it would return // LZMA_BUF_ERROR, which we must not do here. @@ -268,7 +274,7 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator, // Decode the Index and compare it to the hash calculated // from the sizes of the Blocks (if any). - const lzma_ret ret = lzma_index_hash_decode(coder->index_hash, + ret = lzma_index_hash_decode(coder->index_hash, in, in_pos, in_size); if (ret != LZMA_STREAM_END) return ret; @@ -279,6 +285,9 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator, // Fall through case SEQ_STREAM_FOOTER: { + lzma_stream_flags footer_flags; + lzma_ret ret; + // Copy the Stream Footer to the internal buffer. lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos, LZMA_STREAM_HEADER_SIZE); @@ -292,8 +301,7 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator, // Decode the Stream Footer. 
The decoder gives // LZMA_FORMAT_ERROR if the magic bytes don't match, // so convert that return code to LZMA_DATA_ERROR. - lzma_stream_flags footer_flags; - const lzma_ret ret = lzma_stream_footer_decode( + ret = lzma_stream_footer_decode( &footer_flags, coder->buffer); if (ret != LZMA_OK) return ret == LZMA_FORMAT_ERROR @@ -442,7 +450,7 @@ lzma_stream_decoder_init(lzma_next_coder *next, lzma_allocator *allocator, extern LZMA_API(lzma_ret) lzma_stream_decoder(lzma_stream *strm, uint64_t memlimit, uint32_t flags) { - lzma_next_strm_init(lzma_stream_decoder_init, strm, memlimit, flags); + lzma_next_strm_init2(lzma_stream_decoder_init, strm, memlimit, flags); strm->internal->supported_actions[LZMA_RUN] = true; strm->internal->supported_actions[LZMA_FINISH] = true; diff --git a/Utilities/cmliblzma/liblzma/common/stream_encoder.c b/Utilities/cmliblzma/liblzma/common/stream_encoder.c index 97a7a23..e2f2e10 100644 --- a/Utilities/cmliblzma/liblzma/common/stream_encoder.c +++ b/Utilities/cmliblzma/liblzma/common/stream_encoder.c @@ -147,6 +147,8 @@ stream_encode(lzma_coder *coder, lzma_allocator *allocator, } case SEQ_BLOCK_ENCODE: { + lzma_vli unpadded_size; + static const lzma_action convert[4] = { LZMA_RUN, LZMA_SYNC_FLUSH, @@ -162,7 +164,7 @@ stream_encode(lzma_coder *coder, lzma_allocator *allocator, return ret; // Add a new Index Record. - const lzma_vli unpadded_size = lzma_block_unpadded_size( + unpadded_size = lzma_block_unpadded_size( &coder->block_options); assert(unpadded_size != 0); return_if_error(lzma_index_append(coder->index, allocator, @@ -174,6 +176,12 @@ stream_encode(lzma_coder *coder, lzma_allocator *allocator, } case SEQ_INDEX_ENCODE: { + const lzma_stream_flags stream_flags = { + 0, + lzma_index_size(coder->index), + coder->block_options.check, + }; + // Call the Index encoder. It doesn't take any input, so // those pointers can be NULL. const lzma_ret ret = coder->index_encoder.code( @@ -184,11 +192,6 @@ stream_encode(lzma_coder *coder, lzma_allocator *allocator, return ret; // Encode the Stream Footer into coder->buffer. - const lzma_stream_flags stream_flags = { - .version = 0, - .backward_size = lzma_index_size(coder->index), - .check = coder->block_options.check, - }; if (lzma_stream_footer_encode(&stream_flags, coder->buffer) != LZMA_OK) @@ -211,11 +214,13 @@ stream_encode(lzma_coder *coder, lzma_allocator *allocator, static void stream_encoder_end(lzma_coder *coder, lzma_allocator *allocator) { + size_t i; + lzma_next_end(&coder->block_encoder, allocator); lzma_next_end(&coder->index_encoder, allocator); lzma_index_end(coder->index, allocator); - for (size_t i = 0; coder->filters[i].id != LZMA_VLI_UNKNOWN; ++i) + for (i = 0; coder->filters[i].id != LZMA_VLI_UNKNOWN; ++i) lzma_free(coder->filters[i].options, allocator); lzma_free(coder, allocator); @@ -228,14 +233,18 @@ stream_encoder_update(lzma_coder *coder, lzma_allocator *allocator, const lzma_filter *filters, const lzma_filter *reversed_filters) { + size_t i; + if (coder->sequence <= SEQ_BLOCK_INIT) { + lzma_ret ret; + // There is no incomplete Block waiting to be finished, // thus we can change the whole filter chain. Start by // trying to initialize the Block encoder with the new // chain. This way we detect if the chain is valid. 
coder->block_encoder_is_initialized = false; coder->block_options.filters = (lzma_filter *)(filters); - const lzma_ret ret = block_encoder_init(coder, allocator); + ret = block_encoder_init(coder, allocator); coder->block_options.filters = coder->filters; if (ret != LZMA_OK) return ret; @@ -255,7 +264,7 @@ stream_encoder_update(lzma_coder *coder, lzma_allocator *allocator, } // Free the copy of the old chain and make a copy of the new chain. - for (size_t i = 0; coder->filters[i].id != LZMA_VLI_UNKNOWN; ++i) + for (i = 0; coder->filters[i].id != LZMA_VLI_UNKNOWN; ++i) lzma_free(coder->filters[i].options, allocator); return lzma_filters_copy(filters, coder->filters, allocator); @@ -266,6 +275,8 @@ extern lzma_ret lzma_stream_encoder_init(lzma_next_coder *next, lzma_allocator *allocator, const lzma_filter *filters, lzma_check check) { + lzma_stream_flags stream_flags = { 0, 0, check }; + lzma_next_coder_init(&lzma_stream_encoder_init, next, allocator); if (filters == NULL) @@ -298,10 +309,6 @@ lzma_stream_encoder_init(lzma_next_coder *next, lzma_allocator *allocator, return LZMA_MEM_ERROR; // Encode the Stream Header - lzma_stream_flags stream_flags = { - .version = 0, - .check = check, - }; return_if_error(lzma_stream_header_encode( &stream_flags, next->coder->buffer)); @@ -320,7 +327,7 @@ extern LZMA_API(lzma_ret) lzma_stream_encoder(lzma_stream *strm, const lzma_filter *filters, lzma_check check) { - lzma_next_strm_init(lzma_stream_encoder_init, strm, filters, check); + lzma_next_strm_init2(lzma_stream_encoder_init, strm, filters, check); strm->internal->supported_actions[LZMA_RUN] = true; strm->internal->supported_actions[LZMA_SYNC_FLUSH] = true; diff --git a/Utilities/cmliblzma/liblzma/common/stream_flags_decoder.c b/Utilities/cmliblzma/liblzma/common/stream_flags_decoder.c index 1bc2f97..8cf48a4 100644 --- a/Utilities/cmliblzma/liblzma/common/stream_flags_decoder.c +++ b/Utilities/cmliblzma/liblzma/common/stream_flags_decoder.c @@ -30,13 +30,15 @@ stream_flags_decode(lzma_stream_flags *options, const uint8_t *in) extern LZMA_API(lzma_ret) lzma_stream_header_decode(lzma_stream_flags *options, const uint8_t *in) { + uint32_t crc; + // Magic if (memcmp(in, lzma_header_magic, sizeof(lzma_header_magic)) != 0) return LZMA_FORMAT_ERROR; // Verify the CRC32 so we can distinguish between corrupt // and unsupported files. 
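Where a structure still has to be initialized in one expression, the stream_encoder.c hunks above fall back on positional initializers: lzma_stream_flags stream_flags = { 0, 0, check } relies on the members appearing in declaration order (version, then backward_size, then check, as the replaced .version/.backward_size/.check form indicates). A small sketch with a stand-in struct:

#include <stdio.h>

/* Stand-in for lzma_stream_flags; only the member order matters here. */
struct flags_sketch {
    unsigned version;
    unsigned long backward_size;
    int check;
};

int main(void)
{
    /* C99:  struct flags_sketch f = { .version = 0, .check = 4 };
     * C89:  values must be listed in member order, so unset leading
     * members are written out as zeros, e.g. "{ 0, 0, check }".
     */
    struct flags_sketch f = { 0, 0, 4 };

    printf("version=%u check=%d\n", f.version, f.check);
    return 0;
}

The trade-off is that positional initializers silently break if the member order ever changes, which designated initializers were immune to.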
- const uint32_t crc = lzma_crc32(in + sizeof(lzma_header_magic), + crc = lzma_crc32(in + sizeof(lzma_header_magic), LZMA_STREAM_FLAGS_SIZE, 0); if (crc != unaligned_read32le(in + sizeof(lzma_header_magic) + LZMA_STREAM_FLAGS_SIZE)) @@ -59,13 +61,15 @@ lzma_stream_header_decode(lzma_stream_flags *options, const uint8_t *in) extern LZMA_API(lzma_ret) lzma_stream_footer_decode(lzma_stream_flags *options, const uint8_t *in) { + uint32_t crc; + // Magic if (memcmp(in + sizeof(uint32_t) * 2 + LZMA_STREAM_FLAGS_SIZE, lzma_footer_magic, sizeof(lzma_footer_magic)) != 0) return LZMA_FORMAT_ERROR; // CRC32 - const uint32_t crc = lzma_crc32(in + sizeof(uint32_t), + crc = lzma_crc32(in + sizeof(uint32_t), sizeof(uint32_t) + LZMA_STREAM_FLAGS_SIZE, 0); if (crc != unaligned_read32le(in)) return LZMA_DATA_ERROR; diff --git a/Utilities/cmliblzma/liblzma/common/stream_flags_encoder.c b/Utilities/cmliblzma/liblzma/common/stream_flags_encoder.c index 4e71715..290339e 100644 --- a/Utilities/cmliblzma/liblzma/common/stream_flags_encoder.c +++ b/Utilities/cmliblzma/liblzma/common/stream_flags_encoder.c @@ -29,6 +29,8 @@ stream_flags_encode(const lzma_stream_flags *options, uint8_t *out) extern LZMA_API(lzma_ret) lzma_stream_header_encode(const lzma_stream_flags *options, uint8_t *out) { + uint32_t crc; + assert(sizeof(lzma_header_magic) + LZMA_STREAM_FLAGS_SIZE + 4 == LZMA_STREAM_HEADER_SIZE); @@ -43,7 +45,7 @@ lzma_stream_header_encode(const lzma_stream_flags *options, uint8_t *out) return LZMA_PROG_ERROR; // CRC32 of the Stream Header - const uint32_t crc = lzma_crc32(out + sizeof(lzma_header_magic), + crc = lzma_crc32(out + sizeof(lzma_header_magic), LZMA_STREAM_FLAGS_SIZE, 0); unaligned_write32le(out + sizeof(lzma_header_magic) @@ -56,6 +58,8 @@ lzma_stream_header_encode(const lzma_stream_flags *options, uint8_t *out) extern LZMA_API(lzma_ret) lzma_stream_footer_encode(const lzma_stream_flags *options, uint8_t *out) { + uint32_t crc; + assert(2 * 4 + LZMA_STREAM_FLAGS_SIZE + sizeof(lzma_footer_magic) == LZMA_STREAM_HEADER_SIZE); @@ -73,7 +77,7 @@ lzma_stream_footer_encode(const lzma_stream_flags *options, uint8_t *out) return LZMA_PROG_ERROR; // CRC32 - const uint32_t crc = lzma_crc32( + crc = lzma_crc32( out + 4, 4 + LZMA_STREAM_FLAGS_SIZE, 0); unaligned_write32le(out, crc); diff --git a/Utilities/cmliblzma/liblzma/common/vli_size.c b/Utilities/cmliblzma/liblzma/common/vli_size.c index ec1b4fa..8b931e4 100644 --- a/Utilities/cmliblzma/liblzma/common/vli_size.c +++ b/Utilities/cmliblzma/liblzma/common/vli_size.c @@ -16,10 +16,11 @@ extern LZMA_API(uint32_t) lzma_vli_size(lzma_vli vli) { + uint32_t i = 0; + if (vli > LZMA_VLI_MAX) return 0; - uint32_t i = 0; do { vli >>= 7; ++i; diff --git a/Utilities/cmliblzma/liblzma/delta/delta_common.c b/Utilities/cmliblzma/liblzma/delta/delta_common.c index 930ad21..803e674 100644 --- a/Utilities/cmliblzma/liblzma/delta/delta_common.c +++ b/Utilities/cmliblzma/liblzma/delta/delta_common.c @@ -27,6 +27,8 @@ extern lzma_ret lzma_delta_coder_init(lzma_next_coder *next, lzma_allocator *allocator, const lzma_filter_info *filters) { + const lzma_options_delta *opt; + // Allocate memory for the decoder if needed. if (next->coder == NULL) { next->coder = lzma_alloc(sizeof(lzma_coder), allocator); @@ -43,7 +45,7 @@ lzma_delta_coder_init(lzma_next_coder *next, lzma_allocator *allocator, return LZMA_OPTIONS_ERROR; // Set the delta distance. 
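Most of the remaining churn, as in lzma_vli_size() and lzma_delta_coder_init() above, is simply hoisting declarations to the top of their block, because C89/90 forbids declarations after statements. A compilable sketch of the before/after shape; the function imitates lzma_vli_size() but uses its own illustrative limit constant:

#include <stdio.h>

#define VLI_MAX_SKETCH 0xFFFFFFFUL  /* illustrative limit, not LZMA_VLI_MAX */

static unsigned vli_size_sketch(unsigned long vli)
{
    /* C99 could declare "uint32_t i = 0;" after the early return;
     * C89 requires the declaration before any statement in the block. */
    unsigned i = 0;

    if (vli > VLI_MAX_SKETCH)
        return 0;

    do {
        vli >>= 7;
        ++i;
    } while (vli != 0);

    return i;
}

int main(void)
{
    printf("encoded length: %u bytes\n", vli_size_sketch(300));
    return 0;
}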
- const lzma_options_delta *opt = filters[0].options; + opt = filters[0].options; next->coder->distance = opt->dist; // Initialize the rest of the variables. diff --git a/Utilities/cmliblzma/liblzma/delta/delta_decoder.c b/Utilities/cmliblzma/liblzma/delta/delta_decoder.c index 2cf60d5..582e58e 100644 --- a/Utilities/cmliblzma/liblzma/delta/delta_decoder.c +++ b/Utilities/cmliblzma/liblzma/delta/delta_decoder.c @@ -17,9 +17,10 @@ static void decode_buffer(lzma_coder *coder, uint8_t *buffer, size_t size) { + size_t i; const size_t distance = coder->distance; - for (size_t i = 0; i < size; ++i) { + for (i = 0; i < size; ++i) { buffer[i] += coder->history[(distance + coder->pos) & 0xFF]; coder->history[coder->pos-- & 0xFF] = buffer[i]; } @@ -32,11 +33,12 @@ delta_decode(lzma_coder *coder, lzma_allocator *allocator, size_t in_size, uint8_t *restrict out, size_t *restrict out_pos, size_t out_size, lzma_action action) { - assert(coder->next.code != NULL); - const size_t out_start = *out_pos; + lzma_ret ret; - const lzma_ret ret = coder->next.code(coder->next.coder, allocator, + assert(coder->next.code != NULL); + + ret = coder->next.code(coder->next.coder, allocator, in, in_pos, in_size, out, out_pos, out_size, action); @@ -59,11 +61,12 @@ extern lzma_ret lzma_delta_props_decode(void **options, lzma_allocator *allocator, const uint8_t *props, size_t props_size) { + lzma_options_delta *opt; + if (props_size != 1) return LZMA_OPTIONS_ERROR; - lzma_options_delta *opt - = lzma_alloc(sizeof(lzma_options_delta), allocator); + opt = lzma_alloc(sizeof(lzma_options_delta), allocator); if (opt == NULL) return LZMA_MEM_ERROR; diff --git a/Utilities/cmliblzma/liblzma/delta/delta_encoder.c b/Utilities/cmliblzma/liblzma/delta/delta_encoder.c index 15c7951..8b9e4a8 100644 --- a/Utilities/cmliblzma/liblzma/delta/delta_encoder.c +++ b/Utilities/cmliblzma/liblzma/delta/delta_encoder.c @@ -21,9 +21,10 @@ static void copy_and_encode(lzma_coder *coder, const uint8_t *restrict in, uint8_t *restrict out, size_t size) { + size_t i; const size_t distance = coder->distance; - for (size_t i = 0; i < size; ++i) { + for (i = 0; i < size; ++i) { const uint8_t tmp = coder->history[ (distance + coder->pos) & 0xFF]; coder->history[coder->pos-- & 0xFF] = in[i]; @@ -37,9 +38,10 @@ copy_and_encode(lzma_coder *coder, static void encode_in_place(lzma_coder *coder, uint8_t *buffer, size_t size) { + size_t i; const size_t distance = coder->distance; - for (size_t i = 0; i < size; ++i) { + for (i = 0; i < size; ++i) { const uint8_t tmp = coder->history[ (distance + coder->pos) & 0xFF]; coder->history[coder->pos-- & 0xFF] = buffer[i]; @@ -109,12 +111,13 @@ lzma_delta_encoder_init(lzma_next_coder *next, lzma_allocator *allocator, extern lzma_ret lzma_delta_props_encode(const void *options, uint8_t *out) { + const lzma_options_delta *opt = options; + // The caller must have already validated the options, so it's // LZMA_PROG_ERROR if they are invalid. 
if (lzma_delta_coder_memusage(options) == UINT64_MAX) return LZMA_PROG_ERROR; - const lzma_options_delta *opt = options; out[0] = opt->dist - LZMA_DELTA_DIST_MIN; return LZMA_OK; diff --git a/Utilities/cmliblzma/liblzma/lz/lz_decoder.c b/Utilities/cmliblzma/liblzma/lz/lz_decoder.c index d74085c..f45984e 100644 --- a/Utilities/cmliblzma/liblzma/lz/lz_decoder.c +++ b/Utilities/cmliblzma/liblzma/lz/lz_decoder.c @@ -69,13 +69,17 @@ decode_buffer(lzma_coder *coder, size_t *restrict out_pos, size_t out_size) { while (true) { + size_t copy_size; + size_t dict_start; + lzma_ret ret; + // Wrap the dictionary if needed. if (coder->dict.pos == coder->dict.size) coder->dict.pos = 0; // Store the current dictionary position. It is needed to know // where to start copying to the out[] buffer. - const size_t dict_start = coder->dict.pos; + dict_start = coder->dict.pos; // Calculate how much we allow coder->lz.code() to decode. // It must not decode past the end of the dictionary @@ -86,13 +90,13 @@ decode_buffer(lzma_coder *coder, coder->dict.size - coder->dict.pos); // Call the coder->lz.code() to do the actual decoding. - const lzma_ret ret = coder->lz.code( + ret = coder->lz.code( coder->lz.coder, &coder->dict, in, in_pos, in_size); // Copy the decoded data from the dictionary to the out[] // buffer. - const size_t copy_size = coder->dict.pos - dict_start; + copy_size = coder->dict.pos - dict_start; assert(copy_size <= out_size - *out_pos); memcpy(out + *out_pos, coder->dict.buf + dict_start, copy_size); @@ -139,13 +143,15 @@ lz_decode(lzma_coder *coder, // We aren't the last coder in the chain, we need to decode // our input to a temporary buffer. while (*out_pos < out_size) { + lzma_ret ret; + // Fill the temporary buffer if it is empty. if (!coder->next_finished && coder->temp.pos == coder->temp.size) { coder->temp.pos = 0; coder->temp.size = 0; - const lzma_ret ret = coder->next.code( + ret = coder->next.code( coder->next.coder, allocator, in, in_pos, in_size, coder->temp.buffer, &coder->temp.size, @@ -167,7 +173,7 @@ lz_decode(lzma_coder *coder, return LZMA_OK; } - const lzma_ret ret = decode_buffer(coder, coder->temp.buffer, + ret = decode_buffer(coder, coder->temp.buffer, &coder->temp.pos, coder->temp.size, out, out_pos, out_size); @@ -206,6 +212,8 @@ lzma_lz_decoder_init(lzma_next_coder *next, lzma_allocator *allocator, lzma_allocator *allocator, const void *options, lzma_lz_options *lz_options)) { + lzma_lz_options lz_options; + // Allocate the base structure if it isn't already allocated. if (next->coder == NULL) { next->coder = lzma_alloc(sizeof(lzma_coder), allocator); @@ -223,7 +231,6 @@ lzma_lz_decoder_init(lzma_next_coder *next, lzma_allocator *allocator, // Allocate and initialize the LZ-based decoder. It will also give // us the dictionary size. 
- lzma_lz_options lz_options; return_if_error(lz_init(&next->coder->lz, allocator, filters[0].options, &lz_options)); diff --git a/Utilities/cmliblzma/liblzma/lz/lz_decoder.h b/Utilities/cmliblzma/liblzma/lz/lz_decoder.h index 7266e80..2d19cac 100644 --- a/Utilities/cmliblzma/liblzma/lz/lz_decoder.h +++ b/Utilities/cmliblzma/liblzma/lz/lz_decoder.h @@ -72,14 +72,14 @@ typedef struct { } lzma_lz_decoder; -#define LZMA_LZ_DECODER_INIT \ - (lzma_lz_decoder){ \ - .coder = NULL, \ - .code = NULL, \ - .reset = NULL, \ - .set_uncompressed = NULL, \ - .end = NULL, \ - } +static const lzma_lz_decoder LZMA_LZ_DECODER_INIT = + { + NULL, + NULL, + NULL, + NULL, + NULL, + }; extern lzma_ret lzma_lz_decoder_init(lzma_next_coder *next, @@ -151,13 +151,15 @@ dict_repeat(lzma_dict *dict, uint32_t distance, uint32_t *len) dict->pos += left; } else { + uint32_t copy_pos; + uint32_t copy_size; + // The bigger the dictionary, the more rare this // case occurs. We need to "wrap" the dict, thus // we might need two memcpy() to copy all the data. assert(dict->full == dict->size); - const uint32_t copy_pos - = dict->pos - distance - 1 + dict->size; - uint32_t copy_size = dict->size - copy_pos; + copy_pos = dict->pos - distance - 1 + dict->size; + copy_size = dict->size - copy_pos; if (copy_size < left) { memmove(dict->buf + dict->pos, dict->buf + copy_pos, diff --git a/Utilities/cmliblzma/liblzma/lz/lz_encoder.c b/Utilities/cmliblzma/liblzma/lz/lz_encoder.c index e240696..a735c21 100644 --- a/Utilities/cmliblzma/liblzma/lz/lz_encoder.c +++ b/Utilities/cmliblzma/liblzma/lz/lz_encoder.c @@ -43,16 +43,18 @@ struct lzma_coder_s { static void move_window(lzma_mf *mf) { + uint32_t move_offset; + size_t move_size; + // Align the move to a multiple of 16 bytes. Some LZ-based encoders // like LZMA use the lowest bits of mf->read_pos to know the // alignment of the uncompressed data. We also get better speed // for memmove() with aligned buffers. assert(mf->read_pos > mf->keep_size_before); - const uint32_t move_offset - = (mf->read_pos - mf->keep_size_before) & ~UINT32_C(15); + move_offset = (mf->read_pos - mf->keep_size_before) & ~UINT32_C(15); assert(mf->write_pos > move_offset); - const size_t move_size = mf->write_pos - move_offset; + move_size = mf->write_pos - move_offset; assert(move_offset + move_size <= mf->size); @@ -79,6 +81,9 @@ static lzma_ret fill_window(lzma_coder *coder, lzma_allocator *allocator, const uint8_t *in, size_t *in_pos, size_t in_size, lzma_action action) { + size_t write_pos; + lzma_ret ret; + assert(coder->mf.read_pos <= coder->mf.write_pos); // Move the sliding window if needed. @@ -88,8 +93,7 @@ fill_window(lzma_coder *coder, lzma_allocator *allocator, const uint8_t *in, // Maybe this is ugly, but lzma_mf uses uint32_t for most things // (which I find cleanest), but we need size_t here when filling // the history window. - size_t write_pos = coder->mf.write_pos; - lzma_ret ret; + write_pos = coder->mf.write_pos; if (coder->next.code == NULL) { // Not using a filter, simply memcpy() as much as possible. lzma_bufcpy(in, in_pos, in_size, coder->mf.buffer, @@ -156,6 +160,8 @@ lz_encode(lzma_coder *coder, lzma_allocator *allocator, { while (*out_pos < out_size && (*in_pos < in_size || action != LZMA_RUN)) { + lzma_ret ret; + // Read more data to coder->mf.buffer if needed. 
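The lz_decoder.h hunk above replaces the compound-literal macro LZMA_LZ_DECODER_INIT with a static const object, since C89/90 has neither compound literals nor designated initializers. A sketch of the same technique with a reduced stand-in structure:

#include <stdio.h>
#include <stddef.h>

/* Stand-in for lzma_lz_decoder: a bundle of callback/state pointers. */
struct lz_dec_sketch {
    void *coder;
    int (*code)(void *coder);
    void (*end)(void *coder);
};

/* C99 macro being replaced:
 *     #define LZ_DEC_INIT (struct lz_dec_sketch){ .coder = NULL, ... }
 * C89 replacement: a named constant object that callers copy from.
 */
static const struct lz_dec_sketch LZ_DEC_INIT = { NULL, NULL, NULL };

int main(void)
{
    struct lz_dec_sketch d = LZ_DEC_INIT;  /* plain struct copy */

    printf("reset: %s\n", (d.code == NULL && d.end == NULL) ? "yes" : "no");
    return 0;
}

Call sites such as lz->coder->lzma = LZMA_LZ_DECODER_INIT keep working unchanged, because assigning from a const object has the same effect as assigning the old compound literal.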
if (coder->mf.action == LZMA_RUN && coder->mf.read_pos >= coder->mf.read_limit) @@ -163,7 +169,7 @@ lz_encode(lzma_coder *coder, lzma_allocator *allocator, in, in_pos, in_size, action)); // Encode - const lzma_ret ret = coder->lz.code(coder->lz.coder, + ret = coder->lz.code(coder->lz.coder, &coder->mf, out, out_pos, out_size); if (ret != LZMA_OK) { // Setting this to LZMA_RUN for cases when we are @@ -182,6 +188,14 @@ static bool lz_encoder_prepare(lzma_mf *mf, lzma_allocator *allocator, const lzma_lz_options *lz_options) { + bool is_bt; + uint32_t new_count; + uint32_t reserve; + uint32_t old_size; + uint32_t hash_bytes; + uint32_t hs; + uint32_t old_count; + // For now, the dictionary size is limited to 1.5 GiB. This may grow // in the future if needed, but it needs a little more work than just // changing this check. @@ -207,14 +221,14 @@ lz_encoder_prepare(lzma_mf *mf, lzma_allocator *allocator, // to size_t. // - Memory usage calculation needs something too, e.g. use uint64_t // for mf->size. - uint32_t reserve = lz_options->dict_size / 2; + reserve = lz_options->dict_size / 2; if (reserve > (UINT32_C(1) << 30)) reserve /= 2; reserve += (lz_options->before_size + lz_options->match_len_max + lz_options->after_size) / 2 + (UINT32_C(1) << 19); - const uint32_t old_size = mf->size; + old_size = mf->size; mf->size = mf->keep_size_before + reserve + mf->keep_size_after; // Deallocate the old history buffer if it exists but has different @@ -284,12 +298,11 @@ lz_encoder_prepare(lzma_mf *mf, lzma_allocator *allocator, // Calculate the sizes of mf->hash and mf->son and check that // nice_len is big enough for the selected match finder. - const uint32_t hash_bytes = lz_options->match_finder & 0x0F; + hash_bytes = lz_options->match_finder & 0x0F; if (hash_bytes > mf->nice_len) return true; - const bool is_bt = (lz_options->match_finder & 0x10) != 0; - uint32_t hs; + is_bt = (lz_options->match_finder & 0x10) != 0; if (hash_bytes == 2) { hs = 0xFFFF; @@ -331,13 +344,13 @@ lz_encoder_prepare(lzma_mf *mf, lzma_allocator *allocator, // hash_size_sum + sons_count cannot overflow. assert(hs < UINT32_MAX / 5); - const uint32_t old_count = mf->hash_size_sum + mf->sons_count; + old_count = mf->hash_size_sum + mf->sons_count; mf->hash_size_sum = hs; mf->sons_count = mf->cyclic_size; if (is_bt) mf->sons_count *= 2; - const uint32_t new_count = mf->hash_size_sum + mf->sons_count; + new_count = mf->hash_size_sum + mf->sons_count; // Deallocate the old hash array if it exists and has different size // than what is needed now. @@ -363,6 +376,8 @@ static bool lz_encoder_init(lzma_mf *mf, lzma_allocator *allocator, const lzma_lz_options *lz_options) { + size_t alloc_count; + // Allocate the history buffer. if (mf->buffer == NULL) { mf->buffer = lzma_alloc(mf->size, allocator); @@ -382,7 +397,7 @@ lz_encoder_init(lzma_mf *mf, lzma_allocator *allocator, mf->pending = 0; // Allocate match finder's hash array. - const size_t alloc_count = mf->hash_size_sum + mf->sons_count; + alloc_count = mf->hash_size_sum + mf->sons_count; #if UINT32_MAX >= SIZE_MAX / 4 // Check for integer overflow. (Huge dictionaries are not @@ -442,12 +457,7 @@ extern uint64_t lzma_lz_encoder_memusage(const lzma_lz_options *lz_options) { // Old buffers must not exist when calling lz_encoder_prepare(). - lzma_mf mf = { - .buffer = NULL, - .hash = NULL, - .hash_size_sum = 0, - .sons_count = 0, - }; + lzma_mf mf = { NULL }; // Setup the size information into mf. 
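lzma_lz_encoder_memusage() above shows another accepted fallback: lzma_mf mf = { NULL } initializes the first member explicitly and lets C89's aggregate-initialization rules zero the rest, which covers the four members the old designated initializer named (and every other member besides). A sketch with a reduced stand-in for lzma_mf:

#include <stdio.h>
#include <stddef.h>

/* Reduced stand-in for lzma_mf; the real structure has many more members. */
struct mf_sketch {
    unsigned char *buffer;
    unsigned *hash;
    unsigned hash_size_sum;
    unsigned sons_count;
};

int main(void)
{
    /* Partial brace initialization: the first member gets NULL and every
     * remaining member is implicitly zero-initialized, matching the
     * removed ".buffer/.hash/.hash_size_sum/.sons_count" initializer. */
    struct mf_sketch mf = { NULL };

    printf("hash=%p sums=%u/%u\n",
            (void *)mf.hash, mf.hash_size_sum, mf.sons_count);
    return 0;
}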
if (lz_encoder_prepare(&mf, NULL, lz_options)) @@ -501,6 +511,8 @@ lzma_lz_encoder_init(lzma_next_coder *next, lzma_allocator *allocator, lzma_allocator *allocator, const void *options, lzma_lz_options *lz_options)) { + lzma_lz_options lz_options; + #ifdef HAVE_SMALL // We need that the CRC32 table has been initialized. lzma_crc32_init(); @@ -529,7 +541,6 @@ lzma_lz_encoder_init(lzma_next_coder *next, lzma_allocator *allocator, } // Initialize the LZ-based encoder. - lzma_lz_options lz_options; return_if_error(lz_init(&next->coder->lz, allocator, filters[0].options, &lz_options)); diff --git a/Utilities/cmliblzma/liblzma/lz/lz_encoder.h b/Utilities/cmliblzma/liblzma/lz/lz_encoder.h index 741c453..647f5e2 100644 --- a/Utilities/cmliblzma/liblzma/lz/lz_encoder.h +++ b/Utilities/cmliblzma/liblzma/lz/lz_encoder.h @@ -218,7 +218,7 @@ typedef struct { /// Get pointer to the first byte not ran through the match finder -static inline const uint8_t * +static inline uint8_t * mf_ptr(const lzma_mf *mf) { return mf->buffer + mf->read_pos; diff --git a/Utilities/cmliblzma/liblzma/lz/lz_encoder_hash.h b/Utilities/cmliblzma/liblzma/lz/lz_encoder_hash.h index 342a333..de17c54 100644 --- a/Utilities/cmliblzma/liblzma/lz/lz_encoder_hash.h +++ b/Utilities/cmliblzma/liblzma/lz/lz_encoder_hash.h @@ -39,25 +39,22 @@ // Endianness doesn't matter in hash_2_calc() (no effect on the output). #ifdef TUKLIB_FAST_UNALIGNED_ACCESS # define hash_2_calc() \ - const uint32_t hash_value = *(const uint16_t *)(cur) + hash_value = *(const uint16_t *)(cur) #else # define hash_2_calc() \ - const uint32_t hash_value \ - = (uint32_t)(cur[0]) | ((uint32_t)(cur[1]) << 8) + hash_value = (uint32_t)(cur[0]) | ((uint32_t)(cur[1]) << 8) #endif #define hash_3_calc() \ - const uint32_t temp = hash_table[cur[0]] ^ cur[1]; \ - const uint32_t hash_2_value = temp & HASH_2_MASK; \ - const uint32_t hash_value \ - = (temp ^ ((uint32_t)(cur[2]) << 8)) & mf->hash_mask + temp = hash_table[cur[0]] ^ cur[1]; \ + hash_2_value = temp & HASH_2_MASK; \ + hash_value = (temp ^ ((uint32_t)(cur[2]) << 8)) & mf->hash_mask #define hash_4_calc() \ - const uint32_t temp = hash_table[cur[0]] ^ cur[1]; \ - const uint32_t hash_2_value = temp & HASH_2_MASK; \ - const uint32_t hash_3_value \ - = (temp ^ ((uint32_t)(cur[2]) << 8)) & HASH_3_MASK; \ - const uint32_t hash_value = (temp ^ ((uint32_t)(cur[2]) << 8) \ + temp = hash_table[cur[0]] ^ cur[1]; \ + hash_2_value = temp & HASH_2_MASK; \ + hash_3_value = (temp ^ ((uint32_t)(cur[2]) << 8)) & HASH_3_MASK; \ + hash_value = (temp ^ ((uint32_t)(cur[2]) << 8) \ ^ (hash_table[cur[3]] << 5)) & mf->hash_mask diff --git a/Utilities/cmliblzma/liblzma/lz/lz_encoder_mf.c b/Utilities/cmliblzma/liblzma/lz/lz_encoder_mf.c index f82a1c1..50c3459 100644 --- a/Utilities/cmliblzma/liblzma/lz/lz_encoder_mf.c +++ b/Utilities/cmliblzma/liblzma/lz/lz_encoder_mf.c @@ -32,8 +32,9 @@ lzma_mf_find(lzma_mf *mf, uint32_t *count_ptr, lzma_match *matches) if (count > 0) { #ifndef NDEBUG + uint32_t i; // Validate the matches. - for (uint32_t i = 0; i < count; ++i) { + for (i = 0; i < count; ++i) { assert(matches[i].len <= mf->nice_len); assert(matches[i].dist < mf->read_pos); assert(memcmp(mf_ptr(mf) - 1, @@ -49,6 +50,9 @@ lzma_mf_find(lzma_mf *mf, uint32_t *count_ptr, lzma_match *matches) // If a match of maximum search length was found, try to // extend the match to maximum possible length. 
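The lz_encoder_hash.h change above turns the hash_*_calc() macros from macros that declared their own const uint32_t results into macros that only assign to variables the caller has already declared, so they can appear after other statements in a C89 block (hence the new temp/hash_value/hash_2_value declarations in the match-finder functions). A simplified, self-contained imitation of that macro shape; the mask, table, and arithmetic are made up for the example:

#include <stdio.h>

#define HASH_2_MASK_SKETCH 0x3FFu

/* The macro assigns to caller-declared names instead of declaring them. */
#define hash_3_calc_sketch(cur, table, mask) \
    temp = (table)[(cur)[0]] ^ (cur)[1]; \
    hash_2_value = temp & HASH_2_MASK_SKETCH; \
    hash_value = (temp ^ ((unsigned)((cur)[2]) << 8)) & (mask)

int main(void)
{
    static unsigned table[256];               /* zero-filled stand-in table */
    const unsigned char cur[3] = { 'a', 'b', 'c' };
    unsigned temp, hash_2_value, hash_value;  /* declared by the caller */

    hash_3_calc_sketch(cur, table, 0xFFFFu);
    printf("temp=%u h2=%u h=%u\n", temp, hash_2_value, hash_value);
    return 0;
}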
if (len_best == mf->nice_len) { + uint8_t *p1; + uint8_t *p2; + // The limit for the match length is either the // maximum match length supported by the LZ-based // encoder or the number of bytes left in the @@ -59,11 +63,11 @@ lzma_mf_find(lzma_mf *mf, uint32_t *count_ptr, lzma_match *matches) // Pointer to the byte we just ran through // the match finder. - const uint8_t *p1 = mf_ptr(mf) - 1; + p1 = mf_ptr(mf) - 1; // Pointer to the beginning of the match. We need -1 // here because the match distances are zero based. - const uint8_t *p2 = p1 - matches[count - 1].dist - 1; + p2 = p1 - matches[count - 1].dist - 1; while (len_best < limit && p1[len_best] == p2[len_best]) @@ -108,18 +112,22 @@ lzma_mf_find(lzma_mf *mf, uint32_t *count_ptr, lzma_match *matches) static void normalize(lzma_mf *mf) { + uint32_t i; + uint32_t subvalue; + uint32_t count; + uint32_t *hash; + assert(mf->read_pos + mf->offset == MUST_NORMALIZE_POS); // In future we may not want to touch the lowest bits, because there // may be match finders that use larger resolution than one byte. - const uint32_t subvalue - = (MUST_NORMALIZE_POS - mf->cyclic_size); + subvalue = (MUST_NORMALIZE_POS - mf->cyclic_size); // & (~(UINT32_C(1) << 10) - 1); - const uint32_t count = mf->hash_size_sum + mf->sons_count; - uint32_t *hash = mf->hash; + count = mf->hash_size_sum + mf->sons_count; + hash = mf->hash; - for (uint32_t i = 0; i < count; ++i) { + for (i = 0; i < count; ++i) { // If the distance is greater than the dictionary size, // we can simply mark the hash element as empty. // @@ -196,15 +204,14 @@ move_pending(lzma_mf *mf) move_pending(mf); \ ret_op; \ } \ - const uint8_t *cur = mf_ptr(mf); \ - const uint32_t pos = mf->read_pos + mf->offset + cur = mf_ptr(mf); \ + pos = mf->read_pos + mf->offset /// Header for find functions. "return 0" indicates that zero matches /// were found. #define header_find(is_bt, len_min) \ - header(is_bt, len_min, return 0); \ - uint32_t matches_count = 0 + header(is_bt, len_min, return 0) /// Header for a loop in a skip function. "continue" tells to skip the rest @@ -261,10 +268,11 @@ hc_find_func( while (true) { const uint32_t delta = pos - cur_match; + const uint8_t *pb; if (depth-- == 0 || delta >= cyclic_size) return matches; - const uint8_t *const pb = cur - delta; + pb = cur - delta; cur_match = son[cyclic_pos - delta + (delta > cyclic_pos ? 
cyclic_size : 0)]; @@ -305,18 +313,23 @@ do { \ extern uint32_t lzma_mf_hc3_find(lzma_mf *mf, lzma_match *matches) { + const uint8_t *cur; + uint32_t pos; + uint32_t temp, hash_value, hash_2_value; /* hash_3_calc */ + uint32_t delta2, cur_match; + uint32_t len_best = 2; + uint32_t matches_count = 0; + header_find(false, 3); hash_3_calc(); - const uint32_t delta2 = pos - mf->hash[hash_2_value]; - const uint32_t cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value]; + delta2 = pos - mf->hash[hash_2_value]; + cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value]; mf->hash[hash_2_value] = pos; mf->hash[FIX_3_HASH_SIZE + hash_value] = pos; - uint32_t len_best = 2; - if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) { for ( ; len_best != len_limit; ++len_best) if (*(cur + len_best - delta2) != cur[len_best]) @@ -340,18 +353,22 @@ extern void lzma_mf_hc3_skip(lzma_mf *mf, uint32_t amount) { do { + const uint8_t *cur; + uint32_t pos; + uint32_t temp, hash_value, hash_2_value; /* hash_3_calc */ + uint32_t cur_match; + if (mf_avail(mf) < 3) { move_pending(mf); continue; } - const uint8_t *cur = mf_ptr(mf); - const uint32_t pos = mf->read_pos + mf->offset; + cur = mf_ptr(mf); + pos = mf->read_pos + mf->offset; hash_3_calc(); - const uint32_t cur_match - = mf->hash[FIX_3_HASH_SIZE + hash_value]; + cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value]; mf->hash[hash_2_value] = pos; mf->hash[FIX_3_HASH_SIZE + hash_value] = pos; @@ -367,21 +384,25 @@ lzma_mf_hc3_skip(lzma_mf *mf, uint32_t amount) extern uint32_t lzma_mf_hc4_find(lzma_mf *mf, lzma_match *matches) { + const uint8_t *cur; + uint32_t pos; + uint32_t temp, hash_value, hash_2_value, hash_3_value; /* hash_4_calc */ + uint32_t delta2, delta3, cur_match; + uint32_t len_best = 1; + uint32_t matches_count = 0; + header_find(false, 4); hash_4_calc(); - uint32_t delta2 = pos - mf->hash[hash_2_value]; - const uint32_t delta3 - = pos - mf->hash[FIX_3_HASH_SIZE + hash_3_value]; - const uint32_t cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value]; + delta2 = pos - mf->hash[hash_2_value]; + delta3 = pos - mf->hash[FIX_3_HASH_SIZE + hash_3_value]; + cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value]; mf->hash[hash_2_value ] = pos; mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos; mf->hash[FIX_4_HASH_SIZE + hash_value] = pos; - uint32_t len_best = 1; - if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) { len_best = 2; matches[0].len = 2; @@ -420,18 +441,22 @@ extern void lzma_mf_hc4_skip(lzma_mf *mf, uint32_t amount) { do { + const uint8_t *cur; + uint32_t pos; + uint32_t temp, hash_value, hash_2_value, hash_3_value; /* hash_4_calc */ + uint32_t cur_match; + if (mf_avail(mf) < 4) { move_pending(mf); continue; } - const uint8_t *cur = mf_ptr(mf); - const uint32_t pos = mf->read_pos + mf->offset; + cur = mf_ptr(mf); + pos = mf->read_pos + mf->offset; hash_4_calc(); - const uint32_t cur_match - = mf->hash[FIX_4_HASH_SIZE + hash_value]; + cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value]; mf->hash[hash_2_value] = pos; mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos; @@ -469,6 +494,10 @@ bt_find_func( uint32_t len1 = 0; while (true) { + uint32_t *pair; + const uint8_t *pb; + uint32_t len; + const uint32_t delta = pos - cur_match; if (depth-- == 0 || delta >= cyclic_size) { *ptr0 = EMPTY_HASH_VALUE; @@ -476,12 +505,12 @@ bt_find_func( return matches; } - uint32_t *const pair = son + ((cyclic_pos - delta + pair = son + ((cyclic_pos - delta + (delta > cyclic_pos ? 
cyclic_size : 0)) << 1); - const uint8_t *const pb = cur - delta; - uint32_t len = my_min(len0, len1); + pb = cur - delta; + len = my_min(len0, len1); if (pb[len] == cur[len]) { while (++len != len_limit) @@ -535,6 +564,10 @@ bt_skip_func( uint32_t len1 = 0; while (true) { + uint32_t *pair; + const uint8_t *pb; + uint32_t len; + const uint32_t delta = pos - cur_match; if (depth-- == 0 || delta >= cyclic_size) { *ptr0 = EMPTY_HASH_VALUE; @@ -542,11 +575,11 @@ bt_skip_func( return; } - uint32_t *pair = son + ((cyclic_pos - delta + pair = son + ((cyclic_pos - delta + (delta > cyclic_pos ? cyclic_size : 0)) << 1); - const uint8_t *pb = cur - delta; - uint32_t len = my_min(len0, len1); + pb = cur - delta; + len = my_min(len0, len1); if (pb[len] == cur[len]) { while (++len != len_limit) @@ -593,11 +626,17 @@ do { \ extern uint32_t lzma_mf_bt2_find(lzma_mf *mf, lzma_match *matches) { + const uint8_t *cur; + uint32_t pos; + uint32_t hash_value; /* hash_2_calc */ + uint32_t cur_match; + uint32_t matches_count = 0; + header_find(true, 2); hash_2_calc(); - const uint32_t cur_match = mf->hash[hash_value]; + cur_match = mf->hash[hash_value]; mf->hash[hash_value] = pos; bt_find(1); @@ -608,11 +647,16 @@ extern void lzma_mf_bt2_skip(lzma_mf *mf, uint32_t amount) { do { + const uint8_t *cur; + uint32_t pos; + uint32_t hash_value; /* hash_2_calc */ + uint32_t cur_match; + header_skip(true, 2); hash_2_calc(); - const uint32_t cur_match = mf->hash[hash_value]; + cur_match = mf->hash[hash_value]; mf->hash[hash_value] = pos; bt_skip(); @@ -626,18 +670,23 @@ lzma_mf_bt2_skip(lzma_mf *mf, uint32_t amount) extern uint32_t lzma_mf_bt3_find(lzma_mf *mf, lzma_match *matches) { + const uint8_t *cur; + uint32_t pos; + uint32_t temp, hash_value, hash_2_value; /* hash_3_calc */ + uint32_t delta2, cur_match; + uint32_t len_best = 2; + uint32_t matches_count = 0; + header_find(true, 3); hash_3_calc(); - const uint32_t delta2 = pos - mf->hash[hash_2_value]; - const uint32_t cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value]; + delta2 = pos - mf->hash[hash_2_value]; + cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value]; mf->hash[hash_2_value] = pos; mf->hash[FIX_3_HASH_SIZE + hash_value] = pos; - uint32_t len_best = 2; - if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) { for ( ; len_best != len_limit; ++len_best) if (*(cur + len_best - delta2) != cur[len_best]) @@ -661,12 +710,16 @@ extern void lzma_mf_bt3_skip(lzma_mf *mf, uint32_t amount) { do { + const uint8_t *cur; + uint32_t pos; + uint32_t temp, hash_value, hash_2_value; /* hash_3_calc */ + uint32_t cur_match; + header_skip(true, 3); hash_3_calc(); - const uint32_t cur_match - = mf->hash[FIX_3_HASH_SIZE + hash_value]; + cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value]; mf->hash[hash_2_value] = pos; mf->hash[FIX_3_HASH_SIZE + hash_value] = pos; @@ -682,21 +735,25 @@ lzma_mf_bt3_skip(lzma_mf *mf, uint32_t amount) extern uint32_t lzma_mf_bt4_find(lzma_mf *mf, lzma_match *matches) { + const uint8_t *cur; + uint32_t pos; + uint32_t temp, hash_value, hash_2_value, hash_3_value; /* hash_4_calc */ + uint32_t delta2, delta3, cur_match; + uint32_t len_best = 1; + uint32_t matches_count = 0; + header_find(true, 4); hash_4_calc(); - uint32_t delta2 = pos - mf->hash[hash_2_value]; - const uint32_t delta3 - = pos - mf->hash[FIX_3_HASH_SIZE + hash_3_value]; - const uint32_t cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value]; + delta2 = pos - mf->hash[hash_2_value]; + delta3 = pos - mf->hash[FIX_3_HASH_SIZE + hash_3_value]; + cur_match = mf->hash[FIX_4_HASH_SIZE + 
hash_value]; mf->hash[hash_2_value] = pos; mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos; mf->hash[FIX_4_HASH_SIZE + hash_value] = pos; - uint32_t len_best = 1; - if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) { len_best = 2; matches[0].len = 2; @@ -735,12 +792,16 @@ extern void lzma_mf_bt4_skip(lzma_mf *mf, uint32_t amount) { do { + const uint8_t *cur; + uint32_t pos; + uint32_t temp, hash_value, hash_2_value, hash_3_value; /* hash_4_calc */ + uint32_t cur_match; + header_skip(true, 4); hash_4_calc(); - const uint32_t cur_match - = mf->hash[FIX_4_HASH_SIZE + hash_value]; + cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value]; mf->hash[hash_2_value] = pos; mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos; diff --git a/Utilities/cmliblzma/liblzma/lzma/fastpos.h b/Utilities/cmliblzma/liblzma/lzma/fastpos.h index 4aea231..5a834d6 100644 --- a/Utilities/cmliblzma/liblzma/lzma/fastpos.h +++ b/Utilities/cmliblzma/liblzma/lzma/fastpos.h @@ -75,6 +75,8 @@ // on all systems I have tried. The size optimized version is sometimes // slightly faster, but sometimes it is a lot slower. +#include "config.h" + #ifdef HAVE_SMALL # define get_pos_slot(pos) ((pos) <= 4 ? (pos) : get_pos_slot_2(pos)) diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma2_decoder.c b/Utilities/cmliblzma/liblzma/lzma/lzma2_decoder.c index 3e42575..ca14c4a 100644 --- a/Utilities/cmliblzma/liblzma/lzma/lzma2_decoder.c +++ b/Utilities/cmliblzma/liblzma/lzma/lzma2_decoder.c @@ -224,6 +224,8 @@ static lzma_ret lzma2_decoder_init(lzma_lz_decoder *lz, lzma_allocator *allocator, const void *opt, lzma_lz_options *lz_options) { + const lzma_options_lzma *options = opt; + if (lz->coder == NULL) { lz->coder = lzma_alloc(sizeof(lzma_coder), allocator); if (lz->coder == NULL) @@ -235,8 +237,6 @@ lzma2_decoder_init(lzma_lz_decoder *lz, lzma_allocator *allocator, lz->coder->lzma = LZMA_LZ_DECODER_INIT; } - const lzma_options_lzma *options = opt; - lz->coder->sequence = SEQ_CONTROL; lz->coder->need_properties = true; lz->coder->need_dictionary_reset = options->preset_dict == NULL @@ -272,6 +272,8 @@ extern lzma_ret lzma_lzma2_props_decode(void **options, lzma_allocator *allocator, const uint8_t *props, size_t props_size) { + lzma_options_lzma *opt; + if (props_size != 1) return LZMA_OPTIONS_ERROR; @@ -283,8 +285,7 @@ lzma_lzma2_props_decode(void **options, lzma_allocator *allocator, if (props[0] > 40) return LZMA_OPTIONS_ERROR; - lzma_options_lzma *opt = lzma_alloc( - sizeof(lzma_options_lzma), allocator); + opt = lzma_alloc(sizeof(lzma_options_lzma), allocator); if (opt == NULL) return LZMA_MEM_ERROR; diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma2_encoder.c b/Utilities/cmliblzma/liblzma/lzma/lzma2_encoder.c index 992720c..8784f5d 100644 --- a/Utilities/cmliblzma/liblzma/lzma/lzma2_encoder.c +++ b/Utilities/cmliblzma/liblzma/lzma/lzma2_encoder.c @@ -54,13 +54,14 @@ struct lzma_coder_s { static void lzma2_header_lzma(lzma_coder *coder) { + size_t pos; + size_t size; + assert(coder->uncompressed_size > 0); assert(coder->uncompressed_size <= LZMA2_UNCOMPRESSED_MAX); assert(coder->compressed_size > 0); assert(coder->compressed_size <= LZMA2_CHUNK_MAX); - size_t pos; - if (coder->need_properties) { pos = 0; @@ -81,7 +82,7 @@ lzma2_header_lzma(lzma_coder *coder) coder->buf_pos = pos; // Uncompressed size - size_t size = coder->uncompressed_size - 1; + size = coder->uncompressed_size - 1; coder->buf[pos++] += size >> 16; coder->buf[pos++] = (size >> 8) & 0xFF; coder->buf[pos++] = size & 0xFF; @@ -162,6 +163,9 @@ lzma2_encode(lzma_coder 
*restrict coder, lzma_mf *restrict mf, // Fall through case SEQ_LZMA_ENCODE: { + uint32_t read_start; + lzma_ret ret; + // Calculate how much more uncompressed data this chunk // could accept. const uint32_t left = LZMA2_UNCOMPRESSED_MAX @@ -182,10 +186,10 @@ lzma2_encode(lzma_coder *restrict coder, lzma_mf *restrict mf, // Save the start position so that we can update // coder->uncompressed_size. - const uint32_t read_start = mf->read_pos - mf->read_ahead; + read_start = mf->read_pos - mf->read_ahead; // Call the LZMA encoder until the chunk is finished. - const lzma_ret ret = lzma_lzma_encode(coder->lzma, mf, + ret = lzma_lzma_encode(coder->lzma, mf, coder->buf + LZMA2_HEADER_MAX, &coder->compressed_size, LZMA2_CHUNK_MAX, limit); @@ -273,6 +277,8 @@ lzma2_encoder_end(lzma_coder *coder, lzma_allocator *allocator) static lzma_ret lzma2_encoder_options_update(lzma_coder *coder, const lzma_filter *filter) { + lzma_options_lzma *opt; + // New options can be set only when there is no incomplete chunk. // This is the case at the beginning of the raw stream and right // after LZMA_SYNC_FLUSH. @@ -281,7 +287,7 @@ lzma2_encoder_options_update(lzma_coder *coder, const lzma_filter *filter) // Look if there are new options. At least for now, // only lc/lp/pb can be changed. - const lzma_options_lzma *opt = filter->options; + opt = filter->options; if (coder->opt_cur.lc != opt->lc || coder->opt_cur.lp != opt->lp || coder->opt_cur.pb != opt->pb) { // Validate the options. diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma_common.h b/Utilities/cmliblzma/liblzma/lzma/lzma_common.h index e31e285..36267dc 100644 --- a/Utilities/cmliblzma/liblzma/lzma/lzma_common.h +++ b/Utilities/cmliblzma/liblzma/lzma/lzma_common.h @@ -129,12 +129,15 @@ static inline void literal_init(probability (*probs)[LITERAL_CODER_SIZE], uint32_t lc, uint32_t lp) { + uint32_t coders; + uint32_t i, j; + assert(lc + lp <= LZMA_LCLP_MAX); - const uint32_t coders = 1U << (lc + lp); + coders = 1U << (lc + lp); - for (uint32_t i = 0; i < coders; ++i) - for (uint32_t j = 0; j < LITERAL_CODER_SIZE; ++j) + for (i = 0; i < coders; ++i) + for (j = 0; j < LITERAL_CODER_SIZE; ++j) bit_reset(probs[i][j]); return; diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma_decoder.c b/Utilities/cmliblzma/liblzma/lzma/lzma_decoder.c index 9979bb4..1bee2a9 100644 --- a/Utilities/cmliblzma/liblzma/lzma/lzma_decoder.c +++ b/Utilities/cmliblzma/liblzma/lzma/lzma_decoder.c @@ -114,33 +114,33 @@ do { \ case seq ## _CHOICE: \ rc_if_0(ld.choice, seq ## _CHOICE) { \ rc_update_0(ld.choice); \ - rc_bit_case(ld.low[pos_state][symbol], , , seq ## _LOW0); \ - rc_bit_case(ld.low[pos_state][symbol], , , seq ## _LOW1); \ - rc_bit_case(ld.low[pos_state][symbol], , , seq ## _LOW2); \ + rc_bit_case(ld.low[pos_state][symbol], 0, 0, seq ## _LOW0); \ + rc_bit_case(ld.low[pos_state][symbol], 0, 0, seq ## _LOW1); \ + rc_bit_case(ld.low[pos_state][symbol], 0, 0, seq ## _LOW2); \ target = symbol - LEN_LOW_SYMBOLS + MATCH_LEN_MIN; \ } else { \ rc_update_1(ld.choice); \ case seq ## _CHOICE2: \ rc_if_0(ld.choice2, seq ## _CHOICE2) { \ rc_update_0(ld.choice2); \ - rc_bit_case(ld.mid[pos_state][symbol], , , \ + rc_bit_case(ld.mid[pos_state][symbol], 0, 0, \ seq ## _MID0); \ - rc_bit_case(ld.mid[pos_state][symbol], , , \ + rc_bit_case(ld.mid[pos_state][symbol], 0, 0, \ seq ## _MID1); \ - rc_bit_case(ld.mid[pos_state][symbol], , , \ + rc_bit_case(ld.mid[pos_state][symbol], 0, 0, \ seq ## _MID2); \ target = symbol - LEN_MID_SYMBOLS \ + MATCH_LEN_MIN + LEN_LOW_SYMBOLS; \ } else { \ 
rc_update_1(ld.choice2); \ - rc_bit_case(ld.high[symbol], , , seq ## _HIGH0); \ - rc_bit_case(ld.high[symbol], , , seq ## _HIGH1); \ - rc_bit_case(ld.high[symbol], , , seq ## _HIGH2); \ - rc_bit_case(ld.high[symbol], , , seq ## _HIGH3); \ - rc_bit_case(ld.high[symbol], , , seq ## _HIGH4); \ - rc_bit_case(ld.high[symbol], , , seq ## _HIGH5); \ - rc_bit_case(ld.high[symbol], , , seq ## _HIGH6); \ - rc_bit_case(ld.high[symbol], , , seq ## _HIGH7); \ + rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH0); \ + rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH1); \ + rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH2); \ + rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH3); \ + rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH4); \ + rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH5); \ + rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH6); \ + rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH7); \ target = symbol - LEN_HIGH_SYMBOLS \ + MATCH_LEN_MIN \ + LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS; \ @@ -285,13 +285,6 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr, const uint8_t *restrict in, size_t *restrict in_pos, size_t in_size) { - //////////////////// - // Initialization // - //////////////////// - - if (!rc_read_init(&coder->rc, in, in_pos, in_size)) - return LZMA_OK; - /////////////// // Variables // /////////////// @@ -338,6 +331,16 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr, if (no_eopm && coder->uncompressed_size < dict.limit - dict.pos) dict.limit = dict.pos + (size_t)(coder->uncompressed_size); + //////////////////// + // Initialization // + //////////////////// + + if (!rc_read_init(&coder->rc, in, in_pos, in_size)) + return LZMA_OK; + + rc = coder->rc; + rc_in_pos = *in_pos; + // The main decoder loop. The "switch" is used to restart the decoder at // correct location. Once restarted, the "switch" is no longer used. switch (coder->sequence) @@ -353,6 +356,21 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr, break; rc_if_0(coder->is_match[state][pos_state], SEQ_IS_MATCH) { + static const lzma_lzma_state next_state[] = { + STATE_LIT_LIT, + STATE_LIT_LIT, + STATE_LIT_LIT, + STATE_LIT_LIT, + STATE_MATCH_LIT_LIT, + STATE_REP_LIT_LIT, + STATE_SHORTREP_LIT_LIT, + STATE_MATCH_LIT, + STATE_REP_LIT, + STATE_SHORTREP_LIT, + STATE_MATCH_LIT, + STATE_REP_LIT + }; + rc_update_0(coder->is_match[state][pos_state]); // It's a literal i.e. a single 8-bit byte. @@ -370,16 +388,21 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr, rc_bit(probs[symbol], , , SEQ_LITERAL); } while (symbol < (1 << 8)); #else - rc_bit_case(probs[symbol], , , SEQ_LITERAL0); - rc_bit_case(probs[symbol], , , SEQ_LITERAL1); - rc_bit_case(probs[symbol], , , SEQ_LITERAL2); - rc_bit_case(probs[symbol], , , SEQ_LITERAL3); - rc_bit_case(probs[symbol], , , SEQ_LITERAL4); - rc_bit_case(probs[symbol], , , SEQ_LITERAL5); - rc_bit_case(probs[symbol], , , SEQ_LITERAL6); - rc_bit_case(probs[symbol], , , SEQ_LITERAL7); + rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL0); + rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL1); + rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL2); + rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL3); + rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL4); + rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL5); + rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL6); + rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL7); #endif } else { +#ifndef HAVE_SMALL + uint32_t match_bit; + uint32_t subcoder_index; +#endif + // Decode literal with match byte. 
// // We store the byte we compare against @@ -418,8 +441,6 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr, } while (symbol < (1 << 8)); #else // Unroll the loop. - uint32_t match_bit; - uint32_t subcoder_index; # define d(seq) \ case seq: \ @@ -453,20 +474,6 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr, // Use a lookup table to update to literal state, // since compared to other state updates, this would // need two branches. - static const lzma_lzma_state next_state[] = { - STATE_LIT_LIT, - STATE_LIT_LIT, - STATE_LIT_LIT, - STATE_LIT_LIT, - STATE_MATCH_LIT_LIT, - STATE_REP_LIT_LIT, - STATE_SHORTREP_LIT_LIT, - STATE_MATCH_LIT, - STATE_REP_LIT, - STATE_SHORTREP_LIT, - STATE_MATCH_LIT, - STATE_REP_LIT - }; state = next_state[state]; case SEQ_LITERAL_WRITE: @@ -511,12 +518,12 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr, rc_bit(probs[symbol], , , SEQ_POS_SLOT); } while (symbol < POS_SLOTS); #else - rc_bit_case(probs[symbol], , , SEQ_POS_SLOT0); - rc_bit_case(probs[symbol], , , SEQ_POS_SLOT1); - rc_bit_case(probs[symbol], , , SEQ_POS_SLOT2); - rc_bit_case(probs[symbol], , , SEQ_POS_SLOT3); - rc_bit_case(probs[symbol], , , SEQ_POS_SLOT4); - rc_bit_case(probs[symbol], , , SEQ_POS_SLOT5); + rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT0); + rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT1); + rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT2); + rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT3); + rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT4); + rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT5); #endif // Get rid of the highest bit that was needed for // indexing of the probability array. @@ -564,25 +571,25 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr, switch (limit) { case 5: assert(offset == 0); - rc_bit(probs[symbol], , + rc_bit(probs[symbol], 0, rep0 += 1, SEQ_POS_MODEL); ++offset; --limit; case 4: - rc_bit(probs[symbol], , + rc_bit(probs[symbol], 0, rep0 += 1 << offset, SEQ_POS_MODEL); ++offset; --limit; case 3: - rc_bit(probs[symbol], , + rc_bit(probs[symbol], 0, rep0 += 1 << offset, SEQ_POS_MODEL); ++offset; --limit; case 2: - rc_bit(probs[symbol], , + rc_bit(probs[symbol], 0, rep0 += 1 << offset, SEQ_POS_MODEL); ++offset; @@ -594,7 +601,7 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr, // rc_bit_last() here to omit // the unneeded updating of // "symbol". - rc_bit_last(probs[symbol], , + rc_bit_last(probs[symbol], 0, rep0 += 1 << offset, SEQ_POS_MODEL); } @@ -628,19 +635,19 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr, } while (++offset < ALIGN_BITS); #else case SEQ_ALIGN0: - rc_bit(coder->pos_align[symbol], , + rc_bit(coder->pos_align[symbol], 0, rep0 += 1, SEQ_ALIGN0); case SEQ_ALIGN1: - rc_bit(coder->pos_align[symbol], , + rc_bit(coder->pos_align[symbol], 0, rep0 += 2, SEQ_ALIGN1); case SEQ_ALIGN2: - rc_bit(coder->pos_align[symbol], , + rc_bit(coder->pos_align[symbol], 0, rep0 += 4, SEQ_ALIGN2); case SEQ_ALIGN3: // Like in SEQ_POS_MODEL, we don't // need "symbol" for anything else // than indexing the probability array. - rc_bit_last(coder->pos_align[symbol], , + rc_bit_last(coder->pos_align[symbol], 0, rep0 += 8, SEQ_ALIGN3); #endif @@ -725,9 +732,11 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr, // is stored to rep0 and rep1, rep2 and rep3 // are updated accordingly. 
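The rc_bit_case() and rc_bit() call sites above also change from empty macro arguments (", , ,") to explicit zeros (", 0, 0,"), since empty macro arguments were only standardized in C99. Judging from the other call sites, those slots expand as action statements whose value is ignored, so a literal 0 becomes the harmless no-op statement "0;". A stand-in macro showing the shape; this is not the real liblzma rc_bit_case, whose arguments carry range-coder actions and a resume label:

#include <stdio.h>

/* action0/action1 are executed as statements in the 0/1 branches;
 * a literal 0 in either slot expands to the no-op statement "0;". */
#define bit_case_sketch(prob, action0, action1, seq) \
    do { \
        if ((prob) >= 128) { action1; } else { action0; } \
    } while (0)

int main(void)
{
    int ones = 0;

    bit_case_sketch(200, 0, ++ones, SEQ_A);  /* 0 fills the unused slot */
    bit_case_sketch(10,  0, ++ones, SEQ_B);

    printf("ones = %d\n", ones);
    return 0;
}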
rc_if_0(coder->is_rep1[state], SEQ_IS_REP1) { + uint32_t distance; + rc_update_0(coder->is_rep1[state]); - const uint32_t distance = rep1; + distance = rep1; rep1 = rep0; rep0 = distance; @@ -736,19 +745,23 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr, case SEQ_IS_REP2: rc_if_0(coder->is_rep2[state], SEQ_IS_REP2) { + uint32_t distance; + rc_update_0(coder->is_rep2[ state]); - const uint32_t distance = rep2; + distance = rep2; rep2 = rep1; rep1 = rep0; rep0 = distance; } else { + uint32_t distance; + rc_update_1(coder->is_rep2[ state]); - const uint32_t distance = rep3; + distance = rep3; rep3 = rep2; rep2 = rep1; rep1 = rep0; @@ -853,6 +866,9 @@ lzma_lzma_decoder_uncompressed(void *coder_ptr, lzma_vli uncompressed_size) static void lzma_decoder_reset(lzma_coder *coder, const void *opt) { + uint32_t i, j, pos_state; + uint32_t num_pos_states; + const lzma_options_lzma *options = opt; // NOTE: We assume that lc/lp/pb are valid since they were @@ -879,8 +895,8 @@ lzma_decoder_reset(lzma_coder *coder, const void *opt) rc_reset(coder->rc); // Bit and bittree decoders - for (uint32_t i = 0; i < STATES; ++i) { - for (uint32_t j = 0; j <= coder->pos_mask; ++j) { + for (i = 0; i < STATES; ++i) { + for (j = 0; j <= coder->pos_mask; ++j) { bit_reset(coder->is_match[i][j]); bit_reset(coder->is_rep0_long[i][j]); } @@ -891,22 +907,22 @@ lzma_decoder_reset(lzma_coder *coder, const void *opt) bit_reset(coder->is_rep2[i]); } - for (uint32_t i = 0; i < LEN_TO_POS_STATES; ++i) + for (i = 0; i < LEN_TO_POS_STATES; ++i) bittree_reset(coder->pos_slot[i], POS_SLOT_BITS); - for (uint32_t i = 0; i < FULL_DISTANCES - END_POS_MODEL_INDEX; ++i) + for (i = 0; i < FULL_DISTANCES - END_POS_MODEL_INDEX; ++i) bit_reset(coder->pos_special[i]); bittree_reset(coder->pos_align, ALIGN_BITS); // Len decoders (also bit/bittree) - const uint32_t num_pos_states = 1U << options->pb; + num_pos_states = 1U << options->pb; bit_reset(coder->match_len_decoder.choice); bit_reset(coder->match_len_decoder.choice2); bit_reset(coder->rep_len_decoder.choice); bit_reset(coder->rep_len_decoder.choice2); - for (uint32_t pos_state = 0; pos_state < num_pos_states; ++pos_state) { + for (pos_state = 0; pos_state < num_pos_states; ++pos_state) { bittree_reset(coder->match_len_decoder.low[pos_state], LEN_LOW_BITS); bittree_reset(coder->match_len_decoder.mid[pos_state], @@ -936,6 +952,8 @@ extern lzma_ret lzma_lzma_decoder_create(lzma_lz_decoder *lz, lzma_allocator *allocator, const void *opt, lzma_lz_options *lz_options) { + const lzma_options_lzma *options = opt; + if (lz->coder == NULL) { lz->coder = lzma_alloc(sizeof(lzma_coder), allocator); if (lz->coder == NULL) @@ -948,7 +966,6 @@ lzma_lzma_decoder_create(lzma_lz_decoder *lz, lzma_allocator *allocator, // All dictionary sizes are OK here. LZ decoder will take care of // the special cases. 
- const lzma_options_lzma *options = opt; lz_options->dict_size = options->dict_size; lz_options->preset_dict = options->preset_dict; lz_options->preset_dict_size = options->preset_dict_size; @@ -1028,11 +1045,12 @@ extern lzma_ret lzma_lzma_props_decode(void **options, lzma_allocator *allocator, const uint8_t *props, size_t props_size) { + lzma_options_lzma *opt; + if (props_size != 5) return LZMA_OPTIONS_ERROR; - lzma_options_lzma *opt - = lzma_alloc(sizeof(lzma_options_lzma), allocator); + opt = lzma_alloc(sizeof(lzma_options_lzma), allocator); if (opt == NULL) return LZMA_MEM_ERROR; diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder.c b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder.c index 0b9ee9e..6186f83 100644 --- a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder.c +++ b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder.c @@ -28,11 +28,14 @@ literal_matched(lzma_range_encoder *rc, probability *subcoder, symbol += UINT32_C(1) << 8; do { + uint32_t match_bit; + uint32_t subcoder_index; + uint32_t bit; + match_byte <<= 1; - const uint32_t match_bit = match_byte & offset; - const uint32_t subcoder_index - = offset + match_bit + (symbol >> 8); - const uint32_t bit = (symbol >> 7) & 1; + match_bit = match_byte & offset; + subcoder_index = offset + match_bit + (symbol >> 8); + bit = (symbol >> 7) & 1; rc_bit(rc, &subcoder[subcoder_index], bit); symbol <<= 1; @@ -77,16 +80,19 @@ literal(lzma_coder *coder, lzma_mf *mf, uint32_t position) static void length_update_prices(lzma_length_encoder *lc, const uint32_t pos_state) { + uint32_t a0, a1, b0, b1; + uint32_t *prices; + uint32_t i; + const uint32_t table_size = lc->table_size; lc->counters[pos_state] = table_size; - const uint32_t a0 = rc_bit_0_price(lc->choice); - const uint32_t a1 = rc_bit_1_price(lc->choice); - const uint32_t b0 = a1 + rc_bit_0_price(lc->choice2); - const uint32_t b1 = a1 + rc_bit_1_price(lc->choice2); - uint32_t *const prices = lc->prices[pos_state]; + a0 = rc_bit_0_price(lc->choice); + a1 = rc_bit_1_price(lc->choice); + b0 = a1 + rc_bit_0_price(lc->choice2); + b1 = a1 + rc_bit_1_price(lc->choice2); + prices = lc->prices[pos_state]; - uint32_t i; for (i = 0; i < table_size && i < LEN_LOW_SYMBOLS; ++i) prices[i] = a0 + rc_bittree_price(lc->low[pos_state], LEN_LOW_BITS, i); @@ -143,13 +149,16 @@ static inline void match(lzma_coder *coder, const uint32_t pos_state, const uint32_t distance, const uint32_t len) { + uint32_t pos_slot; + uint32_t len_to_pos_state; + update_match(coder->state); length(&coder->rc, &coder->match_len_encoder, pos_state, len, coder->fast_mode); - const uint32_t pos_slot = get_pos_slot(distance); - const uint32_t len_to_pos_state = get_len_to_pos_state(len); + pos_slot = get_pos_slot(distance); + len_to_pos_state = get_len_to_pos_state(len); rc_bittree(&coder->rc, coder->pos_slot[len_to_pos_state], POS_SLOT_BITS, pos_slot); @@ -313,14 +322,19 @@ lzma_lzma_encode(lzma_coder *restrict coder, lzma_mf *restrict mf, uint8_t *restrict out, size_t *restrict out_pos, size_t out_size, uint32_t limit) { + uint32_t position; + // Initialize the stream if no data has been encoded yet. if (!coder->is_initialized && !encode_init(coder, mf)) return LZMA_OK; // Get the lowest bits of the uncompressed offset from the LZ layer. - uint32_t position = mf_position(mf); + position = mf_position(mf); while (true) { + uint32_t len; + uint32_t back; + // Encode pending bits, if any. 
Calling this before encoding // the next symbol is needed only with plain LZMA, since // LZMA2 always provides big enough buffer to flush @@ -359,8 +373,6 @@ lzma_lzma_encode(lzma_coder *restrict coder, lzma_mf *restrict mf, // - UINT32_MAX: not a match but a literal // Value ranges for len: // - [MATCH_LEN_MIN, MATCH_LEN_MAX] - uint32_t len; - uint32_t back; if (coder->fast_mode) lzma_lzma_optimum_fast(coder, mf, &back, &len); @@ -453,10 +465,12 @@ static void length_encoder_reset(lzma_length_encoder *lencoder, const uint32_t num_pos_states, const bool fast_mode) { + size_t pos_state; + bit_reset(lencoder->choice); bit_reset(lencoder->choice2); - for (size_t pos_state = 0; pos_state < num_pos_states; ++pos_state) { + for (pos_state = 0; pos_state < num_pos_states; ++pos_state) { bittree_reset(lencoder->low[pos_state], LEN_LOW_BITS); bittree_reset(lencoder->mid[pos_state], LEN_MID_BITS); } @@ -464,7 +478,7 @@ length_encoder_reset(lzma_length_encoder *lencoder, bittree_reset(lencoder->high, LEN_HIGH_BITS); if (!fast_mode) - for (size_t pos_state = 0; pos_state < num_pos_states; + for (pos_state = 0; pos_state < num_pos_states; ++pos_state) length_update_prices(lencoder, pos_state); @@ -475,6 +489,8 @@ length_encoder_reset(lzma_length_encoder *lencoder, extern lzma_ret lzma_lzma_encoder_reset(lzma_coder *coder, const lzma_options_lzma *options) { + size_t i, j; + if (!is_options_valid(options)) return LZMA_OPTIONS_ERROR; @@ -487,14 +503,14 @@ lzma_lzma_encoder_reset(lzma_coder *coder, const lzma_options_lzma *options) // State coder->state = STATE_LIT_LIT; - for (size_t i = 0; i < REP_DISTANCES; ++i) + for (i = 0; i < REP_DISTANCES; ++i) coder->reps[i] = 0; literal_init(coder->literal, options->lc, options->lp); // Bit encoders - for (size_t i = 0; i < STATES; ++i) { - for (size_t j = 0; j <= coder->pos_mask; ++j) { + for (i = 0; i < STATES; ++i) { + for (j = 0; j <= coder->pos_mask; ++j) { bit_reset(coder->is_match[i][j]); bit_reset(coder->is_rep0_long[i][j]); } @@ -505,11 +521,11 @@ lzma_lzma_encoder_reset(lzma_coder *coder, const lzma_options_lzma *options) bit_reset(coder->is_rep2[i]); } - for (size_t i = 0; i < FULL_DISTANCES - END_POS_MODEL_INDEX; ++i) + for (i = 0; i < FULL_DISTANCES - END_POS_MODEL_INDEX; ++i) bit_reset(coder->pos_special[i]); // Bit tree encoders - for (size_t i = 0; i < LEN_TO_POS_STATES; ++i) + for (i = 0; i < LEN_TO_POS_STATES; ++i) bittree_reset(coder->pos_slot[i], POS_SLOT_BITS); bittree_reset(coder->pos_align, ALIGN_BITS); @@ -548,6 +564,9 @@ extern lzma_ret lzma_lzma_encoder_create(lzma_coder **coder_ptr, lzma_allocator *allocator, const lzma_options_lzma *options, lzma_lz_options *lz_options) { + lzma_coder *coder; + uint32_t log_size = 0; + // Allocate lzma_coder if it wasn't already allocated. if (*coder_ptr == NULL) { *coder_ptr = lzma_alloc(sizeof(lzma_coder), allocator); @@ -555,7 +574,7 @@ lzma_lzma_encoder_create(lzma_coder **coder_ptr, lzma_allocator *allocator, return LZMA_MEM_ERROR; } - lzma_coder *coder = *coder_ptr; + coder = *coder_ptr; // Set compression mode. We haven't validates the options yet, // but it's OK here, since nothing bad happens with invalid @@ -571,7 +590,6 @@ lzma_lzma_encoder_create(lzma_coder **coder_ptr, lzma_allocator *allocator, // Set dist_table_size. // Round the dictionary size up to next 2^n. 
- uint32_t log_size = 0; while ((UINT32_C(1) << log_size) < options->dict_size) ++log_size; @@ -625,13 +643,15 @@ lzma_lzma_encoder_init(lzma_next_coder *next, lzma_allocator *allocator, extern uint64_t lzma_lzma_encoder_memusage(const void *options) { + lzma_lz_options lz_options; + uint64_t lz_memusage; + if (!is_options_valid(options)) return UINT64_MAX; - lzma_lz_options lz_options; set_lz_options(&lz_options, options); - const uint64_t lz_memusage = lzma_lz_encoder_memusage(&lz_options); + lz_memusage = lzma_lz_encoder_memusage(&lz_options); if (lz_memusage == UINT64_MAX) return UINT64_MAX; diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_fast.c b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_fast.c index f835f69..52c26e4 100644 --- a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_fast.c +++ b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_fast.c @@ -20,6 +20,14 @@ extern void lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf, uint32_t *restrict back_res, uint32_t *restrict len_res) { + const uint8_t *buf; + uint32_t buf_avail; + uint32_t i; + uint32_t rep_len = 0; + uint32_t rep_index = 0; + uint32_t back_main = 0; + uint32_t limit; + const uint32_t nice_len = mf->nice_len; uint32_t len_main; @@ -32,8 +40,8 @@ lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf, matches_count = coder->matches_count; } - const uint8_t *buf = mf_ptr(mf) - 1; - const uint32_t buf_avail = my_min(mf_avail(mf) + 1, MATCH_LEN_MAX); + buf = mf_ptr(mf) - 1; + buf_avail = my_min(mf_avail(mf) + 1, MATCH_LEN_MAX); if (buf_avail < 2) { // There's not enough input left to encode a match. @@ -43,10 +51,9 @@ lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf, } // Look for repeated matches; scan the previous four match distances - uint32_t rep_len = 0; - uint32_t rep_index = 0; + for (i = 0; i < REP_DISTANCES; ++i) { + uint32_t len; - for (uint32_t i = 0; i < REP_DISTANCES; ++i) { // Pointer to the beginning of the match candidate const uint8_t *const buf_back = buf - coder->reps[i] - 1; @@ -57,7 +64,6 @@ lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf, // The first two bytes matched. // Calculate the length of the match. - uint32_t len; for (len = 2; len < buf_avail && buf[len] == buf_back[len]; ++len) ; @@ -86,7 +92,6 @@ lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf, return; } - uint32_t back_main = 0; if (len_main >= 2) { back_main = coder->matches[matches_count - 1].dist; @@ -153,15 +158,16 @@ lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf, // the old buf pointer instead of recalculating it with mf_ptr(). 
++buf; - const uint32_t limit = len_main - 1; + limit = len_main - 1; + + for (i = 0; i < REP_DISTANCES; ++i) { + uint32_t len; - for (uint32_t i = 0; i < REP_DISTANCES; ++i) { const uint8_t *const buf_back = buf - coder->reps[i] - 1; if (not_equal_16(buf, buf_back)) continue; - uint32_t len; for (len = 2; len < limit && buf[len] == buf_back[len]; ++len) ; diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_normal.c b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_normal.c index 7e85649..d2829a2 100644 --- a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_normal.c +++ b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_normal.c @@ -35,12 +35,15 @@ get_literal_price(const lzma_coder *const coder, const uint32_t pos, symbol += UINT32_C(1) << 8; do { + uint32_t match_bit; + uint32_t subcoder_index; + uint32_t bit; + match_byte <<= 1; - const uint32_t match_bit = match_byte & offset; - const uint32_t subcoder_index - = offset + match_bit + (symbol >> 8); - const uint32_t bit = (symbol >> 7) & 1; + match_bit = match_byte & offset; + subcoder_index = offset + match_bit + (symbol >> 8); + bit = (symbol >> 7) & 1; price += rc_bit_price(subcoder[subcoder_index], bit); symbol <<= 1; @@ -131,7 +134,11 @@ get_pos_len_price(const lzma_coder *const coder, const uint32_t pos, static void fill_distances_prices(lzma_coder *coder) { - for (uint32_t len_to_pos_state = 0; + uint32_t len_to_pos_state; + uint32_t pos_slot; + uint32_t i; + + for (len_to_pos_state = 0; len_to_pos_state < LEN_TO_POS_STATES; ++len_to_pos_state) { @@ -139,7 +146,7 @@ fill_distances_prices(lzma_coder *coder) = coder->pos_slot_prices[len_to_pos_state]; // Price to encode the pos_slot. - for (uint32_t pos_slot = 0; + for (pos_slot = 0; pos_slot < coder->dist_table_size; ++pos_slot) pos_slot_prices[pos_slot] = rc_bittree_price( coder->pos_slot[len_to_pos_state], @@ -148,7 +155,7 @@ fill_distances_prices(lzma_coder *coder) // For matches with distance >= FULL_DISTANCES, add the price // of the direct bits part of the match distance. (Align bits // are handled by fill_align_prices()). - for (uint32_t pos_slot = END_POS_MODEL_INDEX; + for (pos_slot = END_POS_MODEL_INDEX; pos_slot < coder->dist_table_size; ++pos_slot) pos_slot_prices[pos_slot] += rc_direct_price( ((pos_slot >> 1) - 1) - ALIGN_BITS); @@ -156,7 +163,7 @@ fill_distances_prices(lzma_coder *coder) // Distances in the range [0, 3] are fully encoded with // pos_slot, so they are used for coder->distances_prices // as is. - for (uint32_t i = 0; i < START_POS_MODEL_INDEX; ++i) + for (i = 0; i < START_POS_MODEL_INDEX; ++i) coder->distances_prices[len_to_pos_state][i] = pos_slot_prices[i]; } @@ -164,7 +171,7 @@ fill_distances_prices(lzma_coder *coder) // Distances in the range [4, 127] depend on pos_slot and pos_special. // We do this in a loop separate from the above loop to avoid // redundant calls to get_pos_slot(). 
- for (uint32_t i = START_POS_MODEL_INDEX; i < FULL_DISTANCES; ++i) { + for (i = START_POS_MODEL_INDEX; i < FULL_DISTANCES; ++i) { const uint32_t pos_slot = get_pos_slot(i); const uint32_t footer_bits = ((pos_slot >> 1) - 1); const uint32_t base = (2 | (pos_slot & 1)) << footer_bits; @@ -172,7 +179,7 @@ fill_distances_prices(lzma_coder *coder) coder->pos_special + base - pos_slot - 1, footer_bits, i - base); - for (uint32_t len_to_pos_state = 0; + for (len_to_pos_state = 0; len_to_pos_state < LEN_TO_POS_STATES; ++len_to_pos_state) coder->distances_prices[len_to_pos_state][i] @@ -188,7 +195,8 @@ fill_distances_prices(lzma_coder *coder) static void fill_align_prices(lzma_coder *coder) { - for (uint32_t i = 0; i < ALIGN_TABLE_SIZE; ++i) + uint32_t i; + for (i = 0; i < ALIGN_TABLE_SIZE; ++i) coder->align_prices[i] = rc_bittree_reverse_price( coder->pos_align, ALIGN_BITS, i); @@ -225,12 +233,15 @@ static void backward(lzma_coder *restrict coder, uint32_t *restrict len_res, uint32_t *restrict back_res, uint32_t cur) { - coder->opts_end_index = cur; - uint32_t pos_mem = coder->opts[cur].pos_prev; uint32_t back_mem = coder->opts[cur].back_prev; + coder->opts_end_index = cur; + do { + const uint32_t pos_prev = pos_mem; + const uint32_t back_cur = back_mem; + if (coder->opts[cur].prev_1_is_literal) { make_literal(&coder->opts[pos_mem]); coder->opts[pos_mem].pos_prev = pos_mem - 1; @@ -245,9 +256,6 @@ backward(lzma_coder *restrict coder, uint32_t *restrict len_res, } } - const uint32_t pos_prev = pos_mem; - const uint32_t back_cur = back_mem; - back_mem = coder->opts[pos_prev].back_prev; pos_mem = coder->opts[pos_prev].pos_prev; @@ -274,6 +282,23 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf, uint32_t *restrict back_res, uint32_t *restrict len_res, uint32_t position) { + uint32_t buf_avail; + const uint8_t *buf; + uint32_t rep_lens[REP_DISTANCES]; + uint32_t rep_max_index = 0; + uint32_t i; + + uint8_t current_byte; + uint8_t match_byte; + + uint32_t pos_state; + uint32_t match_price; + uint32_t rep_match_price; + uint32_t len_end; + uint32_t len; + + uint32_t normal_match_price; + const uint32_t nice_len = mf->nice_len; uint32_t len_main; @@ -287,19 +312,18 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf, matches_count = coder->matches_count; } - const uint32_t buf_avail = my_min(mf_avail(mf) + 1, MATCH_LEN_MAX); + buf_avail = my_min(mf_avail(mf) + 1, MATCH_LEN_MAX); if (buf_avail < 2) { *back_res = UINT32_MAX; *len_res = 1; return UINT32_MAX; } - const uint8_t *const buf = mf_ptr(mf) - 1; + buf = mf_ptr(mf) - 1; - uint32_t rep_lens[REP_DISTANCES]; - uint32_t rep_max_index = 0; + for (i = 0; i < REP_DISTANCES; ++i) { + uint32_t len_test; - for (uint32_t i = 0; i < REP_DISTANCES; ++i) { const uint8_t *const buf_back = buf - coder->reps[i] - 1; if (not_equal_16(buf, buf_back)) { @@ -307,7 +331,6 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf, continue; } - uint32_t len_test; for (len_test = 2; len_test < buf_avail && buf[len_test] == buf_back[len_test]; ++len_test) ; @@ -333,8 +356,8 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf, return UINT32_MAX; } - const uint8_t current_byte = *buf; - const uint8_t match_byte = *(buf - coder->reps[0] - 1); + current_byte = *buf; + match_byte = *(buf - coder->reps[0] - 1); if (len_main < 2 && current_byte != match_byte && rep_lens[rep_max_index] < 2) { @@ -345,7 +368,7 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf, coder->opts[0].state = coder->state; - const uint32_t pos_state = position & 
coder->pos_mask; + pos_state = position & coder->pos_mask; coder->opts[1].price = rc_bit_0_price( coder->is_match[coder->state][pos_state]) @@ -355,9 +378,9 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf, make_literal(&coder->opts[1]); - const uint32_t match_price = rc_bit_1_price( + match_price = rc_bit_1_price( coder->is_match[coder->state][pos_state]); - const uint32_t rep_match_price = match_price + rep_match_price = match_price + rc_bit_1_price(coder->is_rep[coder->state]); if (match_byte == current_byte) { @@ -371,7 +394,7 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf, } } - const uint32_t len_end = my_max(len_main, rep_lens[rep_max_index]); + len_end = my_max(len_main, rep_lens[rep_max_index]); if (len_end < 2) { *back_res = coder->opts[1].back_prev; @@ -381,21 +404,23 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf, coder->opts[1].pos_prev = 0; - for (uint32_t i = 0; i < REP_DISTANCES; ++i) + for (i = 0; i < REP_DISTANCES; ++i) coder->opts[0].backs[i] = coder->reps[i]; - uint32_t len = len_end; + len = len_end; do { coder->opts[len].price = RC_INFINITY_PRICE; } while (--len >= 2); - for (uint32_t i = 0; i < REP_DISTANCES; ++i) { + for (i = 0; i < REP_DISTANCES; ++i) { + uint32_t price; + uint32_t rep_len = rep_lens[i]; if (rep_len < 2) continue; - const uint32_t price = rep_match_price + get_pure_rep_price( + price = rep_match_price + get_pure_rep_price( coder, i, coder->state, pos_state); do { @@ -414,7 +439,7 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf, } - const uint32_t normal_match_price = match_price + normal_match_price = match_price + rc_bit_0_price(coder->is_rep[coder->state]); len = rep_lens[0] >= 2 ? rep_lens[0] + 1 : 2; @@ -456,6 +481,19 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf, uint32_t new_len = coder->longest_match_length; uint32_t pos_prev = coder->opts[cur].pos_prev; lzma_lzma_state state; + uint32_t buf_avail; + uint32_t rep_index; + uint32_t i; + + uint32_t cur_price; + uint8_t current_byte; + uint8_t match_byte; + uint32_t pos_state; + uint32_t cur_and_1_price; + bool next_is_literal = false; + uint32_t match_price; + uint32_t rep_match_price; + uint32_t start_len = 2; if (coder->opts[cur].prev_1_is_literal) { --pos_prev; @@ -499,9 +537,10 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf, } if (pos < REP_DISTANCES) { + uint32_t i; + reps[0] = coder->opts[pos_prev].backs[pos]; - uint32_t i; for (i = 1; i <= pos; ++i) reps[i] = coder->opts[pos_prev].backs[i - 1]; @@ -511,30 +550,28 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf, } else { reps[0] = pos - REP_DISTANCES; - for (uint32_t i = 1; i < REP_DISTANCES; ++i) + for (i = 1; i < REP_DISTANCES; ++i) reps[i] = coder->opts[pos_prev].backs[i - 1]; } } coder->opts[cur].state = state; - for (uint32_t i = 0; i < REP_DISTANCES; ++i) + for (i = 0; i < REP_DISTANCES; ++i) coder->opts[cur].backs[i] = reps[i]; - const uint32_t cur_price = coder->opts[cur].price; + cur_price = coder->opts[cur].price; - const uint8_t current_byte = *buf; - const uint8_t match_byte = *(buf - reps[0] - 1); + current_byte = *buf; + match_byte = *(buf - reps[0] - 1); - const uint32_t pos_state = position & coder->pos_mask; + pos_state = position & coder->pos_mask; - const uint32_t cur_and_1_price = cur_price + cur_and_1_price = cur_price + rc_bit_0_price(coder->is_match[state][pos_state]) + get_literal_price(coder, position, buf[-1], !is_literal_state(state), match_byte, current_byte); - bool next_is_literal = false; - if 
(cur_and_1_price < coder->opts[cur + 1].price) { coder->opts[cur + 1].price = cur_and_1_price; coder->opts[cur + 1].pos_prev = cur; @@ -542,9 +579,9 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf, next_is_literal = true; } - const uint32_t match_price = cur_price + match_price = cur_price + rc_bit_1_price(coder->is_match[state][pos_state]); - const uint32_t rep_match_price = match_price + rep_match_price = match_price + rc_bit_1_price(coder->is_rep[state]); if (match_byte == current_byte @@ -565,7 +602,7 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf, if (buf_avail_full < 2) return len_end; - const uint32_t buf_avail = my_min(buf_avail_full, nice_len); + buf_avail = my_min(buf_avail_full, nice_len); if (!next_is_literal && match_byte != current_byte) { // speed optimization // try literal + rep0 @@ -579,21 +616,26 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf, --len_test; if (len_test >= 2) { + uint32_t pos_state_next; + uint32_t next_rep_match_price; + uint32_t offset; + uint32_t cur_and_len_price; + lzma_lzma_state state_2 = state; update_literal(state_2); - const uint32_t pos_state_next = (position + 1) & coder->pos_mask; - const uint32_t next_rep_match_price = cur_and_1_price + pos_state_next = (position + 1) & coder->pos_mask; + next_rep_match_price = cur_and_1_price + rc_bit_1_price(coder->is_match[state_2][pos_state_next]) + rc_bit_1_price(coder->is_rep[state_2]); //for (; len_test >= 2; --len_test) { - const uint32_t offset = cur + 1 + len_test; + offset = cur + 1 + len_test; while (len_end < offset) coder->opts[++len_end].price = RC_INFINITY_PRICE; - const uint32_t cur_and_len_price = next_rep_match_price + cur_and_len_price = next_rep_match_price + get_rep_price(coder, 0, len_test, state_2, pos_state_next); @@ -609,14 +651,14 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf, } - uint32_t start_len = 2; // speed optimization + for (rep_index = 0; rep_index < REP_DISTANCES; ++rep_index) { + uint32_t len_test, len_test_2, len_test_temp; + uint32_t price, limit; - for (uint32_t rep_index = 0; rep_index < REP_DISTANCES; ++rep_index) { const uint8_t *const buf_back = buf - reps[rep_index] - 1; if (not_equal_16(buf, buf_back)) continue; - uint32_t len_test; for (len_test = 2; len_test < buf_avail && buf[len_test] == buf_back[len_test]; ++len_test) ; @@ -624,8 +666,8 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf, while (len_end < cur + len_test) coder->opts[++len_end].price = RC_INFINITY_PRICE; - const uint32_t len_test_temp = len_test; - const uint32_t price = rep_match_price + get_pure_rep_price( + len_test_temp = len_test; + price = rep_match_price + get_pure_rep_price( coder, rep_index, state, pos_state); do { @@ -647,8 +689,8 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf, start_len = len_test + 1; - uint32_t len_test_2 = len_test + 1; - const uint32_t limit = my_min(buf_avail_full, + len_test_2 = len_test + 1; + limit = my_min(buf_avail_full, len_test_2 + nice_len); for (; len_test_2 < limit && buf[len_test_2] == buf_back[len_test_2]; @@ -657,12 +699,18 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf, len_test_2 -= len_test + 1; if (len_test_2 >= 2) { + uint32_t pos_state_next; + uint32_t cur_and_len_literal_price; + uint32_t next_rep_match_price; + uint32_t offset; + uint32_t cur_and_len_price; + lzma_lzma_state state_2 = state; update_long_rep(state_2); - uint32_t pos_state_next = (position + len_test) & coder->pos_mask; + pos_state_next = (position + 
len_test) & coder->pos_mask; - const uint32_t cur_and_len_literal_price = price + cur_and_len_literal_price = price + get_len_price(&coder->rep_len_encoder, len_test, pos_state) + rc_bit_0_price(coder->is_match[state_2][pos_state_next]) @@ -674,17 +722,17 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf, pos_state_next = (position + len_test + 1) & coder->pos_mask; - const uint32_t next_rep_match_price = cur_and_len_literal_price + next_rep_match_price = cur_and_len_literal_price + rc_bit_1_price(coder->is_match[state_2][pos_state_next]) + rc_bit_1_price(coder->is_rep[state_2]); //for(; len_test_2 >= 2; len_test_2--) { - const uint32_t offset = cur + len_test + 1 + len_test_2; + offset = cur + len_test + 1 + len_test_2; while (len_end < offset) coder->opts[++len_end].price = RC_INFINITY_PRICE; - const uint32_t cur_and_len_price = next_rep_match_price + cur_and_len_price = next_rep_match_price + get_rep_price(coder, 0, len_test_2, state_2, pos_state_next); @@ -715,17 +763,19 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf, if (new_len >= start_len) { + uint32_t len_test; + uint32_t i = 0; + const uint32_t normal_match_price = match_price + rc_bit_0_price(coder->is_rep[state]); while (len_end < cur + new_len) coder->opts[++len_end].price = RC_INFINITY_PRICE; - uint32_t i = 0; while (start_len > coder->matches[i].len) ++i; - for (uint32_t len_test = start_len; ; ++len_test) { + for (len_test = start_len; ; ++len_test) { const uint32_t cur_back = coder->matches[i].dist; uint32_t cur_and_len_price = normal_match_price + get_pos_len_price(coder, @@ -753,12 +803,16 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf, len_test_2 -= len_test + 1; if (len_test_2 >= 2) { + uint32_t pos_state_next; + uint32_t cur_and_len_literal_price; + uint32_t next_rep_match_price; + uint32_t offset; + lzma_lzma_state state_2 = state; update_match(state_2); - uint32_t pos_state_next - = (position + len_test) & coder->pos_mask; + pos_state_next = (position + len_test) & coder->pos_mask; - const uint32_t cur_and_len_literal_price = cur_and_len_price + cur_and_len_literal_price = cur_and_len_price + rc_bit_0_price( coder->is_match[state_2][pos_state_next]) + get_literal_price(coder, @@ -771,14 +825,14 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf, update_literal(state_2); pos_state_next = (pos_state_next + 1) & coder->pos_mask; - const uint32_t next_rep_match_price + next_rep_match_price = cur_and_len_literal_price + rc_bit_1_price( coder->is_match[state_2][pos_state_next]) + rc_bit_1_price(coder->is_rep[state_2]); // for(; len_test_2 >= 2; --len_test_2) { - const uint32_t offset = cur + len_test + 1 + len_test_2; + offset = cur + len_test + 1 + len_test_2; while (len_end < offset) coder->opts[++len_end].price = RC_INFINITY_PRICE; @@ -815,6 +869,10 @@ lzma_lzma_optimum_normal(lzma_coder *restrict coder, lzma_mf *restrict mf, uint32_t *restrict back_res, uint32_t *restrict len_res, uint32_t position) { + uint32_t reps[REP_DISTANCES]; + uint32_t len_end; + uint32_t cur; + // If we have symbols pending, return the next pending symbol. if (coder->opts_end_index != coder->opts_current_index) { assert(mf->read_ahead > 0); @@ -841,14 +899,13 @@ lzma_lzma_optimum_normal(lzma_coder *restrict coder, lzma_mf *restrict mf, // the original function into two pieces makes it at least a little // more readable, since those two parts don't share many variables. 
- uint32_t len_end = helper1(coder, mf, back_res, len_res, position); + len_end = helper1(coder, mf, back_res, len_res, position); if (len_end == UINT32_MAX) return; - uint32_t reps[REP_DISTANCES]; + memcpy(reps, coder->reps, sizeof(reps)); - uint32_t cur; for (cur = 1; cur < len_end; ++cur) { assert(cur < OPTS); diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_presets.c b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_presets.c index 21e427a..8af9b9f 100644 --- a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_presets.c +++ b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_presets.c @@ -16,6 +16,9 @@ extern LZMA_API(lzma_bool) lzma_lzma_preset(lzma_options_lzma *options, uint32_t preset) { + static const uint8_t dict_size_values[] = { 18, 20, 21, 22, 22, 23, 23, 24, 25, 26 }; + static const uint8_t depth_values[] = { 4, 8, 24, 48 }; + const uint32_t level = preset & LZMA_PRESET_LEVEL_MASK; const uint32_t flags = preset & ~LZMA_PRESET_LEVEL_MASK; const uint32_t supported_flags = LZMA_PRESET_EXTREME; @@ -30,14 +33,13 @@ lzma_lzma_preset(lzma_options_lzma *options, uint32_t preset) options->lp = LZMA_LP_DEFAULT; options->pb = LZMA_PB_DEFAULT; - options->dict_size = UINT32_C(1) << (uint8_t []){ - 18, 20, 21, 22, 22, 23, 23, 24, 25, 26 }[level]; + options->dict_size = UINT32_C(1) << dict_size_values[level]; if (level <= 3) { options->mode = LZMA_MODE_FAST; options->mf = level == 0 ? LZMA_MF_HC3 : LZMA_MF_HC4; options->nice_len = level <= 1 ? 128 : 273; - options->depth = (uint8_t []){ 4, 8, 24, 48 }[level]; + options->depth = depth_values[level]; } else { options->mode = LZMA_MODE_NORMAL; options->mf = LZMA_MF_BT4; diff --git a/Utilities/cmliblzma/liblzma/rangecoder/range_common.h b/Utilities/cmliblzma/liblzma/rangecoder/range_common.h index 2c74dc1..f15623e 100644 --- a/Utilities/cmliblzma/liblzma/rangecoder/range_common.h +++ b/Utilities/cmliblzma/liblzma/rangecoder/range_common.h @@ -40,8 +40,11 @@ // This does the same for a complete bit tree. // (A tree represented as an array.) 
#define bittree_reset(probs, bit_levels) \ - for (uint32_t bt_i = 0; bt_i < (1 << (bit_levels)); ++bt_i) \ - bit_reset((probs)[bt_i]) + do { \ + uint32_t bt_i; \ + for (bt_i = 0; bt_i < (1 << (bit_levels)); ++bt_i) \ + bit_reset((probs)[bt_i]); \ + } while (0) ////////////////////// diff --git a/Utilities/cmliblzma/liblzma/rangecoder/range_encoder.h b/Utilities/cmliblzma/liblzma/rangecoder/range_encoder.h index 1e1c369..e9614f2 100644 --- a/Utilities/cmliblzma/liblzma/rangecoder/range_encoder.h +++ b/Utilities/cmliblzma/liblzma/rangecoder/range_encoder.h @@ -115,7 +115,8 @@ rc_direct(lzma_range_encoder *rc, static inline void rc_flush(lzma_range_encoder *rc) { - for (size_t i = 0; i < 5; ++i) + size_t i; + for (i = 0; i < 5; ++i) rc->symbols[rc->count++] = RC_FLUSH; } diff --git a/Utilities/cmliblzma/liblzma/simple/arm.c b/Utilities/cmliblzma/liblzma/simple/arm.c index a84702a..8dcba39 100644 --- a/Utilities/cmliblzma/liblzma/simple/arm.c +++ b/Utilities/cmliblzma/liblzma/simple/arm.c @@ -22,12 +22,12 @@ arm_code(lzma_simple *simple lzma_attribute((__unused__)), size_t i; for (i = 0; i + 4 <= size; i += 4) { if (buffer[i + 3] == 0xEB) { + uint32_t dest; uint32_t src = (buffer[i + 2] << 16) | (buffer[i + 1] << 8) | (buffer[i + 0]); src <<= 2; - uint32_t dest; if (is_encoder) dest = now_pos + (uint32_t)(i) + 8 + src; else diff --git a/Utilities/cmliblzma/liblzma/simple/armthumb.c b/Utilities/cmliblzma/liblzma/simple/armthumb.c index 4b49175..4b890a3 100644 --- a/Utilities/cmliblzma/liblzma/simple/armthumb.c +++ b/Utilities/cmliblzma/liblzma/simple/armthumb.c @@ -23,6 +23,7 @@ armthumb_code(lzma_simple *simple lzma_attribute((__unused__)), for (i = 0; i + 4 <= size; i += 2) { if ((buffer[i + 1] & 0xF8) == 0xF0 && (buffer[i + 3] & 0xF8) == 0xF8) { + uint32_t dest; uint32_t src = ((buffer[i + 1] & 0x7) << 19) | (buffer[i + 0] << 11) | ((buffer[i + 3] & 0x7) << 8) @@ -30,7 +31,6 @@ armthumb_code(lzma_simple *simple lzma_attribute((__unused__)), src <<= 1; - uint32_t dest; if (is_encoder) dest = now_pos + (uint32_t)(i) + 4 + src; else diff --git a/Utilities/cmliblzma/liblzma/simple/ia64.c b/Utilities/cmliblzma/liblzma/simple/ia64.c index ce3692b..c537cac 100644 --- a/Utilities/cmliblzma/liblzma/simple/ia64.c +++ b/Utilities/cmliblzma/liblzma/simple/ia64.c @@ -28,36 +28,42 @@ ia64_code(lzma_simple *simple lzma_attribute((__unused__)), size_t i; for (i = 0; i + 16 <= size; i += 16) { + size_t slot; + const uint32_t instr_template = buffer[i] & 0x1F; const uint32_t mask = BRANCH_TABLE[instr_template]; uint32_t bit_pos = 5; - for (size_t slot = 0; slot < 3; ++slot, bit_pos += 41) { - if (((mask >> slot) & 1) == 0) - continue; - + for (slot = 0; slot < 3; ++slot, bit_pos += 41) { const size_t byte_pos = (bit_pos >> 3); const uint32_t bit_res = bit_pos & 0x7; uint64_t instruction = 0; + uint64_t inst_norm; + size_t j; + + if (((mask >> slot) & 1) == 0) + continue; - for (size_t j = 0; j < 6; ++j) + for (j = 0; j < 6; ++j) instruction += (uint64_t)( buffer[i + j + byte_pos]) << (8 * j); - uint64_t inst_norm = instruction >> bit_res; + inst_norm = instruction >> bit_res; if (((inst_norm >> 37) & 0xF) == 0x5 && ((inst_norm >> 9) & 0x7) == 0 /* && (inst_norm & 0x3F)== 0 */ ) { + uint32_t dest; + size_t j; + uint32_t src = (uint32_t)( (inst_norm >> 13) & 0xFFFFF); src |= ((inst_norm >> 36) & 1) << 20; src <<= 4; - uint32_t dest; if (is_encoder) dest = now_pos + (uint32_t)(i) + src; else @@ -73,7 +79,7 @@ ia64_code(lzma_simple *simple lzma_attribute((__unused__)), instruction &= (1 << bit_res) - 1; 
instruction |= (inst_norm << bit_res); - for (size_t j = 0; j < 6; j++) + for (j = 0; j < 6; j++) buffer[i + j + byte_pos] = (uint8_t)( instruction >> (8 * j)); diff --git a/Utilities/cmliblzma/liblzma/simple/simple_coder.c b/Utilities/cmliblzma/liblzma/simple/simple_coder.c index a02b039..d147d4b 100644 --- a/Utilities/cmliblzma/liblzma/simple/simple_coder.c +++ b/Utilities/cmliblzma/liblzma/simple/simple_coder.c @@ -71,6 +71,9 @@ simple_code(lzma_coder *coder, lzma_allocator *allocator, size_t in_size, uint8_t *restrict out, size_t *restrict out_pos, size_t out_size, lzma_action action) { + size_t out_avail; + size_t buf_avail; + // TODO: Add partial support for LZMA_SYNC_FLUSH. We can support it // in cases when the filter is able to filter everything. With most // simple filters it can be done at offset that is a multiple of 2, @@ -105,9 +108,13 @@ simple_code(lzma_coder *coder, lzma_allocator *allocator, // more data to out[] hopefully filling it completely. Then filter // the data in out[]. This step is where most of the data gets // filtered if the buffer sizes used by the application are reasonable. - const size_t out_avail = out_size - *out_pos; - const size_t buf_avail = coder->size - coder->pos; + out_avail = out_size - *out_pos; + buf_avail = coder->size - coder->pos; if (out_avail > buf_avail || buf_avail == 0) { + size_t size; + size_t filtered; + size_t unfiltered; + // Store the old position so that we know from which byte // to start filtering. const size_t out_start = *out_pos; @@ -130,11 +137,10 @@ simple_code(lzma_coder *coder, lzma_allocator *allocator, } // Filter out[]. - const size_t size = *out_pos - out_start; - const size_t filtered = call_filter( - coder, out + out_start, size); + size = *out_pos - out_start; + filtered = call_filter(coder, out + out_start, size); - const size_t unfiltered = size - filtered; + unfiltered = size - filtered; assert(unfiltered <= coder->allocated / 2); // Now we can update coder->pos and coder->size, because diff --git a/Utilities/cmliblzma/liblzma/simple/simple_decoder.c b/Utilities/cmliblzma/liblzma/simple/simple_decoder.c index 0beccd3..034e158 100644 --- a/Utilities/cmliblzma/liblzma/simple/simple_decoder.c +++ b/Utilities/cmliblzma/liblzma/simple/simple_decoder.c @@ -17,14 +17,15 @@ extern lzma_ret lzma_simple_props_decode(void **options, lzma_allocator *allocator, const uint8_t *props, size_t props_size) { + lzma_options_bcj *opt; + if (props_size == 0) return LZMA_OK; if (props_size != 4) return LZMA_OPTIONS_ERROR; - lzma_options_bcj *opt = lzma_alloc( - sizeof(lzma_options_bcj), allocator); + opt = lzma_alloc(sizeof(lzma_options_bcj), allocator); if (opt == NULL) return LZMA_MEM_ERROR; diff --git a/Utilities/cmliblzma/liblzma/simple/sparc.c b/Utilities/cmliblzma/liblzma/simple/sparc.c index 8270d6a..0ddd2ac 100644 --- a/Utilities/cmliblzma/liblzma/simple/sparc.c +++ b/Utilities/cmliblzma/liblzma/simple/sparc.c @@ -26,6 +26,8 @@ sparc_code(lzma_simple *simple lzma_attribute((__unused__)), || (buffer[i] == 0x7F && (buffer[i + 1] & 0xC0) == 0xC0)) { + uint32_t dest; + uint32_t src = ((uint32_t)buffer[i + 0] << 24) | ((uint32_t)buffer[i + 1] << 16) | ((uint32_t)buffer[i + 2] << 8) @@ -33,7 +35,6 @@ sparc_code(lzma_simple *simple lzma_attribute((__unused__)), src <<= 2; - uint32_t dest; if (is_encoder) dest = now_pos + (uint32_t)(i) + src; else diff --git a/Utilities/cmliblzma/liblzma/simple/x86.c b/Utilities/cmliblzma/liblzma/simple/x86.c index 5d1509b..101d8ed 100644 --- a/Utilities/cmliblzma/liblzma/simple/x86.c +++ 
b/Utilities/cmliblzma/liblzma/simple/x86.c @@ -36,30 +36,36 @@ x86_code(lzma_simple *simple, uint32_t now_pos, bool is_encoder, uint32_t prev_mask = simple->prev_mask; uint32_t prev_pos = simple->prev_pos; + size_t limit; + size_t buffer_pos; + if (size < 5) return 0; if (now_pos - prev_pos > 5) prev_pos = now_pos - 5; - const size_t limit = size - 5; - size_t buffer_pos = 0; + limit = size - 5; + buffer_pos = 0; while (buffer_pos <= limit) { + uint32_t offset; + uint32_t i; + uint8_t b = buffer[buffer_pos]; if (b != 0xE8 && b != 0xE9) { ++buffer_pos; continue; } - const uint32_t offset = now_pos + (uint32_t)(buffer_pos) + offset = now_pos + (uint32_t)(buffer_pos) - prev_pos; prev_pos = now_pos + (uint32_t)(buffer_pos); if (offset > 5) { prev_mask = 0; } else { - for (uint32_t i = 0; i < offset; ++i) { + for (i = 0; i < offset; ++i) { prev_mask &= 0x77; prev_mask <<= 1; } @@ -78,6 +84,8 @@ x86_code(lzma_simple *simple, uint32_t now_pos, bool is_encoder, uint32_t dest; while (true) { + uint32_t i; + if (is_encoder) dest = src + (now_pos + (uint32_t)( buffer_pos) + 5); @@ -88,8 +96,7 @@ x86_code(lzma_simple *simple, uint32_t now_pos, bool is_encoder, if (prev_mask == 0) break; - const uint32_t i = MASK_TO_BIT_NUMBER[ - prev_mask >> 1]; + i = MASK_TO_BIT_NUMBER[prev_mask >> 1]; b = (uint8_t)(dest >> (24 - i * 8));
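
The hunks above repeat two C89/90 rewrites throughout liblzma: every block-local declaration is hoisted above the first statement of its block, and the C99 compound-literal lookup tables in lzma_encoder_presets.c become named static const arrays. A minimal self-contained sketch of both patterns, using hypothetical names rather than liblzma's real identifiers, might look like this:

#include <stdio.h>

/* C99 allowed an anonymous lookup table at the point of use:
 *     depth = (unsigned char []){ 4, 8, 24, 48 }[level];
 * C89 needs a named static const array instead (as done for
 * dict_size_values[] and depth_values[] above). */
static const unsigned char depth_values[4] = { 4, 8, 24, 48 };

static unsigned
pick_depth(unsigned level, int *ok)
{
	/* C89: all declarations come before the first statement ... */
	unsigned depth;

	/* ... so a declaration can no longer carry its initializer past
	 * an early return; the assignment moves below the check, much as
	 * the decoder hunk above separates rc and rc_in_pos from their
	 * declarations. */
	if (level >= 4) {
		*ok = 0;
		return 0;
	}

	depth = depth_values[level];
	*ok = 1;
	return depth;
}

int
main(void)
{
	int ok;
	unsigned d = pick_depth(2, &ok);

	printf("ok=%d depth=%u\n", ok, d);
	return 0;
}

Only the declarations move in this rewrite; the original control flow, including the early return, stays where it was.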
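Two macro-level fixes also recur above: call sites such as rc_bit_case(probs[symbol], , , SEQ_LITERAL0) can no longer pass empty arguments, since pre-C99 preprocessors do not reliably accept them, so the unused action slots become an explicit 0; and bittree_reset() wraps its loop in do { ... } while (0) so the counter can be declared inside the macro instead of in a C99 for-init declaration. A hedged sketch with made-up macro names (not the real range-coder API):

#include <stdio.h>

/* Unused "action" arguments must be real tokens for C89, so callers
 * pass 0 (a harmless expression statement) instead of nothing. */
#define run_bit(prob, action0, action1) \
	do { \
		if ((prob) & 1) { action0; } else { action1; } \
	} while (0)

/* Declaring the counter inside a do/while (0) block keeps the macro
 * usable as a single statement and avoids a for-init declaration. */
#define table_reset(probs, count) \
	do { \
		unsigned tr_i; \
		for (tr_i = 0; tr_i < (count); ++tr_i) \
			(probs)[tr_i] = 1024; \
	} while (0)

int
main(void)
{
	unsigned probs[8];
	unsigned hits = 0;

	table_reset(probs, 8);

	/* C99 call site: run_bit(probs[0], , ++hits);  (empty argument)
	 * C89 call site: pass 0 for the slot that should do nothing.  */
	run_bit(probs[0], ++hits, 0);
	run_bit(probs[1], 0, ++hits);

	printf("hits=%u first=%u\n", hits, probs[0]);
	return 0;
}

The do { ... } while (0) wrapper keeps each macro expanding to a single statement, so it still composes safely with if/else at existing call sites.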