Diffstat (limited to 'Utilities/cmliblzma')
-rw-r--r--  Utilities/cmliblzma/liblzma/check/check.c | 12
-rw-r--r--  Utilities/cmliblzma/liblzma/check/crc32_fast.c | 8
-rw-r--r--  Utilities/cmliblzma/liblzma/check/crc64_fast.c | 4
-rw-r--r--  Utilities/cmliblzma/liblzma/check/sha256.c | 10
-rw-r--r--  Utilities/cmliblzma/liblzma/common/alone_decoder.c | 18
-rw-r--r--  Utilities/cmliblzma/liblzma/common/alone_encoder.c | 22
-rw-r--r--  Utilities/cmliblzma/liblzma/common/auto_decoder.c | 2
-rw-r--r--  Utilities/cmliblzma/liblzma/common/block_buffer_decoder.c | 8
-rw-r--r--  Utilities/cmliblzma/liblzma/common/block_buffer_encoder.c | 40
-rw-r--r--  Utilities/cmliblzma/liblzma/common/block_decoder.c | 2
-rw-r--r--  Utilities/cmliblzma/liblzma/common/block_encoder.c | 2
-rw-r--r--  Utilities/cmliblzma/liblzma/common/block_header_decoder.c | 21
-rw-r--r--  Utilities/cmliblzma/liblzma/common/block_header_encoder.c | 19
-rw-r--r--  Utilities/cmliblzma/liblzma/common/block_util.c | 11
-rw-r--r--  Utilities/cmliblzma/liblzma/common/common.c | 12
-rw-r--r--  Utilities/cmliblzma/liblzma/common/common.h | 64
-rw-r--r--  Utilities/cmliblzma/liblzma/common/filter_buffer_decoder.c | 12
-rw-r--r--  Utilities/cmliblzma/liblzma/common/filter_buffer_encoder.c | 11
-rw-r--r--  Utilities/cmliblzma/liblzma/common/filter_common.c | 125
-rw-r--r--  Utilities/cmliblzma/liblzma/common/filter_decoder.c | 80
-rw-r--r--  Utilities/cmliblzma/liblzma/common/filter_encoder.c | 133
-rw-r--r--  Utilities/cmliblzma/liblzma/common/filter_flags_decoder.c | 6
-rw-r--r--  Utilities/cmliblzma/liblzma/common/filter_flags_encoder.c | 3
-rw-r--r--  Utilities/cmliblzma/liblzma/common/index.c | 102
-rw-r--r--  Utilities/cmliblzma/liblzma/common/index_decoder.c | 16
-rw-r--r--  Utilities/cmliblzma/liblzma/common/index_encoder.c | 11
-rw-r--r--  Utilities/cmliblzma/liblzma/common/index_hash.c | 10
-rw-r--r--  Utilities/cmliblzma/liblzma/common/stream_buffer_decoder.c | 6
-rw-r--r--  Utilities/cmliblzma/liblzma/common/stream_buffer_encoder.c | 22
-rw-r--r--  Utilities/cmliblzma/liblzma/common/stream_decoder.c | 28
-rw-r--r--  Utilities/cmliblzma/liblzma/common/stream_encoder.c | 35
-rw-r--r--  Utilities/cmliblzma/liblzma/common/stream_flags_decoder.c | 8
-rw-r--r--  Utilities/cmliblzma/liblzma/common/stream_flags_encoder.c | 8
-rw-r--r--  Utilities/cmliblzma/liblzma/common/vli_size.c | 3
-rw-r--r--  Utilities/cmliblzma/liblzma/delta/delta_common.c | 4
-rw-r--r--  Utilities/cmliblzma/liblzma/delta/delta_decoder.c | 15
-rw-r--r--  Utilities/cmliblzma/liblzma/delta/delta_encoder.c | 9
-rw-r--r--  Utilities/cmliblzma/liblzma/lz/lz_decoder.c | 19
-rw-r--r--  Utilities/cmliblzma/liblzma/lz/lz_decoder.h | 24
-rw-r--r--  Utilities/cmliblzma/liblzma/lz/lz_encoder.c | 53
-rw-r--r--  Utilities/cmliblzma/liblzma/lz/lz_encoder.h | 2
-rw-r--r--  Utilities/cmliblzma/liblzma/lz/lz_encoder_hash.h | 21
-rw-r--r--  Utilities/cmliblzma/liblzma/lz/lz_encoder_mf.c | 167
-rw-r--r--  Utilities/cmliblzma/liblzma/lzma/fastpos.h | 2
-rw-r--r--  Utilities/cmliblzma/liblzma/lzma/lzma2_decoder.c | 9
-rw-r--r--  Utilities/cmliblzma/liblzma/lzma/lzma2_encoder.c | 18
-rw-r--r--  Utilities/cmliblzma/liblzma/lzma/lzma_common.h | 9
-rw-r--r--  Utilities/cmliblzma/liblzma/lzma/lzma_decoder.c | 162
-rw-r--r--  Utilities/cmliblzma/liblzma/lzma/lzma_encoder.c | 72
-rw-r--r--  Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_fast.c | 26
-rw-r--r--  Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_normal.c | 201
-rw-r--r--  Utilities/cmliblzma/liblzma/lzma/lzma_encoder_presets.c | 7
-rw-r--r--  Utilities/cmliblzma/liblzma/rangecoder/range_common.h | 7
-rw-r--r--  Utilities/cmliblzma/liblzma/rangecoder/range_encoder.h | 3
-rw-r--r--  Utilities/cmliblzma/liblzma/simple/arm.c | 2
-rw-r--r--  Utilities/cmliblzma/liblzma/simple/armthumb.c | 2
-rw-r--r--  Utilities/cmliblzma/liblzma/simple/ia64.c | 22
-rw-r--r--  Utilities/cmliblzma/liblzma/simple/simple_coder.c | 18
-rw-r--r--  Utilities/cmliblzma/liblzma/simple/simple_decoder.c | 5
-rw-r--r--  Utilities/cmliblzma/liblzma/simple/sparc.c | 3
-rw-r--r--  Utilities/cmliblzma/liblzma/simple/x86.c | 19
61 files changed, 700 insertions, 1075 deletions
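
The hunks below are a C99 modernization of the bundled liblzma sources: positional aggregate initializers become designated initializers, variables move to their point of first use, and the three fixed-arity helper macros lzma_next_strm_init1/2/3 collapse into one variadic lzma_next_strm_init (see the common.h hunk). The following is a minimal self-contained sketch of those two patterns, not liblzma code; the demo_* names are hypothetical stand-ins invented for illustration.

    #include <stdio.h>

    /* Hypothetical stand-ins, only so the sketch compiles on its own. */
    typedef enum { DEMO_OK = 0, DEMO_ERROR = 1 } demo_ret;

    typedef struct {
        const char *name;   /* filter name */
        int         level;  /* compression level */
    } demo_options;

    /* Fixed-arity initializers, analogous to what the numbered
     * lzma_next_strm_initN macros used to wrap. */
    static demo_ret demo_init1(demo_options *o, int level)
    { o->level = level; return DEMO_OK; }

    static demo_ret demo_init2(demo_options *o, int level, const char *name)
    { o->level = level; o->name = name; return DEMO_OK; }

    /* One variadic macro replaces the numbered family: the trailing
     * arguments are forwarded unchanged and the error path stays in
     * a single place. */
    #define demo_strm_init(func, opts, ...) \
    do { \
        const demo_ret ret_ = func((opts), __VA_ARGS__); \
        if (ret_ != DEMO_OK) \
            return ret_; \
    } while (0)

    static demo_ret
    setup(demo_options *opts)
    {
        /* Designated initializer: named fields are explicit,
         * everything else is zero-initialized. */
        *opts = (demo_options){ .level = 6 };

        demo_strm_init(demo_init1, opts, 9);
        demo_strm_init(demo_init2, opts, 9, "lzma2");
        return DEMO_OK;
    }

    int main(void)
    {
        demo_options opts;
        if (setup(&opts) == DEMO_OK)
            printf("%s level %d\n", opts.name, opts.level);
        return 0;
    }

The variadic form keeps the error handling in one place regardless of how many arguments the wrapped initializer takes, which is why the separate init1/init2/init3 variants in common.h became redundant.
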
diff --git a/Utilities/cmliblzma/liblzma/check/check.c b/Utilities/cmliblzma/liblzma/check/check.c
index 979b0a8..428ddae 100644
--- a/Utilities/cmliblzma/liblzma/check/check.c
+++ b/Utilities/cmliblzma/liblzma/check/check.c
@@ -16,6 +16,9 @@
extern LZMA_API(lzma_bool)
lzma_check_is_supported(lzma_check type)
{
+ if ((unsigned int)(type) > LZMA_CHECK_ID_MAX)
+ return false;
+
static const lzma_bool available_checks[LZMA_CHECK_ID_MAX + 1] = {
true, // LZMA_CHECK_NONE
@@ -53,9 +56,6 @@ lzma_check_is_supported(lzma_check type)
false, // Reserved
};
- if ((unsigned int)(type) > LZMA_CHECK_ID_MAX)
- return false;
-
return available_checks[(unsigned int)(type)];
}
@@ -63,6 +63,9 @@ lzma_check_is_supported(lzma_check type)
extern LZMA_API(uint32_t)
lzma_check_size(lzma_check type)
{
+ if ((unsigned int)(type) > LZMA_CHECK_ID_MAX)
+ return UINT32_MAX;
+
// See file-format.txt section 2.1.1.2.
static const uint8_t check_sizes[LZMA_CHECK_ID_MAX + 1] = {
0,
@@ -73,9 +76,6 @@ lzma_check_size(lzma_check type)
64, 64, 64
};
- if ((unsigned int)(type) > LZMA_CHECK_ID_MAX)
- return UINT32_MAX;
-
return check_sizes[(unsigned int)(type)];
}
diff --git a/Utilities/cmliblzma/liblzma/check/crc32_fast.c b/Utilities/cmliblzma/liblzma/check/crc32_fast.c
index c2c3cb7..3de0263 100644
--- a/Utilities/cmliblzma/liblzma/check/crc32_fast.c
+++ b/Utilities/cmliblzma/liblzma/check/crc32_fast.c
@@ -33,8 +33,6 @@ lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc)
#endif
if (size > 8) {
- const uint8_t * limit;
-
// Fix the alignment, if needed. The if statement above
// ensures that this won't read past the end of buf[].
while ((uintptr_t)(buf) & 7) {
@@ -43,7 +41,7 @@ lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc)
}
// Calculate the position where to stop.
- limit = buf + (size & ~(size_t)(7));
+ const uint8_t *const limit = buf + (size & ~(size_t)(7));
// Calculate how many bytes must be calculated separately
// before returning the result.
@@ -51,8 +49,6 @@ lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc)
// Calculate the CRC32 using the slice-by-eight algorithm.
while (buf < limit) {
- uint32_t tmp;
-
crc ^= *(const uint32_t *)(buf);
buf += 4;
@@ -61,7 +57,7 @@ lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc)
^ lzma_crc32_table[5][C(crc)]
^ lzma_crc32_table[4][D(crc)];
- tmp = *(const uint32_t *)(buf);
+ const uint32_t tmp = *(const uint32_t *)(buf);
buf += 4;
// At least with some compilers, it is critical for
diff --git a/Utilities/cmliblzma/liblzma/check/crc64_fast.c b/Utilities/cmliblzma/liblzma/check/crc64_fast.c
index 1436557..52af29e 100644
--- a/Utilities/cmliblzma/liblzma/check/crc64_fast.c
+++ b/Utilities/cmliblzma/liblzma/check/crc64_fast.c
@@ -36,14 +36,12 @@ lzma_crc64(const uint8_t *buf, size_t size, uint64_t crc)
#endif
if (size > 4) {
- const uint8_t *limit;
-
while ((uintptr_t)(buf) & 3) {
crc = lzma_crc64_table[0][*buf++ ^ A1(crc)] ^ S8(crc);
--size;
}
- limit = buf + (size & ~(size_t)(3));
+ const uint8_t *const limit = buf + (size & ~(size_t)(3));
size &= (size_t)(3);
while (buf < limit) {
diff --git a/Utilities/cmliblzma/liblzma/check/sha256.c b/Utilities/cmliblzma/liblzma/check/sha256.c
index 3af6aa6..f2cc0d7 100644
--- a/Utilities/cmliblzma/liblzma/check/sha256.c
+++ b/Utilities/cmliblzma/liblzma/check/sha256.c
@@ -84,13 +84,12 @@ transform(uint32_t state[8], const uint32_t data[16])
{
uint32_t W[16];
uint32_t T[8];
- unsigned int j;
// Copy state[] to working vars.
memcpy(T, state, sizeof(T));
// 64 operations, partially loop unrolled
- for (j = 0; j < 64; j += 16) {
+ for (unsigned int j = 0; j < 64; j += 16) {
R( 0); R( 1); R( 2); R( 3);
R( 4); R( 5); R( 6); R( 7);
R( 8); R( 9); R(10); R(11);
@@ -117,9 +116,8 @@ process(lzma_check_state *check)
#else
uint32_t data[16];
- size_t i;
- for (i = 0; i < 16; ++i)
+ for (size_t i = 0; i < 16; ++i)
data[i] = bswap32(check->buffer.u32[i]);
transform(check->state.sha256.state, data);
@@ -174,8 +172,6 @@ lzma_sha256_update(const uint8_t *buf, size_t size, lzma_check_state *check)
extern void
lzma_sha256_finish(lzma_check_state *check)
{
- size_t i;
-
// Add padding as described in RFC 3174 (it describes SHA-1 but
// the same padding style is used for SHA-256 too).
size_t pos = check->state.sha256.size & 0x3F;
@@ -197,7 +193,7 @@ lzma_sha256_finish(lzma_check_state *check)
process(check);
- for (i = 0; i < 8; ++i)
+ for (size_t i = 0; i < 8; ++i)
check->buffer.u32[i] = conv32be(check->state.sha256.state[i]);
return;
diff --git a/Utilities/cmliblzma/liblzma/common/alone_decoder.c b/Utilities/cmliblzma/liblzma/common/alone_decoder.c
index a20cf49..c25112e 100644
--- a/Utilities/cmliblzma/liblzma/common/alone_decoder.c
+++ b/Utilities/cmliblzma/liblzma/common/alone_decoder.c
@@ -126,17 +126,19 @@ alone_decode(lzma_coder *coder,
// Fall through
case SEQ_CODER_INIT: {
- lzma_ret ret;
+ if (coder->memusage > coder->memlimit)
+ return LZMA_MEMLIMIT_ERROR;
lzma_filter_info filters[2] = {
- { 0, &lzma_lzma_decoder_init, &coder->options },
- { 0, NULL, NULL }
+ {
+ .init = &lzma_lzma_decoder_init,
+ .options = &coder->options,
+ }, {
+ .init = NULL,
+ }
};
- if (coder->memusage > coder->memlimit)
- return LZMA_MEMLIMIT_ERROR;
-
- ret = lzma_next_filter_init(&coder->next,
+ const lzma_ret ret = lzma_next_filter_init(&coder->next,
allocator, filters);
if (ret != LZMA_OK)
return ret;
@@ -227,7 +229,7 @@ lzma_alone_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern LZMA_API(lzma_ret)
lzma_alone_decoder(lzma_stream *strm, uint64_t memlimit)
{
- lzma_next_strm_init2(lzma_alone_decoder_init, strm, memlimit, false);
+ lzma_next_strm_init(lzma_alone_decoder_init, strm, memlimit, false);
strm->internal->supported_actions[LZMA_RUN] = true;
strm->internal->supported_actions[LZMA_FINISH] = true;
diff --git a/Utilities/cmliblzma/liblzma/common/alone_encoder.c b/Utilities/cmliblzma/liblzma/common/alone_encoder.c
index 62df126..eb1697e 100644
--- a/Utilities/cmliblzma/liblzma/common/alone_encoder.c
+++ b/Utilities/cmliblzma/liblzma/common/alone_encoder.c
@@ -78,14 +78,6 @@ static lzma_ret
alone_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
const lzma_options_lzma *options)
{
- uint32_t d;
-
- // Initialize the LZMA encoder.
- const lzma_filter_info filters[2] = {
- { 0, &lzma_lzma_encoder_init, (void *)(options) },
- { 0, NULL, NULL }
- };
-
lzma_next_coder_init(&alone_encoder_init, next, allocator);
if (next->coder == NULL) {
@@ -115,7 +107,7 @@ alone_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
// one is the next unless it is UINT32_MAX. While the header would
// allow any 32-bit integer, we do this to keep the decoder of liblzma
// accepting the resulting files.
- d = options->dict_size - 1;
+ uint32_t d = options->dict_size - 1;
d |= d >> 2;
d |= d >> 3;
d |= d >> 4;
@@ -129,6 +121,16 @@ alone_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
// - Uncompressed size (always unknown and using EOPM)
memset(next->coder->header + 1 + 4, 0xFF, 8);
+ // Initialize the LZMA encoder.
+ const lzma_filter_info filters[2] = {
+ {
+ .init = &lzma_lzma_encoder_init,
+ .options = (void *)(options),
+ }, {
+ .init = NULL,
+ }
+ };
+
return lzma_next_filter_init(&next->coder->next, allocator, filters);
}
@@ -146,7 +148,7 @@ lzma_alone_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern LZMA_API(lzma_ret)
lzma_alone_encoder(lzma_stream *strm, const lzma_options_lzma *options)
{
- lzma_next_strm_init1(alone_encoder_init, strm, options);
+ lzma_next_strm_init(alone_encoder_init, strm, options);
strm->internal->supported_actions[LZMA_RUN] = true;
strm->internal->supported_actions[LZMA_FINISH] = true;
diff --git a/Utilities/cmliblzma/liblzma/common/auto_decoder.c b/Utilities/cmliblzma/liblzma/common/auto_decoder.c
index 6f3c862..35c895f 100644
--- a/Utilities/cmliblzma/liblzma/common/auto_decoder.c
+++ b/Utilities/cmliblzma/liblzma/common/auto_decoder.c
@@ -177,7 +177,7 @@ auto_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern LZMA_API(lzma_ret)
lzma_auto_decoder(lzma_stream *strm, uint64_t memlimit, uint32_t flags)
{
- lzma_next_strm_init2(auto_decoder_init, strm, memlimit, flags);
+ lzma_next_strm_init(auto_decoder_init, strm, memlimit, flags);
strm->internal->supported_actions[LZMA_RUN] = true;
strm->internal->supported_actions[LZMA_FINISH] = true;
diff --git a/Utilities/cmliblzma/liblzma/common/block_buffer_decoder.c b/Utilities/cmliblzma/liblzma/common/block_buffer_decoder.c
index b4bd388..ff27a11 100644
--- a/Utilities/cmliblzma/liblzma/common/block_buffer_decoder.c
+++ b/Utilities/cmliblzma/liblzma/common/block_buffer_decoder.c
@@ -18,9 +18,6 @@ lzma_block_buffer_decode(lzma_block *block, lzma_allocator *allocator,
const uint8_t *in, size_t *in_pos, size_t in_size,
uint8_t *out, size_t *out_pos, size_t out_size)
{
- lzma_next_coder block_decoder;
- lzma_ret ret;
-
if (in_pos == NULL || (in == NULL && *in_pos != in_size)
|| *in_pos > in_size || out_pos == NULL
|| (out == NULL && *out_pos != out_size)
@@ -28,8 +25,9 @@ lzma_block_buffer_decode(lzma_block *block, lzma_allocator *allocator,
return LZMA_PROG_ERROR;
// Initialize the Block decoder.
- block_decoder = LZMA_NEXT_CODER_INIT;
- ret = lzma_block_decoder_init(&block_decoder, allocator, block);
+ lzma_next_coder block_decoder = LZMA_NEXT_CODER_INIT;
+ lzma_ret ret = lzma_block_decoder_init(
+ &block_decoder, allocator, block);
if (ret == LZMA_OK) {
// Save the positions so that we can restore them in case
diff --git a/Utilities/cmliblzma/liblzma/common/block_buffer_encoder.c b/Utilities/cmliblzma/liblzma/common/block_buffer_encoder.c
index 136f7f5..519c6a6 100644
--- a/Utilities/cmliblzma/liblzma/common/block_buffer_encoder.c
+++ b/Utilities/cmliblzma/liblzma/common/block_buffer_encoder.c
@@ -31,8 +31,6 @@
static lzma_vli
lzma2_bound(lzma_vli uncompressed_size)
{
- lzma_vli overhead;
-
// Prevent integer overflow in overhead calculation.
if (uncompressed_size > COMPRESSED_SIZE_MAX)
return 0;
@@ -41,7 +39,7 @@ lzma2_bound(lzma_vli uncompressed_size)
// uncompressed_size up to the next multiple of LZMA2_CHUNK_MAX,
// multiply by the size of per-chunk header, and add one byte for
// the end marker.
- overhead = ((uncompressed_size + LZMA2_CHUNK_MAX - 1)
+ const lzma_vli overhead = ((uncompressed_size + LZMA2_CHUNK_MAX - 1)
/ LZMA2_CHUNK_MAX)
* LZMA2_HEADER_UNCOMPRESSED + 1;
@@ -84,17 +82,15 @@ static lzma_ret
block_encode_uncompressed(lzma_block *block, const uint8_t *in, size_t in_size,
uint8_t *out, size_t *out_pos, size_t out_size)
{
- size_t in_pos = 0;
- uint8_t control = 0x01; // Dictionary reset
- lzma_filter *filters_orig;
-
// TODO: Figure out if the last filter is LZMA2 or Subblock and use
// that filter to encode the uncompressed chunks.
// Use LZMA2 uncompressed chunks. We wouldn't need a dictionary at
// all, but LZMA2 always requires a dictionary, so use the minimum
// value to minimize memory usage of the decoder.
- lzma_options_lzma lzma2 = { LZMA_DICT_SIZE_MIN };
+ lzma_options_lzma lzma2 = {
+ .dict_size = LZMA_DICT_SIZE_MIN,
+ };
lzma_filter filters[2];
filters[0].id = LZMA_FILTER_LZMA2;
@@ -103,7 +99,7 @@ block_encode_uncompressed(lzma_block *block, const uint8_t *in, size_t in_size,
// Set the above filter options to *block temporarily so that we can
// encode the Block Header.
- filters_orig = block->filters;
+ lzma_filter *filters_orig = block->filters;
block->filters = filters;
if (lzma_block_header_size(block) != LZMA_OK) {
@@ -132,17 +128,18 @@ block_encode_uncompressed(lzma_block *block, const uint8_t *in, size_t in_size,
*out_pos += block->header_size;
// Encode the data using LZMA2 uncompressed chunks.
+ size_t in_pos = 0;
+ uint8_t control = 0x01; // Dictionary reset
while (in_pos < in_size) {
- size_t copy_size;
-
// Control byte: Indicate uncompressed chunk, of which
// the first resets the dictionary.
out[(*out_pos)++] = control;
control = 0x02; // No dictionary reset
// Size of the uncompressed chunk
- copy_size = my_min(in_size - in_pos, LZMA2_CHUNK_MAX);
+ const size_t copy_size
+ = my_min(in_size - in_pos, LZMA2_CHUNK_MAX);
out[(*out_pos)++] = (copy_size - 1) >> 8;
out[(*out_pos)++] = (copy_size - 1) & 0xFF;
@@ -167,10 +164,6 @@ block_encode_normal(lzma_block *block, lzma_allocator *allocator,
const uint8_t *in, size_t in_size,
uint8_t *out, size_t *out_pos, size_t out_size)
{
- size_t out_start;
- lzma_next_coder raw_encoder = LZMA_NEXT_CODER_INIT;
- lzma_ret ret;
-
// Find out the size of the Block Header.
block->compressed_size = lzma2_bound(in_size);
if (block->compressed_size == 0)
@@ -183,7 +176,7 @@ block_encode_normal(lzma_block *block, lzma_allocator *allocator,
if (out_size - *out_pos <= block->header_size)
return LZMA_BUF_ERROR;
- out_start = *out_pos;
+ const size_t out_start = *out_pos;
*out_pos += block->header_size;
// Limit out_size so that we stop encoding if the output would grow
@@ -193,7 +186,8 @@ block_encode_normal(lzma_block *block, lzma_allocator *allocator,
// TODO: In many common cases this could be optimized to use
// significantly less memory.
- ret = lzma_raw_encoder_init(
+ lzma_next_coder raw_encoder = LZMA_NEXT_CODER_INIT;
+ lzma_ret ret = lzma_raw_encoder_init(
&raw_encoder, allocator, block->filters);
if (ret == LZMA_OK) {
@@ -232,10 +226,6 @@ lzma_block_buffer_encode(lzma_block *block, lzma_allocator *allocator,
const uint8_t *in, size_t in_size,
uint8_t *out, size_t *out_pos, size_t out_size)
{
- size_t check_size;
- lzma_ret ret;
- size_t i;
-
// Validate the arguments.
if (block == NULL || (in == NULL && in_size != 0) || out == NULL
|| out_pos == NULL || *out_pos > out_size)
@@ -259,7 +249,7 @@ lzma_block_buffer_encode(lzma_block *block, lzma_allocator *allocator,
out_size -= (out_size - *out_pos) & 3;
// Get the size of the Check field.
- check_size = lzma_check_size(block->check);
+ const size_t check_size = lzma_check_size(block->check);
assert(check_size != UINT32_MAX);
// Reserve space for the Check field.
@@ -269,7 +259,7 @@ lzma_block_buffer_encode(lzma_block *block, lzma_allocator *allocator,
out_size -= check_size;
// Do the actual compression.
- ret = block_encode_normal(block, allocator,
+ const lzma_ret ret = block_encode_normal(block, allocator,
in, in_size, out, out_pos, out_size);
if (ret != LZMA_OK) {
// If the error was something else than output buffer
@@ -291,7 +281,7 @@ lzma_block_buffer_encode(lzma_block *block, lzma_allocator *allocator,
// Block Padding. No buffer overflow here, because we already adjusted
// out_size so that (out_size - out_start) is a multiple of four.
// Thus, if the buffer is full, the loop body can never run.
- for (i = (size_t)(block->compressed_size); i & 3; ++i) {
+ for (size_t i = (size_t)(block->compressed_size); i & 3; ++i) {
assert(*out_pos < out_size);
out[(*out_pos)++] = 0x00;
}
diff --git a/Utilities/cmliblzma/liblzma/common/block_decoder.c b/Utilities/cmliblzma/liblzma/common/block_decoder.c
index 3de3851..a3ce6f4 100644
--- a/Utilities/cmliblzma/liblzma/common/block_decoder.c
+++ b/Utilities/cmliblzma/liblzma/common/block_decoder.c
@@ -233,7 +233,7 @@ lzma_block_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern LZMA_API(lzma_ret)
lzma_block_decoder(lzma_stream *strm, lzma_block *block)
{
- lzma_next_strm_init1(lzma_block_decoder_init, strm, block);
+ lzma_next_strm_init(lzma_block_decoder_init, strm, block);
strm->internal->supported_actions[LZMA_RUN] = true;
strm->internal->supported_actions[LZMA_FINISH] = true;
diff --git a/Utilities/cmliblzma/liblzma/common/block_encoder.c b/Utilities/cmliblzma/liblzma/common/block_encoder.c
index 63e2687..1eeb502 100644
--- a/Utilities/cmliblzma/liblzma/common/block_encoder.c
+++ b/Utilities/cmliblzma/liblzma/common/block_encoder.c
@@ -208,7 +208,7 @@ lzma_block_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern LZMA_API(lzma_ret)
lzma_block_encoder(lzma_stream *strm, lzma_block *block)
{
- lzma_next_strm_init1(lzma_block_encoder_init, strm, block);
+ lzma_next_strm_init(lzma_block_encoder_init, strm, block);
strm->internal->supported_actions[LZMA_RUN] = true;
strm->internal->supported_actions[LZMA_FINISH] = true;
diff --git a/Utilities/cmliblzma/liblzma/common/block_header_decoder.c b/Utilities/cmliblzma/liblzma/common/block_header_decoder.c
index f6e470e..2c9573e 100644
--- a/Utilities/cmliblzma/liblzma/common/block_header_decoder.c
+++ b/Utilities/cmliblzma/liblzma/common/block_header_decoder.c
@@ -17,12 +17,10 @@
static void
free_properties(lzma_block *block, lzma_allocator *allocator)
{
- size_t i;
-
// Free allocated filter options. The last array member is not
// touched after the initialization in the beginning of
// lzma_block_header_decode(), so we don't need to touch that here.
- for (i = 0; i < LZMA_FILTERS_MAX; ++i) {
+ for (size_t i = 0; i < LZMA_FILTERS_MAX; ++i) {
lzma_free(block->filters[i].options, allocator);
block->filters[i].id = LZMA_VLI_UNKNOWN;
block->filters[i].options = NULL;
@@ -36,13 +34,6 @@ extern LZMA_API(lzma_ret)
lzma_block_header_decode(lzma_block *block,
lzma_allocator *allocator, const uint8_t *in)
{
- const size_t filter_count = (in[1] & 3) + 1;
- size_t in_size;
- size_t i;
-
- // Start after the Block Header Size and Block Flags fields.
- size_t in_pos = 2;
-
// NOTE: We consider the header to be corrupt not only when the
// CRC32 doesn't match, but also when variable-length integers
// are invalid or over 63 bits, or if the header is too small
@@ -50,7 +41,7 @@ lzma_block_header_decode(lzma_block *block,
// Initialize the filter options array. This way the caller can
// safely free() the options even if an error occurs in this function.
- for (i = 0; i <= LZMA_FILTERS_MAX; ++i) {
+ for (size_t i = 0; i <= LZMA_FILTERS_MAX; ++i) {
block->filters[i].id = LZMA_VLI_UNKNOWN;
block->filters[i].options = NULL;
}
@@ -65,7 +56,7 @@ lzma_block_header_decode(lzma_block *block,
return LZMA_PROG_ERROR;
// Exclude the CRC32 field.
- in_size = block->header_size - 4;
+ const size_t in_size = block->header_size - 4;
// Verify CRC32
if (lzma_crc32(in, in_size, 0) != unaligned_read32le(in + in_size))
@@ -75,6 +66,9 @@ lzma_block_header_decode(lzma_block *block,
if (in[1] & 0x3C)
return LZMA_OPTIONS_ERROR;
+ // Start after the Block Header Size and Block Flags fields.
+ size_t in_pos = 2;
+
// Compressed Size
if (in[1] & 0x40) {
return_if_error(lzma_vli_decode(&block->compressed_size,
@@ -96,7 +90,8 @@ lzma_block_header_decode(lzma_block *block,
block->uncompressed_size = LZMA_VLI_UNKNOWN;
// Filter Flags
- for (i = 0; i < filter_count; ++i) {
+ const size_t filter_count = (in[1] & 3) + 1;
+ for (size_t i = 0; i < filter_count; ++i) {
const lzma_ret ret = lzma_filter_flags_decode(
&block->filters[i], allocator,
in, &in_pos, in_size);
diff --git a/Utilities/cmliblzma/liblzma/common/block_header_encoder.c b/Utilities/cmliblzma/liblzma/common/block_header_encoder.c
index 650295c..707dd0c 100644
--- a/Utilities/cmliblzma/liblzma/common/block_header_encoder.c
+++ b/Utilities/cmliblzma/liblzma/common/block_header_encoder.c
@@ -17,14 +17,12 @@
extern LZMA_API(lzma_ret)
lzma_block_header_size(lzma_block *block)
{
- size_t i;
+ if (block->version != 0)
+ return LZMA_OPTIONS_ERROR;
// Block Header Size + Block Flags + CRC32.
uint32_t size = 1 + 1 + 4;
- if (block->version != 0)
- return LZMA_OPTIONS_ERROR;
-
// Compressed Size
if (block->compressed_size != LZMA_VLI_UNKNOWN) {
const uint32_t add = lzma_vli_size(block->compressed_size);
@@ -47,13 +45,12 @@ lzma_block_header_size(lzma_block *block)
if (block->filters == NULL || block->filters[0].id == LZMA_VLI_UNKNOWN)
return LZMA_PROG_ERROR;
- for (i = 0; block->filters[i].id != LZMA_VLI_UNKNOWN; ++i) {
- uint32_t add;
-
+ for (size_t i = 0; block->filters[i].id != LZMA_VLI_UNKNOWN; ++i) {
// Don't allow too many filters.
if (i == LZMA_FILTERS_MAX)
return LZMA_PROG_ERROR;
+ uint32_t add;
return_if_error(lzma_filter_flags_size(&add,
block->filters + i));
@@ -76,23 +73,20 @@ lzma_block_header_size(lzma_block *block)
extern LZMA_API(lzma_ret)
lzma_block_header_encode(const lzma_block *block, uint8_t *out)
{
- size_t out_size;
- size_t out_pos = 2;
- size_t filter_count = 0;
-
// Validate everything but filters.
if (lzma_block_unpadded_size(block) == 0
|| !lzma_vli_is_valid(block->uncompressed_size))
return LZMA_PROG_ERROR;
// Indicate the size of the buffer _excluding_ the CRC32 field.
- out_size = block->header_size - 4;
+ const size_t out_size = block->header_size - 4;
// Store the Block Header Size.
out[0] = out_size / 4;
// We write Block Flags in pieces.
out[1] = 0x00;
+ size_t out_pos = 2;
// Compressed Size
if (block->compressed_size != LZMA_VLI_UNKNOWN) {
@@ -114,6 +108,7 @@ lzma_block_header_encode(const lzma_block *block, uint8_t *out)
if (block->filters == NULL || block->filters[0].id == LZMA_VLI_UNKNOWN)
return LZMA_PROG_ERROR;
+ size_t filter_count = 0;
do {
// There can be a maximum of four filters.
if (filter_count == LZMA_FILTERS_MAX)
diff --git a/Utilities/cmliblzma/liblzma/common/block_util.c b/Utilities/cmliblzma/liblzma/common/block_util.c
index 4cd34d1..62c9345 100644
--- a/Utilities/cmliblzma/liblzma/common/block_util.c
+++ b/Utilities/cmliblzma/liblzma/common/block_util.c
@@ -17,14 +17,11 @@
extern LZMA_API(lzma_ret)
lzma_block_compressed_size(lzma_block *block, lzma_vli unpadded_size)
{
- uint32_t container_size;
- lzma_vli compressed_size;
-
// Validate everything but Uncompressed Size and filters.
if (lzma_block_unpadded_size(block) == 0)
return LZMA_PROG_ERROR;
- container_size = block->header_size
+ const uint32_t container_size = block->header_size
+ lzma_check_size(block->check);
// Validate that Compressed Size will be greater than zero.
@@ -34,7 +31,7 @@ lzma_block_compressed_size(lzma_block *block, lzma_vli unpadded_size)
// Calculate what Compressed Size is supposed to be.
// If Compressed Size was present in Block Header,
// compare that the new value matches it.
- compressed_size = unpadded_size - container_size;
+ const lzma_vli compressed_size = unpadded_size - container_size;
if (block->compressed_size != LZMA_VLI_UNKNOWN
&& block->compressed_size != compressed_size)
return LZMA_DATA_ERROR;
@@ -48,8 +45,6 @@ lzma_block_compressed_size(lzma_block *block, lzma_vli unpadded_size)
extern LZMA_API(lzma_vli)
lzma_block_unpadded_size(const lzma_block *block)
{
- lzma_vli unpadded_size;
-
// Validate the values that we are interested in i.e. all but
// Uncompressed Size and the filters.
//
@@ -71,7 +66,7 @@ lzma_block_unpadded_size(const lzma_block *block)
return LZMA_VLI_UNKNOWN;
// Calculate Unpadded Size and validate it.
- unpadded_size = block->compressed_size
+ const lzma_vli unpadded_size = block->compressed_size
+ block->header_size
+ lzma_check_size(block->check);
diff --git a/Utilities/cmliblzma/liblzma/common/common.c b/Utilities/cmliblzma/liblzma/common/common.c
index d0105e1..b9e3860 100644
--- a/Utilities/cmliblzma/liblzma/common/common.c
+++ b/Utilities/cmliblzma/liblzma/common/common.c
@@ -38,12 +38,12 @@ lzma_version_string(void)
extern void * lzma_attribute((__malloc__)) lzma_attr_alloc_size(1)
lzma_alloc(size_t size, lzma_allocator *allocator)
{
- void *ptr;
-
// Some malloc() variants return NULL if called with size == 0.
if (size == 0)
size = 1;
+ void *ptr;
+
if (allocator != NULL && allocator->alloc != NULL)
ptr = allocator->alloc(allocator->opaque, 1, size);
else
@@ -173,10 +173,6 @@ lzma_strm_init(lzma_stream *strm)
extern LZMA_API(lzma_ret)
lzma_code(lzma_stream *strm, lzma_action action)
{
- size_t in_pos = 0;
- size_t out_pos = 0;
- lzma_ret ret;
-
// Sanity checks
if ((strm->next_in == NULL && strm->avail_in != 0)
|| (strm->next_out == NULL && strm->avail_out != 0)
@@ -252,7 +248,9 @@ lzma_code(lzma_stream *strm, lzma_action action)
return LZMA_PROG_ERROR;
}
- ret = strm->internal->next.code(
+ size_t in_pos = 0;
+ size_t out_pos = 0;
+ lzma_ret ret = strm->internal->next.code(
strm->internal->next.coder, strm->allocator,
strm->next_in, &in_pos, strm->avail_in,
strm->next_out, &out_pos, strm->avail_out, action);
diff --git a/Utilities/cmliblzma/liblzma/common/common.h b/Utilities/cmliblzma/liblzma/common/common.h
index a1a1591..6d7412f 100644
--- a/Utilities/cmliblzma/liblzma/common/common.h
+++ b/Utilities/cmliblzma/liblzma/common/common.h
@@ -155,18 +155,18 @@ struct lzma_next_coder_s {
};
-/// Constant to initialize lzma_next_coder structure
-static const lzma_next_coder LZMA_NEXT_CODER_INIT =
- {
- NULL,
- LZMA_VLI_UNKNOWN,
- (uintptr_t)(NULL),
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- };
+/// Macro to initialize lzma_next_coder structure
+#define LZMA_NEXT_CODER_INIT \
+ (lzma_next_coder){ \
+ .coder = NULL, \
+ .init = (uintptr_t)(NULL), \
+ .id = LZMA_VLI_UNKNOWN, \
+ .code = NULL, \
+ .end = NULL, \
+ .get_check = NULL, \
+ .memconfig = NULL, \
+ .update = NULL, \
+ }
/// Internal data for lzma_strm_init, lzma_code, and lzma_end. A pointer to
@@ -211,7 +211,7 @@ extern void lzma_free(void *ptr, lzma_allocator *allocator);
/// Allocates strm->internal if it is NULL, and initializes *strm and
-/// strm->internal. This function is only called via lzma_next_strm_init2 macro.
+/// strm->internal. This function is only called via lzma_next_strm_init macro.
extern lzma_ret lzma_strm_init(lzma_stream *strm);
/// Initializes the next filter in the chain, if any. This takes care of
@@ -269,37 +269,15 @@ do { \
/// (The function being called will use lzma_next_coder_init()). If
/// initialization fails, memory that wasn't freed by func() is freed
/// along strm->internal.
-#define lzma_next_strm_init1(func, strm, arg1) \
+#define lzma_next_strm_init(func, strm, ...) \
do { \
- lzma_ret ret_; \
- return_if_error(lzma_strm_init(strm)); \
- ret_ = func(&(strm)->internal->next, (strm)->allocator, arg1); \
- if (ret_ != LZMA_OK) { \
- lzma_end(strm); \
- return ret_; \
- } \
-} while (0)
-
-#define lzma_next_strm_init2(func, strm, arg1, arg2) \
-do { \
- lzma_ret ret_; \
- return_if_error(lzma_strm_init(strm)); \
- ret_ = func(&(strm)->internal->next, (strm)->allocator, arg1, arg2); \
- if (ret_ != LZMA_OK) { \
- lzma_end(strm); \
- return ret_; \
- } \
-} while (0)
-
-#define lzma_next_strm_init3(func, strm, arg1, arg2, arg3) \
-do { \
- lzma_ret ret_; \
- return_if_error(lzma_strm_init(strm)); \
- ret_ = func(&(strm)->internal->next, (strm)->allocator, arg1, arg2, arg3); \
- if (ret_ != LZMA_OK) { \
- lzma_end(strm); \
- return ret_; \
- } \
+ return_if_error(lzma_strm_init(strm)); \
+ const lzma_ret ret_ = func(&(strm)->internal->next, \
+ (strm)->allocator, __VA_ARGS__); \
+ if (ret_ != LZMA_OK) { \
+ lzma_end(strm); \
+ return ret_; \
+ } \
} while (0)
#endif
diff --git a/Utilities/cmliblzma/liblzma/common/filter_buffer_decoder.c b/Utilities/cmliblzma/liblzma/common/filter_buffer_decoder.c
index 65665c1..2d35ef8 100644
--- a/Utilities/cmliblzma/liblzma/common/filter_buffer_decoder.c
+++ b/Utilities/cmliblzma/liblzma/common/filter_buffer_decoder.c
@@ -18,26 +18,22 @@ lzma_raw_buffer_decode(const lzma_filter *filters, lzma_allocator *allocator,
const uint8_t *in, size_t *in_pos, size_t in_size,
uint8_t *out, size_t *out_pos, size_t out_size)
{
- lzma_next_coder next = LZMA_NEXT_CODER_INIT;
- size_t in_start;
- size_t out_start;
- lzma_ret ret;
-
// Validate what isn't validated later in filter_common.c.
if (in == NULL || in_pos == NULL || *in_pos > in_size || out == NULL
|| out_pos == NULL || *out_pos > out_size)
return LZMA_PROG_ERROR;
// Initialize the decoer.
+ lzma_next_coder next = LZMA_NEXT_CODER_INIT;
return_if_error(lzma_raw_decoder_init(&next, allocator, filters));
// Store the positions so that we can restore them if something
// goes wrong.
- in_start = *in_pos;
- out_start = *out_pos;
+ const size_t in_start = *in_pos;
+ const size_t out_start = *out_pos;
// Do the actual decoding and free decoder's memory.
- ret = next.code(next.coder, allocator, in, in_pos, in_size,
+ lzma_ret ret = next.code(next.coder, allocator, in, in_pos, in_size,
out, out_pos, out_size, LZMA_FINISH);
if (ret == LZMA_STREAM_END) {
diff --git a/Utilities/cmliblzma/liblzma/common/filter_buffer_encoder.c b/Utilities/cmliblzma/liblzma/common/filter_buffer_encoder.c
index b23329f..646e1b3 100644
--- a/Utilities/cmliblzma/liblzma/common/filter_buffer_encoder.c
+++ b/Utilities/cmliblzma/liblzma/common/filter_buffer_encoder.c
@@ -18,25 +18,22 @@ lzma_raw_buffer_encode(const lzma_filter *filters, lzma_allocator *allocator,
const uint8_t *in, size_t in_size, uint8_t *out,
size_t *out_pos, size_t out_size)
{
- lzma_next_coder next = LZMA_NEXT_CODER_INIT;
- size_t out_start;
- size_t in_pos = 0;
- lzma_ret ret;
-
// Validate what isn't validated later in filter_common.c.
if ((in == NULL && in_size != 0) || out == NULL
|| out_pos == NULL || *out_pos > out_size)
return LZMA_PROG_ERROR;
// Initialize the encoder
+ lzma_next_coder next = LZMA_NEXT_CODER_INIT;
return_if_error(lzma_raw_encoder_init(&next, allocator, filters));
// Store the output position so that we can restore it if
// something goes wrong.
- out_start = *out_pos;
+ const size_t out_start = *out_pos;
// Do the actual encoding and free coder's memory.
- ret = next.code(next.coder, allocator, in, &in_pos, in_size,
+ size_t in_pos = 0;
+ lzma_ret ret = next.code(next.coder, allocator, in, &in_pos, in_size,
out, out_pos, out_size, LZMA_FINISH);
lzma_next_end(&next, allocator);
diff --git a/Utilities/cmliblzma/liblzma/common/filter_common.c b/Utilities/cmliblzma/liblzma/common/filter_common.c
index d2b9e08..7c95b05 100644
--- a/Utilities/cmliblzma/liblzma/common/filter_common.c
+++ b/Utilities/cmliblzma/liblzma/common/filter_common.c
@@ -36,87 +36,87 @@ static const struct {
} features[] = {
#if defined (HAVE_ENCODER_LZMA1) || defined(HAVE_DECODER_LZMA1)
{
- LZMA_FILTER_LZMA1,
- sizeof(lzma_options_lzma),
- false,
- true,
- true,
+ .id = LZMA_FILTER_LZMA1,
+ .options_size = sizeof(lzma_options_lzma),
+ .non_last_ok = false,
+ .last_ok = true,
+ .changes_size = true,
},
#endif
#if defined(HAVE_ENCODER_LZMA2) || defined(HAVE_DECODER_LZMA2)
{
- LZMA_FILTER_LZMA2,
- sizeof(lzma_options_lzma),
- false,
- true,
- true,
+ .id = LZMA_FILTER_LZMA2,
+ .options_size = sizeof(lzma_options_lzma),
+ .non_last_ok = false,
+ .last_ok = true,
+ .changes_size = true,
},
#endif
#if defined(HAVE_ENCODER_X86) || defined(HAVE_DECODER_X86)
{
- LZMA_FILTER_X86,
- sizeof(lzma_options_bcj),
- true,
- false,
- false,
+ .id = LZMA_FILTER_X86,
+ .options_size = sizeof(lzma_options_bcj),
+ .non_last_ok = true,
+ .last_ok = false,
+ .changes_size = false,
},
#endif
#if defined(HAVE_ENCODER_POWERPC) || defined(HAVE_DECODER_POWERPC)
{
- LZMA_FILTER_POWERPC,
- sizeof(lzma_options_bcj),
- true,
- false,
- false,
+ .id = LZMA_FILTER_POWERPC,
+ .options_size = sizeof(lzma_options_bcj),
+ .non_last_ok = true,
+ .last_ok = false,
+ .changes_size = false,
},
#endif
#if defined(HAVE_ENCODER_IA64) || defined(HAVE_DECODER_IA64)
{
- LZMA_FILTER_IA64,
- sizeof(lzma_options_bcj),
- true,
- false,
- false,
+ .id = LZMA_FILTER_IA64,
+ .options_size = sizeof(lzma_options_bcj),
+ .non_last_ok = true,
+ .last_ok = false,
+ .changes_size = false,
},
#endif
#if defined(HAVE_ENCODER_ARM) || defined(HAVE_DECODER_ARM)
{
- LZMA_FILTER_ARM,
- sizeof(lzma_options_bcj),
- true,
- false,
- false,
+ .id = LZMA_FILTER_ARM,
+ .options_size = sizeof(lzma_options_bcj),
+ .non_last_ok = true,
+ .last_ok = false,
+ .changes_size = false,
},
#endif
#if defined(HAVE_ENCODER_ARMTHUMB) || defined(HAVE_DECODER_ARMTHUMB)
{
- LZMA_FILTER_ARMTHUMB,
- sizeof(lzma_options_bcj),
- true,
- false,
- false,
+ .id = LZMA_FILTER_ARMTHUMB,
+ .options_size = sizeof(lzma_options_bcj),
+ .non_last_ok = true,
+ .last_ok = false,
+ .changes_size = false,
},
#endif
#if defined(HAVE_ENCODER_SPARC) || defined(HAVE_DECODER_SPARC)
{
- LZMA_FILTER_SPARC,
- sizeof(lzma_options_bcj),
- true,
- false,
- false,
+ .id = LZMA_FILTER_SPARC,
+ .options_size = sizeof(lzma_options_bcj),
+ .non_last_ok = true,
+ .last_ok = false,
+ .changes_size = false,
},
#endif
#if defined(HAVE_ENCODER_DELTA) || defined(HAVE_DECODER_DELTA)
{
- LZMA_FILTER_DELTA,
- sizeof(lzma_options_delta),
- true,
- false,
- false,
+ .id = LZMA_FILTER_DELTA,
+ .options_size = sizeof(lzma_options_delta),
+ .non_last_ok = true,
+ .last_ok = false,
+ .changes_size = false,
},
#endif
{
- LZMA_VLI_UNKNOWN
+ .id = LZMA_VLI_UNKNOWN
}
};
@@ -125,12 +125,11 @@ extern LZMA_API(lzma_ret)
lzma_filters_copy(const lzma_filter *src, lzma_filter *dest,
lzma_allocator *allocator)
{
- size_t i;
- lzma_ret ret;
-
if (src == NULL || dest == NULL)
return LZMA_PROG_ERROR;
+ lzma_ret ret;
+ size_t i;
for (i = 0; src[i].id != LZMA_VLI_UNKNOWN; ++i) {
// There must be a maximum of four filters plus
// the array terminator.
@@ -194,6 +193,10 @@ error:
static lzma_ret
validate_chain(const lzma_filter *filters, size_t *count)
{
+ // There must be at least one filter.
+ if (filters == NULL || filters[0].id == LZMA_VLI_UNKNOWN)
+ return LZMA_PROG_ERROR;
+
// Number of non-last filters that may change the size of the data
// significantly (that is, more than 1-2 % or so).
size_t changes_size_count = 0;
@@ -207,11 +210,6 @@ validate_chain(const lzma_filter *filters, size_t *count)
bool last_ok = false;
size_t i = 0;
-
- // There must be at least one filter.
- if (filters == NULL || filters[0].id == LZMA_VLI_UNKNOWN)
- return LZMA_PROG_ERROR;
-
do {
size_t j;
for (j = 0; filters[i].id != features[j].id; ++j)
@@ -245,17 +243,14 @@ lzma_raw_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
const lzma_filter *options,
lzma_filter_find coder_find, bool is_encoder)
{
- lzma_filter_info filters[LZMA_FILTERS_MAX + 1];
- size_t count;
- size_t i;
- lzma_ret ret;
-
// Do some basic validation and get the number of filters.
+ size_t count;
return_if_error(validate_chain(options, &count));
// Set the filter functions and copy the options pointer.
+ lzma_filter_info filters[LZMA_FILTERS_MAX + 1];
if (is_encoder) {
- for (i = 0; i < count; ++i) {
+ for (size_t i = 0; i < count; ++i) {
// The order of the filters is reversed in the
// encoder. It allows more efficient handling
// of the uncompressed data.
@@ -271,7 +266,7 @@ lzma_raw_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
filters[j].options = options[i].options;
}
} else {
- for (i = 0; i < count; ++i) {
+ for (size_t i = 0; i < count; ++i) {
const lzma_filter_coder *const fc
= coder_find(options[i].id);
if (fc == NULL || fc->init == NULL)
@@ -288,7 +283,7 @@ lzma_raw_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
filters[count].init = NULL;
// Initialize the filters.
- ret = lzma_next_filter_init(next, allocator, filters);
+ const lzma_ret ret = lzma_next_filter_init(next, allocator, filters);
if (ret != LZMA_OK)
lzma_next_end(next, allocator);
@@ -300,9 +295,6 @@ extern uint64_t
lzma_raw_coder_memusage(lzma_filter_find coder_find,
const lzma_filter *filters)
{
- uint64_t total = 0;
- size_t i = 0;
-
// The chain has to have at least one filter.
{
size_t tmp;
@@ -310,6 +302,9 @@ lzma_raw_coder_memusage(lzma_filter_find coder_find,
return UINT64_MAX;
}
+ uint64_t total = 0;
+ size_t i = 0;
+
do {
const lzma_filter_coder *const fc
= coder_find(filters[i].id);
diff --git a/Utilities/cmliblzma/liblzma/common/filter_decoder.c b/Utilities/cmliblzma/liblzma/common/filter_decoder.c
index cce2b30..1ebbe2a 100644
--- a/Utilities/cmliblzma/liblzma/common/filter_decoder.c
+++ b/Utilities/cmliblzma/liblzma/common/filter_decoder.c
@@ -44,74 +44,74 @@ typedef struct {
static const lzma_filter_decoder decoders[] = {
#ifdef HAVE_DECODER_LZMA1
{
- LZMA_FILTER_LZMA1,
- &lzma_lzma_decoder_init,
- &lzma_lzma_decoder_memusage,
- &lzma_lzma_props_decode,
+ .id = LZMA_FILTER_LZMA1,
+ .init = &lzma_lzma_decoder_init,
+ .memusage = &lzma_lzma_decoder_memusage,
+ .props_decode = &lzma_lzma_props_decode,
},
#endif
#ifdef HAVE_DECODER_LZMA2
{
- LZMA_FILTER_LZMA2,
- &lzma_lzma2_decoder_init,
- &lzma_lzma2_decoder_memusage,
- &lzma_lzma2_props_decode,
+ .id = LZMA_FILTER_LZMA2,
+ .init = &lzma_lzma2_decoder_init,
+ .memusage = &lzma_lzma2_decoder_memusage,
+ .props_decode = &lzma_lzma2_props_decode,
},
#endif
#ifdef HAVE_DECODER_X86
{
- LZMA_FILTER_X86,
- &lzma_simple_x86_decoder_init,
- NULL,
- &lzma_simple_props_decode,
+ .id = LZMA_FILTER_X86,
+ .init = &lzma_simple_x86_decoder_init,
+ .memusage = NULL,
+ .props_decode = &lzma_simple_props_decode,
},
#endif
#ifdef HAVE_DECODER_POWERPC
{
- LZMA_FILTER_POWERPC,
- &lzma_simple_powerpc_decoder_init,
- NULL,
- &lzma_simple_props_decode,
+ .id = LZMA_FILTER_POWERPC,
+ .init = &lzma_simple_powerpc_decoder_init,
+ .memusage = NULL,
+ .props_decode = &lzma_simple_props_decode,
},
#endif
#ifdef HAVE_DECODER_IA64
{
- LZMA_FILTER_IA64,
- &lzma_simple_ia64_decoder_init,
- NULL,
- &lzma_simple_props_decode,
+ .id = LZMA_FILTER_IA64,
+ .init = &lzma_simple_ia64_decoder_init,
+ .memusage = NULL,
+ .props_decode = &lzma_simple_props_decode,
},
#endif
#ifdef HAVE_DECODER_ARM
{
- LZMA_FILTER_ARM,
- &lzma_simple_arm_decoder_init,
- NULL,
- &lzma_simple_props_decode,
+ .id = LZMA_FILTER_ARM,
+ .init = &lzma_simple_arm_decoder_init,
+ .memusage = NULL,
+ .props_decode = &lzma_simple_props_decode,
},
#endif
#ifdef HAVE_DECODER_ARMTHUMB
{
- LZMA_FILTER_ARMTHUMB,
- &lzma_simple_armthumb_decoder_init,
- NULL,
- &lzma_simple_props_decode,
+ .id = LZMA_FILTER_ARMTHUMB,
+ .init = &lzma_simple_armthumb_decoder_init,
+ .memusage = NULL,
+ .props_decode = &lzma_simple_props_decode,
},
#endif
#ifdef HAVE_DECODER_SPARC
{
- LZMA_FILTER_SPARC,
- &lzma_simple_sparc_decoder_init,
- NULL,
- &lzma_simple_props_decode,
+ .id = LZMA_FILTER_SPARC,
+ .init = &lzma_simple_sparc_decoder_init,
+ .memusage = NULL,
+ .props_decode = &lzma_simple_props_decode,
},
#endif
#ifdef HAVE_DECODER_DELTA
{
- LZMA_FILTER_DELTA,
- &lzma_delta_decoder_init,
- &lzma_delta_coder_memusage,
- &lzma_delta_props_decode,
+ .id = LZMA_FILTER_DELTA,
+ .init = &lzma_delta_decoder_init,
+ .memusage = &lzma_delta_coder_memusage,
+ .props_decode = &lzma_delta_props_decode,
},
#endif
};
@@ -120,8 +120,7 @@ static const lzma_filter_decoder decoders[] = {
static const lzma_filter_decoder *
decoder_find(lzma_vli id)
{
- size_t i;
- for (i = 0; i < ARRAY_SIZE(decoders); ++i)
+ for (size_t i = 0; i < ARRAY_SIZE(decoders); ++i)
if (decoders[i].id == id)
return decoders + i;
@@ -148,7 +147,7 @@ lzma_raw_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern LZMA_API(lzma_ret)
lzma_raw_decoder(lzma_stream *strm, const lzma_filter *options)
{
- lzma_next_strm_init1(lzma_raw_decoder_init, strm, options);
+ lzma_next_strm_init(lzma_raw_decoder_init, strm, options);
strm->internal->supported_actions[LZMA_RUN] = true;
strm->internal->supported_actions[LZMA_FINISH] = true;
@@ -169,11 +168,10 @@ extern LZMA_API(lzma_ret)
lzma_properties_decode(lzma_filter *filter, lzma_allocator *allocator,
const uint8_t *props, size_t props_size)
{
- const lzma_filter_decoder *const fd = decoder_find(filter->id);
-
// Make it always NULL so that the caller can always safely free() it.
filter->options = NULL;
+ const lzma_filter_decoder *const fd = decoder_find(filter->id);
if (fd == NULL)
return LZMA_OPTIONS_ERROR;
diff --git a/Utilities/cmliblzma/liblzma/common/filter_encoder.c b/Utilities/cmliblzma/liblzma/common/filter_encoder.c
index 9fdb100..635d812 100644
--- a/Utilities/cmliblzma/liblzma/common/filter_encoder.c
+++ b/Utilities/cmliblzma/liblzma/common/filter_encoder.c
@@ -56,101 +56,95 @@ typedef struct {
static const lzma_filter_encoder encoders[] = {
#ifdef HAVE_ENCODER_LZMA1
{
- LZMA_FILTER_LZMA1,
- &lzma_lzma_encoder_init,
- &lzma_lzma_encoder_memusage,
- NULL, // FIXME
- NULL,
- 5,
- &lzma_lzma_props_encode,
+ .id = LZMA_FILTER_LZMA1,
+ .init = &lzma_lzma_encoder_init,
+ .memusage = &lzma_lzma_encoder_memusage,
+ .chunk_size = NULL, // FIXME
+ .props_size_get = NULL,
+ .props_size_fixed = 5,
+ .props_encode = &lzma_lzma_props_encode,
},
#endif
#ifdef HAVE_ENCODER_LZMA2
{
- LZMA_FILTER_LZMA2,
- &lzma_lzma2_encoder_init,
- &lzma_lzma2_encoder_memusage,
- NULL, // FIXME
- NULL,
- 1,
- &lzma_lzma2_props_encode,
+ .id = LZMA_FILTER_LZMA2,
+ .init = &lzma_lzma2_encoder_init,
+ .memusage = &lzma_lzma2_encoder_memusage,
+ .chunk_size = NULL, // FIXME
+ .props_size_get = NULL,
+ .props_size_fixed = 1,
+ .props_encode = &lzma_lzma2_props_encode,
},
#endif
#ifdef HAVE_ENCODER_X86
{
- LZMA_FILTER_X86,
- &lzma_simple_x86_encoder_init,
- NULL,
- NULL,
- &lzma_simple_props_size,
- 0,
- &lzma_simple_props_encode,
+ .id = LZMA_FILTER_X86,
+ .init = &lzma_simple_x86_encoder_init,
+ .memusage = NULL,
+ .chunk_size = NULL,
+ .props_size_get = &lzma_simple_props_size,
+ .props_encode = &lzma_simple_props_encode,
},
#endif
#ifdef HAVE_ENCODER_POWERPC
{
- LZMA_FILTER_POWERPC,
- &lzma_simple_powerpc_encoder_init,
- NULL,
- NULL,
- &lzma_simple_props_size,
- 0,
- &lzma_simple_props_encode,
+ .id = LZMA_FILTER_POWERPC,
+ .init = &lzma_simple_powerpc_encoder_init,
+ .memusage = NULL,
+ .chunk_size = NULL,
+ .props_size_get = &lzma_simple_props_size,
+ .props_encode = &lzma_simple_props_encode,
},
#endif
#ifdef HAVE_ENCODER_IA64
{
- LZMA_FILTER_IA64,
- &lzma_simple_ia64_encoder_init,
- NULL,
- NULL,
- &lzma_simple_props_size,
- 0,
- &lzma_simple_props_encode,
+ .id = LZMA_FILTER_IA64,
+ .init = &lzma_simple_ia64_encoder_init,
+ .memusage = NULL,
+ .chunk_size = NULL,
+ .props_size_get = &lzma_simple_props_size,
+ .props_encode = &lzma_simple_props_encode,
},
#endif
#ifdef HAVE_ENCODER_ARM
{
- LZMA_FILTER_ARM,
- &lzma_simple_arm_encoder_init,
- NULL,
- NULL,
- &lzma_simple_props_size,
- 0,
- &lzma_simple_props_encode,
+ .id = LZMA_FILTER_ARM,
+ .init = &lzma_simple_arm_encoder_init,
+ .memusage = NULL,
+ .chunk_size = NULL,
+ .props_size_get = &lzma_simple_props_size,
+ .props_encode = &lzma_simple_props_encode,
},
#endif
#ifdef HAVE_ENCODER_ARMTHUMB
{
- LZMA_FILTER_ARMTHUMB,
- &lzma_simple_armthumb_encoder_init,
- NULL,
- NULL,
- &lzma_simple_props_size,
- 0,
- &lzma_simple_props_encode,
+ .id = LZMA_FILTER_ARMTHUMB,
+ .init = &lzma_simple_armthumb_encoder_init,
+ .memusage = NULL,
+ .chunk_size = NULL,
+ .props_size_get = &lzma_simple_props_size,
+ .props_encode = &lzma_simple_props_encode,
},
#endif
#ifdef HAVE_ENCODER_SPARC
{
- LZMA_FILTER_SPARC,
- &lzma_simple_sparc_encoder_init,
- NULL,
- NULL,
- &lzma_simple_props_size,
- 0,
- &lzma_simple_props_encode,
+ .id = LZMA_FILTER_SPARC,
+ .init = &lzma_simple_sparc_encoder_init,
+ .memusage = NULL,
+ .chunk_size = NULL,
+ .props_size_get = &lzma_simple_props_size,
+ .props_encode = &lzma_simple_props_encode,
},
#endif
#ifdef HAVE_ENCODER_DELTA
{
- LZMA_FILTER_DELTA,
- &lzma_delta_encoder_init,
- &lzma_delta_coder_memusage,
- NULL,
- NULL,
- 1,
- &lzma_delta_props_encode,
+ .id = LZMA_FILTER_DELTA,
+ .init = &lzma_delta_encoder_init,
+ .memusage = &lzma_delta_coder_memusage,
+ .chunk_size = NULL,
+ .props_size_get = NULL,
+ .props_size_fixed = 1,
+ .props_encode = &lzma_delta_props_encode,
},
#endif
};
@@ -159,8 +153,7 @@ static const lzma_filter_encoder encoders[] = {
static const lzma_filter_encoder *
encoder_find(lzma_vli id)
{
- size_t i;
- for (i = 0; i < ARRAY_SIZE(encoders); ++i)
+ for (size_t i = 0; i < ARRAY_SIZE(encoders); ++i)
if (encoders[i].id == id)
return encoders + i;
@@ -178,10 +171,6 @@ lzma_filter_encoder_is_supported(lzma_vli id)
extern LZMA_API(lzma_ret)
lzma_filters_update(lzma_stream *strm, const lzma_filter *filters)
{
- size_t i;
- size_t count = 1;
- lzma_filter reversed_filters[LZMA_FILTERS_MAX + 1];
-
if (strm->internal->next.update == NULL)
return LZMA_PROG_ERROR;
@@ -191,10 +180,12 @@ lzma_filters_update(lzma_stream *strm, const lzma_filter *filters)
// The actual filter chain in the encoder is reversed. Some things
// still want the normal order chain, so we provide both.
+ size_t count = 1;
while (filters[count].id != LZMA_VLI_UNKNOWN)
++count;
- for (i = 0; i < count; ++i)
+ lzma_filter reversed_filters[LZMA_FILTERS_MAX + 1];
+ for (size_t i = 0; i < count; ++i)
reversed_filters[count - i - 1] = filters[i];
reversed_filters[count].id = LZMA_VLI_UNKNOWN;
@@ -216,7 +207,7 @@ lzma_raw_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern LZMA_API(lzma_ret)
lzma_raw_encoder(lzma_stream *strm, const lzma_filter *options)
{
- lzma_next_strm_init3(lzma_raw_coder_init, strm, options,
+ lzma_next_strm_init(lzma_raw_coder_init, strm, options,
(lzma_filter_find)(&encoder_find), true);
strm->internal->supported_actions[LZMA_RUN] = true;
diff --git a/Utilities/cmliblzma/liblzma/common/filter_flags_decoder.c b/Utilities/cmliblzma/liblzma/common/filter_flags_decoder.c
index aa2dbd5..caae10c 100644
--- a/Utilities/cmliblzma/liblzma/common/filter_flags_decoder.c
+++ b/Utilities/cmliblzma/liblzma/common/filter_flags_decoder.c
@@ -18,9 +18,6 @@ lzma_filter_flags_decode(
lzma_filter *filter, lzma_allocator *allocator,
const uint8_t *in, size_t *in_pos, size_t in_size)
{
- lzma_vli props_size;
- lzma_ret ret;
-
// Set the pointer to NULL so the caller can always safely free it.
filter->options = NULL;
@@ -32,6 +29,7 @@ lzma_filter_flags_decode(
return LZMA_DATA_ERROR;
// Size of Properties
+ lzma_vli props_size;
return_if_error(lzma_vli_decode(&props_size, NULL,
in, in_pos, in_size));
@@ -39,7 +37,7 @@ lzma_filter_flags_decode(
if (in_size - *in_pos < props_size)
return LZMA_DATA_ERROR;
- ret = lzma_properties_decode(
+ const lzma_ret ret = lzma_properties_decode(
filter, allocator, in + *in_pos, props_size);
*in_pos += props_size;
diff --git a/Utilities/cmliblzma/liblzma/common/filter_flags_encoder.c b/Utilities/cmliblzma/liblzma/common/filter_flags_encoder.c
index 755c407..d110566 100644
--- a/Utilities/cmliblzma/liblzma/common/filter_flags_encoder.c
+++ b/Utilities/cmliblzma/liblzma/common/filter_flags_encoder.c
@@ -31,8 +31,6 @@ extern LZMA_API(lzma_ret)
lzma_filter_flags_encode(const lzma_filter *filter,
uint8_t *out, size_t *out_pos, size_t out_size)
{
- uint32_t props_size;
-
// Filter ID
if (filter->id >= LZMA_FILTER_RESERVED_START)
return LZMA_PROG_ERROR;
@@ -41,6 +39,7 @@ lzma_filter_flags_encode(const lzma_filter *filter,
out, out_pos, out_size));
// Size of Properties
+ uint32_t props_size;
return_if_error(lzma_properties_size(&props_size, filter));
return_if_error(lzma_vli_encode(props_size, NULL,
out, out_pos, out_size));
diff --git a/Utilities/cmliblzma/liblzma/common/index.c b/Utilities/cmliblzma/liblzma/common/index.c
index f0f67ca..9af4bc1 100644
--- a/Utilities/cmliblzma/liblzma/common/index.c
+++ b/Utilities/cmliblzma/liblzma/common/index.c
@@ -230,7 +230,6 @@ index_tree_end(index_tree *tree, lzma_allocator *allocator,
static void
index_tree_append(index_tree *tree, index_tree_node *node)
{
- uint32_t up;
node->parent = tree->rightmost;
node->left = NULL;
node->right = NULL;
@@ -259,10 +258,8 @@ index_tree_append(index_tree *tree, index_tree_node *node)
// and thus know the state of the tree just by looking at the node
// count. From the node count we can calculate how many steps to go
// up in the tree to find the rotation root.
- up = tree->count ^ (UINT32_C(1) << bsr32(tree->count));
+ uint32_t up = tree->count ^ (UINT32_C(1) << bsr32(tree->count));
if (up != 0) {
- index_tree_node *pivot;
-
// Locate the root node for the rotation.
up = ctz32(tree->count) + 2;
do {
@@ -270,7 +267,7 @@ index_tree_append(index_tree *tree, index_tree_node *node)
} while (--up > 0);
// Rotate left using node as the rotation root.
- pivot = node->right;
+ index_tree_node *pivot = node->right;
if (node->parent == NULL) {
tree->root = pivot;
@@ -400,13 +397,11 @@ index_init_plain(lzma_allocator *allocator)
extern LZMA_API(lzma_index *)
lzma_index_init(lzma_allocator *allocator)
{
- index_stream *s;
-
lzma_index *i = index_init_plain(allocator);
if (i == NULL)
return NULL;
- s = index_stream_init(0, 0, 1, 0, allocator);
+ index_stream *s = index_stream_init(0, 0, 1, 0, allocator);
if (s == NULL) {
lzma_free(i, allocator);
return NULL;
@@ -605,8 +600,6 @@ lzma_index_padding_size(const lzma_index *i)
extern LZMA_API(lzma_ret)
lzma_index_stream_flags(lzma_index *i, const lzma_stream_flags *stream_flags)
{
- index_stream *s;
-
if (i == NULL || stream_flags == NULL)
return LZMA_PROG_ERROR;
@@ -614,7 +607,7 @@ lzma_index_stream_flags(lzma_index *i, const lzma_stream_flags *stream_flags)
return_if_error(lzma_stream_flags_compare(
stream_flags, stream_flags));
- s = (index_stream *)(i->streams.rightmost);
+ index_stream *s = (index_stream *)(i->streams.rightmost);
s->stream_flags = *stream_flags;
return LZMA_OK;
@@ -624,17 +617,14 @@ lzma_index_stream_flags(lzma_index *i, const lzma_stream_flags *stream_flags)
extern LZMA_API(lzma_ret)
lzma_index_stream_padding(lzma_index *i, lzma_vli stream_padding)
{
- index_stream *s;
- lzma_vli old_stream_padding;
-
if (i == NULL || stream_padding > LZMA_VLI_MAX
|| (stream_padding & 3) != 0)
return LZMA_PROG_ERROR;
- s = (index_stream *)(i->streams.rightmost);
+ index_stream *s = (index_stream *)(i->streams.rightmost);
// Check that the new value won't make the file grow too big.
- old_stream_padding = s->stream_padding;
+ const lzma_vli old_stream_padding = s->stream_padding;
s->stream_padding = 0;
if (lzma_index_file_size(i) + stream_padding > LZMA_VLI_MAX) {
s->stream_padding = old_stream_padding;
@@ -650,26 +640,20 @@ extern LZMA_API(lzma_ret)
lzma_index_append(lzma_index *i, lzma_allocator *allocator,
lzma_vli unpadded_size, lzma_vli uncompressed_size)
{
- index_stream *s;
- index_group *g;
- lzma_vli compressed_base;
- lzma_vli uncompressed_base;
- uint32_t index_list_size_add;
-
// Validate.
if (i == NULL || unpadded_size < UNPADDED_SIZE_MIN
|| unpadded_size > UNPADDED_SIZE_MAX
|| uncompressed_size > LZMA_VLI_MAX)
return LZMA_PROG_ERROR;
- s = (index_stream *)(i->streams.rightmost);
- g = (index_group *)(s->groups.rightmost);
+ index_stream *s = (index_stream *)(i->streams.rightmost);
+ index_group *g = (index_group *)(s->groups.rightmost);
- compressed_base = g == NULL ? 0
+ const lzma_vli compressed_base = g == NULL ? 0
: vli_ceil4(g->records[g->last].unpadded_sum);
- uncompressed_base = g == NULL ? 0
+ const lzma_vli uncompressed_base = g == NULL ? 0
: g->records[g->last].uncompressed_sum;
- index_list_size_add = lzma_vli_size(unpadded_size)
+ const uint32_t index_list_size_add = lzma_vli_size(unpadded_size)
+ lzma_vli_size(uncompressed_size);
// Check that the file size will stay within limits.
@@ -783,7 +767,6 @@ extern LZMA_API(lzma_ret)
lzma_index_cat(lzma_index *restrict dest, lzma_index *restrict src,
lzma_allocator *allocator)
{
- index_cat_info info;
const lzma_vli dest_file_size = lzma_index_file_size(dest);
// Check that we don't exceed the file size limits.
@@ -813,12 +796,10 @@ lzma_index_cat(lzma_index *restrict dest, lzma_index *restrict src,
index_stream *s = (index_stream *)(dest->streams.rightmost);
index_group *g = (index_group *)(s->groups.rightmost);
if (g != NULL && g->last + 1 < g->allocated) {
- index_group *newg;
-
assert(g->node.left == NULL);
assert(g->node.right == NULL);
- newg = lzma_alloc(sizeof(index_group)
+ index_group *newg = lzma_alloc(sizeof(index_group)
+ (g->last + 1)
* sizeof(index_record),
allocator);
@@ -853,12 +834,13 @@ lzma_index_cat(lzma_index *restrict dest, lzma_index *restrict src,
// Add all the Streams from src to dest. Update the base offsets
// of each Stream from src.
- info.uncompressed_size = dest->uncompressed_size;
- info.file_size = dest_file_size;
- info.stream_number_add = dest->streams.count;
- info.block_number_add = dest->record_count;
- info.streams = &dest->streams;
-
+ const index_cat_info info = {
+ .uncompressed_size = dest->uncompressed_size,
+ .file_size = dest_file_size,
+ .stream_number_add = dest->streams.count,
+ .block_number_add = dest->record_count,
+ .streams = &dest->streams,
+ };
index_cat_helper(&info, (index_stream *)(src->streams.root));
// Update info about all the combined Streams.
@@ -879,17 +861,12 @@ lzma_index_cat(lzma_index *restrict dest, lzma_index *restrict src,
static index_stream *
index_dup_stream(const index_stream *src, lzma_allocator *allocator)
{
- index_stream *dest;
- index_group *destg;
- index_group *srcg;
- size_t i = 0;
-
// Catch a somewhat theoretical integer overflow.
if (src->record_count > PREALLOC_MAX)
return NULL;
// Allocate and initialize a new Stream.
- dest = index_stream_init(src->node.compressed_base,
+ index_stream *dest = index_stream_init(src->node.compressed_base,
src->node.uncompressed_base, src->number,
src->block_number_base, allocator);
@@ -907,7 +884,7 @@ index_dup_stream(const index_stream *src, lzma_allocator *allocator)
// Allocate memory for the Records. We put all the Records into
// a single group. It's simplest and also tends to make
// lzma_index_locate() a little bit faster with very big Indexes.
- destg = lzma_alloc(sizeof(index_group)
+ index_group *destg = lzma_alloc(sizeof(index_group)
+ src->record_count * sizeof(index_record),
allocator);
if (destg == NULL) {
@@ -923,7 +900,8 @@ index_dup_stream(const index_stream *src, lzma_allocator *allocator)
destg->last = src->record_count - 1;
// Go through all the groups in src and copy the Records into destg.
- srcg = (index_group *)(src->groups.leftmost);
+ const index_group *srcg = (const index_group *)(src->groups.leftmost);
+ size_t i = 0;
do {
memcpy(destg->records + i, srcg->records,
(srcg->last + 1) * sizeof(index_record));
@@ -943,9 +921,6 @@ index_dup_stream(const index_stream *src, lzma_allocator *allocator)
extern LZMA_API(lzma_index *)
lzma_index_dup(const lzma_index *src, lzma_allocator *allocator)
{
- index_stream *srcstream;
- index_stream *deststream;
-
// Allocate the base structure (no initial Stream).
lzma_index *dest = index_init_plain(allocator);
if (dest == NULL)
@@ -958,9 +933,11 @@ lzma_index_dup(const lzma_index *src, lzma_allocator *allocator)
dest->index_list_size = src->index_list_size;
// Copy the Streams and the groups in them.
- srcstream = (index_stream *)(src->streams.leftmost);
+ const index_stream *srcstream
+ = (const index_stream *)(src->streams.leftmost);
do {
- deststream = index_dup_stream(srcstream, allocator);
+ index_stream *deststream = index_dup_stream(
+ srcstream, allocator);
if (deststream == NULL) {
lzma_index_end(dest, allocator);
return NULL;
@@ -1119,19 +1096,14 @@ lzma_index_iter_rewind(lzma_index_iter *iter)
extern LZMA_API(lzma_bool)
lzma_index_iter_next(lzma_index_iter *iter, lzma_index_iter_mode mode)
{
- const lzma_index *i;
- const index_stream *stream;
- const index_group *group;
- size_t record;
-
// Catch unsupported mode values.
if ((unsigned int)(mode) > LZMA_INDEX_ITER_NONEMPTY_BLOCK)
return true;
- i = iter->internal[ITER_INDEX].p;
- stream = iter->internal[ITER_STREAM].p;
- group = NULL;
- record = iter->internal[ITER_RECORD].s;
+ const lzma_index *i = iter->internal[ITER_INDEX].p;
+ const index_stream *stream = iter->internal[ITER_STREAM].p;
+ const index_group *group = NULL;
+ size_t record = iter->internal[ITER_RECORD].s;
// If we are being asked for the next Stream, leave group to NULL
	// so that the rest of this function thinks that this Stream
@@ -1231,10 +1203,6 @@ again:
extern LZMA_API(lzma_bool)
lzma_index_iter_locate(lzma_index_iter *iter, lzma_vli target)
{
- const index_stream *stream;
- const index_group *group;
- size_t left, right;
-
const lzma_index *i = iter->internal[ITER_INDEX].p;
// If the target is past the end of the file, return immediately.
@@ -1242,12 +1210,12 @@ lzma_index_iter_locate(lzma_index_iter *iter, lzma_vli target)
return true;
// Locate the Stream containing the target offset.
- stream = index_tree_locate(&i->streams, target);
+ const index_stream *stream = index_tree_locate(&i->streams, target);
assert(stream != NULL);
target -= stream->node.uncompressed_base;
// Locate the group containing the target offset.
- group = index_tree_locate(&stream->groups, target);
+ const index_group *group = index_tree_locate(&stream->groups, target);
assert(group != NULL);
// Use binary search to locate the exact Record. It is the first
@@ -1255,8 +1223,8 @@ lzma_index_iter_locate(lzma_index_iter *iter, lzma_vli target)
	// This is because we want the rightmost Record that fulfills the
// search criterion. It is possible that there are empty Blocks;
// we don't want to return them.
- left = 0;
- right = group->last;
+ size_t left = 0;
+ size_t right = group->last;
while (left < right) {
const size_t pos = left + (right - left) / 2;
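The index.c hunks above all follow the same C99 cleanup: block-top declarations are moved to the point of first use (often made const), and structs such as index_cat_info are filled with designated initializers instead of member-by-member assignment. A minimal, self-contained sketch of that before/after style; the cat_info type and use_info() helper below are hypothetical stand-ins, not liblzma API:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a small info struct; not the liblzma type. */
typedef struct {
	uint64_t uncompressed_size;
	uint64_t file_size;
	uint32_t stream_number_add;
} cat_info;

static void
use_info(const cat_info *info)
{
	printf("%llu %llu %u\n",
			(unsigned long long)(info->uncompressed_size),
			(unsigned long long)(info->file_size),
			(unsigned)(info->stream_number_add));
}

int
main(void)
{
	// Old style: "cat_info info;" at the top of the block, followed by
	// info.uncompressed_size = 123; and so on before first use.
	//
	// Style used throughout this patch: declare at first use, initialize
	// every member by name, and make the object const.
	const cat_info info = {
		.uncompressed_size = 123,
		.file_size = 456,
		.stream_number_add = 7,
	};

	use_info(&info);
	return 0;
}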
diff --git a/Utilities/cmliblzma/liblzma/common/index_decoder.c b/Utilities/cmliblzma/liblzma/common/index_decoder.c
index 6c91f10..83c8a3a 100644
--- a/Utilities/cmliblzma/liblzma/common/index_decoder.c
+++ b/Utilities/cmliblzma/liblzma/common/index_decoder.c
@@ -289,7 +289,7 @@ index_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern LZMA_API(lzma_ret)
lzma_index_decoder(lzma_stream *strm, lzma_index **i, uint64_t memlimit)
{
- lzma_next_strm_init2(index_decoder_init, strm, i, memlimit);
+ lzma_next_strm_init(index_decoder_init, strm, i, memlimit);
strm->internal->supported_actions[LZMA_RUN] = true;
strm->internal->supported_actions[LZMA_FINISH] = true;
@@ -303,23 +303,21 @@ lzma_index_buffer_decode(
lzma_index **i, uint64_t *memlimit, lzma_allocator *allocator,
const uint8_t *in, size_t *in_pos, size_t in_size)
{
- lzma_coder coder;
- lzma_ret ret;
-
- // Store the input start position so that we can restore it in case
- // of an error.
- const size_t in_start = *in_pos;
-
// Sanity checks
if (i == NULL || memlimit == NULL
|| in == NULL || in_pos == NULL || *in_pos > in_size)
return LZMA_PROG_ERROR;
// Initialize the decoder.
+ lzma_coder coder;
return_if_error(index_decoder_reset(&coder, allocator, i, *memlimit));
+ // Store the input start position so that we can restore it in case
+ // of an error.
+ const size_t in_start = *in_pos;
+
// Do the actual decoding.
- ret = index_decode(&coder, allocator, in, in_pos, in_size,
+ lzma_ret ret = index_decode(&coder, allocator, in, in_pos, in_size,
NULL, NULL, 0, LZMA_RUN);
if (ret == LZMA_STREAM_END) {
diff --git a/Utilities/cmliblzma/liblzma/common/index_encoder.c b/Utilities/cmliblzma/liblzma/common/index_encoder.c
index a6f8598..45919f0 100644
--- a/Utilities/cmliblzma/liblzma/common/index_encoder.c
+++ b/Utilities/cmliblzma/liblzma/common/index_encoder.c
@@ -207,7 +207,7 @@ lzma_index_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern LZMA_API(lzma_ret)
lzma_index_encoder(lzma_stream *strm, const lzma_index *i)
{
- lzma_next_strm_init1(lzma_index_encoder_init, strm, i);
+ lzma_next_strm_init(lzma_index_encoder_init, strm, i);
strm->internal->supported_actions[LZMA_RUN] = true;
strm->internal->supported_actions[LZMA_FINISH] = true;
@@ -220,10 +220,6 @@ extern LZMA_API(lzma_ret)
lzma_index_buffer_encode(const lzma_index *i,
uint8_t *out, size_t *out_pos, size_t out_size)
{
- lzma_coder coder;
- size_t out_start;
- lzma_ret ret;
-
// Validate the arguments.
if (i == NULL || out == NULL || out_pos == NULL || *out_pos > out_size)
return LZMA_PROG_ERROR;
@@ -234,12 +230,13 @@ lzma_index_buffer_encode(const lzma_index *i,
// The Index encoder needs just one small data structure so we can
// allocate it on stack.
+ lzma_coder coder;
index_encoder_reset(&coder, i);
// Do the actual encoding. This should never fail, but store
// the original *out_pos just in case.
- out_start = *out_pos;
- ret = index_encode(&coder, NULL, NULL, NULL, 0,
+ const size_t out_start = *out_pos;
+ lzma_ret ret = index_encode(&coder, NULL, NULL, NULL, 0,
out, out_pos, out_size, LZMA_RUN);
if (ret == LZMA_STREAM_END) {
diff --git a/Utilities/cmliblzma/liblzma/common/index_hash.c b/Utilities/cmliblzma/liblzma/common/index_hash.c
index 0cf86b3..e3e9386 100644
--- a/Utilities/cmliblzma/liblzma/common/index_hash.c
+++ b/Utilities/cmliblzma/liblzma/common/index_hash.c
@@ -124,14 +124,13 @@ static lzma_ret
hash_append(lzma_index_hash_info *info, lzma_vli unpadded_size,
lzma_vli uncompressed_size)
{
- const lzma_vli sizes[2] = { unpadded_size, uncompressed_size };
-
info->blocks_size += vli_ceil4(unpadded_size);
info->uncompressed_size += uncompressed_size;
info->index_list_size += lzma_vli_size(unpadded_size)
+ lzma_vli_size(uncompressed_size);
++info->count;
+ const lzma_vli sizes[2] = { unpadded_size, uncompressed_size };
lzma_check_update(&info->check, LZMA_CHECK_BEST,
(const uint8_t *)(sizes), sizeof(sizes));
@@ -174,9 +173,6 @@ extern LZMA_API(lzma_ret)
lzma_index_hash_decode(lzma_index_hash *index_hash, const uint8_t *in,
size_t *in_pos, size_t in_size)
{
- size_t in_start;
- lzma_ret ret;
-
// Catch zero input buffer here, because in contrast to Index encoder
// and decoder functions, applications call this function directly
// instead of via lzma_code(), which does the buffer checking.
@@ -186,8 +182,8 @@ lzma_index_hash_decode(lzma_index_hash *index_hash, const uint8_t *in,
// NOTE: This function has many similarities to index_encode() and
// index_decode() functions found from index_encoder.c and
// index_decoder.c. See the comments especially in index_encoder.c.
- in_start = *in_pos;
- ret = LZMA_OK;
+ const size_t in_start = *in_pos;
+ lzma_ret ret = LZMA_OK;
while (*in_pos < in_size)
switch (index_hash->sequence) {
diff --git a/Utilities/cmliblzma/liblzma/common/stream_buffer_decoder.c b/Utilities/cmliblzma/liblzma/common/stream_buffer_decoder.c
index 9e2e1da..ae75315 100644
--- a/Utilities/cmliblzma/liblzma/common/stream_buffer_decoder.c
+++ b/Utilities/cmliblzma/liblzma/common/stream_buffer_decoder.c
@@ -19,9 +19,6 @@ lzma_stream_buffer_decode(uint64_t *memlimit, uint32_t flags,
const uint8_t *in, size_t *in_pos, size_t in_size,
uint8_t *out, size_t *out_pos, size_t out_size)
{
- lzma_next_coder stream_decoder = LZMA_NEXT_CODER_INIT;
- lzma_ret ret;
-
// Sanity checks
if (in_pos == NULL || (in == NULL && *in_pos != in_size)
|| *in_pos > in_size || out_pos == NULL
@@ -36,7 +33,8 @@ lzma_stream_buffer_decode(uint64_t *memlimit, uint32_t flags,
// Initialize the Stream decoder.
// TODO: We need something to tell the decoder that it can use the
	// output buffer as workspace, and thus save a significant amount of RAM.
- ret = lzma_stream_decoder_init(
+ lzma_next_coder stream_decoder = LZMA_NEXT_CODER_INIT;
+ lzma_ret ret = lzma_stream_decoder_init(
&stream_decoder, allocator, *memlimit, flags);
if (ret == LZMA_OK) {
diff --git a/Utilities/cmliblzma/liblzma/common/stream_buffer_encoder.c b/Utilities/cmliblzma/liblzma/common/stream_buffer_encoder.c
index 8bca87f..2450ee2 100644
--- a/Utilities/cmliblzma/liblzma/common/stream_buffer_encoder.c
+++ b/Utilities/cmliblzma/liblzma/common/stream_buffer_encoder.c
@@ -45,10 +45,6 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check,
lzma_allocator *allocator, const uint8_t *in, size_t in_size,
uint8_t *out, size_t *out_pos_ptr, size_t out_size)
{
- lzma_stream_flags stream_flags = { 0 };
- lzma_block block = { 0 };
- size_t out_pos;
-
// Sanity checks
if (filters == NULL || (unsigned int)(check) > LZMA_CHECK_ID_MAX
|| (in == NULL && in_size != 0) || out == NULL
@@ -65,7 +61,7 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check,
// Use a local copy. We update *out_pos_ptr only if everything
// succeeds.
- out_pos = *out_pos_ptr;
+ size_t out_pos = *out_pos_ptr;
// Check that there's enough space for both Stream Header and
// Stream Footer.
@@ -77,7 +73,10 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check,
out_size -= LZMA_STREAM_HEADER_SIZE;
// Encode the Stream Header.
- stream_flags.check = check;
+ lzma_stream_flags stream_flags = {
+ .version = 0,
+ .check = check,
+ };
if (lzma_stream_header_encode(&stream_flags, out + out_pos)
!= LZMA_OK)
@@ -86,8 +85,11 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check,
out_pos += LZMA_STREAM_HEADER_SIZE;
// Encode a Block but only if there is at least one byte of input.
- block.check = check;
- block.filters = filters;
+ lzma_block block = {
+ .version = 0,
+ .check = check,
+ .filters = filters,
+ };
if (in_size > 0)
return_if_error(lzma_block_buffer_encode(&block, allocator,
@@ -95,8 +97,6 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check,
// Index
{
- lzma_ret ret;
-
// Create an Index. It will have one Record if there was
// at least one byte of input to encode. Otherwise the
// Index will be empty.
@@ -104,7 +104,7 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check,
if (i == NULL)
return LZMA_MEM_ERROR;
- ret = LZMA_OK;
+ lzma_ret ret = LZMA_OK;
if (in_size > 0)
ret = lzma_index_append(i, allocator,
diff --git a/Utilities/cmliblzma/liblzma/common/stream_decoder.c b/Utilities/cmliblzma/liblzma/common/stream_decoder.c
index 56a009b..37ea71e 100644
--- a/Utilities/cmliblzma/liblzma/common/stream_decoder.c
+++ b/Utilities/cmliblzma/liblzma/common/stream_decoder.c
@@ -106,8 +106,6 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
while (true)
switch (coder->sequence) {
case SEQ_STREAM_HEADER: {
- lzma_ret ret;
-
// Copy the Stream Header to the internal buffer.
lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
LZMA_STREAM_HEADER_SIZE);
@@ -119,7 +117,7 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
coder->pos = 0;
// Decode the Stream Header.
- ret = lzma_stream_header_decode(
+ const lzma_ret ret = lzma_stream_header_decode(
&coder->stream_flags, coder->buffer);
if (ret != LZMA_OK)
return ret == LZMA_FORMAT_ERROR && !coder->first_stream
@@ -156,11 +154,6 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
// Fall through
case SEQ_BLOCK_HEADER: {
- lzma_filter filters[LZMA_FILTERS_MAX + 1];
- uint64_t memusage;
- lzma_ret ret;
- size_t i;
-
if (*in_pos >= in_size)
return LZMA_OK;
@@ -195,6 +188,7 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
// Set up a buffer to hold the filter chain. Block Header
// decoder will initialize all members of this array so
// we don't need to do it here.
+ lzma_filter filters[LZMA_FILTERS_MAX + 1];
coder->block_options.filters = filters;
// Decode the Block Header.
@@ -202,7 +196,9 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
allocator, coder->buffer));
// Check the memory usage limit.
- memusage = lzma_raw_decoder_memusage(filters);
+ const uint64_t memusage = lzma_raw_decoder_memusage(filters);
+ lzma_ret ret;
+
if (memusage == UINT64_MAX) {
// One or more unknown Filter IDs.
ret = LZMA_OPTIONS_ERROR;
@@ -228,7 +224,7 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
// Free the allocated filter options since they are needed
// only to initialize the Block decoder.
- for (i = 0; i < LZMA_FILTERS_MAX; ++i)
+ for (size_t i = 0; i < LZMA_FILTERS_MAX; ++i)
lzma_free(filters[i].options, allocator);
coder->block_options.filters = NULL;
@@ -264,8 +260,6 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
}
case SEQ_INDEX: {
- lzma_ret ret;
-
// If we don't have any input, don't call
// lzma_index_hash_decode() since it would return
// LZMA_BUF_ERROR, which we must not do here.
@@ -274,7 +268,7 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
// Decode the Index and compare it to the hash calculated
// from the sizes of the Blocks (if any).
- ret = lzma_index_hash_decode(coder->index_hash,
+ const lzma_ret ret = lzma_index_hash_decode(coder->index_hash,
in, in_pos, in_size);
if (ret != LZMA_STREAM_END)
return ret;
@@ -285,9 +279,6 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
// Fall through
case SEQ_STREAM_FOOTER: {
- lzma_stream_flags footer_flags;
- lzma_ret ret;
-
// Copy the Stream Footer to the internal buffer.
lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
LZMA_STREAM_HEADER_SIZE);
@@ -301,7 +292,8 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
// Decode the Stream Footer. The decoder gives
// LZMA_FORMAT_ERROR if the magic bytes don't match,
// so convert that return code to LZMA_DATA_ERROR.
- ret = lzma_stream_footer_decode(
+ lzma_stream_flags footer_flags;
+ const lzma_ret ret = lzma_stream_footer_decode(
&footer_flags, coder->buffer);
if (ret != LZMA_OK)
return ret == LZMA_FORMAT_ERROR
@@ -450,7 +442,7 @@ lzma_stream_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern LZMA_API(lzma_ret)
lzma_stream_decoder(lzma_stream *strm, uint64_t memlimit, uint32_t flags)
{
- lzma_next_strm_init2(lzma_stream_decoder_init, strm, memlimit, flags);
+ lzma_next_strm_init(lzma_stream_decoder_init, strm, memlimit, flags);
strm->internal->supported_actions[LZMA_RUN] = true;
strm->internal->supported_actions[LZMA_FINISH] = true;
diff --git a/Utilities/cmliblzma/liblzma/common/stream_encoder.c b/Utilities/cmliblzma/liblzma/common/stream_encoder.c
index e2f2e10..97a7a23 100644
--- a/Utilities/cmliblzma/liblzma/common/stream_encoder.c
+++ b/Utilities/cmliblzma/liblzma/common/stream_encoder.c
@@ -147,8 +147,6 @@ stream_encode(lzma_coder *coder, lzma_allocator *allocator,
}
case SEQ_BLOCK_ENCODE: {
- lzma_vli unpadded_size;
-
static const lzma_action convert[4] = {
LZMA_RUN,
LZMA_SYNC_FLUSH,
@@ -164,7 +162,7 @@ stream_encode(lzma_coder *coder, lzma_allocator *allocator,
return ret;
// Add a new Index Record.
- unpadded_size = lzma_block_unpadded_size(
+ const lzma_vli unpadded_size = lzma_block_unpadded_size(
&coder->block_options);
assert(unpadded_size != 0);
return_if_error(lzma_index_append(coder->index, allocator,
@@ -176,12 +174,6 @@ stream_encode(lzma_coder *coder, lzma_allocator *allocator,
}
case SEQ_INDEX_ENCODE: {
- const lzma_stream_flags stream_flags = {
- 0,
- lzma_index_size(coder->index),
- coder->block_options.check,
- };
-
// Call the Index encoder. It doesn't take any input, so
// those pointers can be NULL.
const lzma_ret ret = coder->index_encoder.code(
@@ -192,6 +184,11 @@ stream_encode(lzma_coder *coder, lzma_allocator *allocator,
return ret;
// Encode the Stream Footer into coder->buffer.
+ const lzma_stream_flags stream_flags = {
+ .version = 0,
+ .backward_size = lzma_index_size(coder->index),
+ .check = coder->block_options.check,
+ };
if (lzma_stream_footer_encode(&stream_flags, coder->buffer)
!= LZMA_OK)
@@ -214,13 +211,11 @@ stream_encode(lzma_coder *coder, lzma_allocator *allocator,
static void
stream_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
{
- size_t i;
-
lzma_next_end(&coder->block_encoder, allocator);
lzma_next_end(&coder->index_encoder, allocator);
lzma_index_end(coder->index, allocator);
- for (i = 0; coder->filters[i].id != LZMA_VLI_UNKNOWN; ++i)
+ for (size_t i = 0; coder->filters[i].id != LZMA_VLI_UNKNOWN; ++i)
lzma_free(coder->filters[i].options, allocator);
lzma_free(coder, allocator);
@@ -233,18 +228,14 @@ stream_encoder_update(lzma_coder *coder, lzma_allocator *allocator,
const lzma_filter *filters,
const lzma_filter *reversed_filters)
{
- size_t i;
-
if (coder->sequence <= SEQ_BLOCK_INIT) {
- lzma_ret ret;
-
// There is no incomplete Block waiting to be finished,
// thus we can change the whole filter chain. Start by
// trying to initialize the Block encoder with the new
// chain. This way we detect if the chain is valid.
coder->block_encoder_is_initialized = false;
coder->block_options.filters = (lzma_filter *)(filters);
- ret = block_encoder_init(coder, allocator);
+ const lzma_ret ret = block_encoder_init(coder, allocator);
coder->block_options.filters = coder->filters;
if (ret != LZMA_OK)
return ret;
@@ -264,7 +255,7 @@ stream_encoder_update(lzma_coder *coder, lzma_allocator *allocator,
}
// Free the copy of the old chain and make a copy of the new chain.
- for (i = 0; coder->filters[i].id != LZMA_VLI_UNKNOWN; ++i)
+ for (size_t i = 0; coder->filters[i].id != LZMA_VLI_UNKNOWN; ++i)
lzma_free(coder->filters[i].options, allocator);
return lzma_filters_copy(filters, coder->filters, allocator);
@@ -275,8 +266,6 @@ extern lzma_ret
lzma_stream_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
const lzma_filter *filters, lzma_check check)
{
- lzma_stream_flags stream_flags = { 0, 0, check };
-
lzma_next_coder_init(&lzma_stream_encoder_init, next, allocator);
if (filters == NULL)
@@ -309,6 +298,10 @@ lzma_stream_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
return LZMA_MEM_ERROR;
// Encode the Stream Header
+ lzma_stream_flags stream_flags = {
+ .version = 0,
+ .check = check,
+ };
return_if_error(lzma_stream_header_encode(
&stream_flags, next->coder->buffer));
@@ -327,7 +320,7 @@ extern LZMA_API(lzma_ret)
lzma_stream_encoder(lzma_stream *strm,
const lzma_filter *filters, lzma_check check)
{
- lzma_next_strm_init2(lzma_stream_encoder_init, strm, filters, check);
+ lzma_next_strm_init(lzma_stream_encoder_init, strm, filters, check);
strm->internal->supported_actions[LZMA_RUN] = true;
strm->internal->supported_actions[LZMA_SYNC_FLUSH] = true;
diff --git a/Utilities/cmliblzma/liblzma/common/stream_flags_decoder.c b/Utilities/cmliblzma/liblzma/common/stream_flags_decoder.c
index 8cf48a4..1bc2f97 100644
--- a/Utilities/cmliblzma/liblzma/common/stream_flags_decoder.c
+++ b/Utilities/cmliblzma/liblzma/common/stream_flags_decoder.c
@@ -30,15 +30,13 @@ stream_flags_decode(lzma_stream_flags *options, const uint8_t *in)
extern LZMA_API(lzma_ret)
lzma_stream_header_decode(lzma_stream_flags *options, const uint8_t *in)
{
- uint32_t crc;
-
// Magic
if (memcmp(in, lzma_header_magic, sizeof(lzma_header_magic)) != 0)
return LZMA_FORMAT_ERROR;
// Verify the CRC32 so we can distinguish between corrupt
// and unsupported files.
- crc = lzma_crc32(in + sizeof(lzma_header_magic),
+ const uint32_t crc = lzma_crc32(in + sizeof(lzma_header_magic),
LZMA_STREAM_FLAGS_SIZE, 0);
if (crc != unaligned_read32le(in + sizeof(lzma_header_magic)
+ LZMA_STREAM_FLAGS_SIZE))
@@ -61,15 +59,13 @@ lzma_stream_header_decode(lzma_stream_flags *options, const uint8_t *in)
extern LZMA_API(lzma_ret)
lzma_stream_footer_decode(lzma_stream_flags *options, const uint8_t *in)
{
- uint32_t crc;
-
// Magic
if (memcmp(in + sizeof(uint32_t) * 2 + LZMA_STREAM_FLAGS_SIZE,
lzma_footer_magic, sizeof(lzma_footer_magic)) != 0)
return LZMA_FORMAT_ERROR;
// CRC32
- crc = lzma_crc32(in + sizeof(uint32_t),
+ const uint32_t crc = lzma_crc32(in + sizeof(uint32_t),
sizeof(uint32_t) + LZMA_STREAM_FLAGS_SIZE, 0);
if (crc != unaligned_read32le(in))
return LZMA_DATA_ERROR;
diff --git a/Utilities/cmliblzma/liblzma/common/stream_flags_encoder.c b/Utilities/cmliblzma/liblzma/common/stream_flags_encoder.c
index 290339e..4e71715 100644
--- a/Utilities/cmliblzma/liblzma/common/stream_flags_encoder.c
+++ b/Utilities/cmliblzma/liblzma/common/stream_flags_encoder.c
@@ -29,8 +29,6 @@ stream_flags_encode(const lzma_stream_flags *options, uint8_t *out)
extern LZMA_API(lzma_ret)
lzma_stream_header_encode(const lzma_stream_flags *options, uint8_t *out)
{
- uint32_t crc;
-
assert(sizeof(lzma_header_magic) + LZMA_STREAM_FLAGS_SIZE
+ 4 == LZMA_STREAM_HEADER_SIZE);
@@ -45,7 +43,7 @@ lzma_stream_header_encode(const lzma_stream_flags *options, uint8_t *out)
return LZMA_PROG_ERROR;
// CRC32 of the Stream Header
- crc = lzma_crc32(out + sizeof(lzma_header_magic),
+ const uint32_t crc = lzma_crc32(out + sizeof(lzma_header_magic),
LZMA_STREAM_FLAGS_SIZE, 0);
unaligned_write32le(out + sizeof(lzma_header_magic)
@@ -58,8 +56,6 @@ lzma_stream_header_encode(const lzma_stream_flags *options, uint8_t *out)
extern LZMA_API(lzma_ret)
lzma_stream_footer_encode(const lzma_stream_flags *options, uint8_t *out)
{
- uint32_t crc;
-
assert(2 * 4 + LZMA_STREAM_FLAGS_SIZE + sizeof(lzma_footer_magic)
== LZMA_STREAM_HEADER_SIZE);
@@ -77,7 +73,7 @@ lzma_stream_footer_encode(const lzma_stream_flags *options, uint8_t *out)
return LZMA_PROG_ERROR;
// CRC32
- crc = lzma_crc32(
+ const uint32_t crc = lzma_crc32(
out + 4, 4 + LZMA_STREAM_FLAGS_SIZE, 0);
unaligned_write32le(out, crc);
diff --git a/Utilities/cmliblzma/liblzma/common/vli_size.c b/Utilities/cmliblzma/liblzma/common/vli_size.c
index 8b931e4..ec1b4fa 100644
--- a/Utilities/cmliblzma/liblzma/common/vli_size.c
+++ b/Utilities/cmliblzma/liblzma/common/vli_size.c
@@ -16,11 +16,10 @@
extern LZMA_API(uint32_t)
lzma_vli_size(lzma_vli vli)
{
- uint32_t i = 0;
-
if (vli > LZMA_VLI_MAX)
return 0;
+ uint32_t i = 0;
do {
vli >>= 7;
++i;
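For reference, the loop kept in lzma_vli_size() above simply counts how many 7-bit groups the value needs, which equals the encoded length in bytes of an .xz variable-length integer. A standalone sketch of the same arithmetic with a few spot checks; vli_size_sketch() is an illustrative name and omits the LZMA_VLI_MAX range check that the real function performs first:

#include <assert.h>
#include <stdint.h>

// One encoded byte carries 7 payload bits, so keep shifting by 7 until
// the value is exhausted and count the iterations.
static uint32_t
vli_size_sketch(uint64_t vli)
{
	uint32_t i = 0;
	do {
		vli >>= 7;
		++i;
	} while (vli != 0);
	return i;
}

int
main(void)
{
	assert(vli_size_sketch(0) == 1);      // even zero takes one byte
	assert(vli_size_sketch(127) == 1);    // fits in 7 bits
	assert(vli_size_sketch(128) == 2);    // needs a second byte
	assert(vli_size_sketch(UINT64_C(1) << 62) == 9);
	return 0;
}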
diff --git a/Utilities/cmliblzma/liblzma/delta/delta_common.c b/Utilities/cmliblzma/liblzma/delta/delta_common.c
index 803e674..930ad21 100644
--- a/Utilities/cmliblzma/liblzma/delta/delta_common.c
+++ b/Utilities/cmliblzma/liblzma/delta/delta_common.c
@@ -27,8 +27,6 @@ extern lzma_ret
lzma_delta_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
const lzma_filter_info *filters)
{
- const lzma_options_delta *opt;
-
// Allocate memory for the decoder if needed.
if (next->coder == NULL) {
next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
@@ -45,7 +43,7 @@ lzma_delta_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
return LZMA_OPTIONS_ERROR;
// Set the delta distance.
- opt = filters[0].options;
+ const lzma_options_delta *opt = filters[0].options;
next->coder->distance = opt->dist;
// Initialize the rest of the variables.
diff --git a/Utilities/cmliblzma/liblzma/delta/delta_decoder.c b/Utilities/cmliblzma/liblzma/delta/delta_decoder.c
index 582e58e..2cf60d5 100644
--- a/Utilities/cmliblzma/liblzma/delta/delta_decoder.c
+++ b/Utilities/cmliblzma/liblzma/delta/delta_decoder.c
@@ -17,10 +17,9 @@
static void
decode_buffer(lzma_coder *coder, uint8_t *buffer, size_t size)
{
- size_t i;
const size_t distance = coder->distance;
- for (i = 0; i < size; ++i) {
+ for (size_t i = 0; i < size; ++i) {
buffer[i] += coder->history[(distance + coder->pos) & 0xFF];
coder->history[coder->pos-- & 0xFF] = buffer[i];
}
@@ -33,12 +32,11 @@ delta_decode(lzma_coder *coder, lzma_allocator *allocator,
size_t in_size, uint8_t *restrict out,
size_t *restrict out_pos, size_t out_size, lzma_action action)
{
- const size_t out_start = *out_pos;
- lzma_ret ret;
-
assert(coder->next.code != NULL);
- ret = coder->next.code(coder->next.coder, allocator,
+ const size_t out_start = *out_pos;
+
+ const lzma_ret ret = coder->next.code(coder->next.coder, allocator,
in, in_pos, in_size, out, out_pos, out_size,
action);
@@ -61,12 +59,11 @@ extern lzma_ret
lzma_delta_props_decode(void **options, lzma_allocator *allocator,
const uint8_t *props, size_t props_size)
{
- lzma_options_delta *opt;
-
if (props_size != 1)
return LZMA_OPTIONS_ERROR;
- opt = lzma_alloc(sizeof(lzma_options_delta), allocator);
+ lzma_options_delta *opt
+ = lzma_alloc(sizeof(lzma_options_delta), allocator);
if (opt == NULL)
return LZMA_MEM_ERROR;
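As a reminder of what decode_buffer() above computes: the delta filter stores each byte as the difference to the byte dist positions earlier, and decoding adds that earlier, already reconstructed byte back; positions before the start behave as zero because the history buffer starts zeroed. Below is a simplified flat-buffer sketch of the round trip, without the 256-byte history ring the real coder uses; delta_encode_sketch() and delta_decode_sketch() are illustrative names only:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

// Encode: walk backwards so buf[i - dist] is still the original byte
// when it is subtracted.
static void
delta_encode_sketch(uint8_t *buf, size_t size, size_t dist)
{
	for (size_t i = size; i-- > dist; )
		buf[i] -= buf[i - dist];
}

// Decode: walk forwards so buf[i - dist] is already reconstructed
// when it is added back.
static void
delta_decode_sketch(uint8_t *buf, size_t size, size_t dist)
{
	for (size_t i = dist; i < size; ++i)
		buf[i] += buf[i - dist];
}

int
main(void)
{
	uint8_t data[] = { 10, 12, 14, 13, 13, 20 };
	uint8_t copy[sizeof(data)];
	memcpy(copy, data, sizeof(data));

	delta_encode_sketch(copy, sizeof(copy), 1);
	delta_decode_sketch(copy, sizeof(copy), 1);
	assert(memcmp(copy, data, sizeof(data)) == 0);
	return 0;
}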
diff --git a/Utilities/cmliblzma/liblzma/delta/delta_encoder.c b/Utilities/cmliblzma/liblzma/delta/delta_encoder.c
index 8b9e4a8..15c7951 100644
--- a/Utilities/cmliblzma/liblzma/delta/delta_encoder.c
+++ b/Utilities/cmliblzma/liblzma/delta/delta_encoder.c
@@ -21,10 +21,9 @@ static void
copy_and_encode(lzma_coder *coder,
const uint8_t *restrict in, uint8_t *restrict out, size_t size)
{
- size_t i;
const size_t distance = coder->distance;
- for (i = 0; i < size; ++i) {
+ for (size_t i = 0; i < size; ++i) {
const uint8_t tmp = coder->history[
(distance + coder->pos) & 0xFF];
coder->history[coder->pos-- & 0xFF] = in[i];
@@ -38,10 +37,9 @@ copy_and_encode(lzma_coder *coder,
static void
encode_in_place(lzma_coder *coder, uint8_t *buffer, size_t size)
{
- size_t i;
const size_t distance = coder->distance;
- for (i = 0; i < size; ++i) {
+ for (size_t i = 0; i < size; ++i) {
const uint8_t tmp = coder->history[
(distance + coder->pos) & 0xFF];
coder->history[coder->pos-- & 0xFF] = buffer[i];
@@ -111,13 +109,12 @@ lzma_delta_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern lzma_ret
lzma_delta_props_encode(const void *options, uint8_t *out)
{
- const lzma_options_delta *opt = options;
-
// The caller must have already validated the options, so it's
// LZMA_PROG_ERROR if they are invalid.
if (lzma_delta_coder_memusage(options) == UINT64_MAX)
return LZMA_PROG_ERROR;
+ const lzma_options_delta *opt = options;
out[0] = opt->dist - LZMA_DELTA_DIST_MIN;
return LZMA_OK;
diff --git a/Utilities/cmliblzma/liblzma/lz/lz_decoder.c b/Utilities/cmliblzma/liblzma/lz/lz_decoder.c
index f45984e..d74085c 100644
--- a/Utilities/cmliblzma/liblzma/lz/lz_decoder.c
+++ b/Utilities/cmliblzma/liblzma/lz/lz_decoder.c
@@ -69,17 +69,13 @@ decode_buffer(lzma_coder *coder,
size_t *restrict out_pos, size_t out_size)
{
while (true) {
- size_t copy_size;
- size_t dict_start;
- lzma_ret ret;
-
// Wrap the dictionary if needed.
if (coder->dict.pos == coder->dict.size)
coder->dict.pos = 0;
// Store the current dictionary position. It is needed to know
// where to start copying to the out[] buffer.
- dict_start = coder->dict.pos;
+ const size_t dict_start = coder->dict.pos;
// Calculate how much we allow coder->lz.code() to decode.
// It must not decode past the end of the dictionary
@@ -90,13 +86,13 @@ decode_buffer(lzma_coder *coder,
coder->dict.size - coder->dict.pos);
// Call the coder->lz.code() to do the actual decoding.
- ret = coder->lz.code(
+ const lzma_ret ret = coder->lz.code(
coder->lz.coder, &coder->dict,
in, in_pos, in_size);
// Copy the decoded data from the dictionary to the out[]
// buffer.
- copy_size = coder->dict.pos - dict_start;
+ const size_t copy_size = coder->dict.pos - dict_start;
assert(copy_size <= out_size - *out_pos);
memcpy(out + *out_pos, coder->dict.buf + dict_start,
copy_size);
@@ -143,15 +139,13 @@ lz_decode(lzma_coder *coder,
// We aren't the last coder in the chain, we need to decode
// our input to a temporary buffer.
while (*out_pos < out_size) {
- lzma_ret ret;
-
// Fill the temporary buffer if it is empty.
if (!coder->next_finished
&& coder->temp.pos == coder->temp.size) {
coder->temp.pos = 0;
coder->temp.size = 0;
- ret = coder->next.code(
+ const lzma_ret ret = coder->next.code(
coder->next.coder,
allocator, in, in_pos, in_size,
coder->temp.buffer, &coder->temp.size,
@@ -173,7 +167,7 @@ lz_decode(lzma_coder *coder,
return LZMA_OK;
}
- ret = decode_buffer(coder, coder->temp.buffer,
+ const lzma_ret ret = decode_buffer(coder, coder->temp.buffer,
&coder->temp.pos, coder->temp.size,
out, out_pos, out_size);
@@ -212,8 +206,6 @@ lzma_lz_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
lzma_allocator *allocator, const void *options,
lzma_lz_options *lz_options))
{
- lzma_lz_options lz_options;
-
// Allocate the base structure if it isn't already allocated.
if (next->coder == NULL) {
next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
@@ -231,6 +223,7 @@ lzma_lz_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
// Allocate and initialize the LZ-based decoder. It will also give
// us the dictionary size.
+ lzma_lz_options lz_options;
return_if_error(lz_init(&next->coder->lz, allocator,
filters[0].options, &lz_options));
diff --git a/Utilities/cmliblzma/liblzma/lz/lz_decoder.h b/Utilities/cmliblzma/liblzma/lz/lz_decoder.h
index 2d19cac..7266e80 100644
--- a/Utilities/cmliblzma/liblzma/lz/lz_decoder.h
+++ b/Utilities/cmliblzma/liblzma/lz/lz_decoder.h
@@ -72,14 +72,14 @@ typedef struct {
} lzma_lz_decoder;
-static const lzma_lz_decoder LZMA_LZ_DECODER_INIT =
- {
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- };
+#define LZMA_LZ_DECODER_INIT \
+ (lzma_lz_decoder){ \
+ .coder = NULL, \
+ .code = NULL, \
+ .reset = NULL, \
+ .set_uncompressed = NULL, \
+ .end = NULL, \
+ }
extern lzma_ret lzma_lz_decoder_init(lzma_next_coder *next,
@@ -151,15 +151,13 @@ dict_repeat(lzma_dict *dict, uint32_t distance, uint32_t *len)
dict->pos += left;
} else {
- uint32_t copy_pos;
- uint32_t copy_size;
-
		// The bigger the dictionary, the more rarely this
		// case occurs. We need to "wrap" the dict, thus
// we might need two memcpy() to copy all the data.
assert(dict->full == dict->size);
- copy_pos = dict->pos - distance - 1 + dict->size;
- copy_size = dict->size - copy_pos;
+ const uint32_t copy_pos
+ = dict->pos - distance - 1 + dict->size;
+ uint32_t copy_size = dict->size - copy_pos;
if (copy_size < left) {
memmove(dict->buf + dict->pos, dict->buf + copy_pos,
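The LZMA_LZ_DECODER_INIT change above swaps a static const object for a C99 compound literal wrapped in a macro, which can be used both to initialize a new lzma_lz_decoder and to reset an existing one by plain assignment. A self-contained sketch of the idiom; the toy_decoder type is a hypothetical stand-in, not the liblzma struct:

#include <assert.h>
#include <stddef.h>

/* Hypothetical decoder-like struct; not the liblzma definition. */
typedef struct {
	void *coder;
	int (*code)(void *coder);
	void (*end)(void *coder);
} toy_decoder;

// A compound literal works as an initializer and as the right-hand side
// of an assignment, unlike a bare braced initializer list.
#define TOY_DECODER_INIT \
	(toy_decoder){ \
		.coder = NULL, \
		.code = NULL, \
		.end = NULL, \
	}

int
main(void)
{
	toy_decoder d = TOY_DECODER_INIT;   // initialization
	d.coder = &d;                       // ...use the object...
	d = TOY_DECODER_INIT;               // reset by plain assignment
	assert(d.coder == NULL && d.code == NULL && d.end == NULL);
	return 0;
}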
diff --git a/Utilities/cmliblzma/liblzma/lz/lz_encoder.c b/Utilities/cmliblzma/liblzma/lz/lz_encoder.c
index a735c21..e240696 100644
--- a/Utilities/cmliblzma/liblzma/lz/lz_encoder.c
+++ b/Utilities/cmliblzma/liblzma/lz/lz_encoder.c
@@ -43,18 +43,16 @@ struct lzma_coder_s {
static void
move_window(lzma_mf *mf)
{
- uint32_t move_offset;
- size_t move_size;
-
// Align the move to a multiple of 16 bytes. Some LZ-based encoders
// like LZMA use the lowest bits of mf->read_pos to know the
// alignment of the uncompressed data. We also get better speed
// for memmove() with aligned buffers.
assert(mf->read_pos > mf->keep_size_before);
- move_offset = (mf->read_pos - mf->keep_size_before) & ~UINT32_C(15);
+ const uint32_t move_offset
+ = (mf->read_pos - mf->keep_size_before) & ~UINT32_C(15);
assert(mf->write_pos > move_offset);
- move_size = mf->write_pos - move_offset;
+ const size_t move_size = mf->write_pos - move_offset;
assert(move_offset + move_size <= mf->size);
@@ -81,9 +79,6 @@ static lzma_ret
fill_window(lzma_coder *coder, lzma_allocator *allocator, const uint8_t *in,
size_t *in_pos, size_t in_size, lzma_action action)
{
- size_t write_pos;
- lzma_ret ret;
-
assert(coder->mf.read_pos <= coder->mf.write_pos);
// Move the sliding window if needed.
@@ -93,7 +88,8 @@ fill_window(lzma_coder *coder, lzma_allocator *allocator, const uint8_t *in,
// Maybe this is ugly, but lzma_mf uses uint32_t for most things
// (which I find cleanest), but we need size_t here when filling
// the history window.
- write_pos = coder->mf.write_pos;
+ size_t write_pos = coder->mf.write_pos;
+ lzma_ret ret;
if (coder->next.code == NULL) {
// Not using a filter, simply memcpy() as much as possible.
lzma_bufcpy(in, in_pos, in_size, coder->mf.buffer,
@@ -160,8 +156,6 @@ lz_encode(lzma_coder *coder, lzma_allocator *allocator,
{
while (*out_pos < out_size
&& (*in_pos < in_size || action != LZMA_RUN)) {
- lzma_ret ret;
-
// Read more data to coder->mf.buffer if needed.
if (coder->mf.action == LZMA_RUN && coder->mf.read_pos
>= coder->mf.read_limit)
@@ -169,7 +163,7 @@ lz_encode(lzma_coder *coder, lzma_allocator *allocator,
in, in_pos, in_size, action));
// Encode
- ret = coder->lz.code(coder->lz.coder,
+ const lzma_ret ret = coder->lz.code(coder->lz.coder,
&coder->mf, out, out_pos, out_size);
if (ret != LZMA_OK) {
// Setting this to LZMA_RUN for cases when we are
@@ -188,14 +182,6 @@ static bool
lz_encoder_prepare(lzma_mf *mf, lzma_allocator *allocator,
const lzma_lz_options *lz_options)
{
- bool is_bt;
- uint32_t new_count;
- uint32_t reserve;
- uint32_t old_size;
- uint32_t hash_bytes;
- uint32_t hs;
- uint32_t old_count;
-
// For now, the dictionary size is limited to 1.5 GiB. This may grow
// in the future if needed, but it needs a little more work than just
// changing this check.
@@ -221,14 +207,14 @@ lz_encoder_prepare(lzma_mf *mf, lzma_allocator *allocator,
// to size_t.
// - Memory usage calculation needs something too, e.g. use uint64_t
// for mf->size.
- reserve = lz_options->dict_size / 2;
+ uint32_t reserve = lz_options->dict_size / 2;
if (reserve > (UINT32_C(1) << 30))
reserve /= 2;
reserve += (lz_options->before_size + lz_options->match_len_max
+ lz_options->after_size) / 2 + (UINT32_C(1) << 19);
- old_size = mf->size;
+ const uint32_t old_size = mf->size;
mf->size = mf->keep_size_before + reserve + mf->keep_size_after;
// Deallocate the old history buffer if it exists but has different
@@ -298,11 +284,12 @@ lz_encoder_prepare(lzma_mf *mf, lzma_allocator *allocator,
// Calculate the sizes of mf->hash and mf->son and check that
// nice_len is big enough for the selected match finder.
- hash_bytes = lz_options->match_finder & 0x0F;
+ const uint32_t hash_bytes = lz_options->match_finder & 0x0F;
if (hash_bytes > mf->nice_len)
return true;
- is_bt = (lz_options->match_finder & 0x10) != 0;
+ const bool is_bt = (lz_options->match_finder & 0x10) != 0;
+ uint32_t hs;
if (hash_bytes == 2) {
hs = 0xFFFF;
@@ -344,13 +331,13 @@ lz_encoder_prepare(lzma_mf *mf, lzma_allocator *allocator,
// hash_size_sum + sons_count cannot overflow.
assert(hs < UINT32_MAX / 5);
- old_count = mf->hash_size_sum + mf->sons_count;
+ const uint32_t old_count = mf->hash_size_sum + mf->sons_count;
mf->hash_size_sum = hs;
mf->sons_count = mf->cyclic_size;
if (is_bt)
mf->sons_count *= 2;
- new_count = mf->hash_size_sum + mf->sons_count;
+ const uint32_t new_count = mf->hash_size_sum + mf->sons_count;
// Deallocate the old hash array if it exists and has different size
// than what is needed now.
@@ -376,8 +363,6 @@ static bool
lz_encoder_init(lzma_mf *mf, lzma_allocator *allocator,
const lzma_lz_options *lz_options)
{
- size_t alloc_count;
-
// Allocate the history buffer.
if (mf->buffer == NULL) {
mf->buffer = lzma_alloc(mf->size, allocator);
@@ -397,7 +382,7 @@ lz_encoder_init(lzma_mf *mf, lzma_allocator *allocator,
mf->pending = 0;
// Allocate match finder's hash array.
- alloc_count = mf->hash_size_sum + mf->sons_count;
+ const size_t alloc_count = mf->hash_size_sum + mf->sons_count;
#if UINT32_MAX >= SIZE_MAX / 4
// Check for integer overflow. (Huge dictionaries are not
@@ -457,7 +442,12 @@ extern uint64_t
lzma_lz_encoder_memusage(const lzma_lz_options *lz_options)
{
// Old buffers must not exist when calling lz_encoder_prepare().
- lzma_mf mf = { NULL };
+ lzma_mf mf = {
+ .buffer = NULL,
+ .hash = NULL,
+ .hash_size_sum = 0,
+ .sons_count = 0,
+ };
// Setup the size information into mf.
if (lz_encoder_prepare(&mf, NULL, lz_options))
@@ -511,8 +501,6 @@ lzma_lz_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
lzma_allocator *allocator, const void *options,
lzma_lz_options *lz_options))
{
- lzma_lz_options lz_options;
-
#ifdef HAVE_SMALL
// We need that the CRC32 table has been initialized.
lzma_crc32_init();
@@ -541,6 +529,7 @@ lzma_lz_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
}
// Initialize the LZ-based encoder.
+ lzma_lz_options lz_options;
return_if_error(lz_init(&next->coder->lz, allocator,
filters[0].options, &lz_options));
diff --git a/Utilities/cmliblzma/liblzma/lz/lz_encoder.h b/Utilities/cmliblzma/liblzma/lz/lz_encoder.h
index 647f5e2..741c453 100644
--- a/Utilities/cmliblzma/liblzma/lz/lz_encoder.h
+++ b/Utilities/cmliblzma/liblzma/lz/lz_encoder.h
@@ -218,7 +218,7 @@ typedef struct {
 /// Get pointer to the first byte not yet run through the match finder
-static inline uint8_t *
+static inline const uint8_t *
mf_ptr(const lzma_mf *mf)
{
return mf->buffer + mf->read_pos;
diff --git a/Utilities/cmliblzma/liblzma/lz/lz_encoder_hash.h b/Utilities/cmliblzma/liblzma/lz/lz_encoder_hash.h
index de17c54..342a333 100644
--- a/Utilities/cmliblzma/liblzma/lz/lz_encoder_hash.h
+++ b/Utilities/cmliblzma/liblzma/lz/lz_encoder_hash.h
@@ -39,22 +39,25 @@
// Endianness doesn't matter in hash_2_calc() (no effect on the output).
#ifdef TUKLIB_FAST_UNALIGNED_ACCESS
# define hash_2_calc() \
- hash_value = *(const uint16_t *)(cur)
+ const uint32_t hash_value = *(const uint16_t *)(cur)
#else
# define hash_2_calc() \
- hash_value = (uint32_t)(cur[0]) | ((uint32_t)(cur[1]) << 8)
+ const uint32_t hash_value \
+ = (uint32_t)(cur[0]) | ((uint32_t)(cur[1]) << 8)
#endif
#define hash_3_calc() \
- temp = hash_table[cur[0]] ^ cur[1]; \
- hash_2_value = temp & HASH_2_MASK; \
- hash_value = (temp ^ ((uint32_t)(cur[2]) << 8)) & mf->hash_mask
+ const uint32_t temp = hash_table[cur[0]] ^ cur[1]; \
+ const uint32_t hash_2_value = temp & HASH_2_MASK; \
+ const uint32_t hash_value \
+ = (temp ^ ((uint32_t)(cur[2]) << 8)) & mf->hash_mask
#define hash_4_calc() \
- temp = hash_table[cur[0]] ^ cur[1]; \
- hash_2_value = temp & HASH_2_MASK; \
- hash_3_value = (temp ^ ((uint32_t)(cur[2]) << 8)) & HASH_3_MASK; \
- hash_value = (temp ^ ((uint32_t)(cur[2]) << 8) \
+ const uint32_t temp = hash_table[cur[0]] ^ cur[1]; \
+ const uint32_t hash_2_value = temp & HASH_2_MASK; \
+ const uint32_t hash_3_value \
+ = (temp ^ ((uint32_t)(cur[2]) << 8)) & HASH_3_MASK; \
+ const uint32_t hash_value = (temp ^ ((uint32_t)(cur[2]) << 8) \
^ (hash_table[cur[3]] << 5)) & mf->hash_mask
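After the change above, the hash_*_calc() macros declare their own const results (hash_value, hash_2_value, hash_3_value, temp) instead of assigning to locals the caller had to pre-declare, which is why those declarations disappear from the match finders further down. A generic sketch of the idiom with a made-up hash2_new() macro, including its main caveat: a macro that declares its result can be expanded only once per scope:

#include <assert.h>
#include <stdint.h>

// Before: the caller declared hash_value and the macro only assigned it.
// After (sketch): the macro declares the const result itself, so callers
// cannot forget the declaration, at the cost of one expansion per scope.
#define hash2_new(cur) \
	const uint32_t hash_value \
		= (uint32_t)((cur)[0]) | ((uint32_t)((cur)[1]) << 8)

int
main(void)
{
	const uint8_t buf[2] = { 0x34, 0x12 };
	hash2_new(buf);
	assert(hash_value == 0x1234);
	return 0;
}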
diff --git a/Utilities/cmliblzma/liblzma/lz/lz_encoder_mf.c b/Utilities/cmliblzma/liblzma/lz/lz_encoder_mf.c
index 50c3459..f82a1c1 100644
--- a/Utilities/cmliblzma/liblzma/lz/lz_encoder_mf.c
+++ b/Utilities/cmliblzma/liblzma/lz/lz_encoder_mf.c
@@ -32,9 +32,8 @@ lzma_mf_find(lzma_mf *mf, uint32_t *count_ptr, lzma_match *matches)
if (count > 0) {
#ifndef NDEBUG
- uint32_t i;
// Validate the matches.
- for (i = 0; i < count; ++i) {
+ for (uint32_t i = 0; i < count; ++i) {
assert(matches[i].len <= mf->nice_len);
assert(matches[i].dist < mf->read_pos);
assert(memcmp(mf_ptr(mf) - 1,
@@ -50,9 +49,6 @@ lzma_mf_find(lzma_mf *mf, uint32_t *count_ptr, lzma_match *matches)
// If a match of maximum search length was found, try to
// extend the match to maximum possible length.
if (len_best == mf->nice_len) {
- uint8_t *p1;
- uint8_t *p2;
-
// The limit for the match length is either the
// maximum match length supported by the LZ-based
// encoder or the number of bytes left in the
@@ -63,11 +59,11 @@ lzma_mf_find(lzma_mf *mf, uint32_t *count_ptr, lzma_match *matches)
// Pointer to the byte we just ran through
// the match finder.
- p1 = mf_ptr(mf) - 1;
+ const uint8_t *p1 = mf_ptr(mf) - 1;
// Pointer to the beginning of the match. We need -1
// here because the match distances are zero based.
- p2 = p1 - matches[count - 1].dist - 1;
+ const uint8_t *p2 = p1 - matches[count - 1].dist - 1;
while (len_best < limit
&& p1[len_best] == p2[len_best])
@@ -112,22 +108,18 @@ lzma_mf_find(lzma_mf *mf, uint32_t *count_ptr, lzma_match *matches)
static void
normalize(lzma_mf *mf)
{
- uint32_t i;
- uint32_t subvalue;
- uint32_t count;
- uint32_t *hash;
-
assert(mf->read_pos + mf->offset == MUST_NORMALIZE_POS);
// In future we may not want to touch the lowest bits, because there
// may be match finders that use larger resolution than one byte.
- subvalue = (MUST_NORMALIZE_POS - mf->cyclic_size);
+ const uint32_t subvalue
+ = (MUST_NORMALIZE_POS - mf->cyclic_size);
// & (~(UINT32_C(1) << 10) - 1);
- count = mf->hash_size_sum + mf->sons_count;
- hash = mf->hash;
+ const uint32_t count = mf->hash_size_sum + mf->sons_count;
+ uint32_t *hash = mf->hash;
- for (i = 0; i < count; ++i) {
+ for (uint32_t i = 0; i < count; ++i) {
// If the distance is greater than the dictionary size,
// we can simply mark the hash element as empty.
//
@@ -204,14 +196,15 @@ move_pending(lzma_mf *mf)
move_pending(mf); \
ret_op; \
} \
- cur = mf_ptr(mf); \
- pos = mf->read_pos + mf->offset
+ const uint8_t *cur = mf_ptr(mf); \
+ const uint32_t pos = mf->read_pos + mf->offset
/// Header for find functions. "return 0" indicates that zero matches
/// were found.
#define header_find(is_bt, len_min) \
- header(is_bt, len_min, return 0)
+ header(is_bt, len_min, return 0); \
+ uint32_t matches_count = 0
/// Header for a loop in a skip function. "continue" tells to skip the rest
@@ -268,11 +261,10 @@ hc_find_func(
while (true) {
const uint32_t delta = pos - cur_match;
- const uint8_t *pb;
if (depth-- == 0 || delta >= cyclic_size)
return matches;
- pb = cur - delta;
+ const uint8_t *const pb = cur - delta;
cur_match = son[cyclic_pos - delta
+ (delta > cyclic_pos ? cyclic_size : 0)];
@@ -313,23 +305,18 @@ do { \
extern uint32_t
lzma_mf_hc3_find(lzma_mf *mf, lzma_match *matches)
{
- const uint8_t *cur;
- uint32_t pos;
- uint32_t temp, hash_value, hash_2_value; /* hash_3_calc */
- uint32_t delta2, cur_match;
- uint32_t len_best = 2;
- uint32_t matches_count = 0;
-
header_find(false, 3);
hash_3_calc();
- delta2 = pos - mf->hash[hash_2_value];
- cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value];
+ const uint32_t delta2 = pos - mf->hash[hash_2_value];
+ const uint32_t cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value];
mf->hash[hash_2_value] = pos;
mf->hash[FIX_3_HASH_SIZE + hash_value] = pos;
+ uint32_t len_best = 2;
+
if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) {
for ( ; len_best != len_limit; ++len_best)
if (*(cur + len_best - delta2) != cur[len_best])
@@ -353,22 +340,18 @@ extern void
lzma_mf_hc3_skip(lzma_mf *mf, uint32_t amount)
{
do {
- const uint8_t *cur;
- uint32_t pos;
- uint32_t temp, hash_value, hash_2_value; /* hash_3_calc */
- uint32_t cur_match;
-
if (mf_avail(mf) < 3) {
move_pending(mf);
continue;
}
- cur = mf_ptr(mf);
- pos = mf->read_pos + mf->offset;
+ const uint8_t *cur = mf_ptr(mf);
+ const uint32_t pos = mf->read_pos + mf->offset;
hash_3_calc();
- cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value];
+ const uint32_t cur_match
+ = mf->hash[FIX_3_HASH_SIZE + hash_value];
mf->hash[hash_2_value] = pos;
mf->hash[FIX_3_HASH_SIZE + hash_value] = pos;
@@ -384,25 +367,21 @@ lzma_mf_hc3_skip(lzma_mf *mf, uint32_t amount)
extern uint32_t
lzma_mf_hc4_find(lzma_mf *mf, lzma_match *matches)
{
- const uint8_t *cur;
- uint32_t pos;
- uint32_t temp, hash_value, hash_2_value, hash_3_value; /* hash_4_calc */
- uint32_t delta2, delta3, cur_match;
- uint32_t len_best = 1;
- uint32_t matches_count = 0;
-
header_find(false, 4);
hash_4_calc();
- delta2 = pos - mf->hash[hash_2_value];
- delta3 = pos - mf->hash[FIX_3_HASH_SIZE + hash_3_value];
- cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value];
+ uint32_t delta2 = pos - mf->hash[hash_2_value];
+ const uint32_t delta3
+ = pos - mf->hash[FIX_3_HASH_SIZE + hash_3_value];
+ const uint32_t cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value];
mf->hash[hash_2_value ] = pos;
mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos;
mf->hash[FIX_4_HASH_SIZE + hash_value] = pos;
+ uint32_t len_best = 1;
+
if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) {
len_best = 2;
matches[0].len = 2;
@@ -441,22 +420,18 @@ extern void
lzma_mf_hc4_skip(lzma_mf *mf, uint32_t amount)
{
do {
- const uint8_t *cur;
- uint32_t pos;
- uint32_t temp, hash_value, hash_2_value, hash_3_value; /* hash_4_calc */
- uint32_t cur_match;
-
if (mf_avail(mf) < 4) {
move_pending(mf);
continue;
}
- cur = mf_ptr(mf);
- pos = mf->read_pos + mf->offset;
+ const uint8_t *cur = mf_ptr(mf);
+ const uint32_t pos = mf->read_pos + mf->offset;
hash_4_calc();
- cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value];
+ const uint32_t cur_match
+ = mf->hash[FIX_4_HASH_SIZE + hash_value];
mf->hash[hash_2_value] = pos;
mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos;
@@ -494,10 +469,6 @@ bt_find_func(
uint32_t len1 = 0;
while (true) {
- uint32_t *pair;
- const uint8_t *pb;
- uint32_t len;
-
const uint32_t delta = pos - cur_match;
if (depth-- == 0 || delta >= cyclic_size) {
*ptr0 = EMPTY_HASH_VALUE;
@@ -505,12 +476,12 @@ bt_find_func(
return matches;
}
- pair = son + ((cyclic_pos - delta
+ uint32_t *const pair = son + ((cyclic_pos - delta
+ (delta > cyclic_pos ? cyclic_size : 0))
<< 1);
- pb = cur - delta;
- len = my_min(len0, len1);
+ const uint8_t *const pb = cur - delta;
+ uint32_t len = my_min(len0, len1);
if (pb[len] == cur[len]) {
while (++len != len_limit)
@@ -564,10 +535,6 @@ bt_skip_func(
uint32_t len1 = 0;
while (true) {
- uint32_t *pair;
- const uint8_t *pb;
- uint32_t len;
-
const uint32_t delta = pos - cur_match;
if (depth-- == 0 || delta >= cyclic_size) {
*ptr0 = EMPTY_HASH_VALUE;
@@ -575,11 +542,11 @@ bt_skip_func(
return;
}
- pair = son + ((cyclic_pos - delta
+ uint32_t *pair = son + ((cyclic_pos - delta
+ (delta > cyclic_pos ? cyclic_size : 0))
<< 1);
- pb = cur - delta;
- len = my_min(len0, len1);
+ const uint8_t *pb = cur - delta;
+ uint32_t len = my_min(len0, len1);
if (pb[len] == cur[len]) {
while (++len != len_limit)
@@ -626,17 +593,11 @@ do { \
extern uint32_t
lzma_mf_bt2_find(lzma_mf *mf, lzma_match *matches)
{
- const uint8_t *cur;
- uint32_t pos;
- uint32_t hash_value; /* hash_2_calc */
- uint32_t cur_match;
- uint32_t matches_count = 0;
-
header_find(true, 2);
hash_2_calc();
- cur_match = mf->hash[hash_value];
+ const uint32_t cur_match = mf->hash[hash_value];
mf->hash[hash_value] = pos;
bt_find(1);
@@ -647,16 +608,11 @@ extern void
lzma_mf_bt2_skip(lzma_mf *mf, uint32_t amount)
{
do {
- const uint8_t *cur;
- uint32_t pos;
- uint32_t hash_value; /* hash_2_calc */
- uint32_t cur_match;
-
header_skip(true, 2);
hash_2_calc();
- cur_match = mf->hash[hash_value];
+ const uint32_t cur_match = mf->hash[hash_value];
mf->hash[hash_value] = pos;
bt_skip();
@@ -670,23 +626,18 @@ lzma_mf_bt2_skip(lzma_mf *mf, uint32_t amount)
extern uint32_t
lzma_mf_bt3_find(lzma_mf *mf, lzma_match *matches)
{
- const uint8_t *cur;
- uint32_t pos;
- uint32_t temp, hash_value, hash_2_value; /* hash_3_calc */
- uint32_t delta2, cur_match;
- uint32_t len_best = 2;
- uint32_t matches_count = 0;
-
header_find(true, 3);
hash_3_calc();
- delta2 = pos - mf->hash[hash_2_value];
- cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value];
+ const uint32_t delta2 = pos - mf->hash[hash_2_value];
+ const uint32_t cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value];
mf->hash[hash_2_value] = pos;
mf->hash[FIX_3_HASH_SIZE + hash_value] = pos;
+ uint32_t len_best = 2;
+
if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) {
for ( ; len_best != len_limit; ++len_best)
if (*(cur + len_best - delta2) != cur[len_best])
@@ -710,16 +661,12 @@ extern void
lzma_mf_bt3_skip(lzma_mf *mf, uint32_t amount)
{
do {
- const uint8_t *cur;
- uint32_t pos;
- uint32_t temp, hash_value, hash_2_value; /* hash_3_calc */
- uint32_t cur_match;
-
header_skip(true, 3);
hash_3_calc();
- cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value];
+ const uint32_t cur_match
+ = mf->hash[FIX_3_HASH_SIZE + hash_value];
mf->hash[hash_2_value] = pos;
mf->hash[FIX_3_HASH_SIZE + hash_value] = pos;
@@ -735,25 +682,21 @@ lzma_mf_bt3_skip(lzma_mf *mf, uint32_t amount)
extern uint32_t
lzma_mf_bt4_find(lzma_mf *mf, lzma_match *matches)
{
- const uint8_t *cur;
- uint32_t pos;
- uint32_t temp, hash_value, hash_2_value, hash_3_value; /* hash_4_calc */
- uint32_t delta2, delta3, cur_match;
- uint32_t len_best = 1;
- uint32_t matches_count = 0;
-
header_find(true, 4);
hash_4_calc();
- delta2 = pos - mf->hash[hash_2_value];
- delta3 = pos - mf->hash[FIX_3_HASH_SIZE + hash_3_value];
- cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value];
+ uint32_t delta2 = pos - mf->hash[hash_2_value];
+ const uint32_t delta3
+ = pos - mf->hash[FIX_3_HASH_SIZE + hash_3_value];
+ const uint32_t cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value];
mf->hash[hash_2_value] = pos;
mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos;
mf->hash[FIX_4_HASH_SIZE + hash_value] = pos;
+ uint32_t len_best = 1;
+
if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) {
len_best = 2;
matches[0].len = 2;
@@ -792,16 +735,12 @@ extern void
lzma_mf_bt4_skip(lzma_mf *mf, uint32_t amount)
{
do {
- const uint8_t *cur;
- uint32_t pos;
- uint32_t temp, hash_value, hash_2_value, hash_3_value; /* hash_4_calc */
- uint32_t cur_match;
-
header_skip(true, 4);
hash_4_calc();
- cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value];
+ const uint32_t cur_match
+ = mf->hash[FIX_4_HASH_SIZE + hash_value];
mf->hash[hash_2_value] = pos;
mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos;
diff --git a/Utilities/cmliblzma/liblzma/lzma/fastpos.h b/Utilities/cmliblzma/liblzma/lzma/fastpos.h
index 5a834d6..4aea231 100644
--- a/Utilities/cmliblzma/liblzma/lzma/fastpos.h
+++ b/Utilities/cmliblzma/liblzma/lzma/fastpos.h
@@ -75,8 +75,6 @@
// on all systems I have tried. The size optimized version is sometimes
// slightly faster, but sometimes it is a lot slower.
-#include "config.h"
-
#ifdef HAVE_SMALL
# define get_pos_slot(pos) ((pos) <= 4 ? (pos) : get_pos_slot_2(pos))
diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma2_decoder.c b/Utilities/cmliblzma/liblzma/lzma/lzma2_decoder.c
index ca14c4a..3e42575 100644
--- a/Utilities/cmliblzma/liblzma/lzma/lzma2_decoder.c
+++ b/Utilities/cmliblzma/liblzma/lzma/lzma2_decoder.c
@@ -224,8 +224,6 @@ static lzma_ret
lzma2_decoder_init(lzma_lz_decoder *lz, lzma_allocator *allocator,
const void *opt, lzma_lz_options *lz_options)
{
- const lzma_options_lzma *options = opt;
-
if (lz->coder == NULL) {
lz->coder = lzma_alloc(sizeof(lzma_coder), allocator);
if (lz->coder == NULL)
@@ -237,6 +235,8 @@ lzma2_decoder_init(lzma_lz_decoder *lz, lzma_allocator *allocator,
lz->coder->lzma = LZMA_LZ_DECODER_INIT;
}
+ const lzma_options_lzma *options = opt;
+
lz->coder->sequence = SEQ_CONTROL;
lz->coder->need_properties = true;
lz->coder->need_dictionary_reset = options->preset_dict == NULL
@@ -272,8 +272,6 @@ extern lzma_ret
lzma_lzma2_props_decode(void **options, lzma_allocator *allocator,
const uint8_t *props, size_t props_size)
{
- lzma_options_lzma *opt;
-
if (props_size != 1)
return LZMA_OPTIONS_ERROR;
@@ -285,7 +283,8 @@ lzma_lzma2_props_decode(void **options, lzma_allocator *allocator,
if (props[0] > 40)
return LZMA_OPTIONS_ERROR;
- opt = lzma_alloc(sizeof(lzma_options_lzma), allocator);
+ lzma_options_lzma *opt = lzma_alloc(
+ sizeof(lzma_options_lzma), allocator);
if (opt == NULL)
return LZMA_MEM_ERROR;
diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma2_encoder.c b/Utilities/cmliblzma/liblzma/lzma/lzma2_encoder.c
index 8784f5d..992720c 100644
--- a/Utilities/cmliblzma/liblzma/lzma/lzma2_encoder.c
+++ b/Utilities/cmliblzma/liblzma/lzma/lzma2_encoder.c
@@ -54,14 +54,13 @@ struct lzma_coder_s {
static void
lzma2_header_lzma(lzma_coder *coder)
{
- size_t pos;
- size_t size;
-
assert(coder->uncompressed_size > 0);
assert(coder->uncompressed_size <= LZMA2_UNCOMPRESSED_MAX);
assert(coder->compressed_size > 0);
assert(coder->compressed_size <= LZMA2_CHUNK_MAX);
+ size_t pos;
+
if (coder->need_properties) {
pos = 0;
@@ -82,7 +81,7 @@ lzma2_header_lzma(lzma_coder *coder)
coder->buf_pos = pos;
// Uncompressed size
- size = coder->uncompressed_size - 1;
+ size_t size = coder->uncompressed_size - 1;
coder->buf[pos++] += size >> 16;
coder->buf[pos++] = (size >> 8) & 0xFF;
coder->buf[pos++] = size & 0xFF;
@@ -163,9 +162,6 @@ lzma2_encode(lzma_coder *restrict coder, lzma_mf *restrict mf,
// Fall through
case SEQ_LZMA_ENCODE: {
- uint32_t read_start;
- lzma_ret ret;
-
// Calculate how much more uncompressed data this chunk
// could accept.
const uint32_t left = LZMA2_UNCOMPRESSED_MAX
@@ -186,10 +182,10 @@ lzma2_encode(lzma_coder *restrict coder, lzma_mf *restrict mf,
// Save the start position so that we can update
// coder->uncompressed_size.
- read_start = mf->read_pos - mf->read_ahead;
+ const uint32_t read_start = mf->read_pos - mf->read_ahead;
// Call the LZMA encoder until the chunk is finished.
- ret = lzma_lzma_encode(coder->lzma, mf,
+ const lzma_ret ret = lzma_lzma_encode(coder->lzma, mf,
coder->buf + LZMA2_HEADER_MAX,
&coder->compressed_size,
LZMA2_CHUNK_MAX, limit);
@@ -277,8 +273,6 @@ lzma2_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
static lzma_ret
lzma2_encoder_options_update(lzma_coder *coder, const lzma_filter *filter)
{
- lzma_options_lzma *opt;
-
// New options can be set only when there is no incomplete chunk.
// This is the case at the beginning of the raw stream and right
// after LZMA_SYNC_FLUSH.
@@ -287,7 +281,7 @@ lzma2_encoder_options_update(lzma_coder *coder, const lzma_filter *filter)
// Look if there are new options. At least for now,
// only lc/lp/pb can be changed.
- opt = filter->options;
+ const lzma_options_lzma *opt = filter->options;
if (coder->opt_cur.lc != opt->lc || coder->opt_cur.lp != opt->lp
|| coder->opt_cur.pb != opt->pb) {
// Validate the options.
diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma_common.h b/Utilities/cmliblzma/liblzma/lzma/lzma_common.h
index 36267dc..e31e285 100644
--- a/Utilities/cmliblzma/liblzma/lzma/lzma_common.h
+++ b/Utilities/cmliblzma/liblzma/lzma/lzma_common.h
@@ -129,15 +129,12 @@ static inline void
literal_init(probability (*probs)[LITERAL_CODER_SIZE],
uint32_t lc, uint32_t lp)
{
- uint32_t coders;
- uint32_t i, j;
-
assert(lc + lp <= LZMA_LCLP_MAX);
- coders = 1U << (lc + lp);
+ const uint32_t coders = 1U << (lc + lp);
- for (i = 0; i < coders; ++i)
- for (j = 0; j < LITERAL_CODER_SIZE; ++j)
+ for (uint32_t i = 0; i < coders; ++i)
+ for (uint32_t j = 0; j < LITERAL_CODER_SIZE; ++j)
bit_reset(probs[i][j]);
return;
diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma_decoder.c b/Utilities/cmliblzma/liblzma/lzma/lzma_decoder.c
index 1bee2a9..9979bb4 100644
--- a/Utilities/cmliblzma/liblzma/lzma/lzma_decoder.c
+++ b/Utilities/cmliblzma/liblzma/lzma/lzma_decoder.c
@@ -114,33 +114,33 @@ do { \
case seq ## _CHOICE: \
rc_if_0(ld.choice, seq ## _CHOICE) { \
rc_update_0(ld.choice); \
- rc_bit_case(ld.low[pos_state][symbol], 0, 0, seq ## _LOW0); \
- rc_bit_case(ld.low[pos_state][symbol], 0, 0, seq ## _LOW1); \
- rc_bit_case(ld.low[pos_state][symbol], 0, 0, seq ## _LOW2); \
+ rc_bit_case(ld.low[pos_state][symbol], , , seq ## _LOW0); \
+ rc_bit_case(ld.low[pos_state][symbol], , , seq ## _LOW1); \
+ rc_bit_case(ld.low[pos_state][symbol], , , seq ## _LOW2); \
target = symbol - LEN_LOW_SYMBOLS + MATCH_LEN_MIN; \
} else { \
rc_update_1(ld.choice); \
case seq ## _CHOICE2: \
rc_if_0(ld.choice2, seq ## _CHOICE2) { \
rc_update_0(ld.choice2); \
- rc_bit_case(ld.mid[pos_state][symbol], 0, 0, \
+ rc_bit_case(ld.mid[pos_state][symbol], , , \
seq ## _MID0); \
- rc_bit_case(ld.mid[pos_state][symbol], 0, 0, \
+ rc_bit_case(ld.mid[pos_state][symbol], , , \
seq ## _MID1); \
- rc_bit_case(ld.mid[pos_state][symbol], 0, 0, \
+ rc_bit_case(ld.mid[pos_state][symbol], , , \
seq ## _MID2); \
target = symbol - LEN_MID_SYMBOLS \
+ MATCH_LEN_MIN + LEN_LOW_SYMBOLS; \
} else { \
rc_update_1(ld.choice2); \
- rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH0); \
- rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH1); \
- rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH2); \
- rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH3); \
- rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH4); \
- rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH5); \
- rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH6); \
- rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH7); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH0); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH1); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH2); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH3); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH4); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH5); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH6); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH7); \
target = symbol - LEN_HIGH_SYMBOLS \
+ MATCH_LEN_MIN \
+ LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS; \
@@ -285,6 +285,13 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
const uint8_t *restrict in,
size_t *restrict in_pos, size_t in_size)
{
+ ////////////////////
+ // Initialization //
+ ////////////////////
+
+ if (!rc_read_init(&coder->rc, in, in_pos, in_size))
+ return LZMA_OK;
+
///////////////
// Variables //
///////////////
@@ -331,16 +338,6 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
if (no_eopm && coder->uncompressed_size < dict.limit - dict.pos)
dict.limit = dict.pos + (size_t)(coder->uncompressed_size);
- ////////////////////
- // Initialization //
- ////////////////////
-
- if (!rc_read_init(&coder->rc, in, in_pos, in_size))
- return LZMA_OK;
-
- rc = coder->rc;
- rc_in_pos = *in_pos;
-
// The main decoder loop. The "switch" is used to restart the decoder at
	// the correct location. Once restarted, the "switch" is no longer used.
switch (coder->sequence)
@@ -356,21 +353,6 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
break;
rc_if_0(coder->is_match[state][pos_state], SEQ_IS_MATCH) {
- static const lzma_lzma_state next_state[] = {
- STATE_LIT_LIT,
- STATE_LIT_LIT,
- STATE_LIT_LIT,
- STATE_LIT_LIT,
- STATE_MATCH_LIT_LIT,
- STATE_REP_LIT_LIT,
- STATE_SHORTREP_LIT_LIT,
- STATE_MATCH_LIT,
- STATE_REP_LIT,
- STATE_SHORTREP_LIT,
- STATE_MATCH_LIT,
- STATE_REP_LIT
- };
-
rc_update_0(coder->is_match[state][pos_state]);
// It's a literal i.e. a single 8-bit byte.
@@ -388,21 +370,16 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
rc_bit(probs[symbol], , , SEQ_LITERAL);
} while (symbol < (1 << 8));
#else
- rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL0);
- rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL1);
- rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL2);
- rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL3);
- rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL4);
- rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL5);
- rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL6);
- rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL7);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL0);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL1);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL2);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL3);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL4);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL5);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL6);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL7);
#endif
} else {
-#ifndef HAVE_SMALL
- uint32_t match_bit;
- uint32_t subcoder_index;
-#endif
-
// Decode literal with match byte.
//
// We store the byte we compare against
@@ -441,6 +418,8 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
} while (symbol < (1 << 8));
#else
// Unroll the loop.
+ uint32_t match_bit;
+ uint32_t subcoder_index;
# define d(seq) \
case seq: \
@@ -474,6 +453,20 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
// Use a lookup table to update to literal state,
// since compared to other state updates, this would
// need two branches.
+ static const lzma_lzma_state next_state[] = {
+ STATE_LIT_LIT,
+ STATE_LIT_LIT,
+ STATE_LIT_LIT,
+ STATE_LIT_LIT,
+ STATE_MATCH_LIT_LIT,
+ STATE_REP_LIT_LIT,
+ STATE_SHORTREP_LIT_LIT,
+ STATE_MATCH_LIT,
+ STATE_REP_LIT,
+ STATE_SHORTREP_LIT,
+ STATE_MATCH_LIT,
+ STATE_REP_LIT
+ };
state = next_state[state];
case SEQ_LITERAL_WRITE:
@@ -518,12 +511,12 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
rc_bit(probs[symbol], , , SEQ_POS_SLOT);
} while (symbol < POS_SLOTS);
#else
- rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT0);
- rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT1);
- rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT2);
- rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT3);
- rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT4);
- rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT5);
+ rc_bit_case(probs[symbol], , , SEQ_POS_SLOT0);
+ rc_bit_case(probs[symbol], , , SEQ_POS_SLOT1);
+ rc_bit_case(probs[symbol], , , SEQ_POS_SLOT2);
+ rc_bit_case(probs[symbol], , , SEQ_POS_SLOT3);
+ rc_bit_case(probs[symbol], , , SEQ_POS_SLOT4);
+ rc_bit_case(probs[symbol], , , SEQ_POS_SLOT5);
#endif
// Get rid of the highest bit that was needed for
// indexing of the probability array.
@@ -571,25 +564,25 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
switch (limit) {
case 5:
assert(offset == 0);
- rc_bit(probs[symbol], 0,
+ rc_bit(probs[symbol], ,
rep0 += 1,
SEQ_POS_MODEL);
++offset;
--limit;
case 4:
- rc_bit(probs[symbol], 0,
+ rc_bit(probs[symbol], ,
rep0 += 1 << offset,
SEQ_POS_MODEL);
++offset;
--limit;
case 3:
- rc_bit(probs[symbol], 0,
+ rc_bit(probs[symbol], ,
rep0 += 1 << offset,
SEQ_POS_MODEL);
++offset;
--limit;
case 2:
- rc_bit(probs[symbol], 0,
+ rc_bit(probs[symbol], ,
rep0 += 1 << offset,
SEQ_POS_MODEL);
++offset;
@@ -601,7 +594,7 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
// rc_bit_last() here to omit
// the unneeded updating of
// "symbol".
- rc_bit_last(probs[symbol], 0,
+ rc_bit_last(probs[symbol], ,
rep0 += 1 << offset,
SEQ_POS_MODEL);
}
@@ -635,19 +628,19 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
} while (++offset < ALIGN_BITS);
#else
case SEQ_ALIGN0:
- rc_bit(coder->pos_align[symbol], 0,
+ rc_bit(coder->pos_align[symbol], ,
rep0 += 1, SEQ_ALIGN0);
case SEQ_ALIGN1:
- rc_bit(coder->pos_align[symbol], 0,
+ rc_bit(coder->pos_align[symbol], ,
rep0 += 2, SEQ_ALIGN1);
case SEQ_ALIGN2:
- rc_bit(coder->pos_align[symbol], 0,
+ rc_bit(coder->pos_align[symbol], ,
rep0 += 4, SEQ_ALIGN2);
case SEQ_ALIGN3:
// Like in SEQ_POS_MODEL, we don't
// need "symbol" for anything else
// than indexing the probability array.
- rc_bit_last(coder->pos_align[symbol], 0,
+ rc_bit_last(coder->pos_align[symbol], ,
rep0 += 8, SEQ_ALIGN3);
#endif
@@ -732,11 +725,9 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
// is stored to rep0 and rep1, rep2 and rep3
// are updated accordingly.
rc_if_0(coder->is_rep1[state], SEQ_IS_REP1) {
- uint32_t distance;
-
rc_update_0(coder->is_rep1[state]);
- distance = rep1;
+ const uint32_t distance = rep1;
rep1 = rep0;
rep0 = distance;
@@ -745,23 +736,19 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
case SEQ_IS_REP2:
rc_if_0(coder->is_rep2[state],
SEQ_IS_REP2) {
- uint32_t distance;
-
rc_update_0(coder->is_rep2[
state]);
- distance = rep2;
+ const uint32_t distance = rep2;
rep2 = rep1;
rep1 = rep0;
rep0 = distance;
} else {
- uint32_t distance;
-
rc_update_1(coder->is_rep2[
state]);
- distance = rep3;
+ const uint32_t distance = rep3;
rep3 = rep2;
rep2 = rep1;
rep1 = rep0;
@@ -866,9 +853,6 @@ lzma_lzma_decoder_uncompressed(void *coder_ptr, lzma_vli uncompressed_size)
static void
lzma_decoder_reset(lzma_coder *coder, const void *opt)
{
- uint32_t i, j, pos_state;
- uint32_t num_pos_states;
-
const lzma_options_lzma *options = opt;
// NOTE: We assume that lc/lp/pb are valid since they were
@@ -895,8 +879,8 @@ lzma_decoder_reset(lzma_coder *coder, const void *opt)
rc_reset(coder->rc);
// Bit and bittree decoders
- for (i = 0; i < STATES; ++i) {
- for (j = 0; j <= coder->pos_mask; ++j) {
+ for (uint32_t i = 0; i < STATES; ++i) {
+ for (uint32_t j = 0; j <= coder->pos_mask; ++j) {
bit_reset(coder->is_match[i][j]);
bit_reset(coder->is_rep0_long[i][j]);
}
@@ -907,22 +891,22 @@ lzma_decoder_reset(lzma_coder *coder, const void *opt)
bit_reset(coder->is_rep2[i]);
}
- for (i = 0; i < LEN_TO_POS_STATES; ++i)
+ for (uint32_t i = 0; i < LEN_TO_POS_STATES; ++i)
bittree_reset(coder->pos_slot[i], POS_SLOT_BITS);
- for (i = 0; i < FULL_DISTANCES - END_POS_MODEL_INDEX; ++i)
+ for (uint32_t i = 0; i < FULL_DISTANCES - END_POS_MODEL_INDEX; ++i)
bit_reset(coder->pos_special[i]);
bittree_reset(coder->pos_align, ALIGN_BITS);
// Len decoders (also bit/bittree)
- num_pos_states = 1U << options->pb;
+ const uint32_t num_pos_states = 1U << options->pb;
bit_reset(coder->match_len_decoder.choice);
bit_reset(coder->match_len_decoder.choice2);
bit_reset(coder->rep_len_decoder.choice);
bit_reset(coder->rep_len_decoder.choice2);
- for (pos_state = 0; pos_state < num_pos_states; ++pos_state) {
+ for (uint32_t pos_state = 0; pos_state < num_pos_states; ++pos_state) {
bittree_reset(coder->match_len_decoder.low[pos_state],
LEN_LOW_BITS);
bittree_reset(coder->match_len_decoder.mid[pos_state],
@@ -952,8 +936,6 @@ extern lzma_ret
lzma_lzma_decoder_create(lzma_lz_decoder *lz, lzma_allocator *allocator,
const void *opt, lzma_lz_options *lz_options)
{
- const lzma_options_lzma *options = opt;
-
if (lz->coder == NULL) {
lz->coder = lzma_alloc(sizeof(lzma_coder), allocator);
if (lz->coder == NULL)
@@ -966,6 +948,7 @@ lzma_lzma_decoder_create(lzma_lz_decoder *lz, lzma_allocator *allocator,
// All dictionary sizes are OK here. LZ decoder will take care of
// the special cases.
+ const lzma_options_lzma *options = opt;
lz_options->dict_size = options->dict_size;
lz_options->preset_dict = options->preset_dict;
lz_options->preset_dict_size = options->preset_dict_size;
@@ -1045,12 +1028,11 @@ extern lzma_ret
lzma_lzma_props_decode(void **options, lzma_allocator *allocator,
const uint8_t *props, size_t props_size)
{
- lzma_options_lzma *opt;
-
if (props_size != 5)
return LZMA_OPTIONS_ERROR;
- opt = lzma_alloc(sizeof(lzma_options_lzma), allocator);
+ lzma_options_lzma *opt
+ = lzma_alloc(sizeof(lzma_options_lzma), allocator);
if (opt == NULL)
return LZMA_MEM_ERROR;
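
Most of the decoder hunks replace the dummy 0 passed as the "action" arguments of rc_bit_case() and rc_bit() with empty arguments. C99 allows a macro argument to consist of no tokens at all, so the expansion then contains a plain empty statement instead of the no-effect expression statement 0;. A compilable sketch of the idea, with decode_bit() as a simplified stand-in for the real range-coder macros:

    #include <stdint.h>
    #include <stdio.h>

    /* action0/action1 run when the decoded bit is 0 or 1; either may be empty. */
    #define decode_bit(bit, action0, action1) \
    do { \
        if ((bit) == 0) { \
            action0; \
        } else { \
            action1; \
        } \
    } while (0)

    int
    main(void)
    {
        uint32_t rep0 = 1;

        decode_bit(0, , rep0 += 2);   /* bit is 0, empty action: nothing runs */
        decode_bit(1, , rep0 += 2);   /* bit is 1: rep0 becomes 3             */

        printf("%u\n", rep0);         /* prints 3 */
        return 0;
    }
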
diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder.c b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder.c
index 6186f83..0b9ee9e 100644
--- a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder.c
+++ b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder.c
@@ -28,14 +28,11 @@ literal_matched(lzma_range_encoder *rc, probability *subcoder,
symbol += UINT32_C(1) << 8;
do {
- uint32_t match_bit;
- uint32_t subcoder_index;
- uint32_t bit;
-
match_byte <<= 1;
- match_bit = match_byte & offset;
- subcoder_index = offset + match_bit + (symbol >> 8);
- bit = (symbol >> 7) & 1;
+ const uint32_t match_bit = match_byte & offset;
+ const uint32_t subcoder_index
+ = offset + match_bit + (symbol >> 8);
+ const uint32_t bit = (symbol >> 7) & 1;
rc_bit(rc, &subcoder[subcoder_index], bit);
symbol <<= 1;
@@ -80,19 +77,16 @@ literal(lzma_coder *coder, lzma_mf *mf, uint32_t position)
static void
length_update_prices(lzma_length_encoder *lc, const uint32_t pos_state)
{
- uint32_t a0, a1, b0, b1;
- uint32_t *prices;
- uint32_t i;
-
const uint32_t table_size = lc->table_size;
lc->counters[pos_state] = table_size;
- a0 = rc_bit_0_price(lc->choice);
- a1 = rc_bit_1_price(lc->choice);
- b0 = a1 + rc_bit_0_price(lc->choice2);
- b1 = a1 + rc_bit_1_price(lc->choice2);
- prices = lc->prices[pos_state];
+ const uint32_t a0 = rc_bit_0_price(lc->choice);
+ const uint32_t a1 = rc_bit_1_price(lc->choice);
+ const uint32_t b0 = a1 + rc_bit_0_price(lc->choice2);
+ const uint32_t b1 = a1 + rc_bit_1_price(lc->choice2);
+ uint32_t *const prices = lc->prices[pos_state];
+ uint32_t i;
for (i = 0; i < table_size && i < LEN_LOW_SYMBOLS; ++i)
prices[i] = a0 + rc_bittree_price(lc->low[pos_state],
LEN_LOW_BITS, i);
@@ -149,16 +143,13 @@ static inline void
match(lzma_coder *coder, const uint32_t pos_state,
const uint32_t distance, const uint32_t len)
{
- uint32_t pos_slot;
- uint32_t len_to_pos_state;
-
update_match(coder->state);
length(&coder->rc, &coder->match_len_encoder, pos_state, len,
coder->fast_mode);
- pos_slot = get_pos_slot(distance);
- len_to_pos_state = get_len_to_pos_state(len);
+ const uint32_t pos_slot = get_pos_slot(distance);
+ const uint32_t len_to_pos_state = get_len_to_pos_state(len);
rc_bittree(&coder->rc, coder->pos_slot[len_to_pos_state],
POS_SLOT_BITS, pos_slot);
@@ -322,19 +313,14 @@ lzma_lzma_encode(lzma_coder *restrict coder, lzma_mf *restrict mf,
uint8_t *restrict out, size_t *restrict out_pos,
size_t out_size, uint32_t limit)
{
- uint32_t position;
-
// Initialize the stream if no data has been encoded yet.
if (!coder->is_initialized && !encode_init(coder, mf))
return LZMA_OK;
// Get the lowest bits of the uncompressed offset from the LZ layer.
- position = mf_position(mf);
+ uint32_t position = mf_position(mf);
while (true) {
- uint32_t len;
- uint32_t back;
-
// Encode pending bits, if any. Calling this before encoding
// the next symbol is needed only with plain LZMA, since
// LZMA2 always provides big enough buffer to flush
@@ -373,6 +359,8 @@ lzma_lzma_encode(lzma_coder *restrict coder, lzma_mf *restrict mf,
// - UINT32_MAX: not a match but a literal
// Value ranges for len:
// - [MATCH_LEN_MIN, MATCH_LEN_MAX]
+ uint32_t len;
+ uint32_t back;
if (coder->fast_mode)
lzma_lzma_optimum_fast(coder, mf, &back, &len);
@@ -465,12 +453,10 @@ static void
length_encoder_reset(lzma_length_encoder *lencoder,
const uint32_t num_pos_states, const bool fast_mode)
{
- size_t pos_state;
-
bit_reset(lencoder->choice);
bit_reset(lencoder->choice2);
- for (pos_state = 0; pos_state < num_pos_states; ++pos_state) {
+ for (size_t pos_state = 0; pos_state < num_pos_states; ++pos_state) {
bittree_reset(lencoder->low[pos_state], LEN_LOW_BITS);
bittree_reset(lencoder->mid[pos_state], LEN_MID_BITS);
}
@@ -478,7 +464,7 @@ length_encoder_reset(lzma_length_encoder *lencoder,
bittree_reset(lencoder->high, LEN_HIGH_BITS);
if (!fast_mode)
- for (pos_state = 0; pos_state < num_pos_states;
+ for (size_t pos_state = 0; pos_state < num_pos_states;
++pos_state)
length_update_prices(lencoder, pos_state);
@@ -489,8 +475,6 @@ length_encoder_reset(lzma_length_encoder *lencoder,
extern lzma_ret
lzma_lzma_encoder_reset(lzma_coder *coder, const lzma_options_lzma *options)
{
- size_t i, j;
-
if (!is_options_valid(options))
return LZMA_OPTIONS_ERROR;
@@ -503,14 +487,14 @@ lzma_lzma_encoder_reset(lzma_coder *coder, const lzma_options_lzma *options)
// State
coder->state = STATE_LIT_LIT;
- for (i = 0; i < REP_DISTANCES; ++i)
+ for (size_t i = 0; i < REP_DISTANCES; ++i)
coder->reps[i] = 0;
literal_init(coder->literal, options->lc, options->lp);
// Bit encoders
- for (i = 0; i < STATES; ++i) {
- for (j = 0; j <= coder->pos_mask; ++j) {
+ for (size_t i = 0; i < STATES; ++i) {
+ for (size_t j = 0; j <= coder->pos_mask; ++j) {
bit_reset(coder->is_match[i][j]);
bit_reset(coder->is_rep0_long[i][j]);
}
@@ -521,11 +505,11 @@ lzma_lzma_encoder_reset(lzma_coder *coder, const lzma_options_lzma *options)
bit_reset(coder->is_rep2[i]);
}
- for (i = 0; i < FULL_DISTANCES - END_POS_MODEL_INDEX; ++i)
+ for (size_t i = 0; i < FULL_DISTANCES - END_POS_MODEL_INDEX; ++i)
bit_reset(coder->pos_special[i]);
// Bit tree encoders
- for (i = 0; i < LEN_TO_POS_STATES; ++i)
+ for (size_t i = 0; i < LEN_TO_POS_STATES; ++i)
bittree_reset(coder->pos_slot[i], POS_SLOT_BITS);
bittree_reset(coder->pos_align, ALIGN_BITS);
@@ -564,9 +548,6 @@ extern lzma_ret
lzma_lzma_encoder_create(lzma_coder **coder_ptr, lzma_allocator *allocator,
const lzma_options_lzma *options, lzma_lz_options *lz_options)
{
- lzma_coder *coder;
- uint32_t log_size = 0;
-
// Allocate lzma_coder if it wasn't already allocated.
if (*coder_ptr == NULL) {
*coder_ptr = lzma_alloc(sizeof(lzma_coder), allocator);
@@ -574,7 +555,7 @@ lzma_lzma_encoder_create(lzma_coder **coder_ptr, lzma_allocator *allocator,
return LZMA_MEM_ERROR;
}
- coder = *coder_ptr;
+ lzma_coder *coder = *coder_ptr;
	// Set compression mode. We haven't validated the options yet,
// but it's OK here, since nothing bad happens with invalid
@@ -590,6 +571,7 @@ lzma_lzma_encoder_create(lzma_coder **coder_ptr, lzma_allocator *allocator,
// Set dist_table_size.
// Round the dictionary size up to next 2^n.
+ uint32_t log_size = 0;
while ((UINT32_C(1) << log_size) < options->dict_size)
++log_size;
@@ -643,15 +625,13 @@ lzma_lzma_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern uint64_t
lzma_lzma_encoder_memusage(const void *options)
{
- lzma_lz_options lz_options;
- uint64_t lz_memusage;
-
if (!is_options_valid(options))
return UINT64_MAX;
+ lzma_lz_options lz_options;
set_lz_options(&lz_options, options);
- lz_memusage = lzma_lz_encoder_memusage(&lz_options);
+ const uint64_t lz_memusage = lzma_lz_encoder_memusage(&lz_options);
if (lz_memusage == UINT64_MAX)
return UINT64_MAX;
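
Besides the const conversions, the encoder hunk above keeps the "round the dictionary size up to the next 2^n" loop and just declares log_size immediately before it. That rounding in isolation, with a couple of example values:

    #include <stdint.h>
    #include <stdio.h>

    /* Smallest log_size with (1 << log_size) >= dict_size, as in the hunk above. */
    static uint32_t
    dict_log2_ceil(uint32_t dict_size)
    {
        uint32_t log_size = 0;
        while ((UINT32_C(1) << log_size) < dict_size)
            ++log_size;
        return log_size;
    }

    int
    main(void)
    {
        printf("%u\n", dict_log2_ceil(UINT32_C(1) << 20));         /* 20 */
        printf("%u\n", dict_log2_ceil((UINT32_C(1) << 20) + 1));   /* 21 */
        return 0;
    }
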
diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_fast.c b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_fast.c
index 52c26e4..f835f69 100644
--- a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_fast.c
+++ b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_fast.c
@@ -20,14 +20,6 @@ extern void
lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf,
uint32_t *restrict back_res, uint32_t *restrict len_res)
{
- const uint8_t *buf;
- uint32_t buf_avail;
- uint32_t i;
- uint32_t rep_len = 0;
- uint32_t rep_index = 0;
- uint32_t back_main = 0;
- uint32_t limit;
-
const uint32_t nice_len = mf->nice_len;
uint32_t len_main;
@@ -40,8 +32,8 @@ lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf,
matches_count = coder->matches_count;
}
- buf = mf_ptr(mf) - 1;
- buf_avail = my_min(mf_avail(mf) + 1, MATCH_LEN_MAX);
+ const uint8_t *buf = mf_ptr(mf) - 1;
+ const uint32_t buf_avail = my_min(mf_avail(mf) + 1, MATCH_LEN_MAX);
if (buf_avail < 2) {
// There's not enough input left to encode a match.
@@ -51,9 +43,10 @@ lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf,
}
// Look for repeated matches; scan the previous four match distances
- for (i = 0; i < REP_DISTANCES; ++i) {
- uint32_t len;
+ uint32_t rep_len = 0;
+ uint32_t rep_index = 0;
+ for (uint32_t i = 0; i < REP_DISTANCES; ++i) {
// Pointer to the beginning of the match candidate
const uint8_t *const buf_back = buf - coder->reps[i] - 1;
@@ -64,6 +57,7 @@ lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf,
// The first two bytes matched.
// Calculate the length of the match.
+ uint32_t len;
for (len = 2; len < buf_avail
&& buf[len] == buf_back[len]; ++len) ;
@@ -92,6 +86,7 @@ lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf,
return;
}
+ uint32_t back_main = 0;
if (len_main >= 2) {
back_main = coder->matches[matches_count - 1].dist;
@@ -158,16 +153,15 @@ lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf,
// the old buf pointer instead of recalculating it with mf_ptr().
++buf;
- limit = len_main - 1;
-
- for (i = 0; i < REP_DISTANCES; ++i) {
- uint32_t len;
+ const uint32_t limit = len_main - 1;
+ for (uint32_t i = 0; i < REP_DISTANCES; ++i) {
const uint8_t *const buf_back = buf - coder->reps[i] - 1;
if (not_equal_16(buf, buf_back))
continue;
+ uint32_t len;
for (len = 2; len < limit
&& buf[len] == buf_back[len]; ++len) ;
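
The fast-mode hunks keep both rep-distance scans and only narrow the scope of buf, buf_avail, limit and the loop counters. The scan itself, counting how far a candidate match extends once not_equal_16() has confirmed the first two bytes, can be written on its own like this (match_len_sketch is an illustration, not a liblzma function):

    #include <stdint.h>

    /* Precondition: buf[0..1] already matches buf_back[0..1] (checked by
     * not_equal_16() in the real code); limit caps the comparison, e.g.
     * MATCH_LEN_MAX or len_main - 1 as in the hunks above. */
    static uint32_t
    match_len_sketch(const uint8_t *buf, const uint8_t *buf_back, uint32_t limit)
    {
        uint32_t len;
        for (len = 2; len < limit && buf[len] == buf_back[len]; ++len)
            ;
        return len;
    }
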
diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_normal.c b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_normal.c
index d2829a2..7e85649 100644
--- a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_normal.c
+++ b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_normal.c
@@ -35,15 +35,12 @@ get_literal_price(const lzma_coder *const coder, const uint32_t pos,
symbol += UINT32_C(1) << 8;
do {
- uint32_t match_bit;
- uint32_t subcoder_index;
- uint32_t bit;
-
match_byte <<= 1;
- match_bit = match_byte & offset;
- subcoder_index = offset + match_bit + (symbol >> 8);
- bit = (symbol >> 7) & 1;
+ const uint32_t match_bit = match_byte & offset;
+ const uint32_t subcoder_index
+ = offset + match_bit + (symbol >> 8);
+ const uint32_t bit = (symbol >> 7) & 1;
price += rc_bit_price(subcoder[subcoder_index], bit);
symbol <<= 1;
@@ -134,11 +131,7 @@ get_pos_len_price(const lzma_coder *const coder, const uint32_t pos,
static void
fill_distances_prices(lzma_coder *coder)
{
- uint32_t len_to_pos_state;
- uint32_t pos_slot;
- uint32_t i;
-
- for (len_to_pos_state = 0;
+ for (uint32_t len_to_pos_state = 0;
len_to_pos_state < LEN_TO_POS_STATES;
++len_to_pos_state) {
@@ -146,7 +139,7 @@ fill_distances_prices(lzma_coder *coder)
= coder->pos_slot_prices[len_to_pos_state];
// Price to encode the pos_slot.
- for (pos_slot = 0;
+ for (uint32_t pos_slot = 0;
pos_slot < coder->dist_table_size; ++pos_slot)
pos_slot_prices[pos_slot] = rc_bittree_price(
coder->pos_slot[len_to_pos_state],
@@ -155,7 +148,7 @@ fill_distances_prices(lzma_coder *coder)
// For matches with distance >= FULL_DISTANCES, add the price
// of the direct bits part of the match distance. (Align bits
// are handled by fill_align_prices()).
- for (pos_slot = END_POS_MODEL_INDEX;
+ for (uint32_t pos_slot = END_POS_MODEL_INDEX;
pos_slot < coder->dist_table_size; ++pos_slot)
pos_slot_prices[pos_slot] += rc_direct_price(
((pos_slot >> 1) - 1) - ALIGN_BITS);
@@ -163,7 +156,7 @@ fill_distances_prices(lzma_coder *coder)
// Distances in the range [0, 3] are fully encoded with
// pos_slot, so they are used for coder->distances_prices
// as is.
- for (i = 0; i < START_POS_MODEL_INDEX; ++i)
+ for (uint32_t i = 0; i < START_POS_MODEL_INDEX; ++i)
coder->distances_prices[len_to_pos_state][i]
= pos_slot_prices[i];
}
@@ -171,7 +164,7 @@ fill_distances_prices(lzma_coder *coder)
// Distances in the range [4, 127] depend on pos_slot and pos_special.
// We do this in a loop separate from the above loop to avoid
// redundant calls to get_pos_slot().
- for (i = START_POS_MODEL_INDEX; i < FULL_DISTANCES; ++i) {
+ for (uint32_t i = START_POS_MODEL_INDEX; i < FULL_DISTANCES; ++i) {
const uint32_t pos_slot = get_pos_slot(i);
const uint32_t footer_bits = ((pos_slot >> 1) - 1);
const uint32_t base = (2 | (pos_slot & 1)) << footer_bits;
@@ -179,7 +172,7 @@ fill_distances_prices(lzma_coder *coder)
coder->pos_special + base - pos_slot - 1,
footer_bits, i - base);
- for (len_to_pos_state = 0;
+ for (uint32_t len_to_pos_state = 0;
len_to_pos_state < LEN_TO_POS_STATES;
++len_to_pos_state)
coder->distances_prices[len_to_pos_state][i]
@@ -195,8 +188,7 @@ fill_distances_prices(lzma_coder *coder)
static void
fill_align_prices(lzma_coder *coder)
{
- uint32_t i;
- for (i = 0; i < ALIGN_TABLE_SIZE; ++i)
+ for (uint32_t i = 0; i < ALIGN_TABLE_SIZE; ++i)
coder->align_prices[i] = rc_bittree_reverse_price(
coder->pos_align, ALIGN_BITS, i);
@@ -233,15 +225,12 @@ static void
backward(lzma_coder *restrict coder, uint32_t *restrict len_res,
uint32_t *restrict back_res, uint32_t cur)
{
+ coder->opts_end_index = cur;
+
uint32_t pos_mem = coder->opts[cur].pos_prev;
uint32_t back_mem = coder->opts[cur].back_prev;
- coder->opts_end_index = cur;
-
do {
- const uint32_t pos_prev = pos_mem;
- const uint32_t back_cur = back_mem;
-
if (coder->opts[cur].prev_1_is_literal) {
make_literal(&coder->opts[pos_mem]);
coder->opts[pos_mem].pos_prev = pos_mem - 1;
@@ -256,6 +245,9 @@ backward(lzma_coder *restrict coder, uint32_t *restrict len_res,
}
}
+ const uint32_t pos_prev = pos_mem;
+ const uint32_t back_cur = back_mem;
+
back_mem = coder->opts[pos_prev].back_prev;
pos_mem = coder->opts[pos_prev].pos_prev;
@@ -282,23 +274,6 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
uint32_t *restrict back_res, uint32_t *restrict len_res,
uint32_t position)
{
- uint32_t buf_avail;
- const uint8_t *buf;
- uint32_t rep_lens[REP_DISTANCES];
- uint32_t rep_max_index = 0;
- uint32_t i;
-
- uint8_t current_byte;
- uint8_t match_byte;
-
- uint32_t pos_state;
- uint32_t match_price;
- uint32_t rep_match_price;
- uint32_t len_end;
- uint32_t len;
-
- uint32_t normal_match_price;
-
const uint32_t nice_len = mf->nice_len;
uint32_t len_main;
@@ -312,18 +287,19 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
matches_count = coder->matches_count;
}
- buf_avail = my_min(mf_avail(mf) + 1, MATCH_LEN_MAX);
+ const uint32_t buf_avail = my_min(mf_avail(mf) + 1, MATCH_LEN_MAX);
if (buf_avail < 2) {
*back_res = UINT32_MAX;
*len_res = 1;
return UINT32_MAX;
}
- buf = mf_ptr(mf) - 1;
+ const uint8_t *const buf = mf_ptr(mf) - 1;
- for (i = 0; i < REP_DISTANCES; ++i) {
- uint32_t len_test;
+ uint32_t rep_lens[REP_DISTANCES];
+ uint32_t rep_max_index = 0;
+ for (uint32_t i = 0; i < REP_DISTANCES; ++i) {
const uint8_t *const buf_back = buf - coder->reps[i] - 1;
if (not_equal_16(buf, buf_back)) {
@@ -331,6 +307,7 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
continue;
}
+ uint32_t len_test;
for (len_test = 2; len_test < buf_avail
&& buf[len_test] == buf_back[len_test];
++len_test) ;
@@ -356,8 +333,8 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
return UINT32_MAX;
}
- current_byte = *buf;
- match_byte = *(buf - coder->reps[0] - 1);
+ const uint8_t current_byte = *buf;
+ const uint8_t match_byte = *(buf - coder->reps[0] - 1);
if (len_main < 2 && current_byte != match_byte
&& rep_lens[rep_max_index] < 2) {
@@ -368,7 +345,7 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
coder->opts[0].state = coder->state;
- pos_state = position & coder->pos_mask;
+ const uint32_t pos_state = position & coder->pos_mask;
coder->opts[1].price = rc_bit_0_price(
coder->is_match[coder->state][pos_state])
@@ -378,9 +355,9 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
make_literal(&coder->opts[1]);
- match_price = rc_bit_1_price(
+ const uint32_t match_price = rc_bit_1_price(
coder->is_match[coder->state][pos_state]);
- rep_match_price = match_price
+ const uint32_t rep_match_price = match_price
+ rc_bit_1_price(coder->is_rep[coder->state]);
if (match_byte == current_byte) {
@@ -394,7 +371,7 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
}
}
- len_end = my_max(len_main, rep_lens[rep_max_index]);
+ const uint32_t len_end = my_max(len_main, rep_lens[rep_max_index]);
if (len_end < 2) {
*back_res = coder->opts[1].back_prev;
@@ -404,23 +381,21 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
coder->opts[1].pos_prev = 0;
- for (i = 0; i < REP_DISTANCES; ++i)
+ for (uint32_t i = 0; i < REP_DISTANCES; ++i)
coder->opts[0].backs[i] = coder->reps[i];
- len = len_end;
+ uint32_t len = len_end;
do {
coder->opts[len].price = RC_INFINITY_PRICE;
} while (--len >= 2);
- for (i = 0; i < REP_DISTANCES; ++i) {
- uint32_t price;
-
+ for (uint32_t i = 0; i < REP_DISTANCES; ++i) {
uint32_t rep_len = rep_lens[i];
if (rep_len < 2)
continue;
- price = rep_match_price + get_pure_rep_price(
+ const uint32_t price = rep_match_price + get_pure_rep_price(
coder, i, coder->state, pos_state);
do {
@@ -439,7 +414,7 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
}
- normal_match_price = match_price
+ const uint32_t normal_match_price = match_price
+ rc_bit_0_price(coder->is_rep[coder->state]);
len = rep_lens[0] >= 2 ? rep_lens[0] + 1 : 2;
@@ -481,19 +456,6 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
uint32_t new_len = coder->longest_match_length;
uint32_t pos_prev = coder->opts[cur].pos_prev;
lzma_lzma_state state;
- uint32_t buf_avail;
- uint32_t rep_index;
- uint32_t i;
-
- uint32_t cur_price;
- uint8_t current_byte;
- uint8_t match_byte;
- uint32_t pos_state;
- uint32_t cur_and_1_price;
- bool next_is_literal = false;
- uint32_t match_price;
- uint32_t rep_match_price;
- uint32_t start_len = 2;
if (coder->opts[cur].prev_1_is_literal) {
--pos_prev;
@@ -537,10 +499,9 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
}
if (pos < REP_DISTANCES) {
- uint32_t i;
-
reps[0] = coder->opts[pos_prev].backs[pos];
+ uint32_t i;
for (i = 1; i <= pos; ++i)
reps[i] = coder->opts[pos_prev].backs[i - 1];
@@ -550,28 +511,30 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
} else {
reps[0] = pos - REP_DISTANCES;
- for (i = 1; i < REP_DISTANCES; ++i)
+ for (uint32_t i = 1; i < REP_DISTANCES; ++i)
reps[i] = coder->opts[pos_prev].backs[i - 1];
}
}
coder->opts[cur].state = state;
- for (i = 0; i < REP_DISTANCES; ++i)
+ for (uint32_t i = 0; i < REP_DISTANCES; ++i)
coder->opts[cur].backs[i] = reps[i];
- cur_price = coder->opts[cur].price;
+ const uint32_t cur_price = coder->opts[cur].price;
- current_byte = *buf;
- match_byte = *(buf - reps[0] - 1);
+ const uint8_t current_byte = *buf;
+ const uint8_t match_byte = *(buf - reps[0] - 1);
- pos_state = position & coder->pos_mask;
+ const uint32_t pos_state = position & coder->pos_mask;
- cur_and_1_price = cur_price
+ const uint32_t cur_and_1_price = cur_price
+ rc_bit_0_price(coder->is_match[state][pos_state])
+ get_literal_price(coder, position, buf[-1],
!is_literal_state(state), match_byte, current_byte);
+ bool next_is_literal = false;
+
if (cur_and_1_price < coder->opts[cur + 1].price) {
coder->opts[cur + 1].price = cur_and_1_price;
coder->opts[cur + 1].pos_prev = cur;
@@ -579,9 +542,9 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
next_is_literal = true;
}
- match_price = cur_price
+ const uint32_t match_price = cur_price
+ rc_bit_1_price(coder->is_match[state][pos_state]);
- rep_match_price = match_price
+ const uint32_t rep_match_price = match_price
+ rc_bit_1_price(coder->is_rep[state]);
if (match_byte == current_byte
@@ -602,7 +565,7 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
if (buf_avail_full < 2)
return len_end;
- buf_avail = my_min(buf_avail_full, nice_len);
+ const uint32_t buf_avail = my_min(buf_avail_full, nice_len);
if (!next_is_literal && match_byte != current_byte) { // speed optimization
// try literal + rep0
@@ -616,26 +579,21 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
--len_test;
if (len_test >= 2) {
- uint32_t pos_state_next;
- uint32_t next_rep_match_price;
- uint32_t offset;
- uint32_t cur_and_len_price;
-
lzma_lzma_state state_2 = state;
update_literal(state_2);
- pos_state_next = (position + 1) & coder->pos_mask;
- next_rep_match_price = cur_and_1_price
+ const uint32_t pos_state_next = (position + 1) & coder->pos_mask;
+ const uint32_t next_rep_match_price = cur_and_1_price
+ rc_bit_1_price(coder->is_match[state_2][pos_state_next])
+ rc_bit_1_price(coder->is_rep[state_2]);
//for (; len_test >= 2; --len_test) {
- offset = cur + 1 + len_test;
+ const uint32_t offset = cur + 1 + len_test;
while (len_end < offset)
coder->opts[++len_end].price = RC_INFINITY_PRICE;
- cur_and_len_price = next_rep_match_price
+ const uint32_t cur_and_len_price = next_rep_match_price
+ get_rep_price(coder, 0, len_test,
state_2, pos_state_next);
@@ -651,14 +609,14 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
}
- for (rep_index = 0; rep_index < REP_DISTANCES; ++rep_index) {
- uint32_t len_test, len_test_2, len_test_temp;
- uint32_t price, limit;
+ uint32_t start_len = 2; // speed optimization
+ for (uint32_t rep_index = 0; rep_index < REP_DISTANCES; ++rep_index) {
const uint8_t *const buf_back = buf - reps[rep_index] - 1;
if (not_equal_16(buf, buf_back))
continue;
+ uint32_t len_test;
for (len_test = 2; len_test < buf_avail
&& buf[len_test] == buf_back[len_test];
++len_test) ;
@@ -666,8 +624,8 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
while (len_end < cur + len_test)
coder->opts[++len_end].price = RC_INFINITY_PRICE;
- len_test_temp = len_test;
- price = rep_match_price + get_pure_rep_price(
+ const uint32_t len_test_temp = len_test;
+ const uint32_t price = rep_match_price + get_pure_rep_price(
coder, rep_index, state, pos_state);
do {
@@ -689,8 +647,8 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
start_len = len_test + 1;
- len_test_2 = len_test + 1;
- limit = my_min(buf_avail_full,
+ uint32_t len_test_2 = len_test + 1;
+ const uint32_t limit = my_min(buf_avail_full,
len_test_2 + nice_len);
for (; len_test_2 < limit
&& buf[len_test_2] == buf_back[len_test_2];
@@ -699,18 +657,12 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
len_test_2 -= len_test + 1;
if (len_test_2 >= 2) {
- uint32_t pos_state_next;
- uint32_t cur_and_len_literal_price;
- uint32_t next_rep_match_price;
- uint32_t offset;
- uint32_t cur_and_len_price;
-
lzma_lzma_state state_2 = state;
update_long_rep(state_2);
- pos_state_next = (position + len_test) & coder->pos_mask;
+ uint32_t pos_state_next = (position + len_test) & coder->pos_mask;
- cur_and_len_literal_price = price
+ const uint32_t cur_and_len_literal_price = price
+ get_len_price(&coder->rep_len_encoder,
len_test, pos_state)
+ rc_bit_0_price(coder->is_match[state_2][pos_state_next])
@@ -722,17 +674,17 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
pos_state_next = (position + len_test + 1) & coder->pos_mask;
- next_rep_match_price = cur_and_len_literal_price
+ const uint32_t next_rep_match_price = cur_and_len_literal_price
+ rc_bit_1_price(coder->is_match[state_2][pos_state_next])
+ rc_bit_1_price(coder->is_rep[state_2]);
//for(; len_test_2 >= 2; len_test_2--) {
- offset = cur + len_test + 1 + len_test_2;
+ const uint32_t offset = cur + len_test + 1 + len_test_2;
while (len_end < offset)
coder->opts[++len_end].price = RC_INFINITY_PRICE;
- cur_and_len_price = next_rep_match_price
+ const uint32_t cur_and_len_price = next_rep_match_price
+ get_rep_price(coder, 0, len_test_2,
state_2, pos_state_next);
@@ -763,19 +715,17 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
if (new_len >= start_len) {
- uint32_t len_test;
- uint32_t i = 0;
-
const uint32_t normal_match_price = match_price
+ rc_bit_0_price(coder->is_rep[state]);
while (len_end < cur + new_len)
coder->opts[++len_end].price = RC_INFINITY_PRICE;
+ uint32_t i = 0;
while (start_len > coder->matches[i].len)
++i;
- for (len_test = start_len; ; ++len_test) {
+ for (uint32_t len_test = start_len; ; ++len_test) {
const uint32_t cur_back = coder->matches[i].dist;
uint32_t cur_and_len_price = normal_match_price
+ get_pos_len_price(coder,
@@ -803,16 +753,12 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
len_test_2 -= len_test + 1;
if (len_test_2 >= 2) {
- uint32_t pos_state_next;
- uint32_t cur_and_len_literal_price;
- uint32_t next_rep_match_price;
- uint32_t offset;
-
lzma_lzma_state state_2 = state;
update_match(state_2);
- pos_state_next = (position + len_test) & coder->pos_mask;
+ uint32_t pos_state_next
+ = (position + len_test) & coder->pos_mask;
- cur_and_len_literal_price = cur_and_len_price
+ const uint32_t cur_and_len_literal_price = cur_and_len_price
+ rc_bit_0_price(
coder->is_match[state_2][pos_state_next])
+ get_literal_price(coder,
@@ -825,14 +771,14 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
update_literal(state_2);
pos_state_next = (pos_state_next + 1) & coder->pos_mask;
- next_rep_match_price
+ const uint32_t next_rep_match_price
= cur_and_len_literal_price
+ rc_bit_1_price(
coder->is_match[state_2][pos_state_next])
+ rc_bit_1_price(coder->is_rep[state_2]);
// for(; len_test_2 >= 2; --len_test_2) {
- offset = cur + len_test + 1 + len_test_2;
+ const uint32_t offset = cur + len_test + 1 + len_test_2;
while (len_end < offset)
coder->opts[++len_end].price = RC_INFINITY_PRICE;
@@ -869,10 +815,6 @@ lzma_lzma_optimum_normal(lzma_coder *restrict coder, lzma_mf *restrict mf,
uint32_t *restrict back_res, uint32_t *restrict len_res,
uint32_t position)
{
- uint32_t reps[REP_DISTANCES];
- uint32_t len_end;
- uint32_t cur;
-
// If we have symbols pending, return the next pending symbol.
if (coder->opts_end_index != coder->opts_current_index) {
assert(mf->read_ahead > 0);
@@ -899,13 +841,14 @@ lzma_lzma_optimum_normal(lzma_coder *restrict coder, lzma_mf *restrict mf,
// the original function into two pieces makes it at least a little
// more readable, since those two parts don't share many variables.
- len_end = helper1(coder, mf, back_res, len_res, position);
+ uint32_t len_end = helper1(coder, mf, back_res, len_res, position);
if (len_end == UINT32_MAX)
return;
-
+ uint32_t reps[REP_DISTANCES];
memcpy(reps, coder->reps, sizeof(reps));
+ uint32_t cur;
for (cur = 1; cur < len_end; ++cur) {
assert(cur < OPTS);
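
A pattern repeated throughout helper1() and helper2() above is growing the opts[] price table on demand: every newly reachable slot is first set to RC_INFINITY_PRICE, so the first real price computed for that length always wins the comparison against it. The idiom in isolation, with toy types (opt_sketch and the sentinel constant are stand-ins):

    #include <stdint.h>

    #define INFINITY_PRICE (UINT32_C(1) << 30)   /* "never chosen" sentinel */

    typedef struct { uint32_t price; } opt_sketch;

    /* Same shape as the "while (len_end < offset)
     *     coder->opts[++len_end].price = RC_INFINITY_PRICE;" loops above. */
    static void
    extend_prices(opt_sketch *opts, uint32_t *len_end, uint32_t offset)
    {
        while (*len_end < offset)
            opts[++*len_end].price = INFINITY_PRICE;
    }
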
diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_presets.c b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_presets.c
index 9332abf..8484b77 100644
--- a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_presets.c
+++ b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_presets.c
@@ -16,10 +16,6 @@
extern LZMA_API(lzma_bool)
lzma_lzma_preset(lzma_options_lzma *options, uint32_t preset)
{
- static const uint8_t dict_pow2[]
- = { 18, 20, 21, 22, 22, 23, 23, 24, 25, 26 };
- static const uint8_t depths[] = { 4, 8, 24, 48 };
-
const uint32_t level = preset & LZMA_PRESET_LEVEL_MASK;
const uint32_t flags = preset & ~LZMA_PRESET_LEVEL_MASK;
const uint32_t supported_flags = LZMA_PRESET_EXTREME;
@@ -34,12 +30,15 @@ lzma_lzma_preset(lzma_options_lzma *options, uint32_t preset)
options->lp = LZMA_LP_DEFAULT;
options->pb = LZMA_PB_DEFAULT;
+ static const uint8_t dict_pow2[]
+ = { 18, 20, 21, 22, 22, 23, 23, 24, 25, 26 };
options->dict_size = UINT32_C(1) << dict_pow2[level];
if (level <= 3) {
options->mode = LZMA_MODE_FAST;
options->mf = level == 0 ? LZMA_MF_HC3 : LZMA_MF_HC4;
options->nice_len = level <= 1 ? 128 : 273;
+ static const uint8_t depths[] = { 4, 8, 24, 48 };
options->depth = depths[level];
} else {
options->mode = LZMA_MODE_NORMAL;
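
Moving dict_pow2[] and depths[] next to their single uses leaves the presets themselves untouched: the dictionary size is still 1 << dict_pow2[level]. With the table shown above that works out to, for example:

    level 0  ->  1 << 18  =  256 KiB
    level 6  ->  1 << 23  =    8 MiB   (the default preset level)
    level 9  ->  1 << 26  =   64 MiB
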
diff --git a/Utilities/cmliblzma/liblzma/rangecoder/range_common.h b/Utilities/cmliblzma/liblzma/rangecoder/range_common.h
index f15623e..2c74dc1 100644
--- a/Utilities/cmliblzma/liblzma/rangecoder/range_common.h
+++ b/Utilities/cmliblzma/liblzma/rangecoder/range_common.h
@@ -40,11 +40,8 @@
// This does the same for a complete bit tree.
// (A tree represented as an array.)
#define bittree_reset(probs, bit_levels) \
- do { \
- uint32_t bt_i; \
- for (bt_i = 0; bt_i < (1 << (bit_levels)); ++bt_i) \
- bit_reset((probs)[bt_i]); \
- } while (0)
+ for (uint32_t bt_i = 0; bt_i < (1 << (bit_levels)); ++bt_i) \
+ bit_reset((probs)[bt_i])
//////////////////////
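
Dropping the do { ... } while (0) wrapper from bittree_reset() is safe here because the replacement is still a single statement, a bare for loop, so the macro keeps working as the body of an if/else; the caller's trailing semicolon terminates the loop body. It does now require C99 for the loop-scoped bt_i. A small compilable check of that property (toy definitions, not the liblzma ones):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint16_t probability;
    #define bit_reset(prob) ((prob) = 1024)

    /* Same shape as the patched macro: a bare for statement, no do/while. */
    #define bittree_reset(probs, bit_levels) \
        for (uint32_t bt_i = 0; bt_i < (1U << (bit_levels)); ++bt_i) \
            bit_reset((probs)[bt_i])

    int
    main(void)
    {
        probability tree[8] = {0};
        const int reset = 1;

        if (reset)
            bittree_reset(tree, 3);   /* this ';' closes the for body */
        else
            tree[0] = 0;

        printf("%u %u\n", (unsigned)tree[0], (unsigned)tree[7]);   /* 1024 1024 */
        return 0;
    }
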
diff --git a/Utilities/cmliblzma/liblzma/rangecoder/range_encoder.h b/Utilities/cmliblzma/liblzma/rangecoder/range_encoder.h
index e9614f2..1e1c369 100644
--- a/Utilities/cmliblzma/liblzma/rangecoder/range_encoder.h
+++ b/Utilities/cmliblzma/liblzma/rangecoder/range_encoder.h
@@ -115,8 +115,7 @@ rc_direct(lzma_range_encoder *rc,
static inline void
rc_flush(lzma_range_encoder *rc)
{
- size_t i;
- for (i = 0; i < 5; ++i)
+ for (size_t i = 0; i < 5; ++i)
rc->symbols[rc->count++] = RC_FLUSH;
}
diff --git a/Utilities/cmliblzma/liblzma/simple/arm.c b/Utilities/cmliblzma/liblzma/simple/arm.c
index 8dcba39..a84702a 100644
--- a/Utilities/cmliblzma/liblzma/simple/arm.c
+++ b/Utilities/cmliblzma/liblzma/simple/arm.c
@@ -22,12 +22,12 @@ arm_code(lzma_simple *simple lzma_attribute((__unused__)),
size_t i;
for (i = 0; i + 4 <= size; i += 4) {
if (buffer[i + 3] == 0xEB) {
- uint32_t dest;
uint32_t src = (buffer[i + 2] << 16)
| (buffer[i + 1] << 8)
| (buffer[i + 0]);
src <<= 2;
+ uint32_t dest;
if (is_encoder)
dest = now_pos + (uint32_t)(i) + 8 + src;
else
diff --git a/Utilities/cmliblzma/liblzma/simple/armthumb.c b/Utilities/cmliblzma/liblzma/simple/armthumb.c
index 4b890a3..4b49175 100644
--- a/Utilities/cmliblzma/liblzma/simple/armthumb.c
+++ b/Utilities/cmliblzma/liblzma/simple/armthumb.c
@@ -23,7 +23,6 @@ armthumb_code(lzma_simple *simple lzma_attribute((__unused__)),
for (i = 0; i + 4 <= size; i += 2) {
if ((buffer[i + 1] & 0xF8) == 0xF0
&& (buffer[i + 3] & 0xF8) == 0xF8) {
- uint32_t dest;
uint32_t src = ((buffer[i + 1] & 0x7) << 19)
| (buffer[i + 0] << 11)
| ((buffer[i + 3] & 0x7) << 8)
@@ -31,6 +30,7 @@ armthumb_code(lzma_simple *simple lzma_attribute((__unused__)),
src <<= 1;
+ uint32_t dest;
if (is_encoder)
dest = now_pos + (uint32_t)(i) + 4 + src;
else
diff --git a/Utilities/cmliblzma/liblzma/simple/ia64.c b/Utilities/cmliblzma/liblzma/simple/ia64.c
index c537cac..ce3692b 100644
--- a/Utilities/cmliblzma/liblzma/simple/ia64.c
+++ b/Utilities/cmliblzma/liblzma/simple/ia64.c
@@ -28,42 +28,36 @@ ia64_code(lzma_simple *simple lzma_attribute((__unused__)),
size_t i;
for (i = 0; i + 16 <= size; i += 16) {
- size_t slot;
-
const uint32_t instr_template = buffer[i] & 0x1F;
const uint32_t mask = BRANCH_TABLE[instr_template];
uint32_t bit_pos = 5;
- for (slot = 0; slot < 3; ++slot, bit_pos += 41) {
+ for (size_t slot = 0; slot < 3; ++slot, bit_pos += 41) {
+ if (((mask >> slot) & 1) == 0)
+ continue;
+
const size_t byte_pos = (bit_pos >> 3);
const uint32_t bit_res = bit_pos & 0x7;
uint64_t instruction = 0;
- uint64_t inst_norm;
- size_t j;
-
- if (((mask >> slot) & 1) == 0)
- continue;
- for (j = 0; j < 6; ++j)
+ for (size_t j = 0; j < 6; ++j)
instruction += (uint64_t)(
buffer[i + j + byte_pos])
<< (8 * j);
- inst_norm = instruction >> bit_res;
+ uint64_t inst_norm = instruction >> bit_res;
if (((inst_norm >> 37) & 0xF) == 0x5
&& ((inst_norm >> 9) & 0x7) == 0
/* && (inst_norm & 0x3F)== 0 */
) {
- uint32_t dest;
- size_t j;
-
uint32_t src = (uint32_t)(
(inst_norm >> 13) & 0xFFFFF);
src |= ((inst_norm >> 36) & 1) << 20;
src <<= 4;
+ uint32_t dest;
if (is_encoder)
dest = now_pos + (uint32_t)(i) + src;
else
@@ -79,7 +73,7 @@ ia64_code(lzma_simple *simple lzma_attribute((__unused__)),
instruction &= (1 << bit_res) - 1;
instruction |= (inst_norm << bit_res);
- for (j = 0; j < 6; j++)
+ for (size_t j = 0; j < 6; j++)
buffer[i + j + byte_pos] = (uint8_t)(
instruction
>> (8 * j));
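
The BCJ hunks (ARM and ARM-Thumb above, this IA-64 one, and SPARC and x86 further down) mostly just move the dest declaration, and a few other locals, to the point of first use. Their else branches are cut off by the hunk context; assuming they mirror the encoder arithmetic that is shown, i.e. the usual absolute/relative branch-target conversion, the symmetry can be sketched as:

    #include <stdbool.h>
    #include <stdint.h>

    /* base is the instruction-set specific offset visible in the hunks:
     * 8 for ARM, 4 for ARM-Thumb, 5 for x86, 0 for IA-64 and SPARC. */
    static uint32_t
    bcj_convert(uint32_t src, uint32_t now_pos, uint32_t i,
            uint32_t base, bool is_encoder)
    {
        return is_encoder
                ? now_pos + i + base + src       /* relative -> absolute */
                : src - (now_pos + i + base);    /* absolute -> relative */
    }

    /* Round trip: decoding an encoded value gives back the original, since
     * uint32_t arithmetic wraps:
     *     bcj_convert(bcj_convert(s, p, i, b, true), p, i, b, false) == s   */
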
diff --git a/Utilities/cmliblzma/liblzma/simple/simple_coder.c b/Utilities/cmliblzma/liblzma/simple/simple_coder.c
index d147d4b..a02b039 100644
--- a/Utilities/cmliblzma/liblzma/simple/simple_coder.c
+++ b/Utilities/cmliblzma/liblzma/simple/simple_coder.c
@@ -71,9 +71,6 @@ simple_code(lzma_coder *coder, lzma_allocator *allocator,
size_t in_size, uint8_t *restrict out,
size_t *restrict out_pos, size_t out_size, lzma_action action)
{
- size_t out_avail;
- size_t buf_avail;
-
// TODO: Add partial support for LZMA_SYNC_FLUSH. We can support it
// in cases when the filter is able to filter everything. With most
// simple filters it can be done at offset that is a multiple of 2,
@@ -108,13 +105,9 @@ simple_code(lzma_coder *coder, lzma_allocator *allocator,
// more data to out[] hopefully filling it completely. Then filter
// the data in out[]. This step is where most of the data gets
// filtered if the buffer sizes used by the application are reasonable.
- out_avail = out_size - *out_pos;
- buf_avail = coder->size - coder->pos;
+ const size_t out_avail = out_size - *out_pos;
+ const size_t buf_avail = coder->size - coder->pos;
if (out_avail > buf_avail || buf_avail == 0) {
- size_t size;
- size_t filtered;
- size_t unfiltered;
-
// Store the old position so that we know from which byte
// to start filtering.
const size_t out_start = *out_pos;
@@ -137,10 +130,11 @@ simple_code(lzma_coder *coder, lzma_allocator *allocator,
}
// Filter out[].
- size = *out_pos - out_start;
- filtered = call_filter(coder, out + out_start, size);
+ const size_t size = *out_pos - out_start;
+ const size_t filtered = call_filter(
+ coder, out + out_start, size);
- unfiltered = size - filtered;
+ const size_t unfiltered = size - filtered;
assert(unfiltered <= coder->allocated / 2);
// Now we can update coder->pos and coder->size, because
diff --git a/Utilities/cmliblzma/liblzma/simple/simple_decoder.c b/Utilities/cmliblzma/liblzma/simple/simple_decoder.c
index 034e158..0beccd3 100644
--- a/Utilities/cmliblzma/liblzma/simple/simple_decoder.c
+++ b/Utilities/cmliblzma/liblzma/simple/simple_decoder.c
@@ -17,15 +17,14 @@ extern lzma_ret
lzma_simple_props_decode(void **options, lzma_allocator *allocator,
const uint8_t *props, size_t props_size)
{
- lzma_options_bcj *opt;
-
if (props_size == 0)
return LZMA_OK;
if (props_size != 4)
return LZMA_OPTIONS_ERROR;
- opt = lzma_alloc(sizeof(lzma_options_bcj), allocator);
+ lzma_options_bcj *opt = lzma_alloc(
+ sizeof(lzma_options_bcj), allocator);
if (opt == NULL)
return LZMA_MEM_ERROR;
diff --git a/Utilities/cmliblzma/liblzma/simple/sparc.c b/Utilities/cmliblzma/liblzma/simple/sparc.c
index 0ddd2ac..8270d6a 100644
--- a/Utilities/cmliblzma/liblzma/simple/sparc.c
+++ b/Utilities/cmliblzma/liblzma/simple/sparc.c
@@ -26,8 +26,6 @@ sparc_code(lzma_simple *simple lzma_attribute((__unused__)),
|| (buffer[i] == 0x7F
&& (buffer[i + 1] & 0xC0) == 0xC0)) {
- uint32_t dest;
-
uint32_t src = ((uint32_t)buffer[i + 0] << 24)
| ((uint32_t)buffer[i + 1] << 16)
| ((uint32_t)buffer[i + 2] << 8)
@@ -35,6 +33,7 @@ sparc_code(lzma_simple *simple lzma_attribute((__unused__)),
src <<= 2;
+ uint32_t dest;
if (is_encoder)
dest = now_pos + (uint32_t)(i) + src;
else
diff --git a/Utilities/cmliblzma/liblzma/simple/x86.c b/Utilities/cmliblzma/liblzma/simple/x86.c
index 95858e5..dbaaf9d 100644
--- a/Utilities/cmliblzma/liblzma/simple/x86.c
+++ b/Utilities/cmliblzma/liblzma/simple/x86.c
@@ -36,36 +36,30 @@ x86_code(lzma_simple *simple, uint32_t now_pos, bool is_encoder,
uint32_t prev_mask = simple->prev_mask;
uint32_t prev_pos = simple->prev_pos;
- size_t limit;
- size_t buffer_pos;
-
if (size < 5)
return 0;
if (now_pos - prev_pos > 5)
prev_pos = now_pos - 5;
- limit = size - 5;
- buffer_pos = 0;
+ const size_t limit = size - 5;
+ size_t buffer_pos = 0;
while (buffer_pos <= limit) {
- uint32_t offset;
- uint32_t i;
-
uint8_t b = buffer[buffer_pos];
if (b != 0xE8 && b != 0xE9) {
++buffer_pos;
continue;
}
- offset = now_pos + (uint32_t)(buffer_pos)
+ const uint32_t offset = now_pos + (uint32_t)(buffer_pos)
- prev_pos;
prev_pos = now_pos + (uint32_t)(buffer_pos);
if (offset > 5) {
prev_mask = 0;
} else {
- for (i = 0; i < offset; ++i) {
+ for (uint32_t i = 0; i < offset; ++i) {
prev_mask &= 0x77;
prev_mask <<= 1;
}
@@ -84,8 +78,6 @@ x86_code(lzma_simple *simple, uint32_t now_pos, bool is_encoder,
uint32_t dest;
while (true) {
- uint32_t i;
-
if (is_encoder)
dest = src + (now_pos + (uint32_t)(
buffer_pos) + 5);
@@ -96,7 +88,8 @@ x86_code(lzma_simple *simple, uint32_t now_pos, bool is_encoder,
if (prev_mask == 0)
break;
- i = MASK_TO_BIT_NUMBER[prev_mask >> 1];
+ const uint32_t i = MASK_TO_BIT_NUMBER[
+ prev_mask >> 1];
b = (uint8_t)(dest >> (24 - i * 8));