Diffstat (limited to 'Utilities/cmliblzma/liblzma/lzma')
-rw-r--r--  Utilities/cmliblzma/liblzma/lzma/fastpos.h                          2
-rw-r--r--  Utilities/cmliblzma/liblzma/lzma/lzma2_decoder.c                    9
-rw-r--r--  Utilities/cmliblzma/liblzma/lzma/lzma2_encoder.c                   18
-rw-r--r--  Utilities/cmliblzma/liblzma/lzma/lzma_common.h                      9
-rw-r--r--  Utilities/cmliblzma/liblzma/lzma/lzma_decoder.c                   162
-rw-r--r--  Utilities/cmliblzma/liblzma/lzma/lzma_encoder.c                    72
-rw-r--r--  Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_fast.c       26
-rw-r--r--  Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_normal.c    201
-rw-r--r--  Utilities/cmliblzma/liblzma/lzma/lzma_encoder_presets.c             7
9 files changed, 196 insertions, 310 deletions
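
Nearly every hunk below applies the same cleanup: C89-style declarations hoisted to the top of a block are moved to their point of first use and const-qualified where the value never changes afterwards, loop counters are declared in the for-loop header, and a few hunks pass empty arguments to the rc_bit_case()/rc_bit() macros instead of a literal 0. As a rough illustration of that before/after shape (the function and variable names here are hypothetical, not taken from liblzma):

    /* Before: C89 style, every declaration hoisted to the top of the
     * block, mirroring the "-" lines in the hunks below. */
    static unsigned
    sum_squares_c89(const unsigned *values, unsigned count)
    {
            unsigned total;
            unsigned i;

            total = 0;
            for (i = 0; i < count; ++i)
                    total += values[i] * values[i];

            return total;
    }

    /* After: C99 style, declaration at first use, const where possible,
     * loop counter scoped to the loop header, mirroring the "+" lines. */
    static unsigned
    sum_squares_c99(const unsigned *values, unsigned count)
    {
            unsigned total = 0;

            for (unsigned i = 0; i < count; ++i)
                    total += values[i] * values[i];

            return total;
    }
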
diff --git a/Utilities/cmliblzma/liblzma/lzma/fastpos.h b/Utilities/cmliblzma/liblzma/lzma/fastpos.h
index 5a834d6..4aea231 100644
--- a/Utilities/cmliblzma/liblzma/lzma/fastpos.h
+++ b/Utilities/cmliblzma/liblzma/lzma/fastpos.h
@@ -75,8 +75,6 @@
// on all systems I have tried. The size optimized version is sometimes
// slightly faster, but sometimes it is a lot slower.
-#include "config.h"
-
#ifdef HAVE_SMALL
# define get_pos_slot(pos) ((pos) <= 4 ? (pos) : get_pos_slot_2(pos))
diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma2_decoder.c b/Utilities/cmliblzma/liblzma/lzma/lzma2_decoder.c
index ca14c4a..3e42575 100644
--- a/Utilities/cmliblzma/liblzma/lzma/lzma2_decoder.c
+++ b/Utilities/cmliblzma/liblzma/lzma/lzma2_decoder.c
@@ -224,8 +224,6 @@ static lzma_ret
lzma2_decoder_init(lzma_lz_decoder *lz, lzma_allocator *allocator,
const void *opt, lzma_lz_options *lz_options)
{
- const lzma_options_lzma *options = opt;
-
if (lz->coder == NULL) {
lz->coder = lzma_alloc(sizeof(lzma_coder), allocator);
if (lz->coder == NULL)
@@ -237,6 +235,8 @@ lzma2_decoder_init(lzma_lz_decoder *lz, lzma_allocator *allocator,
lz->coder->lzma = LZMA_LZ_DECODER_INIT;
}
+ const lzma_options_lzma *options = opt;
+
lz->coder->sequence = SEQ_CONTROL;
lz->coder->need_properties = true;
lz->coder->need_dictionary_reset = options->preset_dict == NULL
@@ -272,8 +272,6 @@ extern lzma_ret
lzma_lzma2_props_decode(void **options, lzma_allocator *allocator,
const uint8_t *props, size_t props_size)
{
- lzma_options_lzma *opt;
-
if (props_size != 1)
return LZMA_OPTIONS_ERROR;
@@ -285,7 +283,8 @@ lzma_lzma2_props_decode(void **options, lzma_allocator *allocator,
if (props[0] > 40)
return LZMA_OPTIONS_ERROR;
- opt = lzma_alloc(sizeof(lzma_options_lzma), allocator);
+ lzma_options_lzma *opt = lzma_alloc(
+ sizeof(lzma_options_lzma), allocator);
if (opt == NULL)
return LZMA_MEM_ERROR;
diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma2_encoder.c b/Utilities/cmliblzma/liblzma/lzma/lzma2_encoder.c
index 8784f5d..992720c 100644
--- a/Utilities/cmliblzma/liblzma/lzma/lzma2_encoder.c
+++ b/Utilities/cmliblzma/liblzma/lzma/lzma2_encoder.c
@@ -54,14 +54,13 @@ struct lzma_coder_s {
static void
lzma2_header_lzma(lzma_coder *coder)
{
- size_t pos;
- size_t size;
-
assert(coder->uncompressed_size > 0);
assert(coder->uncompressed_size <= LZMA2_UNCOMPRESSED_MAX);
assert(coder->compressed_size > 0);
assert(coder->compressed_size <= LZMA2_CHUNK_MAX);
+ size_t pos;
+
if (coder->need_properties) {
pos = 0;
@@ -82,7 +81,7 @@ lzma2_header_lzma(lzma_coder *coder)
coder->buf_pos = pos;
// Uncompressed size
- size = coder->uncompressed_size - 1;
+ size_t size = coder->uncompressed_size - 1;
coder->buf[pos++] += size >> 16;
coder->buf[pos++] = (size >> 8) & 0xFF;
coder->buf[pos++] = size & 0xFF;
@@ -163,9 +162,6 @@ lzma2_encode(lzma_coder *restrict coder, lzma_mf *restrict mf,
// Fall through
case SEQ_LZMA_ENCODE: {
- uint32_t read_start;
- lzma_ret ret;
-
// Calculate how much more uncompressed data this chunk
// could accept.
const uint32_t left = LZMA2_UNCOMPRESSED_MAX
@@ -186,10 +182,10 @@ lzma2_encode(lzma_coder *restrict coder, lzma_mf *restrict mf,
// Save the start position so that we can update
// coder->uncompressed_size.
- read_start = mf->read_pos - mf->read_ahead;
+ const uint32_t read_start = mf->read_pos - mf->read_ahead;
// Call the LZMA encoder until the chunk is finished.
- ret = lzma_lzma_encode(coder->lzma, mf,
+ const lzma_ret ret = lzma_lzma_encode(coder->lzma, mf,
coder->buf + LZMA2_HEADER_MAX,
&coder->compressed_size,
LZMA2_CHUNK_MAX, limit);
@@ -277,8 +273,6 @@ lzma2_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
static lzma_ret
lzma2_encoder_options_update(lzma_coder *coder, const lzma_filter *filter)
{
- lzma_options_lzma *opt;
-
// New options can be set only when there is no incomplete chunk.
// This is the case at the beginning of the raw stream and right
// after LZMA_SYNC_FLUSH.
@@ -287,7 +281,7 @@ lzma2_encoder_options_update(lzma_coder *coder, const lzma_filter *filter)
// Look if there are new options. At least for now,
// only lc/lp/pb can be changed.
- opt = filter->options;
+ const lzma_options_lzma *opt = filter->options;
if (coder->opt_cur.lc != opt->lc || coder->opt_cur.lp != opt->lp
|| coder->opt_cur.pb != opt->pb) {
// Validate the options.
diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma_common.h b/Utilities/cmliblzma/liblzma/lzma/lzma_common.h
index 36267dc..e31e285 100644
--- a/Utilities/cmliblzma/liblzma/lzma/lzma_common.h
+++ b/Utilities/cmliblzma/liblzma/lzma/lzma_common.h
@@ -129,15 +129,12 @@ static inline void
literal_init(probability (*probs)[LITERAL_CODER_SIZE],
uint32_t lc, uint32_t lp)
{
- uint32_t coders;
- uint32_t i, j;
-
assert(lc + lp <= LZMA_LCLP_MAX);
- coders = 1U << (lc + lp);
+ const uint32_t coders = 1U << (lc + lp);
- for (i = 0; i < coders; ++i)
- for (j = 0; j < LITERAL_CODER_SIZE; ++j)
+ for (uint32_t i = 0; i < coders; ++i)
+ for (uint32_t j = 0; j < LITERAL_CODER_SIZE; ++j)
bit_reset(probs[i][j]);
return;
diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma_decoder.c b/Utilities/cmliblzma/liblzma/lzma/lzma_decoder.c
index 1bee2a9..9979bb4 100644
--- a/Utilities/cmliblzma/liblzma/lzma/lzma_decoder.c
+++ b/Utilities/cmliblzma/liblzma/lzma/lzma_decoder.c
@@ -114,33 +114,33 @@ do { \
case seq ## _CHOICE: \
rc_if_0(ld.choice, seq ## _CHOICE) { \
rc_update_0(ld.choice); \
- rc_bit_case(ld.low[pos_state][symbol], 0, 0, seq ## _LOW0); \
- rc_bit_case(ld.low[pos_state][symbol], 0, 0, seq ## _LOW1); \
- rc_bit_case(ld.low[pos_state][symbol], 0, 0, seq ## _LOW2); \
+ rc_bit_case(ld.low[pos_state][symbol], , , seq ## _LOW0); \
+ rc_bit_case(ld.low[pos_state][symbol], , , seq ## _LOW1); \
+ rc_bit_case(ld.low[pos_state][symbol], , , seq ## _LOW2); \
target = symbol - LEN_LOW_SYMBOLS + MATCH_LEN_MIN; \
} else { \
rc_update_1(ld.choice); \
case seq ## _CHOICE2: \
rc_if_0(ld.choice2, seq ## _CHOICE2) { \
rc_update_0(ld.choice2); \
- rc_bit_case(ld.mid[pos_state][symbol], 0, 0, \
+ rc_bit_case(ld.mid[pos_state][symbol], , , \
seq ## _MID0); \
- rc_bit_case(ld.mid[pos_state][symbol], 0, 0, \
+ rc_bit_case(ld.mid[pos_state][symbol], , , \
seq ## _MID1); \
- rc_bit_case(ld.mid[pos_state][symbol], 0, 0, \
+ rc_bit_case(ld.mid[pos_state][symbol], , , \
seq ## _MID2); \
target = symbol - LEN_MID_SYMBOLS \
+ MATCH_LEN_MIN + LEN_LOW_SYMBOLS; \
} else { \
rc_update_1(ld.choice2); \
- rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH0); \
- rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH1); \
- rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH2); \
- rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH3); \
- rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH4); \
- rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH5); \
- rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH6); \
- rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH7); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH0); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH1); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH2); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH3); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH4); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH5); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH6); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH7); \
target = symbol - LEN_HIGH_SYMBOLS \
+ MATCH_LEN_MIN \
+ LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS; \
@@ -285,6 +285,13 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
const uint8_t *restrict in,
size_t *restrict in_pos, size_t in_size)
{
+ ////////////////////
+ // Initialization //
+ ////////////////////
+
+ if (!rc_read_init(&coder->rc, in, in_pos, in_size))
+ return LZMA_OK;
+
///////////////
// Variables //
///////////////
@@ -331,16 +338,6 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
if (no_eopm && coder->uncompressed_size < dict.limit - dict.pos)
dict.limit = dict.pos + (size_t)(coder->uncompressed_size);
- ////////////////////
- // Initialization //
- ////////////////////
-
- if (!rc_read_init(&coder->rc, in, in_pos, in_size))
- return LZMA_OK;
-
- rc = coder->rc;
- rc_in_pos = *in_pos;
-
// The main decoder loop. The "switch" is used to restart the decoder at
// correct location. Once restarted, the "switch" is no longer used.
switch (coder->sequence)
@@ -356,21 +353,6 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
break;
rc_if_0(coder->is_match[state][pos_state], SEQ_IS_MATCH) {
- static const lzma_lzma_state next_state[] = {
- STATE_LIT_LIT,
- STATE_LIT_LIT,
- STATE_LIT_LIT,
- STATE_LIT_LIT,
- STATE_MATCH_LIT_LIT,
- STATE_REP_LIT_LIT,
- STATE_SHORTREP_LIT_LIT,
- STATE_MATCH_LIT,
- STATE_REP_LIT,
- STATE_SHORTREP_LIT,
- STATE_MATCH_LIT,
- STATE_REP_LIT
- };
-
rc_update_0(coder->is_match[state][pos_state]);
// It's a literal i.e. a single 8-bit byte.
@@ -388,21 +370,16 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
rc_bit(probs[symbol], , , SEQ_LITERAL);
} while (symbol < (1 << 8));
#else
- rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL0);
- rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL1);
- rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL2);
- rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL3);
- rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL4);
- rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL5);
- rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL6);
- rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL7);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL0);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL1);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL2);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL3);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL4);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL5);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL6);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL7);
#endif
} else {
-#ifndef HAVE_SMALL
- uint32_t match_bit;
- uint32_t subcoder_index;
-#endif
-
// Decode literal with match byte.
//
// We store the byte we compare against
@@ -441,6 +418,8 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
} while (symbol < (1 << 8));
#else
// Unroll the loop.
+ uint32_t match_bit;
+ uint32_t subcoder_index;
# define d(seq) \
case seq: \
@@ -474,6 +453,20 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
// Use a lookup table to update to literal state,
// since compared to other state updates, this would
// need two branches.
+ static const lzma_lzma_state next_state[] = {
+ STATE_LIT_LIT,
+ STATE_LIT_LIT,
+ STATE_LIT_LIT,
+ STATE_LIT_LIT,
+ STATE_MATCH_LIT_LIT,
+ STATE_REP_LIT_LIT,
+ STATE_SHORTREP_LIT_LIT,
+ STATE_MATCH_LIT,
+ STATE_REP_LIT,
+ STATE_SHORTREP_LIT,
+ STATE_MATCH_LIT,
+ STATE_REP_LIT
+ };
state = next_state[state];
case SEQ_LITERAL_WRITE:
@@ -518,12 +511,12 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
rc_bit(probs[symbol], , , SEQ_POS_SLOT);
} while (symbol < POS_SLOTS);
#else
- rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT0);
- rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT1);
- rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT2);
- rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT3);
- rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT4);
- rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT5);
+ rc_bit_case(probs[symbol], , , SEQ_POS_SLOT0);
+ rc_bit_case(probs[symbol], , , SEQ_POS_SLOT1);
+ rc_bit_case(probs[symbol], , , SEQ_POS_SLOT2);
+ rc_bit_case(probs[symbol], , , SEQ_POS_SLOT3);
+ rc_bit_case(probs[symbol], , , SEQ_POS_SLOT4);
+ rc_bit_case(probs[symbol], , , SEQ_POS_SLOT5);
#endif
// Get rid of the highest bit that was needed for
// indexing of the probability array.
@@ -571,25 +564,25 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
switch (limit) {
case 5:
assert(offset == 0);
- rc_bit(probs[symbol], 0,
+ rc_bit(probs[symbol], ,
rep0 += 1,
SEQ_POS_MODEL);
++offset;
--limit;
case 4:
- rc_bit(probs[symbol], 0,
+ rc_bit(probs[symbol], ,
rep0 += 1 << offset,
SEQ_POS_MODEL);
++offset;
--limit;
case 3:
- rc_bit(probs[symbol], 0,
+ rc_bit(probs[symbol], ,
rep0 += 1 << offset,
SEQ_POS_MODEL);
++offset;
--limit;
case 2:
- rc_bit(probs[symbol], 0,
+ rc_bit(probs[symbol], ,
rep0 += 1 << offset,
SEQ_POS_MODEL);
++offset;
@@ -601,7 +594,7 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
// rc_bit_last() here to omit
// the unneeded updating of
// "symbol".
- rc_bit_last(probs[symbol], 0,
+ rc_bit_last(probs[symbol], ,
rep0 += 1 << offset,
SEQ_POS_MODEL);
}
@@ -635,19 +628,19 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
} while (++offset < ALIGN_BITS);
#else
case SEQ_ALIGN0:
- rc_bit(coder->pos_align[symbol], 0,
+ rc_bit(coder->pos_align[symbol], ,
rep0 += 1, SEQ_ALIGN0);
case SEQ_ALIGN1:
- rc_bit(coder->pos_align[symbol], 0,
+ rc_bit(coder->pos_align[symbol], ,
rep0 += 2, SEQ_ALIGN1);
case SEQ_ALIGN2:
- rc_bit(coder->pos_align[symbol], 0,
+ rc_bit(coder->pos_align[symbol], ,
rep0 += 4, SEQ_ALIGN2);
case SEQ_ALIGN3:
// Like in SEQ_POS_MODEL, we don't
// need "symbol" for anything else
// than indexing the probability array.
- rc_bit_last(coder->pos_align[symbol], 0,
+ rc_bit_last(coder->pos_align[symbol], ,
rep0 += 8, SEQ_ALIGN3);
#endif
@@ -732,11 +725,9 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
// is stored to rep0 and rep1, rep2 and rep3
// are updated accordingly.
rc_if_0(coder->is_rep1[state], SEQ_IS_REP1) {
- uint32_t distance;
-
rc_update_0(coder->is_rep1[state]);
- distance = rep1;
+ const uint32_t distance = rep1;
rep1 = rep0;
rep0 = distance;
@@ -745,23 +736,19 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
case SEQ_IS_REP2:
rc_if_0(coder->is_rep2[state],
SEQ_IS_REP2) {
- uint32_t distance;
-
rc_update_0(coder->is_rep2[
state]);
- distance = rep2;
+ const uint32_t distance = rep2;
rep2 = rep1;
rep1 = rep0;
rep0 = distance;
} else {
- uint32_t distance;
-
rc_update_1(coder->is_rep2[
state]);
- distance = rep3;
+ const uint32_t distance = rep3;
rep3 = rep2;
rep2 = rep1;
rep1 = rep0;
@@ -866,9 +853,6 @@ lzma_lzma_decoder_uncompressed(void *coder_ptr, lzma_vli uncompressed_size)
static void
lzma_decoder_reset(lzma_coder *coder, const void *opt)
{
- uint32_t i, j, pos_state;
- uint32_t num_pos_states;
-
const lzma_options_lzma *options = opt;
// NOTE: We assume that lc/lp/pb are valid since they were
@@ -895,8 +879,8 @@ lzma_decoder_reset(lzma_coder *coder, const void *opt)
rc_reset(coder->rc);
// Bit and bittree decoders
- for (i = 0; i < STATES; ++i) {
- for (j = 0; j <= coder->pos_mask; ++j) {
+ for (uint32_t i = 0; i < STATES; ++i) {
+ for (uint32_t j = 0; j <= coder->pos_mask; ++j) {
bit_reset(coder->is_match[i][j]);
bit_reset(coder->is_rep0_long[i][j]);
}
@@ -907,22 +891,22 @@ lzma_decoder_reset(lzma_coder *coder, const void *opt)
bit_reset(coder->is_rep2[i]);
}
- for (i = 0; i < LEN_TO_POS_STATES; ++i)
+ for (uint32_t i = 0; i < LEN_TO_POS_STATES; ++i)
bittree_reset(coder->pos_slot[i], POS_SLOT_BITS);
- for (i = 0; i < FULL_DISTANCES - END_POS_MODEL_INDEX; ++i)
+ for (uint32_t i = 0; i < FULL_DISTANCES - END_POS_MODEL_INDEX; ++i)
bit_reset(coder->pos_special[i]);
bittree_reset(coder->pos_align, ALIGN_BITS);
// Len decoders (also bit/bittree)
- num_pos_states = 1U << options->pb;
+ const uint32_t num_pos_states = 1U << options->pb;
bit_reset(coder->match_len_decoder.choice);
bit_reset(coder->match_len_decoder.choice2);
bit_reset(coder->rep_len_decoder.choice);
bit_reset(coder->rep_len_decoder.choice2);
- for (pos_state = 0; pos_state < num_pos_states; ++pos_state) {
+ for (uint32_t pos_state = 0; pos_state < num_pos_states; ++pos_state) {
bittree_reset(coder->match_len_decoder.low[pos_state],
LEN_LOW_BITS);
bittree_reset(coder->match_len_decoder.mid[pos_state],
@@ -952,8 +936,6 @@ extern lzma_ret
lzma_lzma_decoder_create(lzma_lz_decoder *lz, lzma_allocator *allocator,
const void *opt, lzma_lz_options *lz_options)
{
- const lzma_options_lzma *options = opt;
-
if (lz->coder == NULL) {
lz->coder = lzma_alloc(sizeof(lzma_coder), allocator);
if (lz->coder == NULL)
@@ -966,6 +948,7 @@ lzma_lzma_decoder_create(lzma_lz_decoder *lz, lzma_allocator *allocator,
// All dictionary sizes are OK here. LZ decoder will take care of
// the special cases.
+ const lzma_options_lzma *options = opt;
lz_options->dict_size = options->dict_size;
lz_options->preset_dict = options->preset_dict;
lz_options->preset_dict_size = options->preset_dict_size;
@@ -1045,12 +1028,11 @@ extern lzma_ret
lzma_lzma_props_decode(void **options, lzma_allocator *allocator,
const uint8_t *props, size_t props_size)
{
- lzma_options_lzma *opt;
-
if (props_size != 5)
return LZMA_OPTIONS_ERROR;
- opt = lzma_alloc(sizeof(lzma_options_lzma), allocator);
+ lzma_options_lzma *opt
+ = lzma_alloc(sizeof(lzma_options_lzma), allocator);
if (opt == NULL)
return LZMA_MEM_ERROR;
diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder.c b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder.c
index 6186f83..0b9ee9e 100644
--- a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder.c
+++ b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder.c
@@ -28,14 +28,11 @@ literal_matched(lzma_range_encoder *rc, probability *subcoder,
symbol += UINT32_C(1) << 8;
do {
- uint32_t match_bit;
- uint32_t subcoder_index;
- uint32_t bit;
-
match_byte <<= 1;
- match_bit = match_byte & offset;
- subcoder_index = offset + match_bit + (symbol >> 8);
- bit = (symbol >> 7) & 1;
+ const uint32_t match_bit = match_byte & offset;
+ const uint32_t subcoder_index
+ = offset + match_bit + (symbol >> 8);
+ const uint32_t bit = (symbol >> 7) & 1;
rc_bit(rc, &subcoder[subcoder_index], bit);
symbol <<= 1;
@@ -80,19 +77,16 @@ literal(lzma_coder *coder, lzma_mf *mf, uint32_t position)
static void
length_update_prices(lzma_length_encoder *lc, const uint32_t pos_state)
{
- uint32_t a0, a1, b0, b1;
- uint32_t *prices;
- uint32_t i;
-
const uint32_t table_size = lc->table_size;
lc->counters[pos_state] = table_size;
- a0 = rc_bit_0_price(lc->choice);
- a1 = rc_bit_1_price(lc->choice);
- b0 = a1 + rc_bit_0_price(lc->choice2);
- b1 = a1 + rc_bit_1_price(lc->choice2);
- prices = lc->prices[pos_state];
+ const uint32_t a0 = rc_bit_0_price(lc->choice);
+ const uint32_t a1 = rc_bit_1_price(lc->choice);
+ const uint32_t b0 = a1 + rc_bit_0_price(lc->choice2);
+ const uint32_t b1 = a1 + rc_bit_1_price(lc->choice2);
+ uint32_t *const prices = lc->prices[pos_state];
+ uint32_t i;
for (i = 0; i < table_size && i < LEN_LOW_SYMBOLS; ++i)
prices[i] = a0 + rc_bittree_price(lc->low[pos_state],
LEN_LOW_BITS, i);
@@ -149,16 +143,13 @@ static inline void
match(lzma_coder *coder, const uint32_t pos_state,
const uint32_t distance, const uint32_t len)
{
- uint32_t pos_slot;
- uint32_t len_to_pos_state;
-
update_match(coder->state);
length(&coder->rc, &coder->match_len_encoder, pos_state, len,
coder->fast_mode);
- pos_slot = get_pos_slot(distance);
- len_to_pos_state = get_len_to_pos_state(len);
+ const uint32_t pos_slot = get_pos_slot(distance);
+ const uint32_t len_to_pos_state = get_len_to_pos_state(len);
rc_bittree(&coder->rc, coder->pos_slot[len_to_pos_state],
POS_SLOT_BITS, pos_slot);
@@ -322,19 +313,14 @@ lzma_lzma_encode(lzma_coder *restrict coder, lzma_mf *restrict mf,
uint8_t *restrict out, size_t *restrict out_pos,
size_t out_size, uint32_t limit)
{
- uint32_t position;
-
// Initialize the stream if no data has been encoded yet.
if (!coder->is_initialized && !encode_init(coder, mf))
return LZMA_OK;
// Get the lowest bits of the uncompressed offset from the LZ layer.
- position = mf_position(mf);
+ uint32_t position = mf_position(mf);
while (true) {
- uint32_t len;
- uint32_t back;
-
// Encode pending bits, if any. Calling this before encoding
// the next symbol is needed only with plain LZMA, since
// LZMA2 always provides big enough buffer to flush
@@ -373,6 +359,8 @@ lzma_lzma_encode(lzma_coder *restrict coder, lzma_mf *restrict mf,
// - UINT32_MAX: not a match but a literal
// Value ranges for len:
// - [MATCH_LEN_MIN, MATCH_LEN_MAX]
+ uint32_t len;
+ uint32_t back;
if (coder->fast_mode)
lzma_lzma_optimum_fast(coder, mf, &back, &len);
@@ -465,12 +453,10 @@ static void
length_encoder_reset(lzma_length_encoder *lencoder,
const uint32_t num_pos_states, const bool fast_mode)
{
- size_t pos_state;
-
bit_reset(lencoder->choice);
bit_reset(lencoder->choice2);
- for (pos_state = 0; pos_state < num_pos_states; ++pos_state) {
+ for (size_t pos_state = 0; pos_state < num_pos_states; ++pos_state) {
bittree_reset(lencoder->low[pos_state], LEN_LOW_BITS);
bittree_reset(lencoder->mid[pos_state], LEN_MID_BITS);
}
@@ -478,7 +464,7 @@ length_encoder_reset(lzma_length_encoder *lencoder,
bittree_reset(lencoder->high, LEN_HIGH_BITS);
if (!fast_mode)
- for (pos_state = 0; pos_state < num_pos_states;
+ for (size_t pos_state = 0; pos_state < num_pos_states;
++pos_state)
length_update_prices(lencoder, pos_state);
@@ -489,8 +475,6 @@ length_encoder_reset(lzma_length_encoder *lencoder,
extern lzma_ret
lzma_lzma_encoder_reset(lzma_coder *coder, const lzma_options_lzma *options)
{
- size_t i, j;
-
if (!is_options_valid(options))
return LZMA_OPTIONS_ERROR;
@@ -503,14 +487,14 @@ lzma_lzma_encoder_reset(lzma_coder *coder, const lzma_options_lzma *options)
// State
coder->state = STATE_LIT_LIT;
- for (i = 0; i < REP_DISTANCES; ++i)
+ for (size_t i = 0; i < REP_DISTANCES; ++i)
coder->reps[i] = 0;
literal_init(coder->literal, options->lc, options->lp);
// Bit encoders
- for (i = 0; i < STATES; ++i) {
- for (j = 0; j <= coder->pos_mask; ++j) {
+ for (size_t i = 0; i < STATES; ++i) {
+ for (size_t j = 0; j <= coder->pos_mask; ++j) {
bit_reset(coder->is_match[i][j]);
bit_reset(coder->is_rep0_long[i][j]);
}
@@ -521,11 +505,11 @@ lzma_lzma_encoder_reset(lzma_coder *coder, const lzma_options_lzma *options)
bit_reset(coder->is_rep2[i]);
}
- for (i = 0; i < FULL_DISTANCES - END_POS_MODEL_INDEX; ++i)
+ for (size_t i = 0; i < FULL_DISTANCES - END_POS_MODEL_INDEX; ++i)
bit_reset(coder->pos_special[i]);
// Bit tree encoders
- for (i = 0; i < LEN_TO_POS_STATES; ++i)
+ for (size_t i = 0; i < LEN_TO_POS_STATES; ++i)
bittree_reset(coder->pos_slot[i], POS_SLOT_BITS);
bittree_reset(coder->pos_align, ALIGN_BITS);
@@ -564,9 +548,6 @@ extern lzma_ret
lzma_lzma_encoder_create(lzma_coder **coder_ptr, lzma_allocator *allocator,
const lzma_options_lzma *options, lzma_lz_options *lz_options)
{
- lzma_coder *coder;
- uint32_t log_size = 0;
-
// Allocate lzma_coder if it wasn't already allocated.
if (*coder_ptr == NULL) {
*coder_ptr = lzma_alloc(sizeof(lzma_coder), allocator);
@@ -574,7 +555,7 @@ lzma_lzma_encoder_create(lzma_coder **coder_ptr, lzma_allocator *allocator,
return LZMA_MEM_ERROR;
}
- coder = *coder_ptr;
+ lzma_coder *coder = *coder_ptr;
// Set compression mode. We haven't validated the options yet,
// but it's OK here, since nothing bad happens with invalid
@@ -590,6 +571,7 @@ lzma_lzma_encoder_create(lzma_coder **coder_ptr, lzma_allocator *allocator,
// Set dist_table_size.
// Round the dictionary size up to next 2^n.
+ uint32_t log_size = 0;
while ((UINT32_C(1) << log_size) < options->dict_size)
++log_size;
@@ -643,15 +625,13 @@ lzma_lzma_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern uint64_t
lzma_lzma_encoder_memusage(const void *options)
{
- lzma_lz_options lz_options;
- uint64_t lz_memusage;
-
if (!is_options_valid(options))
return UINT64_MAX;
+ lzma_lz_options lz_options;
set_lz_options(&lz_options, options);
- lz_memusage = lzma_lz_encoder_memusage(&lz_options);
+ const uint64_t lz_memusage = lzma_lz_encoder_memusage(&lz_options);
if (lz_memusage == UINT64_MAX)
return UINT64_MAX;
diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_fast.c b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_fast.c
index 52c26e4..f835f69 100644
--- a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_fast.c
+++ b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_fast.c
@@ -20,14 +20,6 @@ extern void
lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf,
uint32_t *restrict back_res, uint32_t *restrict len_res)
{
- const uint8_t *buf;
- uint32_t buf_avail;
- uint32_t i;
- uint32_t rep_len = 0;
- uint32_t rep_index = 0;
- uint32_t back_main = 0;
- uint32_t limit;
-
const uint32_t nice_len = mf->nice_len;
uint32_t len_main;
@@ -40,8 +32,8 @@ lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf,
matches_count = coder->matches_count;
}
- buf = mf_ptr(mf) - 1;
- buf_avail = my_min(mf_avail(mf) + 1, MATCH_LEN_MAX);
+ const uint8_t *buf = mf_ptr(mf) - 1;
+ const uint32_t buf_avail = my_min(mf_avail(mf) + 1, MATCH_LEN_MAX);
if (buf_avail < 2) {
// There's not enough input left to encode a match.
@@ -51,9 +43,10 @@ lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf,
}
// Look for repeated matches; scan the previous four match distances
- for (i = 0; i < REP_DISTANCES; ++i) {
- uint32_t len;
+ uint32_t rep_len = 0;
+ uint32_t rep_index = 0;
+ for (uint32_t i = 0; i < REP_DISTANCES; ++i) {
// Pointer to the beginning of the match candidate
const uint8_t *const buf_back = buf - coder->reps[i] - 1;
@@ -64,6 +57,7 @@ lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf,
// The first two bytes matched.
// Calculate the length of the match.
+ uint32_t len;
for (len = 2; len < buf_avail
&& buf[len] == buf_back[len]; ++len) ;
@@ -92,6 +86,7 @@ lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf,
return;
}
+ uint32_t back_main = 0;
if (len_main >= 2) {
back_main = coder->matches[matches_count - 1].dist;
@@ -158,16 +153,15 @@ lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf,
// the old buf pointer instead of recalculating it with mf_ptr().
++buf;
- limit = len_main - 1;
-
- for (i = 0; i < REP_DISTANCES; ++i) {
- uint32_t len;
+ const uint32_t limit = len_main - 1;
+ for (uint32_t i = 0; i < REP_DISTANCES; ++i) {
const uint8_t *const buf_back = buf - coder->reps[i] - 1;
if (not_equal_16(buf, buf_back))
continue;
+ uint32_t len;
for (len = 2; len < limit
&& buf[len] == buf_back[len]; ++len) ;
diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_normal.c b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_normal.c
index d2829a2..7e85649 100644
--- a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_normal.c
+++ b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_optimum_normal.c
@@ -35,15 +35,12 @@ get_literal_price(const lzma_coder *const coder, const uint32_t pos,
symbol += UINT32_C(1) << 8;
do {
- uint32_t match_bit;
- uint32_t subcoder_index;
- uint32_t bit;
-
match_byte <<= 1;
- match_bit = match_byte & offset;
- subcoder_index = offset + match_bit + (symbol >> 8);
- bit = (symbol >> 7) & 1;
+ const uint32_t match_bit = match_byte & offset;
+ const uint32_t subcoder_index
+ = offset + match_bit + (symbol >> 8);
+ const uint32_t bit = (symbol >> 7) & 1;
price += rc_bit_price(subcoder[subcoder_index], bit);
symbol <<= 1;
@@ -134,11 +131,7 @@ get_pos_len_price(const lzma_coder *const coder, const uint32_t pos,
static void
fill_distances_prices(lzma_coder *coder)
{
- uint32_t len_to_pos_state;
- uint32_t pos_slot;
- uint32_t i;
-
- for (len_to_pos_state = 0;
+ for (uint32_t len_to_pos_state = 0;
len_to_pos_state < LEN_TO_POS_STATES;
++len_to_pos_state) {
@@ -146,7 +139,7 @@ fill_distances_prices(lzma_coder *coder)
= coder->pos_slot_prices[len_to_pos_state];
// Price to encode the pos_slot.
- for (pos_slot = 0;
+ for (uint32_t pos_slot = 0;
pos_slot < coder->dist_table_size; ++pos_slot)
pos_slot_prices[pos_slot] = rc_bittree_price(
coder->pos_slot[len_to_pos_state],
@@ -155,7 +148,7 @@ fill_distances_prices(lzma_coder *coder)
// For matches with distance >= FULL_DISTANCES, add the price
// of the direct bits part of the match distance. (Align bits
// are handled by fill_align_prices()).
- for (pos_slot = END_POS_MODEL_INDEX;
+ for (uint32_t pos_slot = END_POS_MODEL_INDEX;
pos_slot < coder->dist_table_size; ++pos_slot)
pos_slot_prices[pos_slot] += rc_direct_price(
((pos_slot >> 1) - 1) - ALIGN_BITS);
@@ -163,7 +156,7 @@ fill_distances_prices(lzma_coder *coder)
// Distances in the range [0, 3] are fully encoded with
// pos_slot, so they are used for coder->distances_prices
// as is.
- for (i = 0; i < START_POS_MODEL_INDEX; ++i)
+ for (uint32_t i = 0; i < START_POS_MODEL_INDEX; ++i)
coder->distances_prices[len_to_pos_state][i]
= pos_slot_prices[i];
}
@@ -171,7 +164,7 @@ fill_distances_prices(lzma_coder *coder)
// Distances in the range [4, 127] depend on pos_slot and pos_special.
// We do this in a loop separate from the above loop to avoid
// redundant calls to get_pos_slot().
- for (i = START_POS_MODEL_INDEX; i < FULL_DISTANCES; ++i) {
+ for (uint32_t i = START_POS_MODEL_INDEX; i < FULL_DISTANCES; ++i) {
const uint32_t pos_slot = get_pos_slot(i);
const uint32_t footer_bits = ((pos_slot >> 1) - 1);
const uint32_t base = (2 | (pos_slot & 1)) << footer_bits;
@@ -179,7 +172,7 @@ fill_distances_prices(lzma_coder *coder)
coder->pos_special + base - pos_slot - 1,
footer_bits, i - base);
- for (len_to_pos_state = 0;
+ for (uint32_t len_to_pos_state = 0;
len_to_pos_state < LEN_TO_POS_STATES;
++len_to_pos_state)
coder->distances_prices[len_to_pos_state][i]
@@ -195,8 +188,7 @@ fill_distances_prices(lzma_coder *coder)
static void
fill_align_prices(lzma_coder *coder)
{
- uint32_t i;
- for (i = 0; i < ALIGN_TABLE_SIZE; ++i)
+ for (uint32_t i = 0; i < ALIGN_TABLE_SIZE; ++i)
coder->align_prices[i] = rc_bittree_reverse_price(
coder->pos_align, ALIGN_BITS, i);
@@ -233,15 +225,12 @@ static void
backward(lzma_coder *restrict coder, uint32_t *restrict len_res,
uint32_t *restrict back_res, uint32_t cur)
{
+ coder->opts_end_index = cur;
+
uint32_t pos_mem = coder->opts[cur].pos_prev;
uint32_t back_mem = coder->opts[cur].back_prev;
- coder->opts_end_index = cur;
-
do {
- const uint32_t pos_prev = pos_mem;
- const uint32_t back_cur = back_mem;
-
if (coder->opts[cur].prev_1_is_literal) {
make_literal(&coder->opts[pos_mem]);
coder->opts[pos_mem].pos_prev = pos_mem - 1;
@@ -256,6 +245,9 @@ backward(lzma_coder *restrict coder, uint32_t *restrict len_res,
}
}
+ const uint32_t pos_prev = pos_mem;
+ const uint32_t back_cur = back_mem;
+
back_mem = coder->opts[pos_prev].back_prev;
pos_mem = coder->opts[pos_prev].pos_prev;
@@ -282,23 +274,6 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
uint32_t *restrict back_res, uint32_t *restrict len_res,
uint32_t position)
{
- uint32_t buf_avail;
- const uint8_t *buf;
- uint32_t rep_lens[REP_DISTANCES];
- uint32_t rep_max_index = 0;
- uint32_t i;
-
- uint8_t current_byte;
- uint8_t match_byte;
-
- uint32_t pos_state;
- uint32_t match_price;
- uint32_t rep_match_price;
- uint32_t len_end;
- uint32_t len;
-
- uint32_t normal_match_price;
-
const uint32_t nice_len = mf->nice_len;
uint32_t len_main;
@@ -312,18 +287,19 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
matches_count = coder->matches_count;
}
- buf_avail = my_min(mf_avail(mf) + 1, MATCH_LEN_MAX);
+ const uint32_t buf_avail = my_min(mf_avail(mf) + 1, MATCH_LEN_MAX);
if (buf_avail < 2) {
*back_res = UINT32_MAX;
*len_res = 1;
return UINT32_MAX;
}
- buf = mf_ptr(mf) - 1;
+ const uint8_t *const buf = mf_ptr(mf) - 1;
- for (i = 0; i < REP_DISTANCES; ++i) {
- uint32_t len_test;
+ uint32_t rep_lens[REP_DISTANCES];
+ uint32_t rep_max_index = 0;
+ for (uint32_t i = 0; i < REP_DISTANCES; ++i) {
const uint8_t *const buf_back = buf - coder->reps[i] - 1;
if (not_equal_16(buf, buf_back)) {
@@ -331,6 +307,7 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
continue;
}
+ uint32_t len_test;
for (len_test = 2; len_test < buf_avail
&& buf[len_test] == buf_back[len_test];
++len_test) ;
@@ -356,8 +333,8 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
return UINT32_MAX;
}
- current_byte = *buf;
- match_byte = *(buf - coder->reps[0] - 1);
+ const uint8_t current_byte = *buf;
+ const uint8_t match_byte = *(buf - coder->reps[0] - 1);
if (len_main < 2 && current_byte != match_byte
&& rep_lens[rep_max_index] < 2) {
@@ -368,7 +345,7 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
coder->opts[0].state = coder->state;
- pos_state = position & coder->pos_mask;
+ const uint32_t pos_state = position & coder->pos_mask;
coder->opts[1].price = rc_bit_0_price(
coder->is_match[coder->state][pos_state])
@@ -378,9 +355,9 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
make_literal(&coder->opts[1]);
- match_price = rc_bit_1_price(
+ const uint32_t match_price = rc_bit_1_price(
coder->is_match[coder->state][pos_state]);
- rep_match_price = match_price
+ const uint32_t rep_match_price = match_price
+ rc_bit_1_price(coder->is_rep[coder->state]);
if (match_byte == current_byte) {
@@ -394,7 +371,7 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
}
}
- len_end = my_max(len_main, rep_lens[rep_max_index]);
+ const uint32_t len_end = my_max(len_main, rep_lens[rep_max_index]);
if (len_end < 2) {
*back_res = coder->opts[1].back_prev;
@@ -404,23 +381,21 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
coder->opts[1].pos_prev = 0;
- for (i = 0; i < REP_DISTANCES; ++i)
+ for (uint32_t i = 0; i < REP_DISTANCES; ++i)
coder->opts[0].backs[i] = coder->reps[i];
- len = len_end;
+ uint32_t len = len_end;
do {
coder->opts[len].price = RC_INFINITY_PRICE;
} while (--len >= 2);
- for (i = 0; i < REP_DISTANCES; ++i) {
- uint32_t price;
-
+ for (uint32_t i = 0; i < REP_DISTANCES; ++i) {
uint32_t rep_len = rep_lens[i];
if (rep_len < 2)
continue;
- price = rep_match_price + get_pure_rep_price(
+ const uint32_t price = rep_match_price + get_pure_rep_price(
coder, i, coder->state, pos_state);
do {
@@ -439,7 +414,7 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
}
- normal_match_price = match_price
+ const uint32_t normal_match_price = match_price
+ rc_bit_0_price(coder->is_rep[coder->state]);
len = rep_lens[0] >= 2 ? rep_lens[0] + 1 : 2;
@@ -481,19 +456,6 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
uint32_t new_len = coder->longest_match_length;
uint32_t pos_prev = coder->opts[cur].pos_prev;
lzma_lzma_state state;
- uint32_t buf_avail;
- uint32_t rep_index;
- uint32_t i;
-
- uint32_t cur_price;
- uint8_t current_byte;
- uint8_t match_byte;
- uint32_t pos_state;
- uint32_t cur_and_1_price;
- bool next_is_literal = false;
- uint32_t match_price;
- uint32_t rep_match_price;
- uint32_t start_len = 2;
if (coder->opts[cur].prev_1_is_literal) {
--pos_prev;
@@ -537,10 +499,9 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
}
if (pos < REP_DISTANCES) {
- uint32_t i;
-
reps[0] = coder->opts[pos_prev].backs[pos];
+ uint32_t i;
for (i = 1; i <= pos; ++i)
reps[i] = coder->opts[pos_prev].backs[i - 1];
@@ -550,28 +511,30 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
} else {
reps[0] = pos - REP_DISTANCES;
- for (i = 1; i < REP_DISTANCES; ++i)
+ for (uint32_t i = 1; i < REP_DISTANCES; ++i)
reps[i] = coder->opts[pos_prev].backs[i - 1];
}
}
coder->opts[cur].state = state;
- for (i = 0; i < REP_DISTANCES; ++i)
+ for (uint32_t i = 0; i < REP_DISTANCES; ++i)
coder->opts[cur].backs[i] = reps[i];
- cur_price = coder->opts[cur].price;
+ const uint32_t cur_price = coder->opts[cur].price;
- current_byte = *buf;
- match_byte = *(buf - reps[0] - 1);
+ const uint8_t current_byte = *buf;
+ const uint8_t match_byte = *(buf - reps[0] - 1);
- pos_state = position & coder->pos_mask;
+ const uint32_t pos_state = position & coder->pos_mask;
- cur_and_1_price = cur_price
+ const uint32_t cur_and_1_price = cur_price
+ rc_bit_0_price(coder->is_match[state][pos_state])
+ get_literal_price(coder, position, buf[-1],
!is_literal_state(state), match_byte, current_byte);
+ bool next_is_literal = false;
+
if (cur_and_1_price < coder->opts[cur + 1].price) {
coder->opts[cur + 1].price = cur_and_1_price;
coder->opts[cur + 1].pos_prev = cur;
@@ -579,9 +542,9 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
next_is_literal = true;
}
- match_price = cur_price
+ const uint32_t match_price = cur_price
+ rc_bit_1_price(coder->is_match[state][pos_state]);
- rep_match_price = match_price
+ const uint32_t rep_match_price = match_price
+ rc_bit_1_price(coder->is_rep[state]);
if (match_byte == current_byte
@@ -602,7 +565,7 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
if (buf_avail_full < 2)
return len_end;
- buf_avail = my_min(buf_avail_full, nice_len);
+ const uint32_t buf_avail = my_min(buf_avail_full, nice_len);
if (!next_is_literal && match_byte != current_byte) { // speed optimization
// try literal + rep0
@@ -616,26 +579,21 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
--len_test;
if (len_test >= 2) {
- uint32_t pos_state_next;
- uint32_t next_rep_match_price;
- uint32_t offset;
- uint32_t cur_and_len_price;
-
lzma_lzma_state state_2 = state;
update_literal(state_2);
- pos_state_next = (position + 1) & coder->pos_mask;
- next_rep_match_price = cur_and_1_price
+ const uint32_t pos_state_next = (position + 1) & coder->pos_mask;
+ const uint32_t next_rep_match_price = cur_and_1_price
+ rc_bit_1_price(coder->is_match[state_2][pos_state_next])
+ rc_bit_1_price(coder->is_rep[state_2]);
//for (; len_test >= 2; --len_test) {
- offset = cur + 1 + len_test;
+ const uint32_t offset = cur + 1 + len_test;
while (len_end < offset)
coder->opts[++len_end].price = RC_INFINITY_PRICE;
- cur_and_len_price = next_rep_match_price
+ const uint32_t cur_and_len_price = next_rep_match_price
+ get_rep_price(coder, 0, len_test,
state_2, pos_state_next);
@@ -651,14 +609,14 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
}
- for (rep_index = 0; rep_index < REP_DISTANCES; ++rep_index) {
- uint32_t len_test, len_test_2, len_test_temp;
- uint32_t price, limit;
+ uint32_t start_len = 2; // speed optimization
+ for (uint32_t rep_index = 0; rep_index < REP_DISTANCES; ++rep_index) {
const uint8_t *const buf_back = buf - reps[rep_index] - 1;
if (not_equal_16(buf, buf_back))
continue;
+ uint32_t len_test;
for (len_test = 2; len_test < buf_avail
&& buf[len_test] == buf_back[len_test];
++len_test) ;
@@ -666,8 +624,8 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
while (len_end < cur + len_test)
coder->opts[++len_end].price = RC_INFINITY_PRICE;
- len_test_temp = len_test;
- price = rep_match_price + get_pure_rep_price(
+ const uint32_t len_test_temp = len_test;
+ const uint32_t price = rep_match_price + get_pure_rep_price(
coder, rep_index, state, pos_state);
do {
@@ -689,8 +647,8 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
start_len = len_test + 1;
- len_test_2 = len_test + 1;
- limit = my_min(buf_avail_full,
+ uint32_t len_test_2 = len_test + 1;
+ const uint32_t limit = my_min(buf_avail_full,
len_test_2 + nice_len);
for (; len_test_2 < limit
&& buf[len_test_2] == buf_back[len_test_2];
@@ -699,18 +657,12 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
len_test_2 -= len_test + 1;
if (len_test_2 >= 2) {
- uint32_t pos_state_next;
- uint32_t cur_and_len_literal_price;
- uint32_t next_rep_match_price;
- uint32_t offset;
- uint32_t cur_and_len_price;
-
lzma_lzma_state state_2 = state;
update_long_rep(state_2);
- pos_state_next = (position + len_test) & coder->pos_mask;
+ uint32_t pos_state_next = (position + len_test) & coder->pos_mask;
- cur_and_len_literal_price = price
+ const uint32_t cur_and_len_literal_price = price
+ get_len_price(&coder->rep_len_encoder,
len_test, pos_state)
+ rc_bit_0_price(coder->is_match[state_2][pos_state_next])
@@ -722,17 +674,17 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
pos_state_next = (position + len_test + 1) & coder->pos_mask;
- next_rep_match_price = cur_and_len_literal_price
+ const uint32_t next_rep_match_price = cur_and_len_literal_price
+ rc_bit_1_price(coder->is_match[state_2][pos_state_next])
+ rc_bit_1_price(coder->is_rep[state_2]);
//for(; len_test_2 >= 2; len_test_2--) {
- offset = cur + len_test + 1 + len_test_2;
+ const uint32_t offset = cur + len_test + 1 + len_test_2;
while (len_end < offset)
coder->opts[++len_end].price = RC_INFINITY_PRICE;
- cur_and_len_price = next_rep_match_price
+ const uint32_t cur_and_len_price = next_rep_match_price
+ get_rep_price(coder, 0, len_test_2,
state_2, pos_state_next);
@@ -763,19 +715,17 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
if (new_len >= start_len) {
- uint32_t len_test;
- uint32_t i = 0;
-
const uint32_t normal_match_price = match_price
+ rc_bit_0_price(coder->is_rep[state]);
while (len_end < cur + new_len)
coder->opts[++len_end].price = RC_INFINITY_PRICE;
+ uint32_t i = 0;
while (start_len > coder->matches[i].len)
++i;
- for (len_test = start_len; ; ++len_test) {
+ for (uint32_t len_test = start_len; ; ++len_test) {
const uint32_t cur_back = coder->matches[i].dist;
uint32_t cur_and_len_price = normal_match_price
+ get_pos_len_price(coder,
@@ -803,16 +753,12 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
len_test_2 -= len_test + 1;
if (len_test_2 >= 2) {
- uint32_t pos_state_next;
- uint32_t cur_and_len_literal_price;
- uint32_t next_rep_match_price;
- uint32_t offset;
-
lzma_lzma_state state_2 = state;
update_match(state_2);
- pos_state_next = (position + len_test) & coder->pos_mask;
+ uint32_t pos_state_next
+ = (position + len_test) & coder->pos_mask;
- cur_and_len_literal_price = cur_and_len_price
+ const uint32_t cur_and_len_literal_price = cur_and_len_price
+ rc_bit_0_price(
coder->is_match[state_2][pos_state_next])
+ get_literal_price(coder,
@@ -825,14 +771,14 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
update_literal(state_2);
pos_state_next = (pos_state_next + 1) & coder->pos_mask;
- next_rep_match_price
+ const uint32_t next_rep_match_price
= cur_and_len_literal_price
+ rc_bit_1_price(
coder->is_match[state_2][pos_state_next])
+ rc_bit_1_price(coder->is_rep[state_2]);
// for(; len_test_2 >= 2; --len_test_2) {
- offset = cur + len_test + 1 + len_test_2;
+ const uint32_t offset = cur + len_test + 1 + len_test_2;
while (len_end < offset)
coder->opts[++len_end].price = RC_INFINITY_PRICE;
@@ -869,10 +815,6 @@ lzma_lzma_optimum_normal(lzma_coder *restrict coder, lzma_mf *restrict mf,
uint32_t *restrict back_res, uint32_t *restrict len_res,
uint32_t position)
{
- uint32_t reps[REP_DISTANCES];
- uint32_t len_end;
- uint32_t cur;
-
// If we have symbols pending, return the next pending symbol.
if (coder->opts_end_index != coder->opts_current_index) {
assert(mf->read_ahead > 0);
@@ -899,13 +841,14 @@ lzma_lzma_optimum_normal(lzma_coder *restrict coder, lzma_mf *restrict mf,
// the original function into two pieces makes it at least a little
// more readable, since those two parts don't share many variables.
- len_end = helper1(coder, mf, back_res, len_res, position);
+ uint32_t len_end = helper1(coder, mf, back_res, len_res, position);
if (len_end == UINT32_MAX)
return;
-
+ uint32_t reps[REP_DISTANCES];
memcpy(reps, coder->reps, sizeof(reps));
+ uint32_t cur;
for (cur = 1; cur < len_end; ++cur) {
assert(cur < OPTS);
diff --git a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_presets.c b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_presets.c
index 9332abf..8484b77 100644
--- a/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_presets.c
+++ b/Utilities/cmliblzma/liblzma/lzma/lzma_encoder_presets.c
@@ -16,10 +16,6 @@
extern LZMA_API(lzma_bool)
lzma_lzma_preset(lzma_options_lzma *options, uint32_t preset)
{
- static const uint8_t dict_pow2[]
- = { 18, 20, 21, 22, 22, 23, 23, 24, 25, 26 };
- static const uint8_t depths[] = { 4, 8, 24, 48 };
-
const uint32_t level = preset & LZMA_PRESET_LEVEL_MASK;
const uint32_t flags = preset & ~LZMA_PRESET_LEVEL_MASK;
const uint32_t supported_flags = LZMA_PRESET_EXTREME;
@@ -34,12 +30,15 @@ lzma_lzma_preset(lzma_options_lzma *options, uint32_t preset)
options->lp = LZMA_LP_DEFAULT;
options->pb = LZMA_PB_DEFAULT;
+ static const uint8_t dict_pow2[]
+ = { 18, 20, 21, 22, 22, 23, 23, 24, 25, 26 };
options->dict_size = UINT32_C(1) << dict_pow2[level];
if (level <= 3) {
options->mode = LZMA_MODE_FAST;
options->mf = level == 0 ? LZMA_MF_HC3 : LZMA_MF_HC4;
options->nice_len = level <= 1 ? 128 : 273;
+ static const uint8_t depths[] = { 4, 8, 24, 48 };
options->depth = depths[level];
} else {
options->mode = LZMA_MODE_NORMAL;