author    | Quincey Koziol <koziol@hdfgroup.org> | 2008-03-13 22:12:13 (GMT)
committer | Quincey Koziol <koziol@hdfgroup.org> | 2008-03-13 22:12:13 (GMT)
commit    | afd5021ef9dafc99fda0e47774152f4dc2e57a6f (patch)
tree      | 8f1aec9a3037862e13a8325c5e9db3fb999bb38e /src
parent    | 6763f7c8824fb415493cfdb88155449c4d22d16f (diff)
[svn-r14738] Description:
Bring r14737 back from the 1.8 branch: fix a bug that would incorrectly encode
the member offsets for compound datatypes whose size is between 256 and 511
bytes when the "use the latest format" feature is enabled.
Tested on:
Mac OS X/32 10.5.2 (amazon) w/debug
FreeBSD/32 6.2 (duty) w/production
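
The root cause is visible in the arithmetic: the old expression `(H5V_log2_gen(limit) + 7) / 8` rounds the floor-log2 bit index up to whole bytes, which yields 1 byte for limits of 256 through 511 even though those values need 2 bytes, while the new `H5V_limit_enc_size()` helper introduced by this patch computes `(log2 / 8) + 1`. The following is a minimal standalone sketch of the boundary behavior, using a plain floor-log2 as a stand-in for `H5V_log2_gen()` rather than the actual HDF5 code:

```c
#include <stdint.h>
#include <stdio.h>

/* Floor of log2(n); stand-in for H5V_log2_gen(), returns 0 for n <= 1 */
static unsigned floor_log2(uint64_t n)
{
    unsigned r = 0;
    while (n >>= 1)
        r++;
    return r;
}

/* Old formula: rounds the bit *index* up to bytes */
static unsigned old_enc_size(uint64_t limit)
{
    return (floor_log2(limit) + 7) / 8;
}

/* New formula, as in H5V_limit_enc_size(): bytes needed to hold 'limit' */
static unsigned new_enc_size(uint64_t limit)
{
    return (floor_log2(limit) / 8) + 1;
}

int main(void)
{
    const uint64_t limits[] = {255, 256, 300, 511, 512};
    for (size_t i = 0; i < sizeof(limits) / sizeof(limits[0]); i++)
        printf("limit %4llu: old = %u byte(s), new = %u byte(s)\n",
               (unsigned long long)limits[i],
               old_enc_size(limits[i]), new_enc_size(limits[i]));
    return 0;
}
```

Running this shows both formulas agreeing at 255 (1 byte) and 512 (2 bytes), but disagreeing (1 vs. 2 bytes) across 256-511, which matches the size range called out in the commit message.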
Diffstat (limited to 'src')
-rw-r--r-- | src/H5B2int.c     |  6
-rw-r--r-- | src/H5FScache.c   |  4
-rw-r--r-- | src/H5FSsection.c |  4
-rw-r--r-- | src/H5HFhdr.c     |  2
-rw-r--r-- | src/H5Odtype.c    | 19
-rw-r--r-- | src/H5Vprivate.h  | 20
6 files changed, 44 insertions, 11 deletions
diff --git a/src/H5B2int.c b/src/H5B2int.c
index 2625c63..eae1291 100644
--- a/src/H5B2int.c
+++ b/src/H5B2int.c
@@ -203,7 +203,7 @@ HDmemset(shared->page, 0, shared->node_size);

     /* Compute size to store # of records in each node */
     /* (uses leaf # of records because its the largest) */
-    shared->max_nrec_size = (H5V_log2_gen((uint64_t)shared->node_info[0].max_nrec) + 7) / 8;
+    shared->max_nrec_size = H5V_limit_enc_size((uint64_t)shared->node_info[0].max_nrec);
     HDassert(shared->max_nrec_size <= H5B2_SIZEOF_RECORDS_PER_NODE);

     /* Initialize internal node info */
@@ -217,7 +217,7 @@ HDmemset(shared->page, 0, shared->node_size);
         shared->node_info[u].cum_max_nrec = ((shared->node_info[u].max_nrec + 1) *
             shared->node_info[u - 1].cum_max_nrec) + shared->node_info[u].max_nrec;
-        shared->node_info[u].cum_max_nrec_size = (H5V_log2_gen((uint64_t)shared->node_info[u].cum_max_nrec) + 7) / 8;
+        shared->node_info[u].cum_max_nrec_size = H5V_limit_enc_size((uint64_t)shared->node_info[u].cum_max_nrec);
         if((shared->node_info[u].nat_rec_fac = H5FL_fac_init(shared->type->nrec_size * shared->node_info[u].max_nrec)) == NULL)
             HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create node native key block factory")
@@ -576,7 +576,7 @@ H5B2_split_root(H5F_t *f, hid_t dxpl_id, H5B2_t *bt2, unsigned *bt2_flags_ptr)
     shared->node_info[shared->depth].merge_nrec = (shared->node_info[shared->depth].max_nrec * shared->merge_percent) / 100;
     shared->node_info[shared->depth].cum_max_nrec = ((shared->node_info[shared->depth].max_nrec + 1) *
         shared->node_info[shared->depth - 1].cum_max_nrec) + shared->node_info[shared->depth].max_nrec;
-    shared->node_info[shared->depth].cum_max_nrec_size = (H5V_log2_gen((uint64_t)shared->node_info[shared->depth].cum_max_nrec) + 7) / 8;
+    shared->node_info[shared->depth].cum_max_nrec_size = H5V_limit_enc_size((uint64_t)shared->node_info[shared->depth].cum_max_nrec);
     if((shared->node_info[shared->depth].nat_rec_fac = H5FL_fac_init(shared->type->nrec_size * shared->node_info[shared->depth].max_nrec)) == NULL)
         HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create node native key block factory")
     if((shared->node_info[shared->depth].node_ptr_fac = H5FL_fac_init(sizeof(H5B2_node_ptr_t) * (shared->node_info[shared->depth].max_nrec + 1))) == NULL)
diff --git a/src/H5FScache.c b/src/H5FScache.c
index 74a9aab..8ad9631 100644
--- a/src/H5FScache.c
+++ b/src/H5FScache.c
@@ -606,7 +606,7 @@ HDfprintf(stderr, "%s: fspace->addr = %a, fs_addr = %a\n", FUNC, fspace->addr, f
         unsigned sect_cnt_size;         /* The size of the section size counts */

         /* Compute the size of the section counts */
-        sect_cnt_size = MAX(1, (H5V_log2_gen(fspace->serial_sect_count) + 7) / 8);
+        sect_cnt_size = H5V_limit_enc_size((uint64_t)fspace->serial_sect_count);
 #ifdef QAK
 HDfprintf(stderr, "%s: sect_cnt_size = %u\n", FUNC, sect_cnt_size);
 HDfprintf(stderr, "%s: fspace->sect_len_size = %u\n", FUNC, fspace->sect_len_size);
@@ -905,7 +905,7 @@ HDfprintf(stderr, "%s: sinfo->fspace->addr = %a\n", FUNC, sinfo->fspace->addr);
     /* Set up user data for iterator */
     udata.sinfo = sinfo;
     udata.p = &p;
-    udata.sect_cnt_size = MAX(1, (H5V_log2_gen(sinfo->fspace->serial_sect_count) + 7) / 8);
+    udata.sect_cnt_size = H5V_limit_enc_size((uint64_t)sinfo->fspace->serial_sect_count);
 #ifdef QAK
 HDfprintf(stderr, "%s: udata.sect_cnt_size = %u\n", FUNC, udata.sect_cnt_size);
 #endif /* QAK */
diff --git a/src/H5FSsection.c b/src/H5FSsection.c
index c589aa0..34da067 100644
--- a/src/H5FSsection.c
+++ b/src/H5FSsection.c
@@ -147,7 +147,7 @@ H5FS_sinfo_new(H5F_t *f, H5FS_t *fspace)
     sinfo->nbins = H5V_log2_gen(fspace->max_sect_size);
     sinfo->sect_prefix_size = H5FS_SINFO_PREFIX_SIZE(f);
     sinfo->sect_off_size = (fspace->max_sect_addr + 7) / 8;
-    sinfo->sect_len_size = (H5V_log2_gen(fspace->max_sect_size) + 7) / 8;
+    sinfo->sect_len_size = H5V_limit_enc_size((uint64_t)fspace->max_sect_size);
     sinfo->fspace = fspace;
 #ifdef QAK
 HDfprintf(stderr, "%s: sinfo->nbins = %u\n", FUNC, sinfo->nbins);
@@ -1300,7 +1300,7 @@ HDfprintf(stderr, "%s: fspace->sinfo->serial_size_count = %Zu\n", FUNC, fspace->
 HDfprintf(stderr, "%s: fspace->sinfo->serial_size_count = %Zu\n", FUNC, fspace->sinfo->serial_size_count);
 HDfprintf(stderr, "%s: fspace->serial_sect_count = %Hu\n", FUNC, fspace->serial_sect_count);
 #endif /* QAK */
-    sect_buf_size += fspace->sinfo->serial_size_count * MAX(1, ((H5V_log2_gen(fspace->serial_sect_count) + 7) / 8));
+    sect_buf_size += fspace->sinfo->serial_size_count * H5V_limit_enc_size((uint64_t)fspace->serial_sect_count);

     /* Size for each differently sized serializable section */
     sect_buf_size += fspace->sinfo->serial_size_count * fspace->sinfo->sect_len_size;
diff --git a/src/H5HFhdr.c b/src/H5HFhdr.c
index c1b856b..60d5002 100644
--- a/src/H5HFhdr.c
+++ b/src/H5HFhdr.c
@@ -225,7 +225,7 @@ H5HF_hdr_finish_init_phase1(H5HF_hdr_t *hdr)

     /* Set the size of heap IDs */
     hdr->heap_len_size = MIN(hdr->man_dtable.max_dir_blk_off_size,
-            ((H5V_log2_gen((uint64_t)hdr->max_man_size) + 7) / 8));
+            H5V_limit_enc_size((uint64_t)hdr->max_man_size));

 done:
     FUNC_LEAVE_NOAPI(ret_value)
diff --git a/src/H5Odtype.c b/src/H5Odtype.c
index ac07edf..ba343bc 100644
--- a/src/H5Odtype.c
+++ b/src/H5Odtype.c
@@ -232,10 +232,11 @@ H5O_dtype_decode_helper(H5F_t *f, const uint8_t **pp, H5T_t *dt)
         case H5T_COMPOUND:
             {
                 unsigned offset_nbytes;         /* Size needed to encode member offsets */
+                size_t max_memb_pos = 0;        /* Maximum member covered, so far */
                 unsigned j;

                 /* Compute the # of bytes required to store a member offset */
-                offset_nbytes = (H5V_log2_gen((uint64_t)dt->shared->size) + 7) / 8;
+                offset_nbytes = H5V_limit_enc_size((uint64_t)dt->shared->size);

                 /*
                  * Compound datatypes...
@@ -336,6 +337,18 @@ H5O_dtype_decode_helper(H5F_t *f, const uint8_t **pp, H5T_t *dt)
                 /* Set the field datatype (finally :-) */
                 dt->shared->u.compnd.memb[i].type = temp_type;

+                /* Check if this field overlaps with a prior field */
+                /* (probably indicates that the file is corrupt) */
+                if(i > 0 && dt->shared->u.compnd.memb[i].offset < max_memb_pos) {
+                    for(j = 0; j < i; j++)
+                        if(dt->shared->u.compnd.memb[i].offset >= dt->shared->u.compnd.memb[j].offset
+                                && dt->shared->u.compnd.memb[i].offset < (dt->shared->u.compnd.memb[j].offset + dt->shared->u.compnd.memb[j].size))
+                            HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, FAIL, "member overlaps with previous member")
+                } /* end if */
+
+                /* Update the maximum member position covered */
+                max_memb_pos = MAX(max_memb_pos, (dt->shared->u.compnd.memb[i].offset + dt->shared->u.compnd.memb[i].size));
+
                 /* Check if the datatype stayed packed */
                 if(dt->shared->u.compnd.packed) {
                     /* Check if the member type is packed */
@@ -734,7 +747,7 @@ H5O_dtype_encode_helper(const H5F_t *f, uint8_t **pp, const H5T_t *dt)
                 unsigned offset_nbytes;         /* Size needed to encode member offsets */

                 /* Compute the # of bytes required to store a member offset */
-                offset_nbytes = (H5V_log2_gen((uint64_t)dt->shared->size) + 7) / 8;
+                offset_nbytes = H5V_limit_enc_size((uint64_t)dt->shared->size);

                 /*
                  * Compound datatypes...
@@ -1097,7 +1110,7 @@ H5O_dtype_size(const H5F_t *f, const void *_mesg)
             unsigned offset_nbytes;     /* Size needed to encode member offsets */

             /* Compute the # of bytes required to store a member offset */
-            offset_nbytes = (H5V_log2_gen((uint64_t)dt->shared->size) + 7) / 8;
+            offset_nbytes = H5V_limit_enc_size((uint64_t)dt->shared->size);

             /* Compute the total size needed to encode compound datatype */
             for(u = 0; u < dt->shared->u.compnd.nmembs; u++) {
diff --git a/src/H5Vprivate.h b/src/H5Vprivate.h
index 94b3699..b92266c 100644
--- a/src/H5Vprivate.h
+++ b/src/H5Vprivate.h
@@ -410,5 +410,25 @@ H5V_log2_of2(uint32_t n)
     return(MultiplyDeBruijnBitPosition[(n * (uint32_t)0x077CB531UL) >> 27]);
 } /* H5V_log2_of2() */

+
+/*-------------------------------------------------------------------------
+ * Function:    H5V_limit_enc_size
+ *
+ * Purpose:     Determine the # of bytes needed to encode values within a
+ *              range from 0 to a given limit
+ *
+ * Return:      Number of bytes needed
+ *
+ * Programmer:  Quincey Koziol
+ *              Thursday, March 13, 2008
+ *
+ *-------------------------------------------------------------------------
+ */
+static H5_inline unsigned UNUSED
+H5V_limit_enc_size(uint64_t limit)
+{
+    return (H5V_log2_gen(limit) / 8) + 1;
+} /* end H5V_limit_enc_size() */
+
 #endif /* H5Vprivate_H */
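
Beyond the offset-size change, the H5Odtype.c hunk also adds a decode-time sanity check that rejects a compound member whose offset lands inside a previously decoded member, since that normally indicates a corrupt file. A simplified, hypothetical standalone version of that check (plain structs instead of HDF5's internal H5T_t layout) could look like this:

```c
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical simplified view of a decoded compound member */
struct memb {
    size_t offset;  /* byte offset of the member within the compound */
    size_t size;    /* size of the member in bytes */
};

/*
 * Return true if member 'i' starts inside any earlier member, mirroring
 * the overlap test added to H5O_dtype_decode_helper(): a decoded offset
 * that falls inside a previously decoded member usually means corruption.
 */
static bool member_overlaps(const struct memb *m, size_t i, size_t max_covered)
{
    /* Fast path: no earlier member extends past this offset */
    if (i == 0 || m[i].offset >= max_covered)
        return false;

    for (size_t j = 0; j < i; j++)
        if (m[i].offset >= m[j].offset &&
            m[i].offset < m[j].offset + m[j].size)
            return true;

    return false;
}
```

A decoder would call this for each member as it is read, tracking `max_covered` as the running maximum of `offset + size` over the members decoded so far, which is what the new `max_memb_pos` variable does in the patch.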