author    | Neil Fortner <nfortne2@hdfgroup.org> | 2015-05-21 17:13:16 (GMT)
committer | Neil Fortner <nfortne2@hdfgroup.org> | 2015-05-21 17:13:16 (GMT)
commit    | 948722cde8db7f53866393ada4c08b88b2a91e3e (patch)
tree      | afbf15d2ff270b439b9c202d8a89c5d783871270 /src/H5Dbtree.c
parent    | f7e10b55ab75857e8a5bade01749842b060b5783 (diff)
parent    | 194d647721cb4f71e6330d11c244b3d7db8309e7 (diff)
[svn-r27103] Merge revisions 26780 through 27102 from trunk to vds branch.
Tested: ummon
Diffstat (limited to 'src/H5Dbtree.c')
-rw-r--r-- | src/H5Dbtree.c | 288
1 file changed, 178 insertions, 110 deletions
diff --git a/src/H5Dbtree.c b/src/H5Dbtree.c
index 75080f7..2933e5e 100644
--- a/src/H5Dbtree.c
+++ b/src/H5Dbtree.c
@@ -49,11 +49,6 @@
 /* Local Macros */
 /****************/
 
-/*
- * Given a B-tree node return the dimensionality of the chunks pointed to by
- * that node.
- */
-#define H5D_BTREE_NDIMS(X)      (((X)->sizeof_rkey-8)/8)
 
 /******************/
 /* Local Typedefs */
@@ -74,8 +69,8 @@
  * The chunk's file address is part of the B-tree and not part of the key.
  */
 typedef struct H5D_btree_key_t {
+    hsize_t     scaled[H5O_LAYOUT_NDIMS];   /*logical offset to start*/
     uint32_t    nbytes;                     /*size of stored data   */
-    hsize_t     offset[H5O_LAYOUT_NDIMS];   /*logical offset to start*/
     unsigned    filter_mask;                /*excluded filters      */
 } H5D_btree_key_t;
 
@@ -97,8 +92,9 @@ typedef struct H5D_btree_dbg_t {
 /* Local Prototypes */
 /********************/
 
+static herr_t H5D__btree_shared_free(void *_shared);
 static herr_t H5D__btree_shared_create(const H5F_t *f, H5O_storage_chunk_t *store,
-    unsigned ndims);
+    const H5O_layout_chunk_t *layout);
 
 /* B-tree iterator callbacks */
 static int H5D__btree_idx_iterate_cb(H5F_t *f, hid_t dxpl_id, const void *left_key,
@@ -203,6 +199,10 @@ H5B_class_t H5B_BTREE[1] = {{
 /* Local Variables */
 /*******************/
 
+/* Declare a free list to manage H5O_layout_chunk_t objects */
+H5FL_DEFINE_STATIC(H5O_layout_chunk_t);
+
+
 
 /*-------------------------------------------------------------------------
  * Function:    H5D__btree_get_shared
@@ -255,7 +255,7 @@ H5D__btree_get_shared(const H5F_t UNUSED *f, const void *_udata)
  *-------------------------------------------------------------------------
  */
 static herr_t
-H5D__btree_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op,
+H5D__btree_new_node(H5F_t *f, hid_t UNUSED dxpl_id, H5B_ins_t op,
     void *_lt_key, void *_udata, void *_rt_key,
     haddr_t *addr_p/*out*/)
 {
@@ -265,7 +265,7 @@ H5D__btree_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op,
     unsigned    u;
     herr_t      ret_value = SUCCEED;    /* Return value */
 
-    FUNC_ENTER_STATIC
+    FUNC_ENTER_STATIC_NOERR
 
     /* check args */
     HDassert(f);
@@ -275,21 +275,19 @@ H5D__btree_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op,
     HDassert(udata->common.layout->ndims > 0 && udata->common.layout->ndims < H5O_LAYOUT_NDIMS);
     HDassert(addr_p);
 
-    /* Allocate new storage */
-    HDassert(udata->nbytes > 0);
-    H5_CHECK_OVERFLOW(udata->nbytes, uint32_t, hsize_t);
-    if(HADDR_UNDEF == (*addr_p = H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, (hsize_t)udata->nbytes)))
-        HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "couldn't allocate new file storage")
-    udata->addr = *addr_p;
+    /* Set address */
+    HDassert(H5F_addr_defined(udata->chunk_block.offset));
+    HDassert(udata->chunk_block.length > 0);
+    *addr_p = udata->chunk_block.offset;
 
     /*
      * The left key describes the storage of the UDATA chunk being
      * inserted into the tree.
      */
-    lt_key->nbytes = udata->nbytes;
+    H5_CHECKED_ASSIGN(lt_key->nbytes, uint32_t, udata->chunk_block.length, hsize_t);
     lt_key->filter_mask = udata->filter_mask;
     for(u = 0; u < udata->common.layout->ndims; u++)
-        lt_key->offset[u] = udata->common.offset[u];
+        lt_key->scaled[u] = udata->common.scaled[u];
 
     /*
      * The right key might already be present.  If not, then add a zero-width
@@ -299,13 +297,11 @@
         rt_key->nbytes = 0;
         rt_key->filter_mask = 0;
         for(u = 0; u < udata->common.layout->ndims; u++) {
-            HDassert(udata->common.offset[u] + udata->common.layout->dim[u] >
-                udata->common.offset[u]);
-            rt_key->offset[u] = udata->common.offset[u] + udata->common.layout->dim[u];
+            HDassert(udata->common.scaled[u] + 1 > udata->common.scaled[u]);
+            rt_key->scaled[u] = udata->common.scaled[u] + 1;
         } /* end if */
     } /* end if */
 
-done:
     FUNC_LEAVE_NOAPI(ret_value)
 } /* end H5D__btree_new_node() */
 
@@ -345,7 +341,7 @@ H5D__btree_cmp2(void *_lt_key, void *_udata, void *_rt_key)
     HDassert(udata->layout->ndims > 0 && udata->layout->ndims <= H5O_LAYOUT_NDIMS);
 
     /* Compare the offsets but ignore the other fields */
-    ret_value = H5VM_vector_cmp_u(udata->layout->ndims, lt_key->offset, rt_key->offset);
+    ret_value = H5VM_vector_cmp_u(udata->layout->ndims, lt_key->scaled, rt_key->scaled);
 
     FUNC_LEAVE_NOAPI(ret_value)
 } /* end H5D__btree_cmp2() */
@@ -400,18 +396,18 @@ H5D__btree_cmp3(void *_lt_key, void *_udata, void *_rt_key)
     /* indexed storage B-tree... */
     /* (Dump the B-tree with h5debug to look at it) -QAK */
     if(udata->layout->ndims == 2) {
-        if(udata->offset[0] > rt_key->offset[0])
+        if(udata->scaled[0] > rt_key->scaled[0])
             ret_value = 1;
-        else if(udata->offset[0] == rt_key->offset[0] &&
-                udata->offset[1] >= rt_key->offset[1])
+        else if(udata->scaled[0] == rt_key->scaled[0] &&
+                udata->scaled[1] >= rt_key->scaled[1])
             ret_value = 1;
-        else if(udata->offset[0] < lt_key->offset[0])
+        else if(udata->scaled[0] < lt_key->scaled[0])
             ret_value = (-1);
     } /* end if */
     else {
-        if(H5VM_vector_ge_u(udata->layout->ndims, udata->offset, rt_key->offset))
+        if(H5VM_vector_ge_u(udata->layout->ndims, udata->scaled, rt_key->scaled))
             ret_value = 1;
-        else if(H5VM_vector_lt_u(udata->layout->ndims, udata->offset, lt_key->offset))
+        else if(H5VM_vector_lt_u(udata->layout->ndims, udata->scaled, lt_key->scaled))
             ret_value = (-1);
     } /* end else */
 
@@ -463,13 +459,13 @@ H5D__btree_found(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, haddr_t addr, const void
 
     /* Is this *really* the requested chunk? */
     for(u = 0; u < udata->common.layout->ndims; u++)
-        if(udata->common.offset[u] >= lt_key->offset[u] + udata->common.layout->dim[u])
+        if(udata->common.scaled[u] >= (lt_key->scaled[u] + 1))
             HGOTO_DONE(FALSE)
 
     /* Initialize return values */
     HDassert(lt_key->nbytes > 0);
-    udata->addr = addr;
-    udata->nbytes = lt_key->nbytes;
+    udata->chunk_block.offset = addr;
+    udata->chunk_block.length = lt_key->nbytes;
     udata->filter_mask = lt_key->filter_mask;
 
 done:
@@ -478,6 +474,44 @@ done:
 
 
 /*-------------------------------------------------------------------------
+ * Function:    H5D__chunk_disjoint
+ *
+ * Purpose:     Determines if two chunks are disjoint.
+ *
+ * Return:      Success:        FALSE if they are not disjoint.
+ *                              TRUE if they are disjoint.
+ *
+ * Programmer:  Quincey Koziol
+ *              Wednesday, May 6, 2015
+ *
+ * Note:        Assumes that the chunk offsets are scaled coordinates
+ *
+ *-------------------------------------------------------------------------
+ */
+static hbool_t
+H5D__chunk_disjoint(unsigned n, const hsize_t *scaled1, const hsize_t *scaled2)
+{
+    unsigned u;                 /* Local index variable */
+    hbool_t ret_value = FALSE;  /* Return value */
+
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Sanity checks */
+    HDassert(n);
+    HDassert(scaled1);
+    HDassert(scaled2);
+
+    /* Loop over two chunks, detecting disjointness and getting out quickly */
+    for(u = 0; u < n; u++)
+        if((scaled1[u] + 1) <= scaled2[u] || (scaled2[u] + 1) <= scaled1[u])
+            HGOTO_DONE(TRUE)
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__chunk_disjoint() */
+
+
+/*-------------------------------------------------------------------------
  * Function:    H5D__btree_insert
 *
 * Purpose:     This function is called when the B-tree insert engine finds
@@ -507,7 +541,7 @@ done:
 */
 /* ARGSUSED */
 static H5B_ins_t
-H5D__btree_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key,
+H5D__btree_insert(H5F_t *f, hid_t UNUSED dxpl_id, haddr_t addr, void *_lt_key,
     hbool_t *lt_key_changed,
     void *_md_key, void *_udata, void *_rt_key,
     hbool_t UNUSED *rt_key_changed,
@@ -541,68 +575,40 @@ H5D__btree_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key,
         HGOTO_ERROR(H5E_STORAGE, H5E_UNSUPPORTED, H5B_INS_ERROR, "internal error")
 
     } else if(H5VM_vector_eq_u(udata->common.layout->ndims,
-            udata->common.offset, lt_key->offset) &&
-            lt_key->nbytes > 0) {
+            udata->common.scaled, lt_key->scaled) && lt_key->nbytes > 0) {
         /*
          * Already exists.  If the new size is not the same as the old size
          * then we should reallocate storage.
         */
-        if(lt_key->nbytes != udata->nbytes) {
-/* Currently, the old chunk data is "thrown away" after the space is reallocated,
- * so avoid data copy in H5MF_realloc() call by just free'ing the space and
- * allocating new space.
- *
- * This should keep the file smaller also, by freeing the space and then
- * allocating new space, instead of vice versa (in H5MF_realloc).
- *
- * QAK - 11/19/2002
- */
-#ifdef OLD_WAY
-            if(HADDR_UNDEF == (*new_node_p = H5MF_realloc(f, H5FD_MEM_DRAW, addr,
-                    (hsize_t)lt_key->nbytes, (hsize_t)udata->nbytes)))
-                HGOTO_ERROR(H5E_STORAGE, H5E_NOSPACE, H5B_INS_ERROR, "unable to reallocate chunk storage")
-#else /* OLD_WAY */
-            H5_CHECK_OVERFLOW(lt_key->nbytes, uint32_t, hsize_t);
-            if(H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, addr, (hsize_t)lt_key->nbytes) < 0)
-                HGOTO_ERROR(H5E_STORAGE, H5E_CANTFREE, H5B_INS_ERROR, "unable to free chunk")
-            H5_CHECK_OVERFLOW(udata->nbytes, uint32_t, hsize_t);
-            if(HADDR_UNDEF == (*new_node_p = H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, (hsize_t)udata->nbytes)))
-                HGOTO_ERROR(H5E_STORAGE, H5E_NOSPACE, H5B_INS_ERROR, "unable to reallocate chunk")
-#endif /* OLD_WAY */
-            lt_key->nbytes = udata->nbytes;
+        if(lt_key->nbytes != udata->chunk_block.length) {
+            /* Set node's address (already re-allocated by main chunk routines) */
+            HDassert(H5F_addr_defined(udata->chunk_block.offset));
+            *new_node_p = udata->chunk_block.offset;
+            H5_CHECKED_ASSIGN(lt_key->nbytes, uint32_t, udata->chunk_block.length, hsize_t);
             lt_key->filter_mask = udata->filter_mask;
             *lt_key_changed = TRUE;
-            udata->addr = *new_node_p;
             ret_value = H5B_INS_CHANGE;
         } else {
-            udata->addr = addr;
+            /* Already have address in udata, from main chunk routines */
+            HDassert(H5F_addr_defined(udata->chunk_block.offset));
            ret_value = H5B_INS_NOOP;
        }
 
-    } else if (H5VM_hyper_disjointp(udata->common.layout->ndims,
-            lt_key->offset, udata->common.layout->dim,
-            udata->common.offset, udata->common.layout->dim)) {
-        HDassert(H5VM_hyper_disjointp(udata->common.layout->ndims,
-            rt_key->offset, udata->common.layout->dim,
-            udata->common.offset, udata->common.layout->dim));
+    } else if (H5D__chunk_disjoint(udata->common.layout->ndims,
+            lt_key->scaled, udata->common.scaled)) {
+        HDassert(H5D__chunk_disjoint(udata->common.layout->ndims,
+                rt_key->scaled, udata->common.scaled));
         /*
          * Split this node, inserting the new new node to the right of the
          * current node.  The MD_KEY is where the split occurs.
         */
-        md_key->nbytes = udata->nbytes;
+        H5_CHECKED_ASSIGN(md_key->nbytes, uint32_t, udata->chunk_block.length, hsize_t);
         md_key->filter_mask = udata->filter_mask;
-        for(u = 0; u < udata->common.layout->ndims; u++) {
-            HDassert(0 == udata->common.offset[u] % udata->common.layout->dim[u]);
-            md_key->offset[u] = udata->common.offset[u];
-        } /* end for */
+        for(u = 0; u < udata->common.layout->ndims; u++)
+            md_key->scaled[u] = udata->common.scaled[u];
 
-        /*
-         * Allocate storage for the new chunk
-         */
-        H5_CHECK_OVERFLOW(udata->nbytes, uint32_t, hsize_t);
-        if(HADDR_UNDEF == (*new_node_p = H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, (hsize_t)udata->nbytes)))
-            HGOTO_ERROR(H5E_STORAGE, H5E_NOSPACE, H5B_INS_ERROR, "file allocation failed")
-        udata->addr = *new_node_p;
+        HDassert(H5F_addr_defined(udata->chunk_block.offset));
+        *new_node_p = udata->chunk_block.offset;
         ret_value = H5B_INS_RIGHT;
 
     } else {
@@ -669,9 +675,10 @@ done:
 static herr_t
 H5D__btree_decode_key(const H5B_shared_t *shared, const uint8_t *raw, void *_key)
 {
-    H5D_btree_key_t *key = (H5D_btree_key_t *) _key;
-    size_t ndims;
-    unsigned u;
+    const H5O_layout_chunk_t *layout;   /* Chunk layout description */
+    H5D_btree_key_t *key = (H5D_btree_key_t *) _key;    /* Pointer to decoded key */
+    hsize_t tmp_offset;                 /* Temporary coordinate offset, from file */
+    unsigned u;                         /* Local index variable */
 
     FUNC_ENTER_STATIC_NOERR
 
@@ -679,14 +686,21 @@ H5D__btree_decode_key(const H5B_shared_t *shared, const uint8_t *raw, void *_key
     HDassert(shared);
     HDassert(raw);
     HDassert(key);
-    ndims = H5D_BTREE_NDIMS(shared);
-    HDassert(ndims <= H5O_LAYOUT_NDIMS);
+    layout = (const H5O_layout_chunk_t *)shared->udata;
+    HDassert(layout);
+    HDassert(layout->ndims > 0 && layout->ndims <= H5O_LAYOUT_NDIMS);
 
     /* decode */
     UINT32DECODE(raw, key->nbytes);
     UINT32DECODE(raw, key->filter_mask);
-    for(u = 0; u < ndims; u++)
-        UINT64DECODE(raw, key->offset[u]);
+    for(u = 0; u < layout->ndims; u++) {
+        /* Retrieve coordinate offset */
+        UINT64DECODE(raw, tmp_offset);
+        HDassert(0 == (tmp_offset % layout->dim[u]));
+
+        /* Convert to a scaled offset */
+        key->scaled[u] = tmp_offset / layout->dim[u];
+    } /* end for */
 
     FUNC_LEAVE_NOAPI(SUCCEED)
 } /* end H5D__btree_decode_key() */
@@ -707,9 +721,10 @@ H5D__btree_decode_key(const H5B_shared_t *shared, const uint8_t *raw, void *_key
 static herr_t
 H5D__btree_encode_key(const H5B_shared_t *shared, uint8_t *raw, const void *_key)
 {
+    const H5O_layout_chunk_t *layout;   /* Chunk layout description */
     const H5D_btree_key_t *key = (const H5D_btree_key_t *)_key;
-    size_t ndims;
-    unsigned u;
+    hsize_t tmp_offset;                 /* Temporary coordinate offset, from file */
+    unsigned u;                         /* Local index variable */
 
     FUNC_ENTER_STATIC_NOERR
 
@@ -717,14 +732,18 @@ H5D__btree_encode_key(const H5B_shared_t *shared, uint8_t *raw, const void *_key
     HDassert(shared);
     HDassert(raw);
     HDassert(key);
-    ndims = H5D_BTREE_NDIMS(shared);
-    HDassert(ndims <= H5O_LAYOUT_NDIMS);
+    layout = (const H5O_layout_chunk_t *)shared->udata;
+    HDassert(layout);
+    HDassert(layout->ndims > 0 && layout->ndims <= H5O_LAYOUT_NDIMS);
 
     /* encode */
     UINT32ENCODE(raw, key->nbytes);
     UINT32ENCODE(raw, key->filter_mask);
-    for(u = 0; u < ndims; u++)
-        UINT64ENCODE(raw, key->offset[u]);
+    for(u = 0; u < layout->ndims; u++) {
+        /* Compute coordinate offset from scaled offset */
+        tmp_offset = key->scaled[u] * layout->dim[u];
+        UINT64ENCODE(raw, tmp_offset);
+    } /* end for */
 
     FUNC_LEAVE_NOAPI(SUCCEED)
 } /* end H5D__btree_encode_key() */
@@ -759,7 +778,7 @@ H5D__btree_debug_key(FILE *stream, int indent, int fwidth, const void *_key,
     HDfprintf(stream, "%*s%-*s 0x%08x\n", indent, "", fwidth, "Filter mask:", key->filter_mask);
     HDfprintf(stream, "%*s%-*s {", indent, "", fwidth, "Logical offset:");
     for(u = 0; u < udata->ndims; u++)
-        HDfprintf(stream, "%s%Hd", u?", ":"", key->offset[u]);
+        HDfprintf(stream, "%s%Hd", u?", ":"", (key->scaled[u] * udata->common.layout->dim[u]));
     HDfputs("}\n", stream);
 
     FUNC_LEAVE_NOAPI(SUCCEED)
@@ -767,6 +786,38 @@ H5D__btree_debug_key(FILE *stream, int indent, int fwidth, const void *_key,
 
 
 /*-------------------------------------------------------------------------
+ * Function:    H5D__btree_shared_free
+ *
+ * Purpose:     Free "local" B-tree shared info
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ * Programmer:  Quincey Koziol
+ *              Thursday, May 7, 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__btree_shared_free(void *_shared)
+{
+    H5B_shared_t *shared = (H5B_shared_t *)_shared;
+    herr_t ret_value = SUCCEED;         /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Free the chunk layout information */
+    shared->udata = H5FL_FREE(H5O_layout_chunk_t, shared->udata);
+
+    /* Chain up to the generic B-tree shared info free routine */
+    if(H5B_shared_free(shared) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "can't free shared B-tree info")
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__btree_shared_free() */
+
+
+/*-------------------------------------------------------------------------
  * Function:    H5D__btree_shared_create
 *
 * Purpose:     Create & initialize B-tree shared info
@@ -779,9 +830,11 @@ H5D__btree_debug_key(FILE *stream, int indent, int fwidth, const void *_key,
 *-------------------------------------------------------------------------
 */
 static herr_t
-H5D__btree_shared_create(const H5F_t *f, H5O_storage_chunk_t *store, unsigned ndims)
+H5D__btree_shared_create(const H5F_t *f, H5O_storage_chunk_t *store,
+    const H5O_layout_chunk_t *layout)
 {
     H5B_shared_t *shared;               /* Shared B-tree node info */
+    H5O_layout_chunk_t *my_layout = NULL;   /* Pointer to copy of layout info */
     size_t sizeof_rkey;                 /* Size of raw (disk) key */
     herr_t ret_value = SUCCEED;         /* Return value */
 
@@ -790,20 +843,27 @@ H5D__btree_shared_create(const H5F_t *f, H5O_storage_chunk_t *store, unsigned nd
     /* Set the raw key size */
     sizeof_rkey = 4 +                   /*storage size          */
                   4 +                   /*filter mask           */
-                  ndims * 8;            /*dimension indices     */
+                  layout->ndims * 8;    /*dimension indices     */
 
     /* Allocate & initialize global info for the shared structure */
     if(NULL == (shared = H5B_shared_new(f, H5B_BTREE, sizeof_rkey)))
-        HGOTO_ERROR(H5E_BTREE, H5E_NOSPACE, FAIL, "memory allocation failed for shared B-tree info")
+        HGOTO_ERROR(H5E_DATASET, H5E_NOSPACE, FAIL, "memory allocation failed for shared B-tree info")
 
     /* Set up the "local" information for this dataset's chunks */
-    /* <none> */
+    if(NULL == (my_layout = H5FL_MALLOC(H5O_layout_chunk_t)))
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "can't allocate chunk layout")
+    HDmemcpy(my_layout, layout, sizeof(H5O_layout_chunk_t));
+    shared->udata = my_layout;
 
     /* Make shared B-tree info reference counted */
-    if(NULL == (store->u.btree.shared = H5UC_create(shared, H5B_shared_free)))
-        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't create ref-count wrapper for shared B-tree info")
+    if(NULL == (store->u.btree.shared = H5UC_create(shared, H5D__btree_shared_free)))
+        HGOTO_ERROR(H5E_DATASET, H5E_NOSPACE, FAIL, "can't create ref-count wrapper for shared B-tree info")
 
 done:
+    if(ret_value < 0)
+        if(my_layout)
+            my_layout = H5FL_FREE(H5O_layout_chunk_t, my_layout);
+
     FUNC_LEAVE_NOAPI(ret_value)
 } /* end H5D__btree_shared_create() */
 
@@ -839,7 +899,7 @@ H5D__btree_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t UNUSED *spac
     idx_info->storage->u.btree.dset_ohdr_addr = dset_ohdr_addr;
 
     /* Allocate the shared structure */
-    if(H5D__btree_shared_create(idx_info->f, idx_info->storage, idx_info->layout->ndims) < 0)
+    if(H5D__btree_shared_create(idx_info->f, idx_info->storage, idx_info->layout) < 0)
         HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info")
 
 done:
@@ -1032,8 +1092,8 @@ H5D__btree_idx_iterate_cb(H5F_t UNUSED *f, hid_t UNUSED dxpl_id,
     /* Sanity check for memcpy() */
     HDcompile_assert(offsetof(H5D_chunk_rec_t, nbytes) == offsetof(H5D_btree_key_t, nbytes));
     HDcompile_assert(sizeof(chunk_rec.nbytes) == sizeof(lt_key->nbytes));
-    HDcompile_assert(offsetof(H5D_chunk_rec_t, offset) == offsetof(H5D_btree_key_t, offset));
-    HDcompile_assert(sizeof(chunk_rec.offset) == sizeof(lt_key->offset));
+    HDcompile_assert(offsetof(H5D_chunk_rec_t, scaled) == offsetof(H5D_btree_key_t, scaled));
+    HDcompile_assert(sizeof(chunk_rec.scaled) == sizeof(lt_key->scaled));
     HDcompile_assert(offsetof(H5D_chunk_rec_t, filter_mask) == offsetof(H5D_btree_key_t, filter_mask));
     HDcompile_assert(sizeof(chunk_rec.filter_mask) == sizeof(lt_key->filter_mask));
 
@@ -1170,7 +1230,7 @@ H5D__btree_idx_delete(const H5D_chk_idx_info_t *idx_info)
         tmp_storage = *idx_info->storage;
 
         /* Set up the shared structure */
-        if(H5D__btree_shared_create(idx_info->f, &tmp_storage, idx_info->layout->ndims) < 0)
+        if(H5D__btree_shared_create(idx_info->f, &tmp_storage, idx_info->layout) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info")
 
         /* Set up B-tree user data */
@@ -1227,9 +1287,9 @@ H5D__btree_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src,
     HDassert(!H5F_addr_defined(idx_info_dst->storage->idx_addr));
 
     /* Create shared B-tree info for each file */
-    if(H5D__btree_shared_create(idx_info_src->f, idx_info_src->storage, idx_info_src->layout->ndims) < 0)
+    if(H5D__btree_shared_create(idx_info_src->f, idx_info_src->storage, idx_info_src->layout) < 0)
         HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for source shared B-tree info")
-    if(H5D__btree_shared_create(idx_info_dst->f, idx_info_dst->storage, idx_info_dst->layout->ndims) < 0)
+    if(H5D__btree_shared_create(idx_info_dst->f, idx_info_dst->storage, idx_info_dst->layout) < 0)
         HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for destination shared B-tree info")
 
     /* Create the root of the B-tree that describes chunked storage in the dest. file */
@@ -1309,7 +1369,7 @@ H5D__btree_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size)
     HDassert(index_size);
 
     /* Initialize the shared info for the B-tree traversal */
-    if(H5D__btree_shared_create(idx_info->f, idx_info->storage, idx_info->layout->ndims) < 0)
+    if(H5D__btree_shared_create(idx_info->f, idx_info->storage, idx_info->layout) < 0)
         HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info")
     shared_init = TRUE;
 
@@ -1441,11 +1501,13 @@ done:
 */
 herr_t
 H5D_btree_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, int indent,
-    int fwidth, unsigned ndims)
+    int fwidth, unsigned ndims, const uint32_t *dim)
 {
     H5D_btree_dbg_t udata;              /* User data for B-tree callback */
     H5O_storage_chunk_t storage;        /* Storage information for B-tree callback */
+    H5O_layout_chunk_t layout;          /* Layout information for B-tree callback */
     hbool_t shared_init = FALSE;        /* Whether B-tree shared info is initialized */
+    unsigned u;                         /* Local index variable */
     herr_t ret_value = SUCCEED;         /* Return value */
 
     FUNC_ENTER_NOAPI(FAIL)
@@ -1454,15 +1516,21 @@ H5D_btree_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, int indent
     HDmemset(&storage, 0, sizeof(storage));
     storage.idx_type = H5D_CHUNK_IDX_BTREE;
 
+    /* Reset "fake" layout info */
+    HDmemset(&layout, 0, sizeof(layout));
+    layout.ndims = ndims;
+    for(u = 0; u < ndims; u++)
+        layout.dim[u] = dim[u];
+
     /* Allocate the shared structure */
-    if(H5D__btree_shared_create(f, &storage, ndims) < 0)
+    if(H5D__btree_shared_create(f, &storage, &layout) < 0)
        HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info")
     shared_init = TRUE;
 
     /* Set up user data for callback */
-    udata.common.layout = NULL;
+    udata.common.layout = &layout;
     udata.common.storage = &storage;
-    udata.common.offset = NULL;
+    udata.common.scaled = NULL;
     udata.ndims = ndims;
 
     /* Dump the records for the B-tree */
@@ -1472,10 +1540,10 @@ done:
     if(shared_init) {
         /* Free the raw B-tree node buffer */
         if(NULL == storage.u.btree.shared)
-            HDONE_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "ref-counted page nil")
+            HDONE_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "ref-counted shared info nil")
         else if(H5UC_DEC(storage.u.btree.shared) < 0)
-            HDONE_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "unable to decrement ref-counted page")
+            HDONE_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "unable to decrement ref-counted shared info")
     } /* end if */
 
     FUNC_LEAVE_NOAPI(ret_value)
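The decode/encode key changes above convert between the absolute logical offsets stored on disk and the in-memory "scaled" offsets (the logical offset divided by the chunk dimension in each rank), which is why H5D__btree_decode_key divides by layout->dim[u] and H5D__btree_encode_key multiplies by it. As a rough standalone sketch of that conversion only, not part of the patch and not HDF5 API (offset_to_scaled and scaled_to_offset are illustrative names):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NDIMS 2

/* Convert an absolute logical chunk offset to a scaled offset */
static void
offset_to_scaled(const uint64_t offset[NDIMS], const uint32_t dim[NDIMS],
    uint64_t scaled[NDIMS])
{
    for (unsigned u = 0; u < NDIMS; u++) {
        assert(0 == (offset[u] % dim[u]));  /* chunk offsets are aligned to chunk dims */
        scaled[u] = offset[u] / dim[u];
    }
}

/* Convert a scaled offset back to an absolute logical offset */
static void
scaled_to_offset(const uint64_t scaled[NDIMS], const uint32_t dim[NDIMS],
    uint64_t offset[NDIMS])
{
    for (unsigned u = 0; u < NDIMS; u++)
        offset[u] = scaled[u] * dim[u];
}

int
main(void)
{
    const uint32_t dim[NDIMS]    = {10, 20};    /* chunk dimensions */
    const uint64_t offset[NDIMS] = {30, 40};    /* chunk at logical offset (30, 40) */
    uint64_t scaled[NDIMS], back[NDIMS];

    offset_to_scaled(offset, dim, scaled);      /* -> {3, 2} */
    scaled_to_offset(scaled, dim, back);        /* -> {30, 40} */

    printf("scaled = {%llu, %llu}\n",
        (unsigned long long)scaled[0], (unsigned long long)scaled[1]);
    return 0;
}

With scaled coordinates, adjacent chunks differ by exactly 1 in each index, which is what the new H5D__chunk_disjoint() test and the right-key computation (scaled[u] + 1) in this patch rely on.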