author     Quincey Koziol <koziol@hdfgroup.org>    2015-12-30 18:49:03 (GMT)
committer  Quincey Koziol <koziol@hdfgroup.org>    2015-12-30 18:49:03 (GMT)
commit     a93b688efb56a779faf1453d98567cf03a8e14b8 (patch)
tree       9d2a26252998f26fb16cbf4c2b445e6d723cb4d2
parent     9b9b8e383e8bad502594a9d8be6c80b15b8c6b08 (diff)
[svn-r28755] Description:
Minor cleanups, and remove debugging context create/destroy callbacks
from dataset v2 B-tree class structures.
Tested on:
MacOSX/64 10.11.2 (amazon) w/serial & parallel
(h5committest not required on this branch)
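
The structural effect of the cleanup is that the dataset v2 B-tree class tables (H5D_BT2 and H5D_BT2_FILT in src/H5Dbtree2.c) now end at the record-debugging callback, and that callback receives the regular client context (H5D_bt2_ctx_t) instead of a separately built debugging context. The standalone C sketch below only illustrates that vtable shape; it does not reproduce the real H5B2_class_t definition, and the demo_* names are invented for the example:

    #include <stdio.h>

    /* Cut-down vtable sketch: the real H5B2_class_t carries more callbacks
     * (store, compare, encode, decode, ...); only the debugging tail of the
     * table matters for this commit. */
    typedef struct demo_bt2_class_t {
        int (*debug)(FILE *stream, const void *record, const void *ctx);
        /* Removed by this commit (they used to follow the debug callback):
         *   crt_dbg_context -- create debugging context
         *   dst_dbg_context -- destroy debugging context */
    } demo_bt2_class_t;

    static int demo_debug(FILE *stream, const void *record, const void *ctx)
    {
        (void)record;
        (void)ctx;    /* the regular client context is passed straight in */
        return fprintf(stream, "chunk record\n");
    }

    static const demo_bt2_class_t demo_class = { demo_debug };

    int main(void)
    {
        /* The caller hands the debug callback the same context it already
         * built for the other record callbacks, so no extra create/destroy
         * pair is needed. */
        return demo_class.debug(stdout, NULL, NULL) < 0;
    }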
-rw-r--r--   src/H5Dbtree2.c   135
-rw-r--r--   src/H5Dchunk.c    114
-rw-r--r--   src/H5Dpkg.h       12
3 files changed, 77 insertions, 184 deletions
diff --git a/src/H5Dbtree2.c b/src/H5Dbtree2.c
index 99dac25..2aa4701 100644
--- a/src/H5Dbtree2.c
+++ b/src/H5Dbtree2.c
@@ -90,8 +90,6 @@ static void *H5D__bt2_crt_context(void *udata);
 static herr_t H5D__bt2_dst_context(void *ctx);
 static herr_t H5D__bt2_store(void *native, const void *udata);
 static herr_t H5D__bt2_compare(const void *rec1, const void *rec2);
-static void *H5D__bt2_crt_dbg_context(H5F_t *f, hid_t dxpl_id, haddr_t obj_addr);
-static herr_t H5D__bt2_dst_dbg_context(void *_u_ctx);
 
 /* v2 B-tree class for indexing non-filtered chunked datasets */
 static herr_t H5D__bt2_unfilt_encode(uint8_t *raw, const void *native, void *ctx);
@@ -189,9 +187,7 @@ const H5B2_class_t H5D_BT2[1] = {{  /* B-tree class information */
     H5D__bt2_compare,           /* Record comparison callback */
     H5D__bt2_unfilt_encode,     /* Record encoding callback */
     H5D__bt2_unfilt_decode,     /* Record decoding callback */
-    H5D__bt2_unfilt_debug,      /* Record debugging callback */
-    H5D__bt2_crt_dbg_context,   /* Create debugging context */
-    H5D__bt2_dst_dbg_context    /* Destroy debugging context */
+    H5D__bt2_unfilt_debug       /* Record debugging callback */
 }};
 
 /* v2 B-tree class for indexing filtered chunked datasets */
@@ -205,9 +201,7 @@ const H5B2_class_t H5D_BT2_FILT[1] = {{ /* B-tree class information */
     H5D__bt2_compare,           /* Record comparison callback */
     H5D__bt2_filt_encode,       /* Record encoding callback */
     H5D__bt2_filt_decode,       /* Record decoding callback */
-    H5D__bt2_filt_debug,        /* Record debugging callback */
-    H5D__bt2_crt_dbg_context,   /* Create debugging context */
-    H5D__bt2_dst_dbg_context    /* Destroy debugging context */
+    H5D__bt2_filt_debug         /* Record debugging callback */
 }};
 
 
@@ -377,110 +371,6 @@ H5D__bt2_compare(const void *_udata, const void *_rec2)
 
 
 /*-------------------------------------------------------------------------
- * Function:    H5D__bt2_crt_dbg_context
- *
- * Purpose:     Create user data for debugged callback context
- *
- * Return:      Success:        non-NULL
- *              Failure:        NULL
- *
- * Programmer:  Vailin Choi; June 2010
- *
- *-------------------------------------------------------------------------
- */
-static void *
-H5D__bt2_crt_dbg_context(H5F_t *f, hid_t H5_ATTR_UNUSED dxpl_id, haddr_t obj_addr)
-{
-    H5D_bt2_ctx_ud_t *u_ctx = NULL;     /* User data for creating callback context */
-    H5O_loc_t obj_loc;          /* Pointer to an object's location */
-    hbool_t obj_opened = FALSE; /* Flag to indicate that the object header was opened */
-    H5O_layout_t layout;        /* Layout message */
-    void *ret_value = NULL;     /* Return value */
-
-    FUNC_ENTER_STATIC
-
-    /* Sanity check */
-    HDassert(f);
-    HDassert(H5F_addr_defined(obj_addr));
-
-    /* Set up the object header location info */
-    H5O_loc_reset(&obj_loc);
-    obj_loc.file = f;
-    obj_loc.addr = obj_addr;
-
-    /* Open the object header where the layout message resides */
-    if(H5O_open(&obj_loc) < 0)
-        HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, NULL, "can't open object header")
-    obj_opened = TRUE;
-
-    /* Read the layout message */
-    if(NULL == H5O_msg_read(&obj_loc, H5O_LAYOUT_ID, &layout, dxpl_id))
-        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, NULL, "can't get layout info")
-
-    /* close the object header */
-    if(H5O_close(&obj_loc) < 0)
-        HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, NULL, "can't close object header")
-
-    /* Allocate structure for storing user data to create callback context */
-    if(NULL == (u_ctx = H5FL_MALLOC(H5D_bt2_ctx_ud_t)))
-        HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, NULL, "can't allocate user data context structure ")
-
-    /* Set information for context structure */
-    u_ctx->f = f;
-    u_ctx->chunk_size = layout.u.chunk.size;
-    u_ctx->ndims = layout.u.chunk.ndims - 1;
-
-    /* Set return value */
-    ret_value = u_ctx;
-
-done:
-    /* Cleanup on error */
-    if(ret_value == NULL) {
-        /* Release context structure */
-        if(u_ctx)
-            u_ctx = H5FL_FREE(H5D_bt2_ctx_ud_t, u_ctx);
-
-        /* Close object header */
-        if(obj_opened) {
-            if(H5O_close(&obj_loc) < 0)
-                HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, NULL, "can't close object header")
-        } /* end if */
-    } /* end if */
-
-    FUNC_LEAVE_NOAPI(ret_value)
-} /* H5D__bt2_crt_dbg_context() */
-
-
-/*-------------------------------------------------------------------------
- * Function:    H5D__bt2_dst_dbg_context
- *
- * Purpose:     Destroy client callback context
- *
- * Return:      Success:        non-negative
- *              Failure:        negative
- *
- * Programmer:  Vailin Choi; June 2010
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5D__bt2_dst_dbg_context(void *_u_ctx)
-{
-    H5D_bt2_ctx_ud_t *u_ctx = (H5D_bt2_ctx_ud_t *)_u_ctx;   /* User data for creating callback context */
-
-    FUNC_ENTER_STATIC_NOERR
-
-    /* Sanity check */
-    HDassert(u_ctx);
-
-    /* Release user data for creating callback context */
-    u_ctx = H5FL_FREE(H5D_bt2_ctx_ud_t, u_ctx);
-
-    FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5D__bt2_dst_dbg_context() */
-
-
-/*-------------------------------------------------------------------------
  * Function:    H5D__bt2_unfilt_encode
  *
  * Purpose:     Encode native information into raw form for storing on disk
@@ -565,23 +455,24 @@ H5D__bt2_unfilt_decode(const uint8_t *raw, void *_record, void *_ctx)
  */
 static herr_t
 H5D__bt2_unfilt_debug(FILE *stream, int indent, int fwidth,
-    const void *_record, const void *_u_ctx)
+    const void *_record, const void *_ctx)
 {
     const H5D_chunk_rec_t *record = (const H5D_chunk_rec_t *)_record; /* The native record */
-    const H5D_bt2_ctx_ud_t *u_ctx = (const H5D_bt2_ctx_ud_t *)_u_ctx; /* User data for creating callback context */
+    const H5D_bt2_ctx_t *ctx = (const H5D_bt2_ctx_t *)_ctx;     /* Callback context */
     unsigned u;         /* Local index variable */
 
     FUNC_ENTER_STATIC_NOERR
 
     /* Sanity checks */
     HDassert(record);
-    HDassert(u_ctx->chunk_size == record->nbytes);
+    HDassert(ctx->chunk_size == record->nbytes);
     HDassert(0 == record->filter_mask);
 
     HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth, "Chunk address:", record->chunk_addr);
+
     HDfprintf(stream, "%*s%-*s {", indent, "", fwidth, "Logical offset:");
-    for(u = 0; u < u_ctx->ndims; u++)
-        HDfprintf(stream, "%s%Hd", u?", ":"", record->scaled[u] * u_ctx->dim[u]);
+    for(u = 0; u < ctx->ndims; u++)
+        HDfprintf(stream, "%s%Hd", u?", ":"", record->scaled[u] * ctx->dim[u]);
     HDfputs("}\n", stream);
 
     FUNC_LEAVE_NOAPI(SUCCEED)
@@ -671,7 +562,7 @@ H5D__bt2_filt_decode(const uint8_t *raw, void *_record, void *_ctx)
 /*-------------------------------------------------------------------------
  * Function:    H5D__bt2_filt_debug
  *
- * Purpose:     Debug native form of record (filterd)
+ * Purpose:     Debug native form of record (filtered)
  *
  * Return:      Success:        non-negative
  *              Failure:        negative
@@ -682,10 +573,10 @@ H5D__bt2_filt_decode(const uint8_t *raw, void *_record, void *_ctx)
  */
 static herr_t
 H5D__bt2_filt_debug(FILE *stream, int indent, int fwidth,
-    const void *_record, const void *_u_ctx)
+    const void *_record, const void *_ctx)
 {
     const H5D_chunk_rec_t *record = (const H5D_chunk_rec_t *)_record; /* The native record */
-    const H5D_bt2_ctx_ud_t *u_ctx = (const H5D_bt2_ctx_ud_t *)_u_ctx; /* User data for creating callback context */
+    const H5D_bt2_ctx_t *ctx = (const H5D_bt2_ctx_t *)_ctx;     /* Callback context */
     unsigned u;         /* Local index variable */
 
     FUNC_ENTER_STATIC_NOERR
@@ -700,8 +591,8 @@ H5D__bt2_filt_debug(FILE *stream, int indent, int fwidth,
     HDfprintf(stream, "%*s%-*s 0x%08x\n", indent, "", fwidth, "Filter mask:", record->filter_mask);
 
     HDfprintf(stream, "%*s%-*s {", indent, "", fwidth, "Logical offset:");
-    for(u = 0; u < u_ctx->ndims; u++)
-        HDfprintf(stream, "%s%Hd", u?", ":"", record->scaled[u] * u_ctx->dim[u]);
+    for(u = 0; u < ctx->ndims; u++)
+        HDfprintf(stream, "%s%Hd", u?", ":"", record->scaled[u] * ctx->dim[u]);
     HDfputs("}\n", stream);
 
     FUNC_LEAVE_NOAPI(SUCCEED)
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index b1073ab..0f6504a 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -261,13 +261,13 @@ static herr_t H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id,
     const H5D_dxpl_cache_t *dxpl_cache, H5D_rdcc_ent_t *ent, hbool_t reset);
 static herr_t H5D__chunk_cache_evict(const H5D_t *dset, hid_t dxpl_id,
     const H5D_dxpl_cache_t *dxpl_cache, H5D_rdcc_ent_t *ent, hbool_t flush);
-static hbool_t H5D__chunk_is_partial_edge_chunk(const hsize_t *chunk_scaled,
-    unsigned dset_ndims, const hsize_t *dset_dims, const uint32_t *chunk_dims);
+static hbool_t H5D__chunk_is_partial_edge_chunk(unsigned dset_ndims,
+    const uint32_t *chunk_dims, const hsize_t *chunk_scaled, const hsize_t *dset_dims);
 static herr_t H5D__chunk_cache_prune(const H5D_t *dset, hid_t dxpl_id,
     const H5D_dxpl_cache_t *dxpl_cache, size_t size);
 static herr_t H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata, hbool_t new_unfilt_chunk);
 static herr_t H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info,
-    const H5F_block_t *old_chunk, H5F_block_t *new_chunk, hbool_t *need_insert,
+    const H5F_block_t *old_chunk, H5F_block_t *new_chunk, hbool_t *need_insert,
     hsize_t scaled[]);
 #ifdef H5_HAVE_PARALLEL
 static herr_t H5D__chunk_collective_fill(const H5D_t *dset, hid_t dxpl_id,
@@ -1763,11 +1763,12 @@ htri_t
 H5D__chunk_cacheable(const H5D_io_info_t *io_info, haddr_t caddr, hbool_t write_op)
 {
     const H5D_t *dataset = io_info->dset; /* Local pointer to dataset info */
-    hbool_t no_filters = TRUE;
+    hbool_t has_filters = FALSE;        /* Whether there are filters on the chunk or not */
    htri_t ret_value = FAIL;            /* Return value */
 
     FUNC_ENTER_PACKAGE
 
+    /* Sanity check */
     HDassert(io_info);
     HDassert(dataset);
 
@@ -1777,16 +1778,19 @@ H5D__chunk_cacheable(const H5D_io_info_t *io_info, haddr_t caddr, hbool_t write_
     if(dataset->shared->dcpl_cache.pline.nused > 0) {
         if(dataset->shared->layout.u.chunk.flags
                 & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) {
-            no_filters = H5D__chunk_is_partial_edge_chunk(
-                    io_info->store->chunk.scaled, io_info->dset->shared->ndims,
-                    io_info->dset->shared->curr_dims,
-                    io_info->dset->shared->layout.u.chunk.dim);
+            has_filters = !H5D__chunk_is_partial_edge_chunk(
+                    io_info->dset->shared->ndims,
+                    io_info->dset->shared->layout.u.chunk.dim,
+                    io_info->store->chunk.scaled,
+                    io_info->dset->shared->curr_dims);
         } /* end if */
         else
-            no_filters = FALSE;
+            has_filters = TRUE;
     } /* end if */
 
-    if(no_filters) {
+    if(has_filters)
+        ret_value = TRUE;
+    else {
 #ifdef H5_HAVE_PARALLEL
         /* If MPI based VFD is used and the file is opened for write access, must
          * bypass the chunk-cache scheme because other MPI processes could
@@ -1827,9 +1831,7 @@ H5D__chunk_cacheable(const H5D_io_info_t *io_info, haddr_t caddr, hbool_t write_
 #ifdef H5_HAVE_PARALLEL
     } /* end else */
 #endif /* H5_HAVE_PARALLEL */
-    } /* end if */
-    else
-        ret_value = TRUE;
+    } /* end else */
 
 done:
     FUNC_LEAVE_NOAPI(ret_value)
@@ -2582,13 +2584,14 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *scaled,
     H5D_chunk_ud_t *udata)
 {
     H5D_rdcc_ent_t *ent = NULL;         /* Cache entry */
-    hbool_t found = FALSE;              /* In cache? */
-    unsigned u;                         /* Counter */
     H5O_storage_chunk_t *sc = &(dset->shared->layout.storage.u.chunk);
-    herr_t ret_value = SUCCEED;         /* Return value */
+    unsigned idx;                       /* Index of chunk in cache, if present */
+    hbool_t found = FALSE;              /* In cache? */
+    herr_t ret_value = SUCCEED;         /* Return value */
 
     FUNC_ENTER_PACKAGE
 
+    /* Sanity checks */
     HDassert(dset);
     HDassert(dset->shared->layout.u.chunk.ndims > 0);
     H5D_CHUNK_STORAGE_INDEX_CHK(sc);
@@ -2598,7 +2601,7 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *scaled,
     /* Initialize the query information about the chunk we are looking for */
     udata->common.layout = &(dset->shared->layout.u.chunk);
     udata->common.storage = &(dset->shared->layout.storage.u.chunk);
-    udata->common.scaled = scaled;
+    udata->common.scaled = scaled;
 
     /* Reset information about the chunk we are looking for */
     udata->chunk_block.offset = HADDR_UNDEF;
@@ -2608,19 +2611,29 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *scaled,
 
     /* Check for chunk in cache */
     if(dset->shared->cache.chunk.nslots > 0) {
-        udata->idx_hint = H5D__chunk_hash_val(dset->shared, scaled);
-        ent = dset->shared->cache.chunk.slot[udata->idx_hint];
+        /* Determine the chunk's location in the hash table */
+        idx = H5D__chunk_hash_val(dset->shared, scaled);
+
+        /* Get the chunk cache entry for that location */
+        ent = dset->shared->cache.chunk.slot[idx];
+        if(ent) {
+            unsigned u;                 /* Counter */
+
+            /* Speculatively set the 'found' flag */
+            found = TRUE;
 
-        if(ent)
-            for(u = 0, found = TRUE; u < dset->shared->ndims; u++)
+            /* Verify that the cache entry is the correct chunk */
+            for(u = 0; u < dset->shared->ndims; u++)
                 if(scaled[u] != ent->scaled[u]) {
                     found = FALSE;
                     break;
                 } /* end if */
+        } /* end if */
     } /* end if */
 
-    /* Find chunk addr */
+    /* Retrieve chunk addr */
     if(found) {
+        udata->idx_hint = idx;
         udata->chunk_block.offset = ent->chunk_block.offset;
         udata->chunk_block.length = ent->chunk_block.length;;
         udata->chunk_idx = ent->chunk_idx;
@@ -3222,9 +3235,9 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
         else if(layout->u.chunk.flags
                 & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) {
             /* Check if this is an edge chunk */
-            if(H5D__chunk_is_partial_edge_chunk(
-                    io_info->store->chunk.scaled, io_info->dset->shared->ndims,
-                    io_info->dset->shared->curr_dims, layout->u.chunk.dim)) {
+            if(H5D__chunk_is_partial_edge_chunk(io_info->dset->shared->ndims,
+                    layout->u.chunk.dim, io_info->store->chunk.scaled,
+                    io_info->dset->shared->curr_dims)) {
                 /* Disable the filters for both writing and reading */
                 disable_filters = TRUE;
                 old_pline = NULL;
@@ -3232,8 +3245,6 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
             } /* end if */
         } /* end if */
     } /* end if */
-    else
-        HDassert(!udata->new_unfilt_chunk && !prev_unfilt_chunk);
 
     if(relax) {
         /*
@@ -3461,6 +3472,7 @@ H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata,
 
     FUNC_ENTER_PACKAGE
 
+    /* Sanity check */
     HDassert(io_info);
     HDassert(udata);
 
@@ -3468,24 +3480,20 @@ H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata,
         /*
          * It's not in the cache, probably because it's too big.  If it's
         * dirty then flush it to disk.  In any case, free the chunk.
-         * Note: we have to copy the layout and filter messages so we
-         * don't discard the `const' qualifier.
         */
         hbool_t is_unfiltered_edge_chunk = FALSE;   /* Whether the chunk is an unfiltered edge chunk */
 
         /* Check if we should disable filters on this chunk */
         if(udata->new_unfilt_chunk) {
-            HDassert(layout->u.chunk.flags
-                    & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+            HDassert(layout->u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
             is_unfiltered_edge_chunk = TRUE;
         } /* end if */
-        else if(layout->u.chunk.flags
-                & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) {
+        else if(layout->u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) {
             /* Check if the chunk is an edge chunk, and disable filters if so */
             is_unfiltered_edge_chunk = H5D__chunk_is_partial_edge_chunk(
-                    io_info->store->chunk.scaled, io_info->dset->shared->ndims,
-                    io_info->dset->shared->curr_dims, layout->u.chunk.dim);
+                    io_info->dset->shared->ndims, layout->u.chunk.dim,
+                    io_info->store->chunk.scaled, io_info->dset->shared->curr_dims);
         } /* end if */
 
         if(dirty) {
@@ -3498,7 +3506,6 @@ H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata,
             if(udata->new_unfilt_chunk)
                 fake_ent.edge_chunk_state |= H5D_RDCC_NEWLY_DISABLED_FILTERS;
             HDmemcpy(fake_ent.scaled, udata->common.scaled, sizeof(hsize_t) * layout->u.chunk.ndims);
-            HDmemcpy(fake_ent.scaled, udata->common.scaled, sizeof(hsize_t) * layout->u.chunk.ndims);
             HDassert(layout->u.chunk.size > 0);
             fake_ent.chunk_idx = udata->chunk_idx;
             fake_ent.chunk_block.offset = udata->chunk_block.offset;
@@ -4014,7 +4021,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
                         && scaled[i] < edge_chunk_scaled[i]) {
                     nunfilt_edge_chunk_dims--;
                     if(should_fill && nunfilt_edge_chunk_dims == 0 && !fb_info.has_vlen_fill_type) {
-                        HDassert(!H5D__chunk_is_partial_edge_chunk(scaled, space_ndims, space_dim, chunk_dim));
+                        HDassert(!H5D__chunk_is_partial_edge_chunk(space_ndims, chunk_dim, scaled, space_dim));
                         fill_buf = &fb_info.fill_buf;
                         chunk_size = orig_chunk_size;
                     } /* end if */
@@ -4026,7 +4033,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
                     HDassert(edge_chunk_scaled[i] == max_unalloc[i]);
                     nunfilt_edge_chunk_dims++;
                     if(should_fill && nunfilt_edge_chunk_dims == 1 && !fb_info.has_vlen_fill_type) {
-                        HDassert(H5D__chunk_is_partial_edge_chunk(scaled, space_ndims, space_dim, chunk_dim));
+                        HDassert(H5D__chunk_is_partial_edge_chunk(space_ndims, chunk_dim, scaled, space_dim));
                         fill_buf = &unfilt_fill_buf;
                         chunk_size = layout->u.chunk.size;
                     } /* end if */
@@ -4200,8 +4207,8 @@ H5D__chunk_update_old_edge_chunks(H5D_t *dset, hid_t dxpl_id, hsize_t old_dim[])
             int i;              /* Local index variable */
 
             /* Make sure the chunk is really a former edge chunk */
-            HDassert(H5D__chunk_is_partial_edge_chunk(chunk_sc, space_ndims, old_dim, chunk_dim)
-                    && !H5D__chunk_is_partial_edge_chunk(chunk_sc, space_ndims, space_dim, chunk_dim));
+            HDassert(H5D__chunk_is_partial_edge_chunk(space_ndims, chunk_dim, chunk_sc, old_dim)
+                    && !H5D__chunk_is_partial_edge_chunk(space_ndims, chunk_dim, chunk_sc, space_dim));
 
             /* Lookup the chunk */
             if(H5D__chunk_lookup(dset, dxpl_id, chunk_sc, &chk_udata) < 0)
@@ -4742,7 +4749,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
 
     /* Determine if partial edge chunk filters are disabled */
     disable_edge_filters = (layout->u.chunk.flags
-            & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS)
+            & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) &&
             (idx_info.pline->nused > 0);
 
     /*
@@ -4776,8 +4783,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
 
             /* If necessary, check if chunks in this dimension that need to
              * be filled are new partial edge chunks */
-            if(disable_edge_filters && old_dim[op_dim]
-                    >= (min_mod_chunk_sc[op_dim] + 1))
+            if(disable_edge_filters && old_dim[op_dim] >= (min_mod_chunk_sc[op_dim] + 1))
                 new_unfilt_dim[op_dim] = TRUE;
             else
                 new_unfilt_dim[op_dim] = FALSE;
@@ -4837,8 +4843,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
                 HDassert(scaled[op_dim] == min_mod_chunk_sc[op_dim]);
 
                 /* Make sure this is an edge chunk */
-                HDassert(H5D__chunk_is_partial_edge_chunk(scaled,
-                        space_ndims, space_dim, layout->u.chunk.dim));
+                HDassert(H5D__chunk_is_partial_edge_chunk(space_ndims, layout->u.chunk.dim, scaled, space_dim));
 
                 /* Determine if the chunk just became an unfiltered chunk */
                 if(new_unfilt_dim[op_dim]) {
@@ -4853,8 +4858,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
                 /* Make sure that, if we think this is a new unfiltered chunk,
                  * it was previously not an edge chunk */
                 HDassert(!new_unfilt_dim[op_dim] || (!new_unfilt_chunk !=
-                        !H5D__chunk_is_partial_edge_chunk(scaled,
-                        space_ndims, old_dim, layout->u.chunk.dim)));
+                        !H5D__chunk_is_partial_edge_chunk(space_ndims, layout->u.chunk.dim, scaled, old_dim)));
                 HDassert(!new_unfilt_chunk || new_unfilt_dim[op_dim]);
 
                 /* Fill the unused parts of the chunk */
@@ -5300,8 +5304,7 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
         if(udata->common.layout->flags
                 & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) {
             /* Check if the chunk is an edge chunk, and disable filters if so */
-            if(!H5D__chunk_is_partial_edge_chunk(chunk_rec->scaled, udata->dset_ndims,
-                    udata->dset_dims, udata->common.layout->dim))
+            if(!H5D__chunk_is_partial_edge_chunk(udata->dset_ndims, udata->common.layout->dim, chunk_rec->scaled, udata->dset_dims))
                 must_filter = TRUE;
         } /* end if */
         else
@@ -6070,8 +6073,8 @@ done:
  *-------------------------------------------------------------------------
  */
 static hbool_t
-H5D__chunk_is_partial_edge_chunk(const hsize_t scaled[], unsigned dset_ndims,
-    const hsize_t *dset_dims, const uint32_t *chunk_dims)
+H5D__chunk_is_partial_edge_chunk(unsigned dset_ndims, const uint32_t *chunk_dims,
+    const hsize_t scaled[], const hsize_t *dset_dims)
 {
     unsigned u;                 /* Local index variable */
     hbool_t ret_value = FALSE;  /* Return value */
@@ -6130,9 +6133,8 @@ H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old
 
     /* Check for filters on chunks */
     if(idx_info->pline->nused > 0) {
-
-        HDassert(idx_info->storage->idx_type != H5D_CHUNK_IDX_NONE);
         /* Sanity/error checking block */
+        HDassert(idx_info->storage->idx_type != H5D_CHUNK_IDX_NONE);
         {
             unsigned allow_chunk_size_len;      /* Allowed size of encoded chunk size */
             unsigned new_chunk_size_len;        /* Size of encoded chunk size */
@@ -6228,6 +6230,7 @@ done:
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5D__chunk_file_alloc() */
 
+
 /*-------------------------------------------------------------------------
  * Function:    H5D__chunk_format_convert_cb
  *
@@ -6259,9 +6262,8 @@ H5D__chunk_format_convert_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
 
     chunk_addr = chunk_rec->chunk_addr;
 
     if(new_idx_info->pline->nused &&
-            (new_idx_info->layout->flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) &&
-            (H5D__chunk_is_partial_edge_chunk(chunk_rec->scaled, udata->dset_ndims, udata->dset_dims,
-            new_idx_info->layout->dim)) ) {
+            (new_idx_info->layout->flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) &&
+            (H5D__chunk_is_partial_edge_chunk(udata->dset_ndims, new_idx_info->layout->dim, chunk_rec->scaled, udata->dset_dims))) {
         /* This is a partial non-filtered edge chunk */
         /* Convert the chunk to a filtered edge chunk for v1 B-tree chunk index */
diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h
index ad16003..46f2ba9 100644
--- a/src/H5Dpkg.h
+++ b/src/H5Dpkg.h
@@ -272,7 +272,6 @@ typedef struct H5D_chunk_rec_t {
  * to which the index points.
  */
 typedef struct H5D_chunk_common_ud_t {
-    /* downward */
     const H5O_layout_chunk_t *layout;           /* Chunk layout description */
     const H5O_storage_chunk_t *storage;         /* Chunk storage description */
     const hsize_t *scaled;                      /* Scaled coordinates for a chunk */
@@ -280,14 +279,15 @@ typedef struct H5D_chunk_common_ud_t {
 
 /* B-tree callback info for various operations */
 typedef struct H5D_chunk_ud_t {
+    /* Downward */
     H5D_chunk_common_ud_t common;       /* Common info for B-tree user data (must be first) */
 
     /* Upward */
-    unsigned idx_hint;          /*index of chunk in cache, if present */
-    H5F_block_t chunk_block;    /*offset/length of chunk in file */
-    unsigned filter_mask;       /*excluded filters */
-    hbool_t new_unfilt_chunk;   /*whether the chunk just became unfiltered */
-    hsize_t chunk_idx;          /*chunk index for EA, FA indexing */
+    unsigned idx_hint;          /* Index of chunk in cache, if present */
+    H5F_block_t chunk_block;    /* Offset/length of chunk in file */
+    unsigned filter_mask;       /* Excluded filters */
+    hbool_t new_unfilt_chunk;   /* Whether the chunk just became unfiltered */
+    hsize_t chunk_idx;          /* Chunk index for EA, FA indexing */
 } H5D_chunk_ud_t;
 
 /* Typedef for "generic" chunk callbacks */
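
Most of the churn in src/H5Dchunk.c comes from reordering the arguments of H5D__chunk_is_partial_edge_chunk() to (dset_ndims, chunk_dims, scaled, dset_dims). The standalone sketch below demonstrates a call with the new order; the body is an assumption about what the test computes (a chunk whose extent crosses the dataset's current boundary), since the diff only touches the signature and its call sites, and plain uint64_t/bool stand in for hsize_t/hbool_t:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* New parameter order taken from the diff; the edge test itself is an
     * assumption, not copied from H5Dchunk.c. */
    static bool
    is_partial_edge_chunk(unsigned dset_ndims, const uint32_t *chunk_dims,
        const uint64_t *scaled, const uint64_t *dset_dims)
    {
        for(unsigned u = 0; u < dset_ndims; u++)
            /* Partial edge chunk: the chunk extends past the dataset's
             * current extent in at least one dimension. */
            if((scaled[u] + 1) * chunk_dims[u] > dset_dims[u])
                return true;
        return false;
    }

    int main(void)
    {
        uint32_t chunk_dims[2] = {10, 10};
        uint64_t dset_dims[2]  = {25, 30};
        uint64_t scaled[2]     = {2, 1};    /* chunk whose origin is (20, 10) */

        /* Prints 1: the chunk would span rows 20..29 but the dataset only
         * has 25 rows, so it is a partial edge chunk. */
        printf("%d\n", is_partial_edge_chunk(2, chunk_dims, scaled, dset_dims));
        return 0;
    }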