Diffstat (limited to 'src/H5Dchunk.c')
-rw-r--r-- | src/H5Dchunk.c | 398 |
1 file changed, 226 insertions, 172 deletions
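The diff below replaces a chunk's absolute element coordinates (and the precomputed linear chunk index) with "scaled" coordinates, i.e. the element offset divided component-wise by the chunk dimensions; these scaled coordinates then key the chunk lookups and the new cache hash. The following standalone sketch shows the relationship between an element offset, the scaled coordinates the new code keeps, and the linear index the old code computed via H5VM_chunk_index(). It is an editor's illustration only: the sizes are made up and none of the names are HDF5 API.

/* Editor's sketch (not HDF5 code): element offset -> scaled chunk coordinates
 * and the linear chunk index the previous code derived from them.
 * All sizes below are invented for illustration. */
#include <stdio.h>
#include <stdint.h>

#define NDIMS 2

int main(void)
{
    uint64_t chunk_dim[NDIMS]   = {10, 20};     /* chunk size in elements */
    uint64_t n_chunks[NDIMS]    = {10, 10};     /* chunks per dimension */
    uint64_t down_chunks[NDIMS];                /* row-major strides, in chunks */
    uint64_t offset[NDIMS]      = {57, 133};    /* element coordinates of interest */
    uint64_t scaled[NDIMS];
    uint64_t linear = 0;
    unsigned u;

    /* down_chunks[u] = product of n_chunks[u+1 .. NDIMS-1] (1 for the fastest dim) */
    down_chunks[NDIMS - 1] = 1;
    for (u = NDIMS - 1; u > 0; u--)
        down_chunks[u - 1] = down_chunks[u] * n_chunks[u];

    for (u = 0; u < NDIMS; u++) {
        scaled[u] = offset[u] / chunk_dim[u];   /* new scheme: coordinates in whole chunks */
        linear   += scaled[u] * down_chunks[u]; /* old scheme: single linear chunk index */
    }

    printf("scaled = (%llu, %llu), linear chunk index = %llu\n",
           (unsigned long long)scaled[0], (unsigned long long)scaled[1],
           (unsigned long long)linear);
    return 0;
}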
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index b0d935c..af5123c 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -160,6 +160,7 @@ typedef struct H5D_chunk_it_ud4_t { FILE *stream; /* Output stream */ hbool_t header_displayed; /* Node's header is displayed? */ unsigned ndims; /* Number of dimensions for chunk/dataset */ + uint32_t *chunk_dim; /* Chunk dimensions */ } H5D_chunk_it_ud4_t; /* Callback info for nonexistent readvv operation */ @@ -231,6 +232,7 @@ static herr_t H5D__chunk_file_cb(void *elem, hid_t type_id, unsigned ndims, const hsize_t *coords, void *fm); static herr_t H5D__chunk_mem_cb(void *elem, hid_t type_id, unsigned ndims, const hsize_t *coords, void *fm); +static unsigned H5D__chunk_hash_val(const H5D_shared_t *shared, const hsize_t *scaled); static herr_t H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t *dxpl_cache, H5D_rdcc_ent_t *ent, hbool_t reset); static herr_t H5D__chunk_cache_evict(const H5D_t *dset, hid_t dxpl_id, @@ -317,14 +319,14 @@ H5FL_BLK_DEFINE_STATIC(chunk); *------------------------------------------------------------------------- */ herr_t -H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters, hsize_t *offset, - uint32_t data_size, const void *buf) +H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters, + hsize_t *offset, uint32_t data_size, const void *buf) { const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */ H5D_chunk_ud_t udata; /* User data for querying chunk info */ - hsize_t chunk_idx; /* Global index of chunk */ H5F_block_t old_chunk; /* Offset/length of old chunk */ H5D_chk_idx_info_t idx_info; /* Chunked index info */ + hsize_t scaled[H5S_MAX_RANK]; /* Scaled coordinates for this chunk */ hbool_t need_insert = FALSE; /* Whether the chunk needs to be inserted into the index */ herr_t ret_value = SUCCEED; /* Return value */ @@ -337,10 +339,11 @@ H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters, hsiz HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize storage") /* Calculate the index of this chunk */ - chunk_idx = H5VM_chunk_index(dset->shared->ndims, offset, layout->u.chunk.dim, layout->u.chunk.down_chunks); + H5VM_chunk_scaled(dset->shared->ndims, offset, layout->u.chunk.dim, scaled); + scaled[dset->shared->ndims] = 0; /* Find out the file address of the chunk (if any) */ - if(H5D__chunk_lookup(dset, dxpl_id, offset, chunk_idx, &udata) < 0) + if(H5D__chunk_lookup(dset, dxpl_id, scaled, &udata) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") /* Sanity check */ @@ -500,7 +503,7 @@ done: *------------------------------------------------------------------------- */ static herr_t -H5D__chunk_construct(H5F_t UNUSED *f, H5D_t *dset) +H5D__chunk_construct(H5F_t H5_ATTR_UNUSED *f, H5D_t *dset) { const H5T_t *type = dset->shared->type; /* Convenience pointer to dataset's datatype */ uint64_t chunk_size; /* Size of chunk in bytes */ @@ -558,7 +561,7 @@ H5D__chunk_construct(H5F_t UNUSED *f, H5D_t *dset) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "chunk size must be < 4GB") /* Retain computed chunk size */ - H5_ASSIGN_OVERFLOW(dset->shared->layout.u.chunk.size, chunk_size, uint64_t, uint32_t); + H5_CHECKED_ASSIGN(dset->shared->layout.u.chunk.size, uint32_t, chunk_size, uint64_t); /* Reset address and pointer of the array struct for the chunked storage index */ if(H5D_chunk_idx_reset(&dset->shared->layout.storage.u.chunk, TRUE) < 0) @@ -627,6 +630,22 @@ H5D__chunk_init(H5F_t *f, 
hid_t dxpl_id, const H5D_t *dset, hid_t dapl_id) H5D__chunk_cinfo_cache_reset(&(rdcc->last)); } /* end else */ + /* Compute scaled dimension info, if dataset dims > 1 */ + if(dset->shared->ndims > 1) { + unsigned u; /* Local index value */ + + for(u = 0; u < dset->shared->ndims; u++) { + /* Initial scaled dimension sizes */ + rdcc->scaled_dims[u] = dset->shared->curr_dims[u] / dset->shared->layout.u.chunk.dim[u]; + + /* Inital 'power2up' values for scaled dimensions */ + rdcc->scaled_power2up[u] = H5VM_power2up(rdcc->scaled_dims[u]); + + /* Number of bits required to encode scaled dimension size */ + rdcc->scaled_encode_bits[u] = H5VM_log2_gen(rdcc->scaled_power2up[u]); + } /* end for */ + } /* end if */ + /* Compose chunked index info struct */ idx_info.f = f; idx_info.dxpl_id = dxpl_id; @@ -703,7 +722,6 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf unsigned f_ndims; /* The number of dimensions of the file's dataspace */ int sm_ndims; /* The number of dimensions of the memory buffer's dataspace (signed) */ H5SL_node_t *curr_node; /* Current node in skip list */ - H5S_sel_type fsel_type; /* Selection type on disk */ char bogus; /* "bogus" buffer to pass to selection iterator */ unsigned u; /* Local index variable */ herr_t ret_value = SUCCEED; /* Return value */ @@ -718,7 +736,7 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf if((sm_ndims = H5S_GET_EXTENT_NDIMS(mem_space)) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimension number") /* Set the number of dimensions for the memory dataspace */ - H5_ASSIGN_OVERFLOW(fm->m_ndims, sm_ndims, int, unsigned); + H5_CHECKED_ASSIGN(fm->m_ndims, unsigned, sm_ndims, int); /* Get rank for file dataspace */ fm->f_ndims = f_ndims = dataset->shared->layout.u.chunk.ndims - 1; @@ -817,13 +835,13 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf fm->use_single = FALSE; /* Get type of selection on disk & in memory */ - if((fsel_type = H5S_GET_SELECT_TYPE(file_space)) < H5S_SEL_NONE) + if((fm->fsel_type = H5S_GET_SELECT_TYPE(file_space)) < H5S_SEL_NONE) HGOTO_ERROR(H5E_DATASET, H5E_BADSELECT, FAIL, "unable to get type of selection") if((fm->msel_type = H5S_GET_SELECT_TYPE(mem_space)) < H5S_SEL_NONE) HGOTO_ERROR(H5E_DATASET, H5E_BADSELECT, FAIL, "unable to get type of selection") /* If the selection is NONE or POINTS, set the flag to FALSE */ - if(fsel_type == H5S_SEL_POINTS || fsel_type == H5S_SEL_NONE) + if(fm->fsel_type == H5S_SEL_POINTS || fm->fsel_type == H5S_SEL_NONE) sel_hyper_flag = FALSE; else sel_hyper_flag = TRUE; @@ -1083,7 +1101,7 @@ H5D__chunk_mem_realloc(void *chk, size_t size, const H5O_pline_t *pline) REVISION LOG --------------------------------------------------------------------------*/ static herr_t -H5D__free_chunk_info(void *item, void UNUSED *key, void UNUSED *opdata) +H5D__free_chunk_info(void *item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED *opdata) { H5D_chunk_info_t *chunk_info = (H5D_chunk_info_t *)item; @@ -1123,11 +1141,12 @@ H5D__free_chunk_info(void *item, void UNUSED *key, void UNUSED *opdata) static herr_t H5D__create_chunk_map_single(H5D_chunk_map_t *fm, const H5D_io_info_t #ifndef H5_HAVE_PARALLEL - UNUSED + H5_ATTR_UNUSED #endif /* H5_HAVE_PARALLEL */ *io_info) { H5D_chunk_info_t *chunk_info; /* Chunk information to insert into skip list */ + hsize_t coords[H5O_LAYOUT_NDIMS]; /* Coordinates of chunk */ hsize_t sel_start[H5O_LAYOUT_NDIMS]; /* Offset of low bound of file selection */ hsize_t 
sel_end[H5O_LAYOUT_NDIMS]; /* Offset of high bound of file selection */ unsigned u; /* Local index variable */ @@ -1149,19 +1168,20 @@ H5D__create_chunk_map_single(H5D_chunk_map_t *fm, const H5D_io_info_t /* Set chunk location & hyperslab size */ for(u = 0; u < fm->f_ndims; u++) { HDassert(sel_start[u] == sel_end[u]); - chunk_info->coords[u] = (sel_start[u] / fm->layout->u.chunk.dim[u]) * fm->layout->u.chunk.dim[u]; + chunk_info->scaled[u] = sel_start[u] / fm->layout->u.chunk.dim[u]; + coords[u] = chunk_info->scaled[u] * fm->layout->u.chunk.dim[u]; } /* end for */ - chunk_info->coords[fm->f_ndims] = 0; + chunk_info->scaled[fm->f_ndims] = 0; /* Calculate the index of this chunk */ - chunk_info->index = H5VM_chunk_index(fm->f_ndims, chunk_info->coords, fm->layout->u.chunk.dim, fm->layout->u.chunk.down_chunks); + chunk_info->index = H5VM_array_offset_pre(fm->f_ndims, fm->layout->u.chunk.down_chunks, chunk_info->scaled); /* Copy selection for file's dataspace into chunk dataspace */ if(H5S_select_copy(fm->single_space, fm->file_space, FALSE) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOPY, FAIL, "unable to copy file selection") /* Move selection back to have correct offset in chunk */ - if(H5S_SELECT_ADJUST_U(fm->single_space, chunk_info->coords) < 0) + if(H5S_SELECT_ADJUST_U(fm->single_space, coords) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "can't adjust chunk selection") #ifdef H5_HAVE_PARALLEL @@ -1203,7 +1223,7 @@ done: static herr_t H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t #ifndef H5_HAVE_PARALLEL - UNUSED + H5_ATTR_UNUSED #endif /* H5_HAVE_PARALLEL */ *io_info) { @@ -1214,6 +1234,8 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t hsize_t coords[H5O_LAYOUT_NDIMS]; /* Current coordinates of chunk */ hsize_t end[H5O_LAYOUT_NDIMS]; /* Final coordinates of chunk */ hsize_t chunk_index; /* Index of chunk */ + hsize_t start_scaled[H5S_MAX_RANK]; /* Starting scaled coordinates of selection */ + hsize_t scaled[H5S_MAX_RANK]; /* Scaled coordinates for this chunk */ int curr_dim; /* Current dimension to increment */ unsigned u; /* Local index variable */ herr_t ret_value = SUCCEED; /* Return value */ @@ -1232,13 +1254,13 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t /* Set initial chunk location & hyperslab size */ for(u = 0; u < fm->f_ndims; u++) { - start_coords[u] = (sel_start[u] / fm->layout->u.chunk.dim[u]) * fm->layout->u.chunk.dim[u]; - coords[u] = start_coords[u]; + scaled[u] = start_scaled[u] = sel_start[u] / fm->layout->u.chunk.dim[u]; + coords[u] = start_coords[u] = scaled[u] * fm->layout->u.chunk.dim[u]; end[u] = (coords[u] + fm->chunk_dim[u]) - 1; } /* end for */ /* Calculate the index of this chunk */ - chunk_index = H5VM_chunk_index(fm->f_ndims, coords, fm->layout->u.chunk.dim, fm->layout->u.chunk.down_chunks); + chunk_index = H5VM_array_offset_pre(fm->f_ndims, fm->layout->u.chunk.down_chunks, scaled); /* Iterate through each chunk in the dataset */ while(sel_points) { @@ -1304,9 +1326,9 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t new_chunk_info->mspace=NULL; new_chunk_info->mspace_shared = FALSE; - /* Copy the chunk's coordinates */ - HDmemcpy(new_chunk_info->coords, coords, sizeof(hsize_t) * fm->f_ndims); - new_chunk_info->coords[fm->f_ndims] = 0; + /* Copy the chunk's scaled coordinates */ + HDmemcpy(new_chunk_info->scaled, scaled, sizeof(hsize_t) * fm->f_ndims); + new_chunk_info->scaled[fm->f_ndims] = 0; /* Insert the new chunk into the skip list 
*/ if(H5SL_insert(fm->sel_chunks, new_chunk_info, &new_chunk_info->index) < 0) { @@ -1317,7 +1339,7 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t /* Get number of elements selected in chunk */ if((schunk_points = H5S_GET_SELECT_NPOINTS(tmp_fchunk)) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't get file selection # of elements") - H5_ASSIGN_OVERFLOW(new_chunk_info->chunk_points, schunk_points, hssize_t, uint32_t); + H5_CHECKED_ASSIGN(new_chunk_info->chunk_points, uint32_t, schunk_points, hssize_t); /* Decrement # of points left in file selection */ sel_points -= (hsize_t)schunk_points; @@ -1337,11 +1359,13 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t H5_CHECK_OVERFLOW(fm->chunk_dim[curr_dim],hsize_t,hssize_t); coords[curr_dim]+=fm->chunk_dim[curr_dim]; end[curr_dim]+=fm->chunk_dim[curr_dim]; + scaled[curr_dim]++; /* Bring chunk location back into bounds, if necessary */ if(coords[curr_dim] > sel_end[curr_dim]) { do { /* Reset current dimension's location to 0 */ + scaled[curr_dim] = start_scaled[curr_dim]; coords[curr_dim] = start_coords[curr_dim]; /*lint !e771 The start_coords will always be initialized */ end[curr_dim] = (coords[curr_dim] + fm->chunk_dim[curr_dim]) - 1; @@ -1349,12 +1373,13 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t curr_dim--; /* Increment chunk location in current dimension */ + scaled[curr_dim]++; coords[curr_dim] += fm->chunk_dim[curr_dim]; end[curr_dim] = (coords[curr_dim] + fm->chunk_dim[curr_dim]) - 1; } while(coords[curr_dim] > sel_end[curr_dim]); /* Re-calculate the index of this chunk */ - chunk_index = H5VM_chunk_index(fm->f_ndims, coords, fm->layout->u.chunk.dim, fm->layout->u.chunk.down_chunks); + chunk_index = H5VM_array_offset_pre(fm->f_ndims, fm->layout->u.chunk.down_chunks, scaled); } /* end if */ } /* end while */ @@ -1455,10 +1480,16 @@ H5D__create_chunk_mem_map_hyper(const H5D_chunk_map_t *fm) if(H5S_select_copy(chunk_info->mspace,chunk_info->fspace,FALSE) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOPY, FAIL, "unable to copy selection") - /* Compensate for the chunk offset */ - for(u=0; u<fm->f_ndims; u++) { - H5_CHECK_OVERFLOW(chunk_info->coords[u],hsize_t,hssize_t); - chunk_adjust[u]=adjust[u]-(hssize_t)chunk_info->coords[u]; /*lint !e771 The adjust array will always be initialized */ + /* Compute the adjustment for this chunk */ + for(u = 0; u < fm->f_ndims; u++) { + hsize_t coords[H5O_LAYOUT_NDIMS]; /* Current coordinates of chunk */ + + /* Compute the chunk coordinates from the scaled coordinates */ + coords[u] = chunk_info->scaled[u] * fm->layout->u.chunk.dim[u]; + + /* Compensate for the chunk offset */ + H5_CHECK_OVERFLOW(coords[u], hsize_t, hssize_t); + chunk_adjust[u] = adjust[u] - (hssize_t)coords[u]; /*lint !e771 The adjust array will always be initialized */ } /* end for */ /* Adjust the selection */ @@ -1489,7 +1520,7 @@ done: *------------------------------------------------------------------------- */ static herr_t -H5D__chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, unsigned ndims, const hsize_t *coords, void *_udata) +H5D__chunk_file_cb(void H5_ATTR_UNUSED *elem, hid_t H5_ATTR_UNUSED type_id, unsigned ndims, const hsize_t *coords, void *_udata) { H5D_chunk_file_iter_ud_t *udata = (H5D_chunk_file_iter_ud_t *)_udata; /* User data for operation */ H5D_chunk_map_t *fm = udata->fm; /* File<->memory chunk mapping info */ @@ -1553,12 +1584,9 @@ H5D__chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, unsigned ndims, cons /* 
Set the number of selected elements in chunk to zero */ chunk_info->chunk_points = 0; - /* Compute the chunk's coordinates */ - for(u = 0; u < fm->f_ndims; u++) { - H5_CHECK_OVERFLOW(fm->layout->u.chunk.dim[u], hsize_t, hssize_t); - chunk_info->coords[u] = scaled[u] * (hssize_t)fm->layout->u.chunk.dim[u]; - } /* end for */ - chunk_info->coords[fm->f_ndims] = 0; + /* Set the chunk's scaled coordinates */ + HDmemcpy(chunk_info->scaled, scaled, sizeof(hsize_t) * fm->f_ndims); + chunk_info->scaled[fm->f_ndims] = 0; /* Insert the new chunk into the skip list */ if(H5SL_insert(fm->sel_chunks,chunk_info,&chunk_info->index) < 0) { @@ -1580,7 +1608,7 @@ H5D__chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, unsigned ndims, cons /* Get the offset of the element within the chunk */ for(u = 0; u < fm->f_ndims; u++) - coords_in_chunk[u] = coords[u] - chunk_info->coords[u]; + coords_in_chunk[u] = coords[u] - (scaled[u] * fm->layout->u.chunk.dim[u]); /* Add point to file selection for chunk */ if(H5S_select_elements(chunk_info->fspace, H5S_SELECT_APPEND, (size_t)1, coords_in_chunk) < 0) @@ -1609,7 +1637,7 @@ done: */ /* ARGSUSED */ static herr_t -H5D__chunk_mem_cb(void UNUSED *elem, hid_t UNUSED type_id, unsigned ndims, const hsize_t *coords, void *_fm) +H5D__chunk_mem_cb(void H5_ATTR_UNUSED *elem, hid_t H5_ATTR_UNUSED type_id, unsigned ndims, const hsize_t *coords, void *_fm) { H5D_chunk_map_t *fm = (H5D_chunk_map_t *)_fm; /* File<->memory chunk mapping info */ H5D_chunk_info_t *chunk_info; /* Chunk information for current chunk */ @@ -1761,7 +1789,7 @@ done: */ static herr_t H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, - hsize_t UNUSED nelmts, const H5S_t UNUSED *file_space, const H5S_t UNUSED *mem_space, + hsize_t H5_ATTR_UNUSED nelmts, const H5S_t H5_ATTR_UNUSED *file_space, const H5S_t H5_ATTR_UNUSED *mem_space, H5D_chunk_map_t *fm) { H5SL_node_t *chunk_node; /* Current node in chunk skip list */ @@ -1793,7 +1821,7 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, ctg_io_info.layout_ops = *H5D_LOPS_CONTIG; /* Initialize temporary contiguous storage info */ - H5_ASSIGN_OVERFLOW(ctg_store.contig.dset_size, io_info->dset->shared->layout.u.chunk.size, uint32_t, hsize_t); + H5_CHECKED_ASSIGN(ctg_store.contig.dset_size, hsize_t, io_info->dset->shared->layout.u.chunk.size, uint32_t); /* Set up compact I/O info object */ HDmemcpy(&cpt_io_info, io_info, sizeof(cpt_io_info)); @@ -1831,7 +1859,7 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node); /* Get the info for the chunk in the file */ - if(H5D__chunk_lookup(io_info->dset, io_info->dxpl_id, chunk_info->coords, chunk_info->index, &udata) < 0) + if(H5D__chunk_lookup(io_info->dset, io_info->dxpl_id, chunk_info->scaled, &udata) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") /* Sanity check */ @@ -1849,14 +1877,15 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, if((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, FALSE)) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable") if(cacheable) { - /* Pass in chunk's coordinates in a union. */ - io_info->store->chunk.offset = chunk_info->coords; - io_info->store->chunk.index = chunk_info->index; + /* Load the chunk into cache and lock it. 
*/ /* Compute # of bytes accessed in chunk */ H5_CHECK_OVERFLOW(type_info->src_type_size, /*From:*/ size_t, /*To:*/ uint32_t); src_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->src_type_size; + /* Set chunk's [scaled] coordinates */ + io_info->store->chunk.scaled = chunk_info->scaled; + /* Lock the chunk into the cache */ if(NULL == (chunk = H5D__chunk_lock(io_info, &udata, FALSE))) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk") @@ -1912,7 +1941,7 @@ done: */ static herr_t H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, - hsize_t UNUSED nelmts, const H5S_t UNUSED *file_space, const H5S_t UNUSED *mem_space, + hsize_t H5_ATTR_UNUSED nelmts, const H5S_t H5_ATTR_UNUSED *file_space, const H5S_t H5_ATTR_UNUSED *mem_space, H5D_chunk_map_t *fm) { H5SL_node_t *chunk_node; /* Current node in chunk skip list */ @@ -1938,7 +1967,7 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, ctg_io_info.layout_ops = *H5D_LOPS_CONTIG; /* Initialize temporary contiguous storage info */ - H5_ASSIGN_OVERFLOW(ctg_store.contig.dset_size, io_info->dset->shared->layout.u.chunk.size, uint32_t, hsize_t); + H5_CHECKED_ASSIGN(ctg_store.contig.dset_size, hsize_t, io_info->dset->shared->layout.u.chunk.size, uint32_t); /* Set up compact I/O info object */ HDmemcpy(&cpt_io_info, io_info, sizeof(cpt_io_info)); @@ -1963,7 +1992,7 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node); /* Look up the chunk */ - if(H5D__chunk_lookup(io_info->dset, io_info->dxpl_id, chunk_info->coords, chunk_info->index, &udata) < 0) + if(H5D__chunk_lookup(io_info->dset, io_info->dxpl_id, chunk_info->scaled, &udata) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") /* Sanity check */ @@ -1978,19 +2007,19 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, * simply allocate space instead of load the chunk. */ hbool_t entire_chunk = TRUE; /* Whether whole chunk is selected */ - /* Pass in chunk's coordinates in a union. 
*/ - io_info->store->chunk.offset = chunk_info->coords; - io_info->store->chunk.index = chunk_info->index; - /* Compute # of bytes accessed in chunk */ H5_CHECK_OVERFLOW(type_info->dst_type_size, /*From:*/ size_t, /*To:*/ uint32_t); dst_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->dst_type_size; /* Determine if we will access all the data in the chunk */ if(dst_accessed_bytes != ctg_store.contig.dset_size || - (chunk_info->chunk_points * type_info->src_type_size) != ctg_store.contig.dset_size) + (chunk_info->chunk_points * type_info->src_type_size) != ctg_store.contig.dset_size || + fm->fsel_type == H5S_SEL_POINTS) entire_chunk = FALSE; + /* Set chunk's [scaled] coordinates */ + io_info->store->chunk.scaled = chunk_info->scaled; + /* Lock the chunk into the cache */ if(NULL == (chunk = H5D__chunk_lock(io_info, &udata, entire_chunk))) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk") @@ -2242,13 +2271,12 @@ H5D__chunk_cinfo_cache_update(H5D_chunk_cached_t *last, const H5D_chunk_ud_t *ud HDassert(last); HDassert(udata); HDassert(udata->common.layout); - HDassert(udata->common.storage); - HDassert(udata->common.offset); + HDassert(udata->common.scaled); /* Stored the information to cache */ - HDmemcpy(last->offset, udata->common.offset, sizeof(hsize_t) * udata->common.layout->ndims); + HDmemcpy(last->scaled, udata->common.scaled, sizeof(hsize_t) * udata->common.layout->ndims); last->addr = udata->chunk_block.offset; - H5_ASSIGN_OVERFLOW(last->nbytes, udata->chunk_block.length, hsize_t, uint32_t); + H5_CHECKED_ASSIGN(last->nbytes, uint32_t, udata->chunk_block.length, hsize_t); last->filter_mask = udata->filter_mask; /* Indicate that the cached info is valid */ @@ -2281,16 +2309,15 @@ H5D__chunk_cinfo_cache_found(const H5D_chunk_cached_t *last, H5D_chunk_ud_t *uda HDassert(last); HDassert(udata); HDassert(udata->common.layout); - HDassert(udata->common.storage); - HDassert(udata->common.offset); + HDassert(udata->common.scaled); /* Check if the cached information is what is desired */ if(last->valid) { unsigned u; /* Local index variable */ - /* Check that the offset is the same */ + /* Check that the scaled offset is the same */ for(u = 0; u < udata->common.layout->ndims; u++) - if(last->offset[u] != udata->common.offset[u]) + if(last->scaled[u] != udata->common.scaled[u]) HGOTO_DONE(FALSE) /* Retrieve the information from the cache */ @@ -2360,6 +2387,53 @@ done: /*------------------------------------------------------------------------- + * Function: H5D__chunk_hash_val + * + * Purpose: To calculate an index based on the dataset's scaled coordinates and + * sizes of the faster dimensions. 
+ * + * Return: Hash value index + * + * Programmer: Vailin Choi; Nov 2014 + * + *------------------------------------------------------------------------- + */ +static unsigned +H5D__chunk_hash_val(const H5D_shared_t *shared, const hsize_t *scaled) +{ + hsize_t val; /* Intermediate value */ + unsigned ndims = shared->ndims; /* Rank of dataset */ + unsigned ret; /* Value to return */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity check */ + HDassert(shared); + HDassert(scaled); + + /* If the fastest changing dimension doesn't have enough entropy, use + * other dimensions too + */ + if(ndims > 1 && shared->cache.chunk.scaled_dims[ndims - 1] <= shared->cache.chunk.nslots) { + unsigned u; /* Local index variable */ + + val = scaled[0]; + for(u = 1; u < ndims; u++) { + val <<= shared->cache.chunk.scaled_encode_bits[u]; + val ^= scaled[u]; + } /* end for */ + } /* end if */ + else + val = scaled[ndims - 1]; + + /* Modulo value against the number of array slots */ + ret = (unsigned)(val % shared->cache.chunk.nslots); + + FUNC_LEAVE_NOAPI(ret) +} /* H5D__chunk_hash_val() */ + + +/*------------------------------------------------------------------------- * Function: H5D__chunk_lookup * * Purpose: Loops up a chunk in cache and on disk, and retrieves @@ -2373,8 +2447,8 @@ done: *------------------------------------------------------------------------- */ herr_t -H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *chunk_offset, - hsize_t chunk_idx, H5D_chunk_ud_t *udata) +H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *scaled, + H5D_chunk_ud_t *udata) { H5D_rdcc_ent_t *ent = NULL; /* Cache entry */ hbool_t found = FALSE; /* In cache? */ @@ -2385,13 +2459,13 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *chunk_offset, HDassert(dset); HDassert(dset->shared->layout.u.chunk.ndims > 0); - HDassert(chunk_offset); + HDassert(scaled); HDassert(udata); /* Initialize the query information about the chunk we are looking for */ udata->common.layout = &(dset->shared->layout.u.chunk); udata->common.storage = &(dset->shared->layout.storage.u.chunk); - udata->common.offset = chunk_offset; + udata->common.scaled = scaled; /* Reset information about the chunk we are looking for */ udata->chunk_block.offset = HADDR_UNDEF; @@ -2400,12 +2474,12 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *chunk_offset, /* Check for chunk in cache */ if(dset->shared->cache.chunk.nslots > 0) { - udata->idx_hint = H5D_CHUNK_HASH(dset->shared, chunk_idx); + udata->idx_hint = H5D__chunk_hash_val(dset->shared, scaled); ent = dset->shared->cache.chunk.slot[udata->idx_hint]; if(ent) for(u = 0, found = TRUE; u < dset->shared->ndims; u++) - if(chunk_offset[u] != ent->offset[u]) { + if(scaled[u] != ent->scaled[u]) { found = FALSE; break; } /* end if */ @@ -2486,7 +2560,7 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t /* Set up user data for index callbacks */ udata.common.layout = &dset->shared->layout.u.chunk; udata.common.storage = &dset->shared->layout.storage.u.chunk; - udata.common.offset = ent->offset; + udata.common.scaled = ent->scaled; udata.chunk_block.offset = ent->chunk_block.offset; udata.chunk_block.length = dset->shared->layout.u.chunk.size; udata.filter_mask = 0; @@ -2517,7 +2591,7 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t point_of_no_return = TRUE; ent->chunk = NULL; } /* end else */ - H5_ASSIGN_OVERFLOW(nbytes, udata.chunk_block.length, hsize_t, size_t); + 
H5_CHECKED_ASSIGN(nbytes, size_t, udata.chunk_block.length, hsize_t); if(H5Z_pipeline(&(dset->shared->dcpl_cache.pline), 0, &(udata.filter_mask), dxpl_cache->err_detect, dxpl_cache->filter_cb, &nbytes, &alloc, &buf) < 0) HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, FAIL, "output pipeline failed") @@ -2526,7 +2600,7 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t if(nbytes > ((size_t)0xffffffff)) HGOTO_ERROR(H5E_DATASET, H5E_BADRANGE, FAIL, "chunk too large for 32-bit length") #endif /* H5_SIZEOF_SIZE_T > 4 */ - H5_ASSIGN_OVERFLOW(udata.chunk_block.length, nbytes, size_t, hsize_t); + H5_CHECKED_ASSIGN(udata.chunk_block.length, hsize_t, nbytes, size_t); /* Indicate that the chunk must be allocated */ must_alloc = TRUE; @@ -2835,7 +2909,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, /* Get the chunk's size */ HDassert(layout->u.chunk.size > 0); - H5_ASSIGN_OVERFLOW(chunk_size, layout->u.chunk.size, uint32_t, size_t); + H5_CHECKED_ASSIGN(chunk_size, size_t, layout->u.chunk.size, uint32_t); /* Check if the chunk is in the cache */ if(UINT_MAX != udata->idx_hint) { @@ -2852,7 +2926,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, /* Make sure this is the right chunk */ for(u = 0; u < layout->u.chunk.ndims; u++) - HDassert(io_info->store->chunk.offset[u] == ent->offset[u]); + HDassert(io_info->store->chunk.scaled[u] == ent->scaled[u]); } #endif /* NDEBUG */ @@ -2979,7 +3053,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, /* See if the chunk can be cached */ if(rdcc->nslots > 0 && chunk_size <= rdcc->nbytes_max) { /* Calculate the index */ - udata->idx_hint = H5D_CHUNK_HASH(dset->shared, io_info->store->chunk.index); + udata->idx_hint = H5D__chunk_hash_val(io_info->dset->shared, udata->common.scaled); /* Add the chunk to the cache only if the slot is not already locked */ ent = rdcc->slot[udata->idx_hint]; @@ -2999,9 +3073,9 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, /* Initialize the new entry */ ent->chunk_block.offset = chunk_addr; ent->chunk_block.length = chunk_alloc; - HDmemcpy(ent->offset, io_info->store->chunk.offset, sizeof(hsize_t) * layout->u.chunk.ndims); - H5_ASSIGN_OVERFLOW(ent->rd_count, chunk_size, size_t, uint32_t); - H5_ASSIGN_OVERFLOW(ent->wr_count, chunk_size, size_t, uint32_t); + HDmemcpy(ent->scaled, udata->common.scaled, sizeof(hsize_t) * layout->u.chunk.ndims); + H5_CHECKED_ASSIGN(ent->rd_count, uint32_t, chunk_size, size_t); + H5_CHECKED_ASSIGN(ent->wr_count, uint32_t, chunk_size, size_t); ent->chunk = (uint8_t *)chunk; /* Add it to the cache */ @@ -3106,7 +3180,7 @@ H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata, HDmemset(&fake_ent, 0, sizeof(fake_ent)); fake_ent.dirty = TRUE; - HDmemcpy(fake_ent.offset, io_info->store->chunk.offset, layout->u.chunk.ndims * sizeof(fake_ent.offset[0])); + HDmemcpy(fake_ent.scaled, udata->common.scaled, sizeof(hsize_t) * layout->u.chunk.ndims); HDassert(layout->u.chunk.size > 0); fake_ent.chunk_block.offset = udata->chunk_block.offset; fake_ent.chunk_block.length = udata->chunk_block.length; @@ -3254,9 +3328,9 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite, { H5D_chk_idx_info_t idx_info; /* Chunked index info */ const H5D_chunk_ops_t *ops = dset->shared->layout.storage.u.chunk.ops; /* Chunk operations */ - hsize_t min_unalloc[H5O_LAYOUT_NDIMS]; /* First chunk in each dimension that is unallocated */ - hsize_t max_unalloc[H5O_LAYOUT_NDIMS]; /* 
Last chunk in each dimension that is unallocated */ - hsize_t chunk_offset[H5O_LAYOUT_NDIMS]; /* Offset of current chunk */ + hsize_t min_unalloc[H5O_LAYOUT_NDIMS]; /* First chunk in each dimension that is unallocated (in scaled coordinates) */ + hsize_t max_unalloc[H5O_LAYOUT_NDIMS]; /* Last chunk in each dimension that is unallocated (in scaled coordinates) */ + hsize_t scaled[H5O_LAYOUT_NDIMS]; /* Offset of current chunk (in scaled coordinates) */ size_t orig_chunk_size; /* Original size of chunk in bytes */ size_t chunk_size; /* Actual size of chunk in bytes, possibly filtered */ unsigned filter_mask = 0; /* Filter mask for chunks that have them */ @@ -3292,8 +3366,8 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite, space_dim = dset->shared->curr_dims; space_ndims = dset->shared->ndims; - /* The last dimension in chunk_offset is always 0 */ - chunk_offset[space_ndims] = (hsize_t)0; + /* The last dimension in scaled chunk coordinates is always 0 */ + scaled[space_ndims] = (hsize_t)0; /* Check if any space dimensions are 0, if so we do not have to do anything */ @@ -3321,7 +3395,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite, HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") /* Get original chunk size */ - H5_ASSIGN_OVERFLOW(orig_chunk_size, layout->u.chunk.size, uint32_t, size_t); + H5_CHECKED_ASSIGN(orig_chunk_size, size_t, layout->u.chunk.size, uint32_t); /* Check the dataset's fill-value status */ if(H5P_is_fill_value_defined(fill, &fill_status) < 0) @@ -3380,10 +3454,8 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite, * that we assume here that all elements of space_dim are > 0. This is * checked at the top of this function. */ for(op_dim = 0; op_dim < (unsigned)space_ndims; op_dim++) { - min_unalloc[op_dim] = ((old_dim[op_dim] + chunk_dim[op_dim] - 1) - / chunk_dim[op_dim]) * chunk_dim[op_dim]; - max_unalloc[op_dim] = ((space_dim[op_dim] - 1) / chunk_dim[op_dim]) - * chunk_dim[op_dim]; + min_unalloc[op_dim] = (old_dim[op_dim] + chunk_dim[op_dim] - 1) / chunk_dim[op_dim]; + max_unalloc[op_dim] = (space_dim[op_dim] - 1) / chunk_dim[op_dim]; } /* end for */ /* Loop over all chunks */ @@ -3403,6 +3475,9 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite, * Every time the algorithm finishes allocating chunks allocated beyond a * certain dimension, max_unalloc is updated in order to avoid allocating * those chunks again. + * + * Note that min_unalloc & max_unalloc are in scaled coordinates. 
+ * */ for(op_dim = 0; op_dim < space_ndims; op_dim++) { H5D_chunk_ud_t udata; /* User data for querying chunk info */ @@ -3413,8 +3488,8 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite, continue; else { /* Reset the chunk offset indices */ - HDmemset(chunk_offset, 0, (space_ndims * sizeof(chunk_offset[0]))); - chunk_offset[op_dim] = min_unalloc[op_dim]; + HDmemset(scaled, 0, (space_ndims * sizeof(scaled[0]))); + scaled[op_dim] = min_unalloc[op_dim]; carry = FALSE; } /* end else */ @@ -3428,11 +3503,8 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite, #ifndef NDEBUG /* None of the chunks should be allocated */ { - hsize_t chunk_idx; - /* Look up this chunk */ - chunk_idx = H5VM_chunk_index(space_ndims, chunk_offset, layout->u.chunk.dim, layout->u.chunk.down_chunks); - if(H5D__chunk_lookup(dset, dxpl_id, chunk_offset, chunk_idx, &udata) < 0) + if(H5D__chunk_lookup(dset, dxpl_id, scaled, &udata) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") HDassert(!H5F_addr_defined(udata.chunk_block.offset)); @@ -3445,8 +3517,8 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite, hbool_t outside_orig = FALSE; for(u = 0; u < space_ndims; u++) { - HDassert(chunk_offset[u] < space_dim[u]); - if(chunk_offset[u] >= old_dim[u]) + HDassert((scaled[u] * chunk_dim[u]) < space_dim[u]); + if((scaled[u] * chunk_dim[u]) >= old_dim[u]) outside_orig = TRUE; } /* end for */ HDassert(outside_orig); @@ -3496,9 +3568,9 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite, /* Initialize the chunk information */ udata.common.layout = &layout->u.chunk; udata.common.storage = &layout->storage.u.chunk; - udata.common.offset = chunk_offset; + udata.common.scaled = scaled; udata.chunk_block.offset = HADDR_UNDEF; - H5_ASSIGN_OVERFLOW(udata.chunk_block.length, chunk_size, size_t, uint32_t); + H5_CHECKED_ASSIGN(udata.chunk_block.length, uint32_t, chunk_size, size_t); udata.filter_mask = filter_mask; /* Allocate the chunk (with all processes) */ @@ -3549,12 +3621,12 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite, /* Increment indices and adjust the edge chunk state */ carry = TRUE; for(i = ((int)space_ndims - 1); i >= 0; --i) { - chunk_offset[i] += chunk_dim[i]; - if(chunk_offset[i] > max_unalloc[i]) { + scaled[i]++; + if(scaled[i] > max_unalloc[i]) { if((unsigned)i == op_dim) - chunk_offset[i] = min_unalloc[i]; + scaled[i] = min_unalloc[i]; else - chunk_offset[i] = 0; + scaled[i] = 0; } /* end if */ else { carry = FALSE; @@ -3569,7 +3641,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite, if(min_unalloc[op_dim] == 0) break; else - max_unalloc[op_dim] = min_unalloc[op_dim] - chunk_dim[op_dim]; + max_unalloc[op_dim] = min_unalloc[op_dim] - 1; } /* end for(op_dim=0...) 
*/ #ifdef H5_HAVE_PARALLEL @@ -3656,9 +3728,9 @@ H5D__chunk_collective_fill(const H5D_t *dset, hid_t dxpl_id, leftover_blocks = chunk_info->num_io % mpi_size; /* Cast values to types needed by MPI */ - H5_ASSIGN_OVERFLOW(blocks, num_blocks, size_t, int); - H5_ASSIGN_OVERFLOW(leftover, leftover_blocks, size_t, int); - H5_ASSIGN_OVERFLOW(block_len, chunk_size, size_t, int); + H5_CHECKED_ASSIGN(blocks, int, num_blocks, size_t); + H5_CHECKED_ASSIGN(leftover, int, leftover_blocks, size_t); + H5_CHECKED_ASSIGN(block_len, int, chunk_size, size_t); /* Allocate buffers */ /* (MSC - should not need block_lens if MPI_type_create_hindexed_block is working) */ @@ -3771,7 +3843,7 @@ H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata) const H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */ const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset's layout */ unsigned rank = udata->common.layout->ndims - 1; /* Dataset rank */ - const hsize_t *chunk_offset = io_info->store->chunk.offset; /* Chunk offset */ + const hsize_t *scaled = udata->common.scaled; /* Scaled chunk offset */ H5S_sel_iter_t chunk_iter; /* Memory selection iteration info */ hssize_t sel_nelmts; /* Number of elements in selection */ hsize_t count[H5O_LAYOUT_NDIMS]; /* Element count of hyperslab */ @@ -3787,10 +3859,10 @@ H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata) /* Get the chunk's size */ HDassert(layout->u.chunk.size > 0); - H5_ASSIGN_OVERFLOW(chunk_size, layout->u.chunk.size, uint32_t, size_t); + H5_CHECKED_ASSIGN(chunk_size, size_t, layout->u.chunk.size, uint32_t); /* Get the info for the chunk in the file */ - if(H5D__chunk_lookup(dset, io_info->dxpl_id, chunk_offset, io_info->store->chunk.index, &chk_udata) < 0) + if(H5D__chunk_lookup(dset, io_info->dxpl_id, scaled, &chk_udata) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") /* If this chunk does not exist in cache or on disk, no need to do anything */ @@ -3810,7 +3882,7 @@ H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata) /* Compute the # of elements to leave with existing value, in each dimension */ for(u = 0; u < rank; u++) { - count[u] = MIN(layout->u.chunk.dim[u], (udata->space_dim[u] - chunk_offset[u])); + count[u] = MIN(layout->u.chunk.dim[u], (udata->space_dim[u] - (scaled[u] * layout->u.chunk.dim[u]))); HDassert(count[u] > 0); } /* end for */ @@ -3973,13 +4045,10 @@ done: herr_t H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) { - hsize_t min_mod_chunk_off[H5O_LAYOUT_NDIMS]; /* Offset of first chunk to modify in each dimension */ - hsize_t max_mod_chunk_off[H5O_LAYOUT_NDIMS]; /* Offset of last chunk to modify in each dimension */ - hssize_t max_fill_chunk_off[H5O_LAYOUT_NDIMS]; /* Offset of last chunk that might be filled in each dimension */ + hsize_t min_mod_chunk_off[H5O_LAYOUT_NDIMS]; /* Scaled offset of first chunk to modify in each dimension */ + hsize_t max_mod_chunk_off[H5O_LAYOUT_NDIMS]; /* Scaled offset of last chunk to modify in each dimension */ + hssize_t max_fill_chunk_off[H5O_LAYOUT_NDIMS]; /* Scaled offset of last chunk that might be filled in each dimension */ hbool_t fill_dim[H5O_LAYOUT_NDIMS]; /* Whether the plane of edge chunks in this dimension needs to be filled */ - hbool_t dims_outside_fill[H5O_LAYOUT_NDIMS]; /* Dimensions in chunk offset outside fill dimensions */ - int ndims_outside_fill = 0; /* Number of dimensions in chunk offset outside fill dimensions */ - hbool_t has_fill = FALSE; /* Whether there are chunks that must be filled */ H5D_chk_idx_info_t 
idx_info; /* Chunked index info */ H5D_io_info_t chk_io_info; /* Chunked I/O info object */ H5D_storage_t chk_store; /* Chunk storage information */ @@ -3987,7 +4056,6 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */ const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset's layout */ const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */ - H5D_rdcc_ent_t *ent = NULL; /* Cache entry */ unsigned space_ndims; /* Dataset's space rank */ const hsize_t *space_dim; /* Current dataspace dimensions */ unsigned op_dim; /* Current operating dimension */ @@ -3997,10 +4065,9 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) H5D_chunk_common_ud_t idx_udata; /* User data for index removal routine */ H5S_t *chunk_space = NULL; /* Dataspace for a chunk */ hsize_t chunk_dim[H5O_LAYOUT_NDIMS]; /* Chunk dimensions */ - hsize_t chunk_offset[H5O_LAYOUT_NDIMS]; /* Offset of current chunk */ + hsize_t scaled[H5O_LAYOUT_NDIMS]; /* Scaled offset of current chunk */ hsize_t hyper_start[H5O_LAYOUT_NDIMS]; /* Starting location of hyperslab */ uint32_t elmts_per_chunk; /* Elements in chunk */ - hbool_t carry; /* Flag to indicate that chunk increment carrys to higher dimension (sorta) */ unsigned u; /* Local index variable */ herr_t ret_value = SUCCEED; /* Return value */ @@ -4019,8 +4086,8 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) space_dim = dset->shared->curr_dims; space_ndims = dset->shared->ndims; - /* The last dimension in chunk_offset is always 0 */ - chunk_offset[space_ndims] = (hsize_t)0; + /* The last dimension in scaled is always 0 */ + scaled[space_ndims] = (hsize_t)0; /* Check if any old dimensions are 0, if so we do not have to do anything */ for(op_dim = 0; op_dim < (unsigned)space_ndims; op_dim++) @@ -4039,7 +4106,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) for(u = 0; u < space_ndims; u++) { elmts_per_chunk *= layout->u.chunk.dim[u]; chunk_dim[u] = layout->u.chunk.dim[u]; - shrunk_dim[u] = space_dim[u] < old_dim[u]; + shrunk_dim[u] = (space_dim[u] < old_dim[u]); } /* end for */ /* Create a dataspace for a chunk & set the extent */ @@ -4051,9 +4118,9 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) HDmemset(hyper_start, 0, sizeof(hyper_start)); /* Set up chunked I/O info object, for operations on chunks (in callback) - * Note that we only need to set chunk_offset once, as the array's address + * Note that we only need to set scaled once, as the array's address * will never change. 
*/ - chk_store.chunk.offset = chunk_offset; + chk_store.chunk.scaled = scaled; H5D_BUILD_IO_INFO_RD(&chk_io_info, dset, dxpl_cache, dxpl_id, &chk_store, NULL); /* Compose chunked index info struct */ @@ -4067,6 +4134,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) HDmemset(&udata, 0, sizeof udata); udata.common.layout = &layout->u.chunk; udata.common.storage = &layout->storage.u.chunk; + udata.common.scaled = scaled; udata.io_info = &chk_io_info; udata.idx_info = &idx_info; udata.space_dim = space_dim; @@ -4088,16 +4156,14 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) for(op_dim = 0; op_dim < (unsigned)space_ndims; op_dim++) { /* Calculate the largest offset of chunks that might need to be * modified in this dimension */ - max_mod_chunk_off[op_dim] = chunk_dim[op_dim] * ((old_dim[op_dim] - 1) - / chunk_dim[op_dim]); + max_mod_chunk_off[op_dim] = (old_dim[op_dim] - 1) / chunk_dim[op_dim]; /* Calculate the largest offset of chunks that might need to be * filled in this dimension */ if(0 == space_dim[op_dim]) max_fill_chunk_off[op_dim] = -1; else - max_fill_chunk_off[op_dim] = (hssize_t)(chunk_dim[op_dim] - * ((MIN(space_dim[op_dim], old_dim[op_dim]) - 1) + max_fill_chunk_off[op_dim] = (hssize_t)(((MIN(space_dim[op_dim], old_dim[op_dim]) - 1) / chunk_dim[op_dim])); if(shrunk_dim[op_dim]) { @@ -4105,14 +4171,11 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) * modified in this dimension. Note that this array contains * garbage for all dimensions which are not shrunk. These locations * must not be read from! */ - min_mod_chunk_off[op_dim] = chunk_dim[op_dim] * (space_dim[op_dim] - / chunk_dim[op_dim]); + min_mod_chunk_off[op_dim] = space_dim[op_dim] / chunk_dim[op_dim]; /* Determine if we need to fill chunks in this dimension */ - if((hssize_t)min_mod_chunk_off[op_dim] == max_fill_chunk_off[op_dim]) { + if((hssize_t)min_mod_chunk_off[op_dim] == max_fill_chunk_off[op_dim]) fill_dim[op_dim] = TRUE; - has_fill = TRUE; - } /* end if */ else fill_dim[op_dim] = FALSE; } /* end if */ @@ -4122,38 +4185,38 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) /* Main loop: fill or remove chunks */ for(op_dim = 0; op_dim < (unsigned)space_ndims; op_dim++) { + hbool_t dims_outside_fill[H5O_LAYOUT_NDIMS]; /* Dimensions in chunk offset outside fill dimensions */ + int ndims_outside_fill; /* Number of dimensions in chunk offset outside fill dimensions */ + hbool_t carry; /* Flag to indicate that chunk increment carrys to higher dimension (sorta) */ + /* Check if modification along this dimension is really necessary */ if(!shrunk_dim[op_dim]) continue; else { - HDassert((hsize_t) max_mod_chunk_off[op_dim] >= min_mod_chunk_off[op_dim]); + HDassert(max_mod_chunk_off[op_dim] >= min_mod_chunk_off[op_dim]); /* Reset the chunk offset indices */ - HDmemset(chunk_offset, 0, (space_ndims * sizeof(chunk_offset[0]))); - chunk_offset[op_dim] = min_mod_chunk_off[op_dim]; + HDmemset(scaled, 0, (space_ndims * sizeof(scaled[0]))); + scaled[op_dim] = min_mod_chunk_off[op_dim]; /* Initialize "dims_outside_fill" array */ ndims_outside_fill = 0; for(u = 0; u < space_ndims; u++) - if((hssize_t)chunk_offset[u] > max_fill_chunk_off[u]) { + if((hssize_t)scaled[u] > max_fill_chunk_off[u]) { dims_outside_fill[u] = TRUE; ndims_outside_fill++; } /* end if */ else dims_outside_fill[u] = FALSE; - - carry = FALSE; } /* end if */ + carry = FALSE; while(!carry) { int i; /* Local index variable */ - /* 
Calculate the index of this chunk */ - chk_io_info.store->chunk.index = H5VM_chunk_index(space_ndims, chunk_offset, layout->u.chunk.dim, layout->u.chunk.down_chunks); - if(0 == ndims_outside_fill) { HDassert(fill_dim[op_dim]); - HDassert(chunk_offset[op_dim] == min_mod_chunk_off[op_dim]); + HDassert(scaled[op_dim] == min_mod_chunk_off[op_dim]); /* Fill the unused parts of the chunk */ if(H5D__chunk_prune_fill(&udata) < 0) @@ -4168,7 +4231,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) hbool_t outside_dim = FALSE; for(u = 0; u < space_ndims; u++) - if(chunk_offset[u] >= space_dim[u]) { + if((scaled[u] * chunk_dim[u]) >= space_dim[u]) { outside_dim = TRUE; break; } /* end if */ @@ -4177,7 +4240,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) #endif /* NDEBUG */ /* Check if the chunk exists in cache or on disk */ - if(H5D__chunk_lookup(dset, dxpl_id, chunk_offset, chk_io_info.store->chunk.index, &chk_udata) < 0) + if(H5D__chunk_lookup(dset, dxpl_id, scaled, &chk_udata) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk") /* Evict the entry from the cache if present, but do not flush @@ -4189,7 +4252,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) /* Remove the chunk from disk, if present */ if(H5F_addr_defined(chk_udata.chunk_block.offset)) { /* Update the offset in idx_udata */ - idx_udata.offset = chunk_offset; + idx_udata.scaled = scaled; /* Remove the chunk from disk */ if((layout->storage.u.chunk.ops->remove)(&idx_info, &idx_udata) < 0) @@ -4200,19 +4263,19 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) /* Increment indices */ carry = TRUE; for(i = (int)(space_ndims - 1); i >= 0; --i) { - chunk_offset[i] += chunk_dim[i]; - if(chunk_offset[i] > (hsize_t) max_mod_chunk_off[i]) { + scaled[i]++; + if(scaled[i] > max_mod_chunk_off[i]) { /* Left maximum dimensions, "wrap around" and check if this * dimension is no longer outside the fill dimension */ if((unsigned)i == op_dim) { - chunk_offset[i] = min_mod_chunk_off[i]; + scaled[i] = min_mod_chunk_off[i]; if(dims_outside_fill[i] && fill_dim[i]) { dims_outside_fill[i] = FALSE; ndims_outside_fill--; } /* end if */ } /* end if */ else { - chunk_offset[i] = 0; + scaled[i] = 0; if(dims_outside_fill[i] && max_fill_chunk_off[i] >= 0) { dims_outside_fill[i] = FALSE; ndims_outside_fill--; @@ -4221,7 +4284,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) } /* end if */ else { /* Check if we just went outside the fill dimension */ - if(!dims_outside_fill[i] && (hssize_t)chunk_offset[i] > max_fill_chunk_off[i]) { + if(!dims_outside_fill[i] && (hssize_t)scaled[i] > max_fill_chunk_off[i]) { dims_outside_fill[i] = TRUE; ndims_outside_fill++; } /* end if */ @@ -4239,7 +4302,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) if(min_mod_chunk_off[op_dim] == 0) break; else - max_mod_chunk_off[op_dim] = min_mod_chunk_off[op_dim] - chunk_dim[op_dim]; + max_mod_chunk_off[op_dim] = min_mod_chunk_off[op_dim] - 1; } /* end for(op_dim=0...) 
*/ /* Reset any cached chunk info for this dataset */ @@ -4282,7 +4345,7 @@ H5D__chunk_addrmap_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) FUNC_ENTER_STATIC /* Compute the index for this chunk */ - chunk_index = H5VM_chunk_index(rank, chunk_rec->offset, udata->common.layout->dim, udata->common.layout->down_chunks); + chunk_index = H5VM_array_offset_pre(rank, udata->common.layout->down_chunks, chunk_rec->scaled); /* Set it in the userdata to return */ udata->chunk_addr[chunk_index] = chunk_rec->chunk_addr; @@ -4440,7 +4503,6 @@ H5D__chunk_update_cache(H5D_t *dset, hid_t dxpl_id) H5D_rdcc_ent_t *ent, *next; /*cache entry */ H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */ - unsigned rank; /* Current # of dimensions */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -4449,13 +4511,8 @@ H5D__chunk_update_cache(H5D_t *dset, hid_t dxpl_id) HDassert(dset && H5D_CHUNKED == dset->shared->layout.type); HDassert(dset->shared->layout.u.chunk.ndims > 0 && dset->shared->layout.u.chunk.ndims <= H5O_LAYOUT_NDIMS); - /* Get the rank */ - rank = dset->shared->layout.u.chunk.ndims - 1; - HDassert(rank > 0); - - /* 1-D dataset's chunks can't have their index change */ - if(rank == 1) - HGOTO_DONE(SUCCEED) + /* Check the rank */ + HDassert((dset->shared->layout.u.chunk.ndims - 1) > 1); /* Fill the DXPL cache values for later use */ if(H5D__get_dxpl_cache(dxpl_id, &dxpl_cache) < 0) @@ -4463,18 +4520,14 @@ H5D__chunk_update_cache(H5D_t *dset, hid_t dxpl_id) /* Recompute the index for each cached chunk that is in a dataset */ for(ent = rdcc->head; ent; ent = next) { - hsize_t idx; /* Chunk index */ unsigned old_idx; /* Previous index number */ /* Get the pointer to the next cache entry */ next = ent->next; - /* Calculate the index of this chunk */ - idx = H5VM_chunk_index(rank, ent->offset, dset->shared->layout.u.chunk.dim, dset->shared->layout.u.chunk.down_chunks); - /* Compute the index for the chunk entry */ old_idx = ent->idx; /* Save for later */ - ent->idx = H5D_CHUNK_HASH(dset->shared, idx); + ent->idx = H5D__chunk_hash_val(dset->shared, ent->scaled); if(old_idx != ent->idx) { H5D_rdcc_ent_t *old_ent; /* Old cache entry */ @@ -4558,7 +4611,7 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) FUNC_ENTER_STATIC /* Get 'size_t' local value for number of bytes in chunk */ - H5_ASSIGN_OVERFLOW(nbytes, chunk_rec->nbytes, uint32_t, size_t); + H5_CHECKED_ASSIGN(nbytes, size_t, chunk_rec->nbytes, uint32_t); /* Check parameter for type conversion */ if(udata->do_convert) { @@ -4661,7 +4714,7 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) /* Set up destination chunk callback information for insertion */ udata_dst.common.layout = udata->idx_info_dst->layout; udata_dst.common.storage = udata->idx_info_dst->storage; - udata_dst.common.offset = chunk_rec->offset; + udata_dst.common.scaled = chunk_rec->scaled; udata_dst.chunk_block.offset = HADDR_UNDEF; udata_dst.chunk_block.length = chunk_rec->nbytes; udata_dst.filter_mask = chunk_rec->filter_mask; @@ -4675,7 +4728,7 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) if(nbytes > ((size_t)0xffffffff)) HGOTO_ERROR(H5E_DATASET, H5E_BADRANGE, H5_ITER_ERROR, "chunk too large for 32-bit length") #endif /* H5_SIZEOF_SIZE_T > 4 */ - H5_ASSIGN_OVERFLOW(udata_dst.chunk_block.length, nbytes, size_t, uint32_t); + H5_CHECKED_ASSIGN(udata_dst.chunk_block.length, uint32_t, nbytes, size_t); 
udata->buf = buf; udata->buf_size = buf_size; } /* end if */ @@ -4777,7 +4830,7 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src, /* Get the dim info for dataset */ if((sndims = H5S_extent_get_dims(ds_extent_src, curr_dims, NULL)) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataspace dimensions") - H5_ASSIGN_OVERFLOW(ndims, sndims, int, unsigned); + H5_CHECKED_ASSIGN(ndims, unsigned, sndims, int); /* Set the source layout chunk information */ if(H5D__chunk_set_info_real(layout_src, ndims, curr_dims) < 0) @@ -4885,7 +4938,7 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src, do_convert = TRUE; } /* end if */ - H5_ASSIGN_OVERFLOW(buf_size, layout_src->size, uint32_t, size_t); + H5_CHECKED_ASSIGN(buf_size, size_t, layout_src->size, uint32_t); reclaim_buf_size = 0; } /* end else */ @@ -5044,7 +5097,7 @@ H5D__chunk_dump_index_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) /* Print information about this chunk */ HDfprintf(udata->stream, " 0x%08x %8Zu %10a [", chunk_rec->filter_mask, chunk_rec->nbytes, chunk_rec->chunk_addr); for(u = 0; u < udata->ndims; u++) - HDfprintf(udata->stream, "%s%Hd", (u ? ", " : ""), chunk_rec->offset[u]); + HDfprintf(udata->stream, "%s%Hu", (u ? ", " : ""), (chunk_rec->scaled[u] * udata->chunk_dim[u])); HDfputs("]\n", udata->stream); } /* end if */ @@ -5096,6 +5149,7 @@ H5D__chunk_dump_index(H5D_t *dset, hid_t dxpl_id, FILE *stream) udata.stream = stream; udata.header_displayed = FALSE; udata.ndims = dset->shared->layout.u.chunk.ndims; + udata.chunk_dim = dset->shared->layout.u.chunk.dim; /* Iterate over index and dump chunk info */ if((dset->shared->layout.storage.u.chunk.ops->iterate)(&idx_info, H5D__chunk_dump_index_cb, &udata) < 0) @@ -5256,7 +5310,7 @@ done: *------------------------------------------------------------------------- */ static herr_t -H5D__nonexistent_readvv_cb(hsize_t UNUSED dst_off, hsize_t src_off, size_t len, +H5D__nonexistent_readvv_cb(hsize_t H5_ATTR_UNUSED dst_off, hsize_t src_off, size_t len, void *_udata) { H5D_chunk_readvv_ud_t *udata = (H5D_chunk_readvv_ud_t *)_udata; /* User data for H5VM_opvv() operator */ |
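The cache-slot hashing scheme introduced above in H5D__chunk_hash_val() can be exercised outside the library. The self-contained sketch below mirrors its logic with locally defined stand-ins for H5VM_power2up() and H5VM_log2_gen() and with made-up dataset and cache parameters; nothing here is the HDF5 API. It shows the case the new code optimizes for: when the fastest-changing dimension alone offers fewer distinct scaled values than there are cache slots, the slower dimensions are folded in by shifting each scaled coordinate into its own bit field and XORing, before reducing modulo the number of slots.

/* Editor's sketch of the scaled-coordinate hash; illustrative values only. */
#include <stdio.h>
#include <stdint.h>

/* Smallest power of two >= n (cf. H5VM_power2up) */
static uint64_t power2up(uint64_t n)
{
    uint64_t p = 1;
    while (p < n)
        p <<= 1;
    return p;
}

/* Bits needed to encode values in [0, p), p a power of two (cf. H5VM_log2_gen) */
static unsigned log2_gen(uint64_t p)
{
    unsigned b = 0;
    while (p > 1) {
        p >>= 1;
        b++;
    }
    return b;
}

int main(void)
{
    /* Hypothetical 3-D dataset: 1000 x 1000 x 10 elements, 10 x 10 x 10 chunks */
    unsigned ndims          = 3;
    uint64_t scaled_dims[3] = {100, 100, 1};   /* curr_dims[u] / chunk_dim[u] */
    uint64_t scaled[3]      = {42, 7, 0};      /* chunk to look up */
    unsigned nslots         = 521;             /* cache slots (rdcc nslots) */
    unsigned encode_bits[3];
    uint64_t val;
    unsigned u, slot;

    for (u = 0; u < ndims; u++)
        encode_bits[u] = log2_gen(power2up(scaled_dims[u]));

    /* Fastest dimension has too little entropy relative to nslots:
     * fold the slower dimensions into the hash value */
    if (ndims > 1 && scaled_dims[ndims - 1] <= nslots) {
        val = scaled[0];
        for (u = 1; u < ndims; u++) {
            val <<= encode_bits[u];
            val ^= scaled[u];
        }
    }
    else
        val = scaled[ndims - 1];

    slot = (unsigned)(val % nslots);
    printf("cache slot = %u\n", slot);
    return 0;
}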