From d3d4d8782bbed2af7d1593759e3665c459c0ab1c Mon Sep 17 00:00:00 2001 From: Neil Fortner Date: Wed, 24 Mar 2010 11:41:14 -0500 Subject: [svn-r18445] Purpose: Fix bug 1637 Description: Modified algorithm for extending a dataset with early allocation so it only deals with the new chunks. Formerly, it would loop over all chunks, checking to see if each existed in cache and on disk, causing major performance issues with large numbers of chunks. Tested: jam, linew, amani (h5committest) --- release_docs/RELEASE.txt | 2 + src/H5Dbtree.c | 19 ++-- src/H5Dchunk.c | 256 +++++++++++++++++++++++++++-------------------- src/H5Ddeprec.c | 11 +- src/H5Dint.c | 33 +++--- src/H5Dio.c | 2 +- src/H5Dlayout.c | 2 +- src/H5Dpkg.h | 5 +- src/H5private.h | 2 +- test/set_extent.c | 48 +++++++++ 10 files changed, 247 insertions(+), 133 deletions(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index fbbec82..6e279f0 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -64,6 +64,8 @@ New Features Library: -------- + - Greatly improved performance of extending a dataset with early + allocation. (NAF - 2010/03/24 - 1637) - Added support for filtering densely stored groups. Many of the API functions related to filters have been extended to support dense groups as well as datasets. Pipeline messages can now be stored in a group's diff --git a/src/H5Dbtree.c b/src/H5Dbtree.c index 6ca826c..9cc15c0 100644 --- a/src/H5Dbtree.c +++ b/src/H5Dbtree.c @@ -87,6 +87,12 @@ typedef struct H5D_btree_it_ud_t { void *udata; /* User data for chunk callback routine */ } H5D_btree_it_ud_t; +/* B-tree callback info for debugging */ +typedef struct H5D_btree_dbg_t { + H5D_chunk_common_ud_t common; /* Common info for B-tree user data (must be first) */ + unsigned ndims; /* Number of dimensions */ +} H5D_btree_dbg_t; + /********************/ /* Local Prototypes */ @@ -743,7 +749,7 @@ H5D_btree_debug_key(FILE *stream, int indent, int fwidth, const void *_key, const void *_udata) { const H5D_btree_key_t *key = (const H5D_btree_key_t *)_key; - const unsigned *ndims = (const unsigned *)_udata; + const H5D_btree_dbg_t *udata = (const H5D_btree_dbg_t *)_udata; unsigned u; FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_btree_debug_key) @@ -753,7 +759,7 @@ H5D_btree_debug_key(FILE *stream, int indent, int fwidth, const void *_key, HDfprintf(stream, "%*s%-*s %u bytes\n", indent, "", fwidth, "Chunk size:", (unsigned)key->nbytes); HDfprintf(stream, "%*s%-*s 0x%08x\n", indent, "", fwidth, "Filter mask:", key->filter_mask); HDfprintf(stream, "%*s%-*s {", indent, "", fwidth, "Logical offset:"); - for(u = 0; u < *ndims; u++) + for(u = 0; u < udata->ndims; u++) HDfprintf(stream, "%s%Hd", u?", ":"", key->offset[u]); HDfputs("}\n", stream); @@ -1437,7 +1443,7 @@ herr_t H5D_btree_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, int indent, int fwidth, unsigned ndims) { - H5D_chunk_common_ud_t udata; /* User data for B-tree callback */ + H5D_btree_dbg_t udata; /* User data for B-tree callback */ H5O_storage_chunk_t storage; /* Storage information for B-tree callback */ hbool_t shared_init = FALSE; /* Whether B-tree shared info is initialized */ herr_t ret_value = SUCCEED; /* Return value */ @@ -1454,9 +1460,10 @@ H5D_btree_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, int indent shared_init = TRUE; /* Set up user data for callback */ - udata.layout = NULL; - udata.storage = &storage; - udata.offset = NULL; + udata.common.layout = NULL; + udata.common.storage = &storage; + udata.common.offset = NULL; + 
udata.ndims = ndims; /* Dump the records for the B-tree */ (void)H5B_debug(f, dxpl_id, addr, stream, indent, fwidth, H5B_BTREE, &udata); diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index 7485717..03a3785 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -3083,10 +3083,13 @@ done: *------------------------------------------------------------------------- */ herr_t -H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite) +H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite, + hsize_t old_dim[]) { H5D_chk_idx_info_t idx_info; /* Chunked index info */ const H5D_chunk_ops_t *ops = dset->shared->layout.storage.u.chunk.ops; /* Chunk operations */ + hsize_t min_unalloc[H5O_LAYOUT_NDIMS]; /* First chunk in each dimension that is unallocated */ + hsize_t max_unalloc[H5O_LAYOUT_NDIMS]; /* Last chunk in each dimension that is unallocated */ hsize_t chunk_offset[H5O_LAYOUT_NDIMS]; /* Offset of current chunk */ size_t orig_chunk_size; /* Original size of chunk in bytes */ unsigned filter_mask = 0; /* Filter mask for chunks that have them */ @@ -3107,6 +3110,8 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite) hbool_t carry; /* Flag to indicate that chunk increment carrys to higher dimension (sorta) */ int space_ndims; /* Dataset's space rank */ hsize_t space_dim[H5O_LAYOUT_NDIMS]; /* Dataset's dataspace dimensions */ + const uint32_t *chunk_dim = layout->u.chunk.dim; /* Convenience pointer to chunk dimensions */ + int op_dim; /* Current operating dimension */ H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */ hbool_t fb_info_init = FALSE; /* Whether the fill value buffer has been initialized */ hid_t data_dxpl_id; /* DXPL ID to use for raw data I/O operations */ @@ -3124,6 +3129,15 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to get simple dataspace info") space_dim[space_ndims] = layout->u.chunk.dim[space_ndims]; + /* Check if any space dimensions are 0, if so we do not have to do anything + */ + for(op_dim=0; op_dim<space_ndims; op_dim++) + if(space_dim[op_dim] == 0) { + /* Reset any cached chunk info for this dataset */ + H5D_chunk_cinfo_cache_reset(&dset->shared->cache.chunk.last); + HGOTO_DONE(SUCCEED) + } /* end if */ + #ifdef H5_HAVE_PARALLEL /* Retrieve MPI parameters */ if(IS_H5FD_MPI(dset->oloc.file)) { @@ -3208,135 +3222,161 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite) idx_info.layout = &dset->shared->layout.u.chunk; idx_info.storage = &dset->shared->layout.storage.u.chunk; - /* Reset the chunk offset indices */ - HDmemset(chunk_offset, 0, (layout->u.chunk.ndims * sizeof(chunk_offset[0]))); + /* Calculate the minimum and maximum chunk offsets in each dimension */ + for(op_dim=0; op_dim<space_ndims; op_dim++) { + min_unalloc[op_dim] = ((old_dim[op_dim] + chunk_dim[op_dim] - 1) / chunk_dim[op_dim]) * chunk_dim[op_dim]; + max_unalloc[op_dim] = ((space_dim[op_dim] - 1) / chunk_dim[op_dim]) * chunk_dim[op_dim]; + } /* end for */ + + /* Loop over all chunks */ + for(op_dim=0; op_dim<space_ndims; op_dim++) { - /* Loop over all chunks */ - carry = FALSE; - while(!carry) { - if(H5D_chunk_get_info(dset, dxpl_id, chunk_offset, &udata) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") - - if(!H5F_addr_defined(udata.addr)) { - const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /* Raw data chunk cache */ - H5D_rdcc_ent_t *ent; /* Cache entry */ - hbool_t chunk_exists; /* Flag to indicate whether a chunk exists already */ - unsigned u; /* Local index variable */ - - /* Didn't find the chunk on disk */ - chunk_exists = FALSE; - - /* Look for chunk in cache */ - for(ent = rdcc->head; ent && !chunk_exists; ent = ent->next) { - /* Assume a match */ - chunk_exists = TRUE; - for(u = 0; u < layout->u.chunk.ndims; u++) - if(ent->offset[u] != chunk_offset[u]) { - chunk_exists = FALSE; /* Reset if no match */ - break; - } /* end if */ - } /* end for */ - - /* Chunk wasn't in cache either, create it now */ - if(!chunk_exists) { - size_t chunk_size; /* Size of chunk in bytes, possibly filtered */ - - /* Check for VL datatype & non-default fill value */ - if(fb_info_init && fb_info.has_vlen_fill_type) { - /* Sanity check */ -
HDassert(should_fill); + /* Check if allocation along this dimension is really necessary */ + if(min_unalloc[op_dim] > max_unalloc[op_dim]) + carry = TRUE; + else { + /* Reset the chunk offset indices */ + HDmemset(chunk_offset, 0, (layout->u.chunk.ndims * sizeof(chunk_offset[0]))); + chunk_offset[op_dim] = min_unalloc[op_dim]; - /* Fill the buffer with VL datatype fill values */ - if(H5D_fill_refill_vl(&fb_info, fb_info.elmts_per_buf, data_dxpl_id) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "can't refill fill value buffer") + carry = FALSE; + } /* end if */ - /* Check if there are filters which need to be applied to the chunk */ - if(pline->nused > 0) { - size_t buf_size = orig_chunk_size; - size_t nbytes = fb_info.fill_buf_size; + while(!carry) { + size_t chunk_size; /* Size of chunk in bytes, possibly filtered */ - /* Push the chunk through the filters */ - if(H5Z_pipeline(pline, 0, &filter_mask, dxpl_cache->err_detect, dxpl_cache->filter_cb, &nbytes, &buf_size, &fb_info.fill_buf) < 0) - HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "output pipeline failed") +#ifndef NDEBUG + /* None of the chunks should be allocated */ + if(H5D_chunk_get_info(dset, dxpl_id, chunk_offset, &udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") + HDassert(!H5F_addr_defined(udata.addr)); +#endif /* NDEBUG */ + + /* Check for VL datatype & non-default fill value */ + if(fb_info_init && fb_info.has_vlen_fill_type) { + /* Sanity check */ + HDassert(should_fill); + + /* Fill the buffer with VL datatype fill values */ + if(H5D_fill_refill_vl(&fb_info, fb_info.elmts_per_buf, data_dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "can't refill fill value buffer") + + /* Check if there are filters which need to be applied to the chunk */ + if(pline->nused > 0) { + size_t buf_size = orig_chunk_size; + size_t nbytes = fb_info.fill_buf_size; + + /* Push the chunk through the filters */ + if(H5Z_pipeline(pline, 0, &filter_mask, dxpl_cache->err_detect, dxpl_cache->filter_cb, &nbytes, &buf_size, &fb_info.fill_buf) < 0) + HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "output pipeline failed") #if H5_SIZEOF_SIZE_T > 4 - /* Check for the chunk expanding too much to encode in a 32-bit value */ - if(nbytes > ((size_t)0xffffffff)) - HGOTO_ERROR(H5E_DATASET, H5E_BADRANGE, FAIL, "chunk too large for 32-bit length") + /* Check for the chunk expanding too much to encode in a 32-bit value */ + if(nbytes > ((size_t)0xffffffff)) + HGOTO_ERROR(H5E_DATASET, H5E_BADRANGE, FAIL, "chunk too large for 32-bit length") #endif /* H5_SIZEOF_SIZE_T > 4 */ - /* Keep the number of bytes the chunk turned in to */ - chunk_size = nbytes; - } /* end if */ - else - H5_ASSIGN_OVERFLOW(chunk_size, layout->u.chunk.size, uint32_t, size_t); + /* Keep the number of bytes the chunk turned in to */ + chunk_size = nbytes; } /* end if */ else - chunk_size = orig_chunk_size; - - /* Initialize the chunk information */ - udata.common.layout = &layout->u.chunk; - udata.common.storage = &layout->storage.u.chunk; - udata.common.offset = chunk_offset; - H5_ASSIGN_OVERFLOW(udata.nbytes, chunk_size, size_t, uint32_t); - udata.filter_mask = filter_mask; - udata.addr = HADDR_UNDEF; - - /* Allocate the chunk with all processes */ - if((ops->insert)(&idx_info, &udata) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert record into chunk index") - HDassert(H5F_addr_defined(udata.addr)); - - /* Check if fill values should be written to chunks */ - if(should_fill) { - /* Sanity check */ - 
HDassert(fb_info_init); - HDassert(udata.nbytes == chunk_size); + H5_ASSIGN_OVERFLOW(chunk_size, layout->u.chunk.size, uint32_t, size_t); + } /* end if */ + else + chunk_size = orig_chunk_size; + + /* Initialize the chunk information */ + udata.common.layout = &layout->u.chunk; + udata.common.storage = &layout->storage.u.chunk; + udata.common.offset = chunk_offset; + H5_ASSIGN_OVERFLOW(udata.nbytes, chunk_size, size_t, uint32_t); + udata.filter_mask = filter_mask; + udata.addr = HADDR_UNDEF; + + /* Allocate the chunk with all processes */ + if((ops->insert)(&idx_info, &udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert record into chunk index") + HDassert(H5F_addr_defined(udata.addr)); + + /* Check if fill values should be written to chunks */ + if(should_fill) { + /* Sanity check */ + HDassert(fb_info_init); + HDassert(udata.nbytes == chunk_size); #ifdef H5_HAVE_PARALLEL - /* Check if this file is accessed with an MPI-capable file driver */ - if(using_mpi) { - /* Write the chunks out from only one process */ - /* !! Use the internal "independent" DXPL!! -QAK */ - if(H5_PAR_META_WRITE == mpi_rank) - if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size, data_dxpl_id, fb_info.fill_buf) < 0) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file") - - /* Indicate that blocks are being written */ - blocks_written = TRUE; - } /* end if */ - else { -#endif /* H5_HAVE_PARALLEL */ + /* Check if this file is accessed with an MPI-capable file driver */ + if(using_mpi) { + /* Write the chunks out from only one process */ + /* !! Use the internal "independent" DXPL!! -QAK */ + if(H5_PAR_META_WRITE == mpi_rank) if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size, data_dxpl_id, fb_info.fill_buf) < 0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file") + + /* Indicate that blocks are being written */ + blocks_written = TRUE; + } /* end if */ + else { +#endif /* H5_HAVE_PARALLEL */ + if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size, data_dxpl_id, fb_info.fill_buf) < 0) + HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file") #ifdef H5_HAVE_PARALLEL - } /* end else */ + } /* end else */ #endif /* H5_HAVE_PARALLEL */ - } /* end if */ - - /* Release the fill buffer if we need to re-allocate it each time */ - if(fb_info_init && fb_info.has_vlen_fill_type && pline->nused > 0) - H5D_fill_release(&fb_info); } /* end if */ - } /* end if */ - /* Increment indices */ - carry = TRUE; - for(i = (int)(space_ndims - 1); i >= 0; --i) { - chunk_offset[i] += layout->u.chunk.dim[i]; - if(chunk_offset[i] >= space_dim[i]) - chunk_offset[i] = 0; - else { - carry = FALSE; - break; - } /* end else */ - } /* end for */ - } /* end while */ + /* Release the fill buffer if we need to re-allocate it each time */ + if(fb_info_init && fb_info.has_vlen_fill_type && pline->nused > 0) + H5D_fill_release(&fb_info); + + /* Increment indices */ + carry = TRUE; + for(i = (int)(space_ndims - 1); i >= 0; --i) { + chunk_offset[i] += chunk_dim[i]; + if(chunk_offset[i] > max_unalloc[i]) + if(i == op_dim) + chunk_offset[i] = min_unalloc[i]; + else + chunk_offset[i] = 0; + else { + carry = FALSE; + break; + } /* end else */ + } /* end for */ + } /* end while(!carry) */ + + /* Adjust max_unalloc_dim_idx so we don't allocate the same chunk twice. + * Also check if this dimension started from 0 (and hence allocated all + * of the chunks. 
*/ + if(min_unalloc[op_dim] == 0) + break; + else + max_unalloc[op_dim] = min_unalloc[op_dim] - chunk_dim[op_dim]; + } /* end for(op_dim=0...) */ #ifdef H5_HAVE_PARALLEL /* Only need to block at the barrier if we actually initialized a chunk */ diff --git a/src/H5Ddeprec.c b/src/H5Ddeprec.c index de7f48a..e208910 100644 --- a/src/H5Ddeprec.c +++ b/src/H5Ddeprec.c @@ -320,6 +320,8 @@ H5D_extend(H5D_t *dataset, const hsize_t *size, hid_t dxpl_id) { htri_t changed; /* Flag to indicate that the dataspace was successfully extended */ H5S_t *space; /* Dataset's dataspace */ + int rank; /* Dataspace # of dimensions */ + hsize_t curr_dims[H5O_LAYOUT_NDIMS];/* Current dimension sizes */ H5O_fill_t *fill; /* Dataset's fill value */ herr_t ret_value = SUCCEED; /* Return value */ @@ -339,8 +341,12 @@ H5D_extend(H5D_t *dataset, const hsize_t *size, hid_t dxpl_id) * able to muck things up. */ - /* Increase the size of the data space */ + /* Retrieve the current dimensions */ space = dataset->shared->space; + if((rank = H5S_get_simple_extent_dims(space, curr_dims, NULL)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions") + + /* Increase the size of the data space */ if((changed = H5S_extend(space, size)) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to increase size of data space") @@ -357,7 +363,8 @@ H5D_extend(H5D_t *dataset, const hsize_t *size, hid_t dxpl_id) /* Allocate space for the new parts of the dataset, if appropriate */ fill = &dataset->shared->dcpl_cache.fill; if(fill->alloc_time == H5D_ALLOC_TIME_EARLY) - if(H5D_alloc_storage(dataset, dxpl_id, H5D_ALLOC_EXTEND, FALSE) < 0) + if(H5D_alloc_storage(dataset, dxpl_id, H5D_ALLOC_EXTEND, FALSE, + curr_dims) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize dataset with fill value") /* Mark the dataspace as dirty, for later writing to the file */ diff --git a/src/H5Dint.c b/src/H5Dint.c index 5a9c0ea..d3800e8 100644 --- a/src/H5Dint.c +++ b/src/H5Dint.c @@ -58,7 +58,8 @@ typedef struct { /********************/ /* General stuff */ -static herr_t H5D_init_storage(H5D_t *dataset, hbool_t full_overwrite, hid_t dxpl_id); +static herr_t H5D_init_storage(H5D_t *dataset, hbool_t full_overwrite, + hsize_t old_dim[], hid_t dxpl_id); static herr_t H5D_get_dxpl_cache_real(hid_t dxpl_id, H5D_dxpl_cache_t *cache); static H5D_shared_t *H5D_new(hid_t dcpl_id, hbool_t creating, hbool_t vl_type); @@ -1303,7 +1304,7 @@ H5D_open_oid(H5D_t *dataset, hid_t dapl_id, hid_t dxpl_id) if((H5F_INTENT(dataset->oloc.file) & H5F_ACC_RDWR) && !(*dataset->shared->layout.ops->is_space_alloc)(&dataset->shared->layout.storage) && IS_H5FD_MPI(dataset->oloc.file)) { - if(H5D_alloc_storage(dataset, dxpl_id, H5D_ALLOC_OPEN, FALSE) < 0) + if(H5D_alloc_storage(dataset, dxpl_id, H5D_ALLOC_OPEN, FALSE, NULL) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize file storage") } /* end if */ @@ -1560,7 +1561,7 @@ H5D_typeof(const H5D_t *dset) */ herr_t H5D_alloc_storage(H5D_t *dset/*in,out*/, hid_t dxpl_id, H5D_time_alloc_t time_alloc, - hbool_t full_overwrite) + hbool_t full_overwrite, hsize_t old_dim[]) { H5F_t *f = dset->oloc.file; /* The dataset's file pointer */ H5O_layout_t *layout; /* The dataset's layout information */ @@ -1655,7 +1656,7 @@ H5D_alloc_storage(H5D_t *dset/*in,out*/, hid_t dxpl_id, H5D_time_alloc_t time_al * this is icky. 
-QAK */ if(!(dset->shared->dcpl_cache.fill.alloc_time == H5D_ALLOC_TIME_INCR && time_alloc == H5D_ALLOC_WRITE)) - if(H5D_init_storage(dset, full_overwrite, dxpl_id) < 0) + if(H5D_init_storage(dset, full_overwrite, old_dim, dxpl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize dataset with fill value") } /* end if */ else { @@ -1669,7 +1670,7 @@ H5D_alloc_storage(H5D_t *dset/*in,out*/, hid_t dxpl_id, H5D_time_alloc_t time_al * the fill value _is_ set, do that now */ if(dset->shared->dcpl_cache.fill.fill_time == H5D_FILL_TIME_ALLOC || (dset->shared->dcpl_cache.fill.fill_time == H5D_FILL_TIME_IFSET && fill_status == H5D_FILL_VALUE_USER_DEFINED)) { - if(H5D_init_storage(dset, full_overwrite, dxpl_id) < 0) + if(H5D_init_storage(dset, full_overwrite, old_dim, dxpl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize dataset with fill value") } /* end if */ } /* end else */ @@ -1706,7 +1707,8 @@ done: *------------------------------------------------------------------------- */ static herr_t -H5D_init_storage(H5D_t *dset, hbool_t full_overwrite, hid_t dxpl_id) +H5D_init_storage(H5D_t *dset, hbool_t full_overwrite, hsize_t old_dim[], + hid_t dxpl_id) { herr_t ret_value = SUCCEED; /* Return value */ @@ -1737,9 +1739,17 @@ H5D_init_storage(H5D_t *dset, hbool_t full_overwrite, hid_t dxpl_id) * Allocate file space * for all chunks now and initialize each chunk with the fill value. */ - if(H5D_chunk_allocate(dset, dxpl_id, full_overwrite) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to allocate all chunks of dataset") - break; + { + hsize_t zero_dim[H5O_LAYOUT_NDIMS] = {0}; + + /* Use zeros for old dimensions if not specified */ + if(old_dim == NULL) + old_dim = zero_dim; + + if(H5D_chunk_allocate(dset, dxpl_id, full_overwrite, old_dim) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to allocate all chunks of dataset") + break; + } /* end block */ default: HDassert("not implemented yet" && 0); @@ -2157,9 +2167,8 @@ H5D_set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id) /* Allocate space for the new parts of the dataset, if appropriate */ if(expand && dset->shared->dcpl_cache.fill.alloc_time == H5D_ALLOC_TIME_EARLY) - if(H5D_alloc_storage(dset, dxpl_id, H5D_ALLOC_EXTEND, FALSE) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize dataset storage") - + if(H5D_alloc_storage(dset, dxpl_id, H5D_ALLOC_EXTEND, FALSE, curr_dims) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to extend dataset storage") /*------------------------------------------------------------------------- * Remove chunk information in the case of chunked datasets diff --git a/src/H5Dio.c b/src/H5Dio.c index 5723847..d3120b1 100644 --- a/src/H5Dio.c +++ b/src/H5Dio.c @@ -549,7 +549,7 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space, full_overwrite = (hbool_t)((hsize_t)file_nelmts == nelmts ? TRUE : FALSE); /* Allocate storage */ - if(H5D_alloc_storage(dataset, dxpl_id, H5D_ALLOC_WRITE, full_overwrite) < 0) + if(H5D_alloc_storage(dataset, dxpl_id, H5D_ALLOC_WRITE, full_overwrite, NULL) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize storage") } /* end if */ diff --git a/src/H5Dlayout.c b/src/H5Dlayout.c index d2ad2ac..b6382df 100644 --- a/src/H5Dlayout.c +++ b/src/H5Dlayout.c @@ -230,7 +230,7 @@ H5D_layout_oh_create(H5F_t *file, hid_t dxpl_id, H5O_t *oh, H5D_t *dset, * allocation until later. 
*/ if(fill_prop->alloc_time == H5D_ALLOC_TIME_EARLY) - if(H5D_alloc_storage(dset, dxpl_id, H5D_ALLOC_CREATE, FALSE) < 0) + if(H5D_alloc_storage(dset, dxpl_id, H5D_ALLOC_CREATE, FALSE, NULL) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize storage") /* Update external storage message, if it's used */ diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h index 9fde879..38f1ab4 100644 --- a/src/H5Dpkg.h +++ b/src/H5Dpkg.h @@ -526,7 +526,7 @@ H5_DLL H5D_t *H5D_create_named(const H5G_loc_t *loc, const char *name, H5_DLL herr_t H5D_get_space_status(H5D_t *dset, H5D_space_status_t *allocation, hid_t dxpl_id); H5_DLL herr_t H5D_alloc_storage(H5D_t *dset, hid_t dxpl_id, H5D_time_alloc_t time_alloc, - hbool_t full_overwrite); + hbool_t full_overwrite, hsize_t old_dim[]); H5_DLL hsize_t H5D_get_storage_size(H5D_t *dset, hid_t dxpl_id); H5_DLL haddr_t H5D_get_offset(const H5D_t *dset); H5_DLL herr_t H5D_iterate(void *buf, hid_t type_id, const H5S_t *space, @@ -608,7 +608,8 @@ H5_DLL herr_t H5D_chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata, hbool_t dirty, unsigned idx_hint, void *chunk, uint32_t naccessed); H5_DLL herr_t H5D_chunk_allocated(H5D_t *dset, hid_t dxpl_id, hsize_t *nbytes); -H5_DLL herr_t H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite); +H5_DLL herr_t H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, + hbool_t full_overwrite, hsize_t old_dim[]); H5_DLL herr_t H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dims); #ifdef H5_HAVE_PARALLEL diff --git a/src/H5private.h b/src/H5private.h index 5be50bc..277fe70 100644 --- a/src/H5private.h +++ b/src/H5private.h @@ -1143,7 +1143,7 @@ H5_DLL int HDfprintf (FILE *stream, const char *fmt, ...); #ifndef HDsrandom #define HDsrandom(S) HDsrand(S) #endif /* HDsrandom */ - #elif H5_HAVE_RANDOM +#elif H5_HAVE_RANDOM #ifndef HDsrand #define HDsrand(S) srandom(S) #endif /* HDsrand */ diff --git a/test/set_extent.c b/test/set_extent.c index b35af8e..9d669a2 100644 --- a/test/set_extent.c +++ b/test/set_extent.c @@ -1353,6 +1353,54 @@ static int test_rank2( hid_t fapl, /*------------------------------------------------------------------------- + * expand then shrink to 0 in dimension 1 while expanding again in + * dimension 0 + * + *------------------------------------------------------------------------- + */ + + + /* expand to original dimensions for the array. */ + if (H5Dset_extent(did , dims_o) < 0) + { + TEST_ERROR + } + + dims_s[0] = dims_e[0]; + dims_s[1] = 0; + + /* set new dimensions for the array. */ + if (H5Dset_extent(did , dims_s) < 0) + { + TEST_ERROR + } + + /* get the space */ + if ((sid = H5Dget_space(did)) < 0) + { + TEST_ERROR + } + + /* get dimensions */ + if (H5Sget_simple_extent_dims(sid, dims_r, NULL) < 0) + { + TEST_ERROR + } + + if (H5Sclose(sid) < 0) + { + TEST_ERROR + } + + /* check dimensions */ + for( i = 0; i < RANK2; i++ ) + { + if (dims_r[i] != dims_s[i]) + TEST_ERROR + } + + + /*------------------------------------------------------------------------- * close dataset *------------------------------------------------------------------------- */ -- cgit v0.12
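A note on the new allocation strategy, for readers who do not want to trace it through the diff above: the reworked H5D_chunk_allocate() computes, per dimension, the offset of the first chunk that lies beyond the old extent (min_unalloc) and the offset of the last chunk in the new extent (max_unalloc), then visits only the newly exposed chunks, one "operating dimension" at a time, shrinking max_unalloc after each pass so no chunk is visited twice. The following standalone sketch models that traversal in plain C; it is illustrative only (the function names, the fixed rank, and the toy driver in main() are not HDF5 code, and fill-value writing, filters, and MPI handling are left out).

    #include <stdio.h>
    #include <stdint.h>

    #define NDIMS 2                 /* rank of the toy example below */

    /* Stand-in for "insert chunk into the index and write its fill value" */
    static void allocate_chunk(const uint64_t offset[NDIMS])
    {
        int i;

        printf("allocate chunk at offset {");
        for(i = 0; i < NDIMS; i++)
            printf("%s%llu", i ? ", " : "", (unsigned long long)offset[i]);
        printf("}\n");
    }

    /* Visit only the chunks added by growing old_dim[] to space_dim[] */
    static void allocate_new_chunks(const uint64_t old_dim[NDIMS],
        const uint64_t space_dim[NDIMS], const uint64_t chunk_dim[NDIMS])
    {
        uint64_t min_unalloc[NDIMS];    /* first unallocated chunk offset, per dimension */
        uint64_t max_unalloc[NDIMS];    /* last chunk offset in the new extent, per dimension */
        uint64_t chunk_offset[NDIMS];
        int op_dim, i;

        /* A zero-sized dimension means there are no chunks at all */
        for(op_dim = 0; op_dim < NDIMS; op_dim++)
            if(space_dim[op_dim] == 0)
                return;

        for(op_dim = 0; op_dim < NDIMS; op_dim++) {
            min_unalloc[op_dim] = ((old_dim[op_dim] + chunk_dim[op_dim] - 1)
                    / chunk_dim[op_dim]) * chunk_dim[op_dim];
            max_unalloc[op_dim] = ((space_dim[op_dim] - 1) / chunk_dim[op_dim])
                    * chunk_dim[op_dim];
        }

        for(op_dim = 0; op_dim < NDIMS; op_dim++) {
            int carry;

            /* Nothing new along this dimension */
            if(min_unalloc[op_dim] > max_unalloc[op_dim])
                continue;

            /* Start at the first row of chunks that is new in this dimension */
            for(i = 0; i < NDIMS; i++)
                chunk_offset[i] = 0;
            chunk_offset[op_dim] = min_unalloc[op_dim];

            carry = 0;
            while(!carry) {
                allocate_chunk(chunk_offset);

                /* Odometer-style increment, restricted to the unvisited region */
                carry = 1;
                for(i = NDIMS - 1; i >= 0; --i) {
                    chunk_offset[i] += chunk_dim[i];
                    if(chunk_offset[i] > max_unalloc[i])
                        chunk_offset[i] = (i == op_dim) ? min_unalloc[i] : 0;
                    else {
                        carry = 0;
                        break;
                    }
                }
            }

            /* Shrink the region so later passes do not revisit these chunks;
             * if this dimension started at 0, everything is already covered. */
            if(min_unalloc[op_dim] == 0)
                break;
            max_unalloc[op_dim] = min_unalloc[op_dim] - chunk_dim[op_dim];
        }
    }

    int main(void)
    {
        /* Extend a 4x4 dataset with 2x2 chunks to 6x7 */
        uint64_t old_dim[NDIMS]   = {4, 4};
        uint64_t space_dim[NDIMS] = {6, 7};
        uint64_t chunk_dim[NDIMS] = {2, 2};

        allocate_new_chunks(old_dim, space_dim, chunk_dim);
        return 0;
    }

With the 4x4 -> 6x7 example above, only the 8 newly exposed chunks are visited; the pre-patch loop would have probed the chunk cache and the on-disk index for every one of the 12 chunks in the extended dataset, which is what made extending a dataset with many chunks so slow.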