Diffstat (limited to 'src/H5Ddeprec.c')
-rw-r--r--  src/H5Ddeprec.c  75
1 file changed, 63 insertions(+), 12 deletions(-)
diff --git a/src/H5Ddeprec.c b/src/H5Ddeprec.c
index f4bde4b..cd2bc84 100644
--- a/src/H5Ddeprec.c
+++ b/src/H5Ddeprec.c
@@ -313,9 +313,7 @@ static herr_t
H5D__extend(H5D_t *dataset, const hsize_t *size, hid_t dxpl_id)
{
htri_t changed; /* Flag to indicate that the dataspace was successfully extended */
- H5S_t *space; /* Dataset's dataspace */
- int rank; /* Dataspace # of dimensions */
- hsize_t curr_dims[H5O_LAYOUT_NDIMS];/* Current dimension sizes */
+ hsize_t old_dims[H5S_MAX_RANK]; /* Current (i.e. old, if changed) dimension sizes */
H5O_fill_t *fill; /* Dataset's fill value */
herr_t ret_value = SUCCEED; /* Return value */
@@ -336,29 +334,82 @@ H5D__extend(H5D_t *dataset, const hsize_t *size, hid_t dxpl_id)
*/
/* Retrieve the current dimensions */
- space = dataset->shared->space;
- if((rank = H5S_get_simple_extent_dims(space, curr_dims, NULL)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions")
+ HDcompile_assert(sizeof(old_dims) == sizeof(dataset->shared->curr_dims));
+ HDmemcpy(old_dims, dataset->shared->curr_dims, H5S_MAX_RANK * sizeof(old_dims[0]));
/* Increase the size of the data space */
- if((changed = H5S_extend(space, size)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to increase size of data space")
+ if((changed = H5S_extend(dataset->shared->space, size)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to increase size of dataspace")
/* Update the dataset's info if the dataspace was successfully extended */
if(changed) {
+ /* Get the extended dimension sizes */
+ /* (Need to retrieve this here, since the 'size' dimensions could
+ * extend one dimension but be smaller in a different dimension,
+ * and the dataspace's extent is the larger of the current and
+ * 'size' dimension values. - QAK)
+ */
+ if(H5S_get_simple_extent_dims(dataset->shared->space, dataset->shared->curr_dims, NULL) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions")
+
/* Update the index values for the cached chunks for this dataset */
if(H5D_CHUNKED == dataset->shared->layout.type) {
+ hbool_t update_chunks = FALSE; /* Flag to indicate chunk cache update is needed */
+
+ /* Check if we need to track & update scaled dimension information */
+ if(dataset->shared->ndims > 1) {
+ unsigned u; /* Local index variable */
+
+ /* Update scaled chunk information */
+ for(u = 0; u < dataset->shared->ndims; u++) {
+ hsize_t scaled; /* Scaled value */
+
+ /* Compute the scaled dimension size value */
+ scaled = size[u] / dataset->shared->layout.u.chunk.dim[u];
+
+ /* Check if scaled dimension size changed */
+ if(scaled != dataset->shared->cache.chunk.scaled_dims[u]) {
+ hsize_t scaled_power2up; /* New size value, rounded to next power of 2 */
+
+ /* Check if algorithm for computing hash values will change */
+ /* (Compare against the previous scaled size, before it is updated below) */
+ if((scaled > dataset->shared->cache.chunk.nslots &&
+ dataset->shared->cache.chunk.scaled_dims[u] <= dataset->shared->cache.chunk.nslots)
+ || (scaled <= dataset->shared->cache.chunk.nslots &&
+ dataset->shared->cache.chunk.scaled_dims[u] > dataset->shared->cache.chunk.nslots))
+ update_chunks = TRUE;
+
+ /* Update the scaled dimension size value for the current dimension */
+ dataset->shared->cache.chunk.scaled_dims[u] = scaled;
+
+ /* Check if the number of bits required to encode the scaled size value changed */
+ if(dataset->shared->cache.chunk.scaled_power2up[u] != (scaled_power2up = H5VM_power2up(scaled))) {
+ /* Update the 'power2up' & 'encode_bits' values for the current dimension */
+ dataset->shared->cache.chunk.scaled_power2up[u] = scaled_power2up;
+ dataset->shared->cache.chunk.scaled_encode_bits[u] = H5VM_log2_gen(scaled_power2up);
+
+ /* Indicate that the chunk cache indices should be updated */
+ update_chunks = TRUE;
+ } /* end if */
+ } /* end if */
+ } /* end for */
+ } /* end if */
+
+ /* Update general information for chunks */
if(H5D__chunk_set_info(dataset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to update # of chunks")
- if(H5D__chunk_update_cache(dataset, dxpl_id) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update cached chunk indices")
+
+ /* Check for updating chunk cache indices */
+ if(update_chunks) {
+ /* Update the chunk cache indices */
+ if(H5D__chunk_update_cache(dataset, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update cached chunk indices")
+ } /* end if */
} /* end if */
/* Allocate space for the new parts of the dataset, if appropriate */
fill = &dataset->shared->dcpl_cache.fill;
if(fill->alloc_time == H5D_ALLOC_TIME_EARLY)
- if(H5D__alloc_storage(dataset, dxpl_id, H5D_ALLOC_EXTEND, FALSE,
- curr_dims) < 0)
+ if(H5D__alloc_storage(dataset, dxpl_id, H5D_ALLOC_EXTEND, FALSE, old_dims) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize dataset with fill value")
/* Mark the dataspace as dirty, for later writing to the file */
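
A note for readers of this change: per the comment in the patch, H5S_extend() leaves each dimension of the live extent at the larger of its current and requested sizes, which is why the new code re-reads the dimensions rather than trusting 'size'. The chunk cache, in turn, hashes cached entries by "scaled" chunk coordinates. Below is a minimal standalone sketch, not HDF5 code, of the per-dimension arithmetic the patch keeps up to date; power2up() and log2_gen() are local stand-ins for H5VM_power2up()/H5VM_log2_gen(), and all sizes (including 521 as the chunk-cache hash table size, the usual rdcc_nslots default) are assumed values for illustration only.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Round n up to the next power of two (local stand-in for H5VM_power2up) */
static uint64_t power2up(uint64_t n)
{
    uint64_t ret = 1;

    while(ret < n)
        ret <<= 1;
    return ret;
}

/* floor(log2(n)) for n >= 1 (local stand-in for H5VM_log2_gen) */
static unsigned log2_gen(uint64_t n)
{
    unsigned r = 0;

    while(n > 1) {
        n >>= 1;
        r++;
    }
    return r;
}

int main(void)
{
    uint64_t dim_size  = 1000;  /* extended size of one dataset dimension (assumed) */
    uint64_t chunk_dim = 30;    /* chunk edge length in that dimension (assumed) */
    uint64_t nslots    = 521;   /* chunk-cache hash table size (assumed default) */

    /* "Scaled" dimension size: the dimension measured in whole chunks,
     * using the same integer division as the patch */
    uint64_t scaled   = dim_size / chunk_dim;   /* 33 */
    uint64_t p2up     = power2up(scaled);       /* 64 */
    unsigned enc_bits = log2_gen(p2up);         /* 6 bits to encode a scaled coordinate */

    printf("scaled=%" PRIu64 " power2up=%" PRIu64 " encode_bits=%u\n",
           scaled, p2up, enc_bits);

    /* Mirror of the update_chunks test: a rehash of cached chunks is only
     * needed when the scaled size crosses nslots (the hash computation
     * changes form) or when encode_bits changes */
    uint64_t old_scaled = 500, new_scaled = 600;    /* hypothetical before/after */
    int rehash = (new_scaled > nslots && old_scaled <= nslots) ||
                 (new_scaled <= nslots && old_scaled > nslots);

    printf("crossing %" PRIu64 " slots: rehash %s\n",
           nslots, rehash ? "needed" : "not needed");

    return 0;
}

The numbers show the point of the update_chunks flag: H5D__chunk_update_cache() rehashes every cached chunk, so the patch now pays that cost only when the hash inputs (an nslots crossing or a change in encode_bits) actually change, instead of on every extend as before.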