author     Neil Fortner <nfortne2@hdfgroup.org>    2011-12-01 15:00:27 (GMT)
committer  Neil Fortner <nfortne2@hdfgroup.org>    2011-12-01 15:00:27 (GMT)
commit     19f75c804a64cefd49ee7e1ac5d9ae3ea63f68cf (patch)
tree       673f12153a503187cb80fb3299644d47ca17d3cf
parent     a529e1ae551d4c2ee8c550e33563e0ae910cc136 (diff)
[svn-r21789] Purpose: Fix HDFFV-7833

Description: When shrinking a chunked dataset, the library fills in the unused
parts of chunks that have been shrunk. The fill value buffer allocated for this
purpose had a maximum size of 1 MB, but the fill was performed in a single
operation. Therefore, if the amount of unused space in a chunk after being
shrunk was greater than 1 MB, the library would read off the end of the fill
value buffer. Changed the maximum fill buffer size to be equal to the chunk
size.

Tested: durandal; jam, koala, heiwa (h5committest)
-rw-r--r--  src/H5Dchunk.c |   7
-rw-r--r--  test/dsets.c   | 116
2 files changed, 122 insertions(+), 1 deletion(-)
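
For context, here is a minimal user-level reproducer of HDFFV-7833, distilled
from the new test added below. This is a sketch only: the file name
"shrink.h5" is illustrative and all error checking is omitted for brevity.
Before this fix, the H5Dset_extent() call that shrinks the dataset could read
past the end of the 1 MB fill value buffer, because the shrunken-away portion
of the 2 MB chunk is larger than 1 MB:

#include "hdf5.h"

int
main(void)
{
    hid_t    fid, dcpl, sid, scalar_sid, dsid;
    hsize_t  dim, max_dim, chunk_dim, hs_offset, hs_size = 1;
    unsigned elem = 2;

    /* One-dimensional dataset with a single 2 MB chunk of unsigned ints */
    chunk_dim = 2 * 1024 * 1024 / sizeof(unsigned);
    dim = chunk_dim;
    max_dim = H5S_UNLIMITED;

    fid  = H5Fcreate("shrink.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    dcpl = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, 1, &chunk_dim);
    sid  = H5Screate_simple(1, &dim, &max_dim);
    dsid = H5Dcreate2(fid, "dset", H5T_NATIVE_UINT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);

    /* Write one element so the chunk is actually allocated in the file */
    scalar_sid = H5Screate(H5S_SCALAR);
    hs_offset = dim - 1;
    H5Sselect_hyperslab(sid, H5S_SELECT_SET, &hs_offset, NULL, &hs_size, NULL);
    H5Dwrite(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &elem);

    /* Shrink to 512 KB: more than 1 MB of the chunk must be re-filled,
     * which previously overran the fixed 1 MB fill buffer */
    dim = 512 * 1024 / sizeof(unsigned);
    H5Dset_extent(dsid, &dim);

    H5Dclose(dsid);
    H5Sclose(scalar_sid);
    H5Sclose(sid);
    H5Pclose(dcpl);
    H5Fclose(fid);
    return 0;
}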
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index 092e1b5..6a05f2d 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -3476,6 +3476,7 @@ H5D_chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
H5S_sel_iter_t chunk_iter; /* Memory selection iteration info */
hssize_t sel_nelmts; /* Number of elements in selection */
hsize_t count[H5O_LAYOUT_NDIMS]; /* Element count of hyperslab */
+ size_t chunk_size; /*size of a chunk */
void *chunk; /* The file chunk */
H5D_chunk_ud_t chk_udata; /* User data for locking chunk */
uint32_t bytes_accessed; /* Bytes accessed in chunk */
@@ -3485,6 +3486,10 @@ H5D_chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_prune_fill)
+ /* Get the chunk's size */
+ HDassert(layout->u.chunk.size > 0);
+ H5_ASSIGN_OVERFLOW(chunk_size, layout->u.chunk.size, uint32_t, size_t);
+
/* Get the info for the chunk in the file */
if(H5D_chunk_lookup(dset, io_info->dxpl_id, chunk_offset,
io_info->store->chunk.index, &chk_udata) < 0)
@@ -3501,7 +3506,7 @@ H5D_chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
if(H5D_fill_init(&udata->fb_info, NULL, NULL, NULL, NULL, NULL,
&dset->shared->dcpl_cache.fill,
dset->shared->type, dset->shared->type_id, (size_t)udata->elmts_per_chunk,
- io_info->dxpl_cache->max_temp_buf, io_info->dxpl_id) < 0)
+ chunk_size, io_info->dxpl_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize fill buffer info")
udata->fb_info_init = TRUE;
} /* end if */
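
The fix above obtains the chunk size through H5_ASSIGN_OVERFLOW, an internal
HDF5 macro. As a rough sketch of its assumed behavior (not the library's
actual definition): it assigns a value of one integer type to a variable of
another and, in debug builds, checks that the value survives the conversion
unchanged, so the uint32_t chunk size can be stored safely in a size_t:

#include <assert.h>

/* Conceptual stand-in for H5_ASSIGN_OVERFLOW(dst, src, SRCTYPE, DSTTYPE);
 * hypothetical name, shown only to illustrate the checked assignment */
#define MY_CHECKED_ASSIGN(dst, src, SRCTYPE, DSTTYPE)   \
    do {                                                \
        (dst) = (DSTTYPE)(src);                         \
        assert((SRCTYPE)(dst) == (SRCTYPE)(src));       \
    } while (0)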
diff --git a/test/dsets.c b/test/dsets.c
index 959ac43..c83518b 100644
--- a/test/dsets.c
+++ b/test/dsets.c
@@ -8066,6 +8066,121 @@ error:
return -1;
} /* end test_chunk_expand() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_large_chunk_shrink
+ *
+ * Purpose: Tests support for shrinking a dataset with chunks larger
+ * than 1 MB by an amount greater than 1 MB.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Neil Fortner
+ * Monday, November 31, 2011
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_large_chunk_shrink(hid_t fapl)
+{
+ char filename[FILENAME_BUF_SIZE];
+ hid_t fid = -1; /* File ID */
+ hid_t dcpl = -1; /* Dataset creation property list ID */
+ hid_t sid = -1; /* Dataspace ID */
+ hid_t scalar_sid = -1;/* Scalar dataspace ID */
+ hid_t dsid = -1; /* Dataset ID */
+ hsize_t dim, max_dim, chunk_dim; /* Dataset and chunk dimensions */
+ hsize_t hs_offset; /* Hyperslab offset */
+ hsize_t hs_size; /* Hyperslab size */
+ unsigned write_elem, read_elem; /* Element written/read */
+
+ TESTING("shrinking large chunk");
+
+ h5_fixname(FILENAME[10], fapl, filename, sizeof filename);
+
+ /* Create file */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) FAIL_STACK_ERROR
+
+ /* Create dataset creation property list */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR
+
+ /* Set 2 MB chunk size */
+ chunk_dim = 2 * 1024 * 1024 / sizeof(unsigned);
+ if(H5Pset_chunk(dcpl, 1, &chunk_dim) < 0) FAIL_STACK_ERROR
+
+ /* Create scalar dataspace */
+ if((scalar_sid = H5Screate(H5S_SCALAR)) < 0) FAIL_STACK_ERROR
+
+ /* Create 1-D dataspace */
+ dim = 2 * 1024 * 1024 / sizeof(unsigned);
+ max_dim = H5S_UNLIMITED;
+ if((sid = H5Screate_simple(1, &dim, &max_dim)) < 0) FAIL_STACK_ERROR
+
+ /* Create 2 MB chunked dataset */
+ if((dsid = H5Dcreate2(fid, "dset", H5T_NATIVE_UINT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Select last element in the dataset */
+ hs_offset = dim - 1;
+ hs_size = 1;
+ if(H5Sselect_hyperslab(sid, H5S_SELECT_SET, &hs_offset, NULL, &hs_size, NULL) < 0) FAIL_STACK_ERROR
+
+ /* Read (unwritten) element from dataset */
+ read_elem = 1;
+ if(H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) FAIL_STACK_ERROR
+
+ /* Verify unwritten element is fill value (0) */
+ if(read_elem != 0) FAIL_PUTS_ERROR("invalid unwritten element read");
+
+ /* Write element to dataset */
+ write_elem = 2;
+ if(H5Dwrite(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &write_elem) < 0) FAIL_STACK_ERROR
+
+ /* Read element from dataset */
+ read_elem = write_elem + 1;
+ if(H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) FAIL_STACK_ERROR
+
+ /* Verify written element is read in */
+ if(read_elem != write_elem) FAIL_PUTS_ERROR("invalid written element read");
+
+ /* Shrink dataset to 512 KB */
+ dim = 512 * 1024 / sizeof(unsigned);
+ if(H5Dset_extent(dsid, &dim) < 0) FAIL_STACK_ERROR
+
+ /* Expand dataset back to 2MB */
+ dim = 2 * 1024 * 1024 / sizeof(unsigned);
+ if(H5Dset_extent(dsid, &dim) < 0) FAIL_STACK_ERROR
+
+ /* Read element from dataset */
+ read_elem = 1;
+ if(H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) FAIL_STACK_ERROR
+
+ /* Verify element is now 0 */
+ if(read_elem != 0) FAIL_PUTS_ERROR("invalid element read");
+
+ /* Close everything */
+ if(H5Sclose(sid) < 0) FAIL_STACK_ERROR
+ if(H5Sclose(scalar_sid) < 0) FAIL_STACK_ERROR
+ if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR
+ if(H5Pclose(dcpl) < 0) FAIL_STACK_ERROR
+ if(H5Fclose(fid) < 0) FAIL_STACK_ERROR
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dcpl);
+ H5Dclose(dsid);
+ H5Sclose(sid);
+ H5Sclose(scalar_sid);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+ return -1;
+} /* end test_large_chunk_shrink() */
+
/*-------------------------------------------------------------------------
* Function: main
@@ -8193,6 +8308,7 @@ main(void)
nerrors += (test_big_chunks_bypass_cache(my_fapl) < 0 ? 1 : 0);
nerrors += (test_chunk_expand(my_fapl) < 0 ? 1 : 0);
nerrors += (test_layout_extend(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_large_chunk_shrink(my_fapl) < 0 ? 1 : 0);
if(H5Fclose(file) < 0)
goto error;