diff options
author | Neil Fortner <nfortne2@hdfgroup.org> | 2011-12-01 15:00:27 (GMT) |
---|---|---|
committer | Neil Fortner <nfortne2@hdfgroup.org> | 2011-12-01 15:00:27 (GMT) |
commit | 19f75c804a64cefd49ee7e1ac5d9ae3ea63f68cf (patch) | |
tree | 673f12153a503187cb80fb3299644d47ca17d3cf /test | |
parent | a529e1ae551d4c2ee8c550e33563e0ae910cc136 (diff) | |
download | hdf5-19f75c804a64cefd49ee7e1ac5d9ae3ea63f68cf.zip hdf5-19f75c804a64cefd49ee7e1ac5d9ae3ea63f68cf.tar.gz hdf5-19f75c804a64cefd49ee7e1ac5d9ae3ea63f68cf.tar.bz2 |
[svn-r21789] Purpose: Fix HDFFV-7833
Description:
When shrinking a chunked dataset, the library fills in the unused parts of
chunks that have been shrunk. The fill value buffer allocated for this purpose
had a maximum size of 1 MB, but the fill was performed in a single operation.
Therefore, if the amount of unused space in a chunk after being shrunk was
greater than 1 MB, the library would read off the end of the fill value buffer.
Changed the maximum fill buffer size to be equal to the chunk size.
Tested: durandal; jam, koala, heiwa (h5committest)
Diffstat (limited to 'test')
-rw-r--r-- | test/dsets.c | 116 |
1 file changed, 116 insertions, 0 deletions
diff --git a/test/dsets.c b/test/dsets.c index 959ac43..c83518b 100644 --- a/test/dsets.c +++ b/test/dsets.c @@ -8066,6 +8066,121 @@ error: return -1; } /* end test_chunk_expand() */ + +/*------------------------------------------------------------------------- + * Function: test_large_chunk_shrink + * + * Purpose: Tests support for shrinking a chunk larger than 1 MB by a + * size greater than 1 MB. Regression test for HDFFV-7833: + * the fill value buffer was capped at 1 MB but the fill of a + * shrunken chunk's unused space was done in one operation, so + * shrinking a chunk by more than 1 MB read off the end of the + * fill value buffer. + * + * Return: Success: 0 + * Failure: -1 + * + * Programmer: Neil Fortner + * Monday, November 31, 2011 + * + *------------------------------------------------------------------------- + */ +static herr_t +test_large_chunk_shrink(hid_t fapl) +{ + char filename[FILENAME_BUF_SIZE]; + hid_t fid = -1; /* File ID */ + hid_t dcpl = -1; /* Dataset creation property list ID */ + hid_t sid = -1; /* Dataspace ID */ + hid_t scalar_sid = -1;/* Scalar dataspace ID */ + hid_t dsid = -1; /* Dataset ID */ + hsize_t dim, max_dim, chunk_dim; /* Dataset and chunk dimensions */ + hsize_t hs_offset; /* Hyperslab offset */ + hsize_t hs_size; /* Hyperslab size */ + unsigned write_elem, read_elem; /* Element written/read */ + + TESTING("shrinking large chunk"); + + h5_fixname(FILENAME[10], fapl, filename, sizeof filename); + + /* Create file */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) FAIL_STACK_ERROR + + /* Create dataset creation property list */ + if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR + + /* Set 2 MB chunk size (larger than the old 1 MB fill buffer cap) */ + chunk_dim = 2 * 1024 * 1024 / sizeof(unsigned); + if(H5Pset_chunk(dcpl, 1, &chunk_dim) < 0) FAIL_STACK_ERROR + + /* Create scalar dataspace (for single-element reads/writes) */ + if((scalar_sid = H5Screate(H5S_SCALAR)) < 0) FAIL_STACK_ERROR + + /* Create 1-D dataspace */ + dim = 2 * 1024 * 1024 / sizeof(unsigned); + max_dim = H5S_UNLIMITED; + if((sid = H5Screate_simple(1, &dim, &max_dim)) < 0) FAIL_STACK_ERROR + + /* Create 2 MB chunked dataset */ + if((dsid = H5Dcreate2(fid, "dset", H5T_NATIVE_UINT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + 
FAIL_STACK_ERROR + + /* Select last element in the dataset */ + hs_offset = dim - 1; + hs_size = 1; + if(H5Sselect_hyperslab(sid, H5S_SELECT_SET, &hs_offset, NULL, &hs_size, NULL) < 0) FAIL_STACK_ERROR + + /* Read (unwritten) element from dataset */ + read_elem = 1; + if(H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) FAIL_STACK_ERROR + + /* Verify unwritten element is fill value (0) */ + if(read_elem != 0) FAIL_PUTS_ERROR("invalid unwritten element read"); + + /* Write element to dataset */ + write_elem = 2; + if(H5Dwrite(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &write_elem) < 0) FAIL_STACK_ERROR + + /* Read element from dataset (read_elem is first set to a value + * different from write_elem so a read that silently does nothing + * cannot pass the check below) */ + read_elem = write_elem + 1; + if(H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) FAIL_STACK_ERROR + + /* Verify written element is read in */ + if(read_elem != write_elem) FAIL_PUTS_ERROR("invalid written element read"); + + /* Shrink dataset to 512 KB */ + dim = 512 * 1024 / sizeof(unsigned); + if(H5Dset_extent(dsid, &dim) < 0) FAIL_STACK_ERROR + + /* Expand dataset back to 2MB */ + dim = 2 * 1024 * 1024 / sizeof(unsigned); + if(H5Dset_extent(dsid, &dim) < 0) FAIL_STACK_ERROR + + /* Read element from dataset */ + read_elem = 1; + if(H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) FAIL_STACK_ERROR + + /* Verify element is now 0: shrinking discarded the written value and + * re-expanding refilled the reclaimed region with the fill value */ + if(read_elem != 0) FAIL_PUTS_ERROR("invalid element read"); + + /* Close everything */ + if(H5Sclose(sid) < 0) FAIL_STACK_ERROR + if(H5Sclose(scalar_sid) < 0) FAIL_STACK_ERROR + if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR + if(H5Pclose(dcpl) < 0) FAIL_STACK_ERROR + if(H5Fclose(fid) < 0) FAIL_STACK_ERROR + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY { + H5Pclose(dcpl); + H5Dclose(dsid); + H5Sclose(sid); + H5Sclose(scalar_sid); + H5Fclose(fid); + } H5E_END_TRY; + return -1; +} /* end test_large_chunk_shrink() */ + /*------------------------------------------------------------------------- * Function: 
main @@ -8193,6 +8308,7 @@ main(void) nerrors += (test_big_chunks_bypass_cache(my_fapl) < 0 ? 1 : 0); nerrors += (test_chunk_expand(my_fapl) < 0 ? 1 : 0); nerrors += (test_layout_extend(my_fapl) < 0 ? 1 : 0); + nerrors += (test_large_chunk_shrink(my_fapl) < 0 ? 1 : 0); if(H5Fclose(file) < 0) goto error; |