diff options
author | Pedro Vicente Nunes <pvn@hdfgroup.org> | 2008-12-29 23:00:39 (GMT) |
---|---|---|
committer | Pedro Vicente Nunes <pvn@hdfgroup.org> | 2008-12-29 23:00:39 (GMT) |
commit | 4acfa70a5f00f71768d1e4157a4009c2f757997e (patch) | |
tree | d724842323cedac56dfa36f178ee2211b38aa8da /src/H5D.c | |
parent | 655520617f674274a6776f4cc5779d4c7a1196a3 (diff) | |
download | hdf5-4acfa70a5f00f71768d1e4157a4009c2f757997e.zip hdf5-4acfa70a5f00f71768d1e4157a4009c2f757997e.tar.gz hdf5-4acfa70a5f00f71768d1e4157a4009c2f757997e.tar.bz2 |
[svn-r16230]
Bug fixes: chunks on the btree were not deleted for the case when the new dimension was on the boundary of the chunk offset (comparison of offset > chunk instead of offset >= chunk). In extending the space, the new size was wrongly compared with the maximum extent possible
Modified H5Dset_extent so that it fails when called for compact datasets and for contiguous datasets with no external storage
New test program in 1.8 and 1.9: it adds tests for several ranks, use of fill value or not, compression, different fill value allocation times, use of different storage layouts, and external files
tested: windows, linux
Diffstat (limited to 'src/H5D.c')
-rw-r--r-- | src/H5D.c | 16 |
1 files changed, 15 insertions, 1 deletions
@@ -3871,6 +3871,20 @@ H5D_set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id) /* Check args */ assert(dset); assert(size); + + /* Check if we are allowed to modify the space; only datasets with chunked and external storage are allowed to be modified */ + if( H5D_COMPACT == dset->shared->layout.type ) + { + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "dataset has compact storage") + } + if( H5D_CONTIGUOUS == dset->shared->layout.type ) + { + if( 0 == dset->shared->efl.nused) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "dataset has contiguous storage") + } + + } /*------------------------------------------------------------------------- * Get the data space @@ -3929,7 +3943,7 @@ H5D_set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id) } /* Allocate space for the new parts of the dataset, if appropriate */ - if(expand && dset->shared->alloc_time==H5D_ALLOC_TIME_EARLY) + if(expand && dset->shared->alloc_time==H5D_ALLOC_TIME_EARLY ) { if(H5D_alloc_storage(dset->ent.file, dxpl_id, dset, H5D_ALLOC_EXTEND, TRUE, FALSE) < 0) { |