path: root/src/H5Dint.c
author      Raymond Lu <songyulu@hdfgroup.org>  2010-09-08 17:12:18 (GMT)
committer   Raymond Lu <songyulu@hdfgroup.org>  2010-09-08 17:12:18 (GMT)
commit      ec5f5c868fb4dad3452db045c627fe258b152cd2 (patch)
tree        2d70e1cceb03c1d708eee738000681d7391cc8ae /src/H5Dint.c
parent      4d88ec662b733cb6ec119e126dc96aa74ba4632b (diff)
download    hdf5-ec5f5c868fb4dad3452db045c627fe258b152cd2.zip
            hdf5-ec5f5c868fb4dad3452db045c627fe258b152cd2.tar.gz
            hdf5-ec5f5c868fb4dad3452db045c627fe258b152cd2.tar.bz2
[svn-r19359] When a mandatory filter failed to write data chunks, the dataset
could not be closed (bug 1260). The fix releases all resources and closes the dataset, but returns a failure. Tested with h5committest - jam, heiwa, amani.
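As an illustration of the scenario the commit message describes, the sketch below registers a mandatory filter that always fails, writes to a small chunked dataset, and then closes it; the filter fails when the cached chunks are flushed at close time, and with this patch H5Dclose() still releases the dataset's resources but returns a negative value. The filter ID (256, in the user/test range), the file name, and the dataset name are arbitrary choices for this sketch, not part of the patch.

    /* Minimal sketch: a mandatory filter that always fails, so the chunk
     * flush triggered by H5Dclose() fails.  With this fix, H5Dclose() still
     * releases the dataset's resources but reports the error. */
    #include <stdio.h>
    #include "hdf5.h"

    #define FAILING_FILTER_ID 256   /* arbitrary ID in the user/test filter range */

    /* Filter callback that always reports failure (returns 0). */
    static size_t
    failing_filter(unsigned int flags, size_t cd_nelmts,
                   const unsigned int cd_values[], size_t nbytes,
                   size_t *buf_size, void **buf)
    {
        (void)flags; (void)cd_nelmts; (void)cd_values;
        (void)nbytes; (void)buf_size; (void)buf;
        return 0;                    /* 0 == filter failure */
    }

    int
    main(void)
    {
        const H5Z_class2_t cls = {
            H5Z_CLASS_T_VERS, FAILING_FILTER_ID, 1, 1,
            "always-failing filter", NULL, NULL, failing_filter
        };
        hsize_t dims[1] = {16}, chunk[1] = {4};
        int     data[16] = {0};
        hid_t   file, space, dcpl, dset;

        H5Zregister(&cls);

        file  = H5Fcreate("filter_fail.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        space = H5Screate_simple(1, dims, NULL);
        dcpl  = H5Pcreate(H5P_DATASET_CREATE);
        H5Pset_chunk(dcpl, 1, chunk);
        H5Pset_filter(dcpl, FAILING_FILTER_ID, H5Z_FLAG_MANDATORY, 0, NULL);

        dset = H5Dcreate2(file, "dset", H5T_NATIVE_INT, space,
                          H5P_DEFAULT, dcpl, H5P_DEFAULT);
        H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);

        /* The mandatory filter fails when the cached chunks are flushed here.
         * Before this patch the dataset could not be closed; with the patch
         * the close proceeds, frees everything, and reports the error. */
        if (H5Dclose(dset) < 0)
            printf("H5Dclose failed as expected, but resources were released\n");

        H5Pclose(dcpl);
        H5Sclose(space);
        H5Fclose(file);
        return 0;
    }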
Diffstat (limited to 'src/H5Dint.c')
-rw-r--r--  src/H5Dint.c  21
1 file changed, 11 insertions, 10 deletions
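The heart of the change, visible in the diff below, is replacing HGOTO_ERROR, which records the error and jumps straight to the done: label, with HDONE_ERROR, which records the error and sets the return value but lets the rest of the close path keep running. The following simplified, self-contained sketch shows only that control flow, with stand-in macros (the real HDF5 macros also push records onto the error stack); all names here are illustrative, not the actual H5D_close() code.

    #include <stdio.h>

    #define SUCCEED   0
    #define FAIL    (-1)

    /* Simplified stand-ins for the HDF5 error macros, control flow only:
     * HGOTO_ERROR records the failure and jumps to the done: label, skipping
     * any remaining work; HDONE_ERROR records the failure but lets execution
     * continue, so later cleanup still runs. */
    #define HGOTO_ERROR(msg) do { fputs(msg "\n", stderr); ret_value = FAIL; goto done; } while (0)
    #define HDONE_ERROR(msg) do { fputs(msg "\n", stderr); ret_value = FAIL; } while (0)

    static int flush_chunks(void)      { return FAIL; }     /* pretend the mandatory filter failed */
    static int free_sieve_buffer(void) { return SUCCEED; }
    static int release_ids(void)       { return SUCCEED; }

    /* A close path shaped like the patched H5D_close(): the flush failure is
     * reported and remembered, but every later release step still executes. */
    static int
    dataset_close(void *dset)
    {
        int ret_value = SUCCEED;

        if (dset == NULL)
            HGOTO_ERROR("not a dataset");                        /* hard failure: skip everything */

        if (flush_chunks() < 0)
            HDONE_ERROR("unable to flush cached dataset info");  /* was HGOTO_ERROR before the fix */
        if (free_sieve_buffer() < 0)
            HDONE_ERROR("unable to free sieve buffer");
        if (release_ids() < 0)
            HDONE_ERROR("unable to release datatype/dataspace/dcpl");

    done:
        return ret_value;
    }

    int
    main(void)
    {
        int dummy = 0;
        printf("dataset_close() -> %d\n", dataset_close(&dummy));  /* prints -1; cleanup still ran */
        return 0;
    }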
diff --git a/src/H5Dint.c b/src/H5Dint.c
index 538ff80..446d040 100644
--- a/src/H5Dint.c
+++ b/src/H5Dint.c
@@ -561,7 +561,7 @@ done:
if(ret_value == NULL)
if(new_dset != NULL) {
if(new_dset->dcpl_id != 0)
- (void)H5I_dec_ref(new_dset->dcpl_id, FALSE);
+ (void)H5I_dec_ref(new_dset->dcpl_id, FALSE, FALSE);
new_dset = H5FL_FREE(H5D_shared_t, new_dset);
} /* end if */
@@ -1061,7 +1061,7 @@ done:
if(new_dset->shared->space && H5S_close(new_dset->shared->space) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, NULL, "unable to release dataspace")
if(new_dset->shared->type) {
- if(H5I_dec_ref(new_dset->shared->type_id, FALSE) < 0)
+ if(H5I_dec_ref(new_dset->shared->type_id, FALSE, FALSE) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, NULL, "unable to release datatype")
} /* end if */
if(H5F_addr_defined(new_dset->oloc.addr)) {
@@ -1072,7 +1072,7 @@ done:
HDONE_ERROR(H5E_DATASET, H5E_CANTDELETE, NULL, "unable to delete object header")
} /* end if */
} /* end if */
- if(new_dset->shared->dcpl_id != 0 && H5I_dec_ref(new_dset->shared->dcpl_id, FALSE) < 0)
+ if(new_dset->shared->dcpl_id != 0 && H5I_dec_ref(new_dset->shared->dcpl_id, FALSE, FALSE) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, NULL, "unable to decrement ref count on property list")
new_dset->shared = H5FL_FREE(H5D_shared_t, new_dset->shared);
} /* end if */
@@ -1319,7 +1319,7 @@ done:
HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to release dataspace")
if(dataset->shared->type) {
if(dataset->shared->type_id > 0) {
- if(H5I_dec_ref(dataset->shared->type_id, FALSE) < 0)
+ if(H5I_dec_ref(dataset->shared->type_id, FALSE, FALSE) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to release datatype")
} /* end if */
else {
@@ -1367,9 +1367,9 @@ H5D_close(H5D_t *dataset)
dataset->shared->fo_count--;
if(dataset->shared->fo_count == 0) {
- /* Flush the dataset's information */
+ /* Flush the dataset's information. Continue to close even if it fails. */
if(H5D_flush_real(dataset, H5AC_dxpl_id) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to flush cached dataset info")
+ HDONE_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to flush cached dataset info")
/* Free the data sieve buffer, if it's been allocated */
if(dataset->shared->cache.contig.sieve_buf) {
@@ -1403,9 +1403,10 @@ H5D_close(H5D_t *dataset)
dataset->shared->cache.chunk.single_chunk_info = NULL;
} /* end if */
- /* Flush and destroy chunks in the cache */
+ /* Flush and destroy chunks in the cache. Continue to close even if
+ * it fails. */
if(H5D_chunk_dest(dataset->oloc.file, H5AC_dxpl_id, dataset) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "unable to destroy chunk cache")
+ HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "unable to destroy chunk cache")
break;
case H5D_COMPACT:
@@ -1426,8 +1427,8 @@ H5D_close(H5D_t *dataset)
* Release datatype, dataspace and creation property list -- there isn't
* much we can do if one of these fails, so we just continue.
*/
- free_failed = (unsigned)(H5I_dec_ref(dataset->shared->type_id, FALSE) < 0 || H5S_close(dataset->shared->space) < 0 ||
- H5I_dec_ref(dataset->shared->dcpl_id, FALSE) < 0);
+ free_failed = (unsigned)(H5I_dec_ref(dataset->shared->type_id, FALSE, FALSE) < 0 || H5S_close(dataset->shared->space) < 0 ||
+ H5I_dec_ref(dataset->shared->dcpl_id, FALSE, FALSE) < 0);
/* Remove the dataset from the list of opened objects in the file */
if(H5FO_top_decr(dataset->oloc.file, dataset->oloc.addr) < 0)