path: root/src/H5Dint.c
author    Quincey Koziol <koziol@hdfgroup.org>    2010-09-17 12:45:55 (GMT)
committer Quincey Koziol <koziol@hdfgroup.org>    2010-09-17 12:45:55 (GMT)
commit    6747ebd9858374ae912b6182024861b1710518c8 (patch)
tree      9bd75142d9dd292fe4272118f650f1c91205a988 /src/H5Dint.c
parent    9de3a84f916168831f29a4259fe93cb4823d8f57 (diff)
download  hdf5-6747ebd9858374ae912b6182024861b1710518c8.zip
          hdf5-6747ebd9858374ae912b6182024861b1710518c8.tar.gz
          hdf5-6747ebd9858374ae912b6182024861b1710518c8.tar.bz2
[svn-r19413] Description:
    Bring r19349:19411 from trunk to revise_chunks branch.

Tested on:
    FreeBSD/32 6.3 (duty) in debug mode
    FreeBSD/64 6.3 (liberty) w/C++ & FORTRAN, in debug mode
    Linux/32 2.6 (jam) w/PGI compilers, w/default API=1.8.x, w/C++ & FORTRAN, w/threadsafe, in debug mode
    Linux/64-amd64 2.6 (amani) w/Intel compilers, w/default API=1.6.x, w/C++ & FORTRAN, in production mode
    Solaris/32 2.10 (linew) w/deprecated symbols disabled, w/C++ & FORTRAN, w/szip filter, w/threadsafe, in production mode
    Linux/PPC 2.6 (heiwa) w/C++ & FORTRAN, w/threadsafe, in debug mode
    Linux/64-ia64 2.6 (cobalt) w/Intel compilers, w/C++ & FORTRAN, in production mode
    Linux/64-amd64 2.6 (abe) w/parallel, w/FORTRAN, in debug mode
    Mac OS X/32 10.6.4 (amazon) in debug mode
    Mac OS X/32 10.6.4 (amazon) w/C++ & FORTRAN, w/threadsafe, in production mode
    Mac OS X/32 10.6.4 (amazon) w/parallel, in debug mode
Diffstat (limited to 'src/H5Dint.c')
-rw-r--r--  src/H5Dint.c  27
1 file changed, 13 insertions, 14 deletions
diff --git a/src/H5Dint.c b/src/H5Dint.c
index 37db4e4..ff53db0 100644
--- a/src/H5Dint.c
+++ b/src/H5Dint.c
@@ -562,8 +562,8 @@ H5D_new(hid_t dcpl_id, hbool_t creating, hbool_t vl_type)
done:
if(ret_value == NULL)
if(new_dset != NULL) {
- if(new_dset->dcpl_id != 0)
- (void)H5I_dec_ref(new_dset->dcpl_id, FALSE);
+ if(new_dset->dcpl_id != 0 && H5I_dec_ref(new_dset->dcpl_id) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, FAIL, "can't decrement temporary datatype ID")
new_dset = H5FL_FREE(H5D_shared_t, new_dset);
} /* end if */
@@ -1174,10 +1174,8 @@ done:
} /* end if */
if(new_dset->shared->space && H5S_close(new_dset->shared->space) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, NULL, "unable to release dataspace")
- if(new_dset->shared->type) {
- if(H5I_dec_ref(new_dset->shared->type_id, FALSE) < 0)
- HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, NULL, "unable to release datatype")
- } /* end if */
+ if(new_dset->shared->type && H5I_dec_ref(new_dset->shared->type_id) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, NULL, "unable to release datatype")
if(H5F_addr_defined(new_dset->oloc.addr)) {
if(H5O_close(&(new_dset->oloc)) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, NULL, "unable to release object header")
@@ -1186,7 +1184,7 @@ done:
HDONE_ERROR(H5E_DATASET, H5E_CANTDELETE, NULL, "unable to delete object header")
} /* end if */
} /* end if */
- if(new_dset->shared->dcpl_id != 0 && H5I_dec_ref(new_dset->shared->dcpl_id, FALSE) < 0)
+ if(new_dset->shared->dcpl_id != 0 && H5I_dec_ref(new_dset->shared->dcpl_id) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, NULL, "unable to decrement ref count on property list")
new_dset->shared = H5FL_FREE(H5D_shared_t, new_dset->shared);
} /* end if */
@@ -1437,7 +1435,7 @@ done:
HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to release dataspace")
if(dataset->shared->type) {
if(dataset->shared->type_id > 0) {
- if(H5I_dec_ref(dataset->shared->type_id, FALSE) < 0)
+ if(H5I_dec_ref(dataset->shared->type_id) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to release datatype")
} /* end if */
else {
@@ -1485,9 +1483,9 @@ H5D_close(H5D_t *dataset)
dataset->shared->fo_count--;
if(dataset->shared->fo_count == 0) {
- /* Flush the dataset's information */
+ /* Flush the dataset's information. Continue to close even if it fails. */
if(H5D_flush_real(dataset, H5AC_dxpl_id) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to flush cached dataset info")
+ HDONE_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to flush cached dataset info")
/* Free the data sieve buffer, if it's been allocated */
if(dataset->shared->cache.contig.sieve_buf) {
@@ -1521,9 +1519,10 @@ H5D_close(H5D_t *dataset)
dataset->shared->cache.chunk.single_chunk_info = NULL;
} /* end if */
- /* Flush and destroy chunks in the cache */
+ /* Flush and destroy chunks in the cache. Continue to close even if
+ * it fails. */
if(H5D_chunk_dest(dataset->oloc.file, H5AC_dxpl_id, dataset) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "unable to destroy chunk cache")
+ HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "unable to destroy chunk cache")
break;
case H5D_COMPACT:
@@ -1549,8 +1548,8 @@ H5D_close(H5D_t *dataset)
* Release datatype, dataspace and creation property list -- there isn't
* much we can do if one of these fails, so we just continue.
*/
- free_failed = (unsigned)(H5I_dec_ref(dataset->shared->type_id, FALSE) < 0 || H5S_close(dataset->shared->space) < 0 ||
- H5I_dec_ref(dataset->shared->dcpl_id, FALSE) < 0);
+ free_failed = (unsigned)(H5I_dec_ref(dataset->shared->type_id) < 0 || H5S_close(dataset->shared->space) < 0 ||
+ H5I_dec_ref(dataset->shared->dcpl_id) < 0);
/* Remove the dataset from the list of opened objects in the file */
if(H5FO_top_decr(dataset->oloc.file, dataset->oloc.addr) < 0)
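A note on the recurring pattern in the hunks above: calls to H5I_dec_ref() drop their former second (boolean) argument, and error handling in the teardown paths switches from HGOTO_ERROR, which records the error and jumps to the done: label, to HDONE_ERROR, which records the error and lets the remaining cleanup run ("Continue to close even if it fails"). The sketch below is a minimal, self-contained illustration of that "keep cleaning up on error" control flow; the helper functions and error reporting are hypothetical stand-ins, not HDF5's actual macro machinery.

    /* Minimal sketch of the cleanup pattern used above: every release step
     * runs even when an earlier step fails, and the first failure is still
     * reflected in the return value.  All helpers here are hypothetical. */
    #include <stdio.h>

    static int flush_data(void)     { return -1; } /* pretend the flush fails */
    static int release_chunks(void) { return 0; }
    static int release_type(void)   { return 0; }

    static void report_error(const char *msg) { fprintf(stderr, "error: %s\n", msg); }

    static int dataset_close(void)
    {
        int ret_value = 0;

        /* Old style (HGOTO_ERROR): report and jump to the cleanup label,
         * skipping the releases below.
         * New style (HDONE_ERROR): report and keep going. */
        if (flush_data() < 0) {
            report_error("unable to flush cached dataset info");
            ret_value = -1;
        }

        if (release_chunks() < 0) {
            report_error("unable to destroy chunk cache");
            ret_value = -1;
        }

        if (release_type() < 0) {
            report_error("unable to release datatype");
            ret_value = -1;
        }

        return ret_value; /* nonzero if any step failed */
    }

    int main(void)
    {
        printf("dataset_close() returned %d\n", dataset_close());
        return 0;
    }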