path: root/src/H5D.c
author    Quincey Koziol <koziol@hdfgroup.org>    2004-08-17 07:30:18 (GMT)
committer Quincey Koziol <koziol@hdfgroup.org>    2004-08-17 07:30:18 (GMT)
commit    2d3c6215f26b94e6fa0cb46178ea5c60fa075d66 (patch)
tree      4fc137e84fd1a4ca4925de9b35f61bc40207767b /src/H5D.c
parent    3646a3f83dc32b46c18fe2bfde9a2acdc70680f4 (diff)
[svn-r9101] Purpose:
    Bug fix

Description:
    1 - Dataset contiguous storage cache information had a bug where it was
        possible to access invalid cache information if the cache wasn't filled
        on the first trip through the loop over the list of offset/length
        vectors.
    2 - Additionally, the contiguous storage cache information was being used in
        certain circumstances from the chunked dataset I/O code path, which was
        generally fatal, since the chunk storage and contiguous storage
        information were stored together in a union.

Solution:
    1 - Avoid special-casing the first trip through the loop over offset/length
        I/O vectors and always check whether the contiguous storage sieve buffer
        is NULL.
    2 - Change the union containing the chunk and contiguous storage cache
        information into a struct, allowing both to be used at the same time.

Platforms tested:
    FreeBSD 4.10 (sleipnir)
    h5committested
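For context, the second fix is a change to the dataset structure's cache member, which lives in the dataset package header and therefore does not appear in the H5D.c diff below. A minimal sketch of the idea, using hypothetical type and field names rather than the real HDF5 definitions:

    #include <stddef.h>

    /* Illustrative stand-ins for the real cache types (names here are
     * hypothetical, not the actual HDF5 definitions). */
    typedef struct {
        unsigned char *sieve_buf;    /* data sieve buffer */
        size_t         sieve_size;   /* size of the sieve buffer */
        unsigned       sieve_dirty;  /* nonzero if the buffer needs flushing */
    } contig_cache_sketch_t;

    typedef struct {
        size_t nbytes;               /* bytes of chunk data currently cached */
        /* ... other chunk-cache bookkeeping ... */
    } chunk_cache_sketch_t;

    /* Before the fix (sketch): the two caches overlapped in a union, so any
     * use of .contig from the chunked I/O code path clobbered .chunk, and
     * vice versa. */
    typedef union {
        contig_cache_sketch_t contig;
        chunk_cache_sketch_t  chunk;
    } dataset_cache_union_sketch_t;

    /* After the fix (sketch): a struct keeps both members distinct, which is
     * what lets the close/flush code in the diff below touch cache.contig
     * unconditionally, regardless of the dataset's layout type. */
    typedef struct {
        contig_cache_sketch_t contig;
        chunk_cache_sketch_t  chunk;
    } dataset_cache_struct_sketch_t;

The trade-off is a slightly larger per-dataset structure, in exchange for eliminating the aliasing between the two caches.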
Diffstat (limited to 'src/H5D.c')
-rw-r--r--    src/H5D.c    35
1 file changed, 20 insertions, 15 deletions
diff --git a/src/H5D.c b/src/H5D.c
index be85340..434faee 100644
--- a/src/H5D.c
+++ b/src/H5D.c
@@ -2811,14 +2811,16 @@ H5D_close(H5D_t *dataset)
H5D_istore_stats(dataset, FALSE);
#endif /* H5F_ISTORE_DEBUG */
+ /* Free the data sieve buffer, if it's been allocated */
+ if(dataset->cache.contig.sieve_buf) {
+ assert(dataset->layout.type!=H5D_COMPACT); /* We should never have a sieve buffer for compact storage */
+ assert(dataset->cache.contig.sieve_dirty==0); /* The buffer had better be flushed... */
+ dataset->cache.contig.sieve_buf = H5FL_BLK_FREE (sieve_buf,dataset->cache.contig.sieve_buf);
+ } /* end if */
+
/* Free cached information for each kind of dataset */
switch(dataset->layout.type) {
case H5D_CONTIGUOUS:
- /* Free the data sieve buffer, if it's been allocated */
- if(dataset->cache.contig.sieve_buf) {
- assert(dataset->cache.contig.sieve_dirty==0); /* The buffer had better be flushed... */
- dataset->cache.contig.sieve_buf = H5FL_BLK_FREE (sieve_buf,dataset->cache.contig.sieve_buf);
- } /* end if */
break;
case H5D_CHUNKED:
@@ -4035,19 +4037,22 @@ H5D_flush(H5F_t *f, hid_t dxpl_id, unsigned flags)
if(NULL==(dataset=H5I_object_verify(id_list[j], H5I_DATASET)))
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to get dataset object")
+ /* Flush the raw data buffer, if we have a dirty one */
+ if (dataset->cache.contig.sieve_buf && dataset->cache.contig.sieve_dirty) {
+ assert(dataset->layout.type!=H5D_COMPACT);
+
+ /* Write dirty data sieve buffer to file */
+ if (H5F_block_write(f, H5FD_MEM_DRAW, dataset->cache.contig.sieve_loc,
+ dataset->cache.contig.sieve_size, dxpl_id, dataset->cache.contig.sieve_buf) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed")
+
+ /* Reset sieve buffer dirty flag */
+ dataset->cache.contig.sieve_dirty=0;
+ } /* end if */
+
/* Flush cached information for each kind of dataset */
switch(dataset->layout.type) {
case H5D_CONTIGUOUS:
- /* flush the raw data buffer, if we have a dirty one */
- if (dataset->cache.contig.sieve_buf && dataset->cache.contig.sieve_dirty) {
- /* Write dirty data sieve buffer to file */
- if (H5F_block_write(f, H5FD_MEM_DRAW, dataset->cache.contig.sieve_loc,
- dataset->cache.contig.sieve_size, dxpl_id, dataset->cache.contig.sieve_buf) < 0)
- HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed")
-
- /* Reset sieve buffer dirty flag */
- dataset->cache.contig.sieve_dirty=0;
- } /* end if */
break;
case H5D_CHUNKED: