diff options
author | Quincey Koziol <koziol@hdfgroup.org> | 2005-07-09 05:02:14 (GMT) |
---|---|---|
committer | Quincey Koziol <koziol@hdfgroup.org> | 2005-07-09 05:02:14 (GMT) |
commit | 59fbb04095a4aa0019fc8623bad9d5bf72b5ddec (patch) | |
tree | 3231a5c6daeaa218a6807702937fc45b98a560d6 /src/H5D.c | |
parent | 28c2b7f561049e6119ba6302099bd1083ba3ef42 (diff) | |
download | hdf5-59fbb04095a4aa0019fc8623bad9d5bf72b5ddec.zip hdf5-59fbb04095a4aa0019fc8623bad9d5bf72b5ddec.tar.gz hdf5-59fbb04095a4aa0019fc8623bad9d5bf72b5ddec.tar.bz2 |
[svn-r11058] Purpose:
Bug fix
Description:
Correct error where the dataset sieve buffer was inadvertently disabled,
resulting in poor raw data I/O performance for certain I/O patterns.
Solution:
Hook dataset sieve buffer size setting back up.
Platforms tested:
FreeBSD 4.11 (sleipnir)
Solaris 2.9 (shanti)
Diffstat (limited to 'src/H5D.c')
-rw-r--r-- | src/H5D.c | 21 |
1 file changed, 19 insertions, 2 deletions
@@ -1580,7 +1580,7 @@ H5D_new(hid_t dcpl_id, hbool_t creating, hbool_t vl_type) H5D_shared_t *new_dset = NULL; /* New dataset object */ H5D_shared_t *ret_value; /* Return value */ - FUNC_ENTER_NOAPI(H5D_new, NULL) + FUNC_ENTER_NOAPI_NOINIT(H5D_new) if (NULL==(new_dset = H5FL_MALLOC(H5D_shared_t))) HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") @@ -2176,6 +2176,9 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space tmp_size = H5S_GET_EXTENT_NPOINTS(new_dset->shared->space) * H5T_get_size(new_dset->shared->type); H5_ASSIGN_OVERFLOW(new_dset->shared->layout.u.contig.size,tmp_size,hssize_t,hsize_t); + + /* Get the sieve buffer size for this dataset */ + new_dset->shared->cache.contig.sieve_buf_size = H5F_SIEVE_BUF_SIZE(loc->file); } /* end case */ break; @@ -2556,6 +2559,9 @@ H5D_open_oid(const H5G_entry_t *ent, hid_t dxpl_id) /* Set the I/O functions for this layout type */ dataset->shared->io_ops.readvv=H5D_contig_readvv; dataset->shared->io_ops.writevv=H5D_contig_writevv; + + /* Get the sieve buffer size for this dataset */ + dataset->shared->cache.contig.sieve_buf_size = H5F_SIEVE_BUF_SIZE(ent->file); break; case H5D_CHUNKED: @@ -2767,7 +2773,18 @@ H5D_close(H5D_t *dataset) /* Free the data sieve buffer, if it's been allocated */ if(dataset->shared->cache.contig.sieve_buf) { assert(dataset->shared->layout.type!=H5D_COMPACT); /* We should never have a sieve buffer for compact storage */ - assert(dataset->shared->cache.contig.sieve_dirty==0); /* The buffer had better be flushed... 
*/ + + /* Flush the raw data buffer, if its dirty */ + if (dataset->shared->cache.contig.sieve_dirty) { + /* Write dirty data sieve buffer to file */ + if (H5F_block_write(dataset->ent.file, H5FD_MEM_DRAW, dataset->shared->cache.contig.sieve_loc, + dataset->shared->cache.contig.sieve_size, H5AC_dxpl_id, dataset->shared->cache.contig.sieve_buf) < 0) + HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed") + + /* Reset sieve buffer dirty flag */ + dataset->shared->cache.contig.sieve_dirty=0; + } /* end if */ + dataset->shared->cache.contig.sieve_buf = H5FL_BLK_FREE (sieve_buf,dataset->shared->cache.contig.sieve_buf); } /* end if */ |