path: root/src/H5Dchunk.c
author    Neil Fortner <nfortne2@hdfgroup.org>  2021-05-04 17:39:49 (GMT)
committer Neil Fortner <nfortne2@hdfgroup.org>  2021-05-04 17:39:49 (GMT)
commit    5de2d1a1866819c1edd1c849d8e28dfb2d32b317 (patch)
tree      3b187150318f7a0ff6b4b43270f3388ab626ff6e /src/H5Dchunk.c
parent    8c4af327c6488b9ea3cddee1ae14bf15f0a8e2ea (diff)
Add selection I/O to chunk code, used when: not using the chunk cache, no
datatype conversion, no I/O filters, no page buffer, and not using collective
I/O. Requires the global variable H5_use_selection_io_g to be set to TRUE.
Implemented selection-to-vector I/O translation at the file driver layer.
Diffstat (limited to 'src/H5Dchunk.c')
-rw-r--r--  src/H5Dchunk.c  766
1 file changed, 576 insertions(+), 190 deletions(-)
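For orientation, a minimal sketch of how the new path might be exercised from a test program. It assumes the internal global H5_use_selection_io_g named in the commit message is visible to code linked against the library (it is not public API); the file name, dataset name, and buffer are illustrative only, and the dataset is assumed to be chunked with no filters and no datatype conversion so the eligibility checks below pass.

    #include "hdf5.h"

    /* Internal library switch named in the commit message (assumption:
     * the symbol is visible to a program linked against the library). */
    extern hbool_t H5_use_selection_io_g;

    static herr_t
    read_chunked_with_selection_io(void)
    {
        hid_t  file_id = -1;
        hid_t  dset_id = -1;
        int    buf[1024];   /* illustrative read buffer */
        herr_t ret     = -1;

        /* Request selection I/O; the chunk code still falls back to the
         * legacy per-chunk path if any eligibility check fails (chunk
         * cache in use, datatype conversion, filters, page buffer,
         * collective I/O). */
        H5_use_selection_io_g = 1; /* TRUE */

        if ((file_id = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
            goto done;
        if ((dset_id = H5Dopen2(file_id, "chunked_dset", H5P_DEFAULT)) < 0)
            goto done;

        /* Memory type matches the file type, so no conversion (assumption) */
        if (H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
            goto done;

        ret = 0;

    done:
        if (dset_id >= 0)
            H5Dclose(dset_id);
        if (file_id >= 0)
            H5Fclose(file_id);
        return ret;
    }

When the checks pass, the chunk code in the hunks below gathers every selected chunk's memory space, file space, and address and issues a single H5F_shared_select_read/H5F_shared_select_write call, with the file driver layer translating the selections into offset/length vectors; otherwise it uses the existing per-chunk contiguous/compact path.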
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index 9d47a4a..f9f56f6 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -59,6 +59,7 @@
#include "H5Iprivate.h" /* IDs */
#include "H5MMprivate.h" /* Memory management */
#include "H5MFprivate.h" /* File memory management */
+#include "H5PBprivate.h" /* Page Buffer */
#include "H5VMprivate.h" /* Vector and array functions */
/****************/
@@ -70,6 +71,7 @@
#define H5D_CHUNK_GET_NODE_INFO(map, node) \
(map->use_single ? map->single_chunk_info : (H5D_chunk_info_t *)H5SL_item(node))
#define H5D_CHUNK_GET_NEXT_NODE(map, node) (map->use_single ? (H5SL_node_t *)NULL : H5SL_next(node))
+#define H5D_CHUNK_GET_NODE_COUNT(map) (map->use_single ? (size_t)1 : H5SL_count(map->sel_chunks))
/* Sanity check on chunk index types: commonly used by a lot of routines in this file */
#define H5D_CHUNK_STORAGE_INDEX_CHK(storage) \
@@ -298,6 +300,7 @@ static herr_t H5D__chunk_file_cb(void *elem, const H5T_t *type, unsigned ndims
void *fm);
static herr_t H5D__chunk_mem_cb(void *elem, const H5T_t *type, unsigned ndims, const hsize_t *coords,
void *fm);
+static htri_t H5D__chunk_may_use_select_io(const H5D_io_info_t *io_info);
static unsigned H5D__chunk_hash_val(const H5D_shared_t *shared, const hsize_t *scaled);
static herr_t H5D__chunk_flush_entry(const H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t reset);
static herr_t H5D__chunk_cache_evict(const H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t flush);
@@ -2436,6 +2439,74 @@ done:
} /* end H5D__chunk_cacheable() */
/*-------------------------------------------------------------------------
+ * Function: H5D__chunk_may_use_select_io
+ *
+ * Purpose: A small internal function to check if it may be possible to
+ * use selection I/O.
+ *
+ * Return: TRUE/FALSE on success, FAIL on failure
+ *
+ * Programmer: Neil Fortner
+ * 4 May 2021
+ *
+ *-------------------------------------------------------------------------
+ */
+static htri_t
+H5D__chunk_may_use_select_io(const H5D_io_info_t *io_info)
+{
+ const H5D_t *dataset = io_info->dset; /* Local pointer to dataset info */
+ htri_t ret_value = FAIL; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity check */
+ HDassert(io_info);
+ HDassert(dataset);
+
+ /* Don't use selection I/O if it's globally disabled, there is a type
+ * conversion, or if there are filters on the dataset (for now) */
+ if (!H5_use_selection_io_g || io_info->io_ops.single_read != H5D__select_read ||
+ dataset->shared->dcpl_cache.pline.nused > 0)
+ ret_value = FALSE;
+ else {
+ htri_t page_buf_enabled;
+
+ /* Check if the page buffer is enabled */
+ if ((page_buf_enabled = H5PB_enabled(io_info->f_sh, H5FD_MEM_DRAW)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't check if page buffer is enabled")
+ if (page_buf_enabled)
+ ret_value = FALSE;
+ else {
+ /* Check if chunks in this dataset may be cached, if so don't use
+ * selection I/O (for now). Note that chunks temporarily cached for
+ * the purpose of writing the fill value don't count, since they are
+ * immediately evicted. */
+#ifdef H5_HAVE_PARALLEL
+ /* If MPI based VFD is used and the file is opened for write access,
+ * must bypass the chunk-cache scheme because other MPI processes
+ * could be writing to other elements in the same chunk.
+ */
+ if (io_info->using_mpi_vfd && (H5F_ACC_RDWR & H5F_INTENT(dataset->oloc.file)))
+ ret_value = TRUE;
+ else {
+#endif /* H5_HAVE_PARALLEL */
+ /* Check if the chunk is too large to keep in the cache */
+ H5_CHECK_OVERFLOW(dataset->shared->layout.u.chunk.size, uint32_t, size_t);
+ if ((size_t)dataset->shared->layout.u.chunk.size > dataset->shared->cache.chunk.nbytes_max)
+ ret_value = TRUE;
+ else
+ ret_value = FALSE;
+#ifdef H5_HAVE_PARALLEL
+ } /* end else */
+#endif /* H5_HAVE_PARALLEL */
+ } /* end else */
+ } /* end else */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__chunk_may_use_select_io() */
+
+/*-------------------------------------------------------------------------
* Function: H5D__chunk_read
*
* Purpose: Read from a chunked dataset.
@@ -2452,16 +2523,18 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_
const H5S_t H5_ATTR_UNUSED *file_space, const H5S_t H5_ATTR_UNUSED *mem_space,
H5D_chunk_map_t *fm)
{
- H5SL_node_t * chunk_node; /* Current node in chunk skip list */
- H5D_io_info_t nonexistent_io_info; /* "nonexistent" I/O info object */
- H5D_io_info_t ctg_io_info; /* Contiguous I/O info object */
- H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */
- H5D_io_info_t cpt_io_info; /* Compact I/O info object */
- H5D_storage_t cpt_store; /* Chunk storage information as compact dataset */
- hbool_t cpt_dirty; /* Temporary placeholder for compact storage "dirty" flag */
- uint32_t src_accessed_bytes = 0; /* Total accessed size in a chunk */
- hbool_t skip_missing_chunks = FALSE; /* Whether to skip missing chunks */
- herr_t ret_value = SUCCEED; /*return value */
+ H5SL_node_t * chunk_node; /* Current node in chunk skip list */
+ H5D_io_info_t nonexistent_io_info; /* "nonexistent" I/O info object */
+ uint32_t src_accessed_bytes = 0; /* Total accessed size in a chunk */
+ hbool_t skip_missing_chunks = FALSE; /* Whether to skip missing chunks */
+ H5S_t ** chunk_mem_spaces = NULL; /* Array of chunk memory spaces */
+ H5S_t * chunk_mem_spaces_static[8]; /* Static buffer for chunk_mem_spaces */
+ H5S_t ** chunk_file_spaces = NULL; /* Array of chunk file spaces */
+ H5S_t * chunk_file_spaces_static[8]; /* Static buffer for chunk_file_spaces */
+ haddr_t * chunk_addrs = NULL; /* Array of chunk addresses */
+ haddr_t chunk_addrs_static[8]; /* Static buffer for chunk_addrs */
+ htri_t use_selection_io = FALSE; /* Whether to use selection I/O */
+ herr_t ret_value = SUCCEED; /*return value */
FUNC_ENTER_STATIC
@@ -2475,23 +2548,6 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_
H5MM_memcpy(&nonexistent_io_info, io_info, sizeof(nonexistent_io_info));
nonexistent_io_info.layout_ops = *H5D_LOPS_NONEXISTENT;
- /* Set up contiguous I/O info object */
- H5MM_memcpy(&ctg_io_info, io_info, sizeof(ctg_io_info));
- ctg_io_info.store = &ctg_store;
- ctg_io_info.layout_ops = *H5D_LOPS_CONTIG;
-
- /* Initialize temporary contiguous storage info */
- H5_CHECKED_ASSIGN(ctg_store.contig.dset_size, hsize_t, io_info->dset->shared->layout.u.chunk.size,
- uint32_t);
-
- /* Set up compact I/O info object */
- H5MM_memcpy(&cpt_io_info, io_info, sizeof(cpt_io_info));
- cpt_io_info.store = &cpt_store;
- cpt_io_info.layout_ops = *H5D_LOPS_COMPACT;
-
- /* Initialize temporary compact storage info */
- cpt_store.compact.dirty = &cpt_dirty;
-
{
const H5O_fill_t *fill = &(io_info->dset->shared->dcpl_cache.fill); /* Fill value info */
H5D_fill_value_t fill_status; /* Fill value status */
@@ -2509,80 +2565,220 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_
skip_missing_chunks = TRUE;
}
- /* Iterate through nodes in chunk skip list */
- chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm);
- while (chunk_node) {
- H5D_chunk_info_t *chunk_info; /* Chunk information */
- H5D_chunk_ud_t udata; /* Chunk index pass-through */
+ /* Check if we're performing selection I/O */
+ if ((use_selection_io = H5D__chunk_may_use_select_io(io_info)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't check if selection I/O is possible")
+
+ /* Different blocks depending on whether we're using selection I/O */
+ if (use_selection_io) {
+ size_t num_chunks;
+ size_t element_sizes[2] = {type_info->dst_type_size, 0};
+ void * bufs[2] = {io_info->u.rbuf, NULL};
+
+ /* Cache number of chunks */
+ num_chunks = H5D_CHUNK_GET_NODE_COUNT(fm);
+
+ /* Allocate arrays of dataspaces and offsets for use with selection I/O,
+ * or point to static buffers */
+ HDassert(sizeof(chunk_mem_spaces_static) / sizeof(chunk_mem_spaces_static[0]) ==
+ sizeof(chunk_file_spaces_static) / sizeof(chunk_file_spaces_static[0]));
+ HDassert(sizeof(chunk_mem_spaces_static) / sizeof(chunk_mem_spaces_static[0]) ==
+ sizeof(chunk_addrs_static) / sizeof(chunk_addrs_static[0]));
+ if (num_chunks > (sizeof(chunk_mem_spaces_static) / sizeof(chunk_mem_spaces_static[0]))) {
+ if (NULL == (chunk_mem_spaces = H5MM_malloc(num_chunks * sizeof(H5S_t *))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for memory space list")
+ if (NULL == (chunk_file_spaces = H5MM_malloc(num_chunks * sizeof(H5S_t *))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for file space list")
+ if (NULL == (chunk_addrs = H5MM_malloc(num_chunks * sizeof(haddr_t))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for chunk address list")
+ } /* end if */
+ else {
+ chunk_mem_spaces = chunk_mem_spaces_static;
+ chunk_file_spaces = chunk_file_spaces_static;
+ chunk_addrs = chunk_addrs_static;
+ } /* end else */
- /* Get the actual chunk information from the skip list node */
- chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node);
+ /* Reset num_chunks */
+ num_chunks = 0;
- /* Get the info for the chunk in the file */
- if (H5D__chunk_lookup(io_info->dset, chunk_info->scaled, &udata) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+ /* Iterate through nodes in chunk skip list */
+ chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm);
+ while (chunk_node) {
+ H5D_chunk_info_t *chunk_info; /* Chunk information */
+ H5D_chunk_ud_t udata; /* Chunk index pass-through */
- /* Sanity check */
- HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
- (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0));
+ /* Get the actual chunk information from the skip list node */
+ chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node);
- /* Check for non-existant chunk & skip it if appropriate */
- if (H5F_addr_defined(udata.chunk_block.offset) || UINT_MAX != udata.idx_hint ||
- !skip_missing_chunks) {
- H5D_io_info_t *chk_io_info; /* Pointer to I/O info object for this chunk */
- void * chunk = NULL; /* Pointer to locked chunk buffer */
- htri_t cacheable; /* Whether the chunk is cacheable */
+ /* Get the info for the chunk in the file */
+ if (H5D__chunk_lookup(io_info->dset, chunk_info->scaled, &udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
- /* Set chunk's [scaled] coordinates */
- io_info->store->chunk.scaled = chunk_info->scaled;
+ /* There should be no chunks cached */
+ HDassert(UINT_MAX == udata.idx_hint);
- /* Determine if we should use the chunk cache */
- if ((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, FALSE)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable")
- if (cacheable) {
- /* Load the chunk into cache and lock it. */
+ /* Sanity check */
+ HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
+ (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0));
+
+ /* Check for nonexistent chunk & skip it if appropriate */
+ if (H5F_addr_defined(udata.chunk_block.offset)) {
+ /* Add chunk to list for selection I/O */
+ chunk_mem_spaces[num_chunks] = chunk_info->mspace;
+ chunk_file_spaces[num_chunks] = chunk_info->fspace;
+ chunk_addrs[num_chunks] = udata.chunk_block.offset;
+ num_chunks++;
+ } /* end if */
+ else if (!skip_missing_chunks) {
+ /* Perform the actual read operation from the nonexistent chunk
+ */
+ if ((io_info->io_ops.single_read)(&nonexistent_io_info, type_info,
+ (hsize_t)chunk_info->chunk_points, chunk_info->fspace,
+ chunk_info->mspace) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked read failed")
+ } /* end if */
- /* Compute # of bytes accessed in chunk */
- H5_CHECK_OVERFLOW(type_info->src_type_size, /*From:*/ size_t, /*To:*/ uint32_t);
- src_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->src_type_size;
+ /* Advance to next chunk in list */
+ chunk_node = H5D_CHUNK_GET_NEXT_NODE(fm, chunk_node);
+ } /* end while */
- /* Lock the chunk into the cache */
- if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, FALSE, FALSE)))
- HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
+ /* Issue selection I/O call (we can skip the page buffer because we've
+ * already verified it won't be used, and the metadata accumulator
+ * because this is raw data) */
+ if (num_chunks > 0 &&
+ H5F_shared_select_read(H5F_SHARED(io_info->dset->oloc.file), (uint32_t)num_chunks, H5FD_MEM_DRAW,
+ chunk_mem_spaces, chunk_file_spaces, chunk_addrs, element_sizes, bufs) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunk selection read failed")
+
+ /* Clean up memory */
+ if (chunk_mem_spaces != chunk_mem_spaces_static) {
+ HDassert(chunk_mem_spaces);
+ HDassert(chunk_file_spaces != chunk_file_spaces_static);
+ HDassert(chunk_addrs != chunk_addrs_static);
+ H5MM_free(chunk_mem_spaces);
+ chunk_mem_spaces = NULL;
+ H5MM_free(chunk_file_spaces);
+ chunk_file_spaces = NULL;
+ H5MM_free(chunk_addrs);
+ chunk_addrs = NULL;
+ } /* end if */
+ } /* end if */
+ else {
+ H5D_io_info_t ctg_io_info; /* Contiguous I/O info object */
+ H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */
+ H5D_io_info_t cpt_io_info; /* Compact I/O info object */
+ H5D_storage_t cpt_store; /* Chunk storage information as compact dataset */
+ hbool_t cpt_dirty; /* Temporary placeholder for compact storage "dirty" flag */
+
+ /* Set up contiguous I/O info object */
+ H5MM_memcpy(&ctg_io_info, io_info, sizeof(ctg_io_info));
+ ctg_io_info.store = &ctg_store;
+ ctg_io_info.layout_ops = *H5D_LOPS_CONTIG;
+
+ /* Initialize temporary contiguous storage info */
+ H5_CHECKED_ASSIGN(ctg_store.contig.dset_size, hsize_t, io_info->dset->shared->layout.u.chunk.size,
+ uint32_t);
+
+ /* Set up compact I/O info object */
+ H5MM_memcpy(&cpt_io_info, io_info, sizeof(cpt_io_info));
+ cpt_io_info.store = &cpt_store;
+ cpt_io_info.layout_ops = *H5D_LOPS_COMPACT;
+
+ /* Initialize temporary compact storage info */
+ cpt_store.compact.dirty = &cpt_dirty;
+
+ /* Iterate through nodes in chunk skip list */
+ chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm);
+ while (chunk_node) {
+ H5D_chunk_info_t *chunk_info; /* Chunk information */
+ H5D_chunk_ud_t udata; /* Chunk index pass-through */
+ htri_t cacheable; /* Whether the chunk is cacheable */
+
+ /* Get the actual chunk information from the skip list node */
+ chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node);
+
+ /* Get the info for the chunk in the file */
+ if (H5D__chunk_lookup(io_info->dset, chunk_info->scaled, &udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
- /* Set up the storage buffer information for this chunk */
- cpt_store.compact.buf = chunk;
+ /* Sanity check */
+ HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
+ (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0));
- /* Point I/O info at contiguous I/O info for this chunk */
- chk_io_info = &cpt_io_info;
- } /* end if */
- else if (H5F_addr_defined(udata.chunk_block.offset)) {
- /* Set up the storage address information for this chunk */
- ctg_store.contig.dset_addr = udata.chunk_block.offset;
+ /* Check for nonexistent chunk & skip it if appropriate */
+ if (H5F_addr_defined(udata.chunk_block.offset) || UINT_MAX != udata.idx_hint ||
+ !skip_missing_chunks) {
+ H5D_io_info_t *chk_io_info; /* Pointer to I/O info object for this chunk */
+ void * chunk = NULL; /* Pointer to locked chunk buffer */
- /* Point I/O info at temporary I/O info for this chunk */
- chk_io_info = &ctg_io_info;
- } /* end else if */
- else {
- /* Point I/O info at "nonexistent" I/O info for this chunk */
- chk_io_info = &nonexistent_io_info;
- } /* end else */
+ /* Set chunk's [scaled] coordinates */
+ io_info->store->chunk.scaled = chunk_info->scaled;
- /* Perform the actual read operation */
- if ((io_info->io_ops.single_read)(chk_io_info, type_info, (hsize_t)chunk_info->chunk_points,
- chunk_info->fspace, chunk_info->mspace) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked read failed")
+ /* Determine if we should use the chunk cache */
+ if ((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable")
+ if (cacheable) {
+ /* Load the chunk into cache and lock it. */
- /* Release the cache lock on the chunk. */
- if (chunk && H5D__chunk_unlock(io_info, &udata, FALSE, chunk, src_accessed_bytes) < 0)
- HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
- } /* end if */
+ /* Compute # of bytes accessed in chunk */
+ H5_CHECK_OVERFLOW(type_info->src_type_size, /*From:*/ size_t, /*To:*/ uint32_t);
+ src_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->src_type_size;
- /* Advance to next chunk in list */
- chunk_node = H5D_CHUNK_GET_NEXT_NODE(fm, chunk_node);
- } /* end while */
+ /* Lock the chunk into the cache */
+ if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, FALSE, FALSE)))
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
+
+ /* Set up the storage buffer information for this chunk */
+ cpt_store.compact.buf = chunk;
+
+ /* Point I/O info at contiguous I/O info for this chunk */
+ chk_io_info = &cpt_io_info;
+ } /* end if */
+ else if (H5F_addr_defined(udata.chunk_block.offset)) {
+ /* Set up the storage address information for this chunk */
+ ctg_store.contig.dset_addr = udata.chunk_block.offset;
+
+ /* Point I/O info at temporary I/O info for this chunk */
+ chk_io_info = &ctg_io_info;
+ } /* end else if */
+ else {
+ /* Point I/O info at "nonexistent" I/O info for this chunk */
+ chk_io_info = &nonexistent_io_info;
+ } /* end else */
+
+ /* Perform the actual read operation */
+ if ((io_info->io_ops.single_read)(chk_io_info, type_info, (hsize_t)chunk_info->chunk_points,
+ chunk_info->fspace, chunk_info->mspace) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked read failed")
+
+ /* Release the cache lock on the chunk. */
+ if (chunk && H5D__chunk_unlock(io_info, &udata, FALSE, chunk, src_accessed_bytes) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
+ } /* end if */
+
+ /* Advance to next chunk in list */
+ chunk_node = H5D_CHUNK_GET_NEXT_NODE(fm, chunk_node);
+ } /* end while */
+ } /* end else */
done:
+ /* Cleanup on failure */
+ if (ret_value < 0) {
+ if (chunk_mem_spaces != chunk_mem_spaces_static)
+ chunk_mem_spaces = H5MM_xfree(chunk_mem_spaces);
+ if (chunk_file_spaces != chunk_file_spaces_static)
+ chunk_file_spaces = H5MM_xfree(chunk_file_spaces);
+ if (chunk_addrs != chunk_addrs_static)
+ chunk_addrs = H5MM_xfree(chunk_addrs);
+ } /* end if */
+
+ /* Make sure we cleaned up */
+ HDassert(!chunk_mem_spaces || chunk_mem_spaces == chunk_mem_spaces_static);
+ HDassert(!chunk_file_spaces || chunk_file_spaces == chunk_file_spaces_static);
+ HDassert(!chunk_addrs || chunk_addrs == chunk_addrs_static);
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5D__chunk_read() */
@@ -2603,14 +2799,21 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize
const H5S_t H5_ATTR_UNUSED *file_space, const H5S_t H5_ATTR_UNUSED *mem_space,
H5D_chunk_map_t *fm)
{
- H5SL_node_t * chunk_node; /* Current node in chunk skip list */
- H5D_io_info_t ctg_io_info; /* Contiguous I/O info object */
- H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */
- H5D_io_info_t cpt_io_info; /* Compact I/O info object */
- H5D_storage_t cpt_store; /* Chunk storage information as compact dataset */
- hbool_t cpt_dirty; /* Temporary placeholder for compact storage "dirty" flag */
- uint32_t dst_accessed_bytes = 0; /* Total accessed size in a chunk */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5SL_node_t * chunk_node; /* Current node in chunk skip list */
+ H5D_io_info_t ctg_io_info; /* Contiguous I/O info object */
+ H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */
+ H5D_io_info_t cpt_io_info; /* Compact I/O info object */
+ H5D_storage_t cpt_store; /* Chunk storage information as compact dataset */
+ hbool_t cpt_dirty; /* Temporary placeholder for compact storage "dirty" flag */
+ uint32_t dst_accessed_bytes = 0; /* Total accessed size in a chunk */
+ H5S_t ** chunk_mem_spaces = NULL; /* Array of chunk memory spaces */
+ H5S_t * chunk_mem_spaces_static[8]; /* Static buffer for chunk_mem_spaces */
+ H5S_t ** chunk_file_spaces = NULL; /* Array of chunk file spaces */
+ H5S_t * chunk_file_spaces_static[8]; /* Static buffer for chunk_file_spaces */
+ haddr_t * chunk_addrs = NULL; /* Array of chunk addresses */
+ haddr_t chunk_addrs_static[8]; /* Static buffer for chunk_addrs */
+ htri_t use_selection_io = FALSE; /* Whether to use selection I/O */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -2637,116 +2840,299 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize
/* Initialize temporary compact storage info */
cpt_store.compact.dirty = &cpt_dirty;
- /* Iterate through nodes in chunk skip list */
- chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm);
- while (chunk_node) {
- H5D_chunk_info_t * chunk_info; /* Chunk information */
- H5D_chk_idx_info_t idx_info; /* Chunked index info */
- H5D_io_info_t * chk_io_info; /* Pointer to I/O info object for this chunk */
- void * chunk; /* Pointer to locked chunk buffer */
- H5D_chunk_ud_t udata; /* Index pass-through */
- htri_t cacheable; /* Whether the chunk is cacheable */
- hbool_t need_insert = FALSE; /* Whether the chunk needs to be inserted into the index */
+ /* Check if we're performing selection I/O */
+ if ((use_selection_io = H5D__chunk_may_use_select_io(io_info)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't check if selection I/O is possible")
+
+ /* Different blocks depending on whether we're using selection I/O */
+ if (use_selection_io) {
+ size_t num_chunks;
+ size_t element_sizes[2] = {type_info->dst_type_size, 0};
+ const void *bufs[2] = {io_info->u.wbuf, NULL};
+
+ /* Cache number of chunks */
+ num_chunks = H5D_CHUNK_GET_NODE_COUNT(fm);
+
+ /* Allocate arrays of dataspaces and offsets for use with selection I/O,
+ * or point to static buffers */
+ HDassert(sizeof(chunk_mem_spaces_static) / sizeof(chunk_mem_spaces_static[0]) ==
+ sizeof(chunk_file_spaces_static) / sizeof(chunk_file_spaces_static[0]));
+ HDassert(sizeof(chunk_mem_spaces_static) / sizeof(chunk_mem_spaces_static[0]) ==
+ sizeof(chunk_addrs_static) / sizeof(chunk_addrs_static[0]));
+ if (num_chunks > (sizeof(chunk_mem_spaces_static) / sizeof(chunk_mem_spaces_static[0]))) {
+ if (NULL == (chunk_mem_spaces = H5MM_malloc(num_chunks * sizeof(H5S_t *))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for memory space list")
+ if (NULL == (chunk_file_spaces = H5MM_malloc(num_chunks * sizeof(H5S_t *))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for file space list")
+ if (NULL == (chunk_addrs = H5MM_malloc(num_chunks * sizeof(haddr_t))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for chunk address list")
+ } /* end if */
+ else {
+ chunk_mem_spaces = chunk_mem_spaces_static;
+ chunk_file_spaces = chunk_file_spaces_static;
+ chunk_addrs = chunk_addrs_static;
+ } /* end else */
- /* Get the actual chunk information from the skip list node */
- chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node);
+ /* Reset num_chunks */
+ num_chunks = 0;
- /* Look up the chunk */
- if (H5D__chunk_lookup(io_info->dset, chunk_info->scaled, &udata) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+ /* Iterate through nodes in chunk skip list */
+ chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm);
+ while (chunk_node) {
+ H5D_chunk_info_t * chunk_info; /* Chunk information */
+ H5D_chk_idx_info_t idx_info; /* Chunked index info */
+ H5D_chunk_ud_t udata; /* Index pass-through */
+ htri_t cacheable; /* Whether the chunk is cacheable */
+ hbool_t need_insert = FALSE; /* Whether the chunk needs to be inserted into the index */
- /* Sanity check */
- HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
- (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0));
-
- /* Set chunk's [scaled] coordinates */
- io_info->store->chunk.scaled = chunk_info->scaled;
-
- /* Determine if we should use the chunk cache */
- if ((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, TRUE)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable")
- if (cacheable) {
- /* Load the chunk into cache. But if the whole chunk is written,
- * simply allocate space instead of load the chunk. */
- hbool_t entire_chunk = TRUE; /* Whether whole chunk is selected */
-
- /* Compute # of bytes accessed in chunk */
- H5_CHECK_OVERFLOW(type_info->dst_type_size, /*From:*/ size_t, /*To:*/ uint32_t);
- dst_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->dst_type_size;
-
- /* Determine if we will access all the data in the chunk */
- if (dst_accessed_bytes != ctg_store.contig.dset_size ||
- (chunk_info->chunk_points * type_info->src_type_size) != ctg_store.contig.dset_size ||
- fm->fsel_type == H5S_SEL_POINTS)
- entire_chunk = FALSE;
-
- /* Lock the chunk into the cache */
- if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, entire_chunk, FALSE)))
- HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
-
- /* Set up the storage buffer information for this chunk */
- cpt_store.compact.buf = chunk;
-
- /* Point I/O info at main I/O info for this chunk */
- chk_io_info = &cpt_io_info;
+ /* Get the actual chunk information from the skip list node */
+ chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node);
+
+ /* Get the info for the chunk in the file */
+ if (H5D__chunk_lookup(io_info->dset, chunk_info->scaled, &udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+
+ /* There should be no chunks cached */
+ HDassert(UINT_MAX == udata.idx_hint);
+
+ /* Sanity check */
+ HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
+ (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0));
+
+ /* Set chunk's [scaled] coordinates */
+ io_info->store->chunk.scaled = chunk_info->scaled;
+
+ /* Determine if we should use the chunk cache */
+ if ((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, TRUE)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable")
+ if (cacheable) {
+ /* Load the chunk into cache. But if the whole chunk is written,
+ * simply allocate space instead of load the chunk. */
+ void * chunk; /* Pointer to locked chunk buffer */
+ hbool_t entire_chunk = TRUE; /* Whether whole chunk is selected */
+
+ /* Compute # of bytes accessed in chunk */
+ H5_CHECK_OVERFLOW(type_info->dst_type_size, /*From:*/ size_t, /*To:*/ uint32_t);
+ dst_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->dst_type_size;
+
+ /* Determine if we will access all the data in the chunk */
+ if (dst_accessed_bytes != ctg_store.contig.dset_size ||
+ (chunk_info->chunk_points * type_info->src_type_size) != ctg_store.contig.dset_size ||
+ fm->fsel_type == H5S_SEL_POINTS)
+ entire_chunk = FALSE;
+
+ /* Lock the chunk into the cache */
+ if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, entire_chunk, FALSE)))
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
+
+ /* Set up the storage buffer information for this chunk */
+ cpt_store.compact.buf = chunk;
+
+ /* Perform the actual write operation */
+ if ((io_info->io_ops.single_write)(&cpt_io_info, type_info, (hsize_t)chunk_info->chunk_points,
+ chunk_info->fspace, chunk_info->mspace) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked write failed")
+
+ /* Release the cache lock on the chunk */
+ if (H5D__chunk_unlock(io_info, &udata, TRUE, chunk, dst_accessed_bytes) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
+ } /* end if */
+ else {
+ /* If the chunk hasn't been allocated on disk, do so now. */
+ if (!H5F_addr_defined(udata.chunk_block.offset)) {
+ /* Compose chunked index info struct */
+ idx_info.f = io_info->dset->oloc.file;
+ idx_info.pline = &(io_info->dset->shared->dcpl_cache.pline);
+ idx_info.layout = &(io_info->dset->shared->layout.u.chunk);
+ idx_info.storage = &(io_info->dset->shared->layout.storage.u.chunk);
+
+ /* Set up the size of chunk for user data */
+ udata.chunk_block.length = io_info->dset->shared->layout.u.chunk.size;
+
+ /* Allocate the chunk */
+ if (H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert,
+ chunk_info->scaled) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL,
+ "unable to insert/resize chunk on chunk level")
+
+ /* Make sure the address of the chunk is returned. */
+ if (!H5F_addr_defined(udata.chunk_block.offset))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk address isn't defined")
+
+ /* Cache the new chunk information */
+ H5D__chunk_cinfo_cache_update(&io_info->dset->shared->cache.chunk.last, &udata);
+
+ /* Insert chunk into index */
+ if (need_insert && io_info->dset->shared->layout.storage.u.chunk.ops->insert)
+ if ((io_info->dset->shared->layout.storage.u.chunk.ops->insert)(&idx_info, &udata,
+ NULL) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL,
+ "unable to insert chunk addr into index")
+ } /* end if */
+
+ /* Add chunk to list for selection I/O */
+ chunk_mem_spaces[num_chunks] = chunk_info->mspace;
+ chunk_file_spaces[num_chunks] = chunk_info->fspace;
+ chunk_addrs[num_chunks] = udata.chunk_block.offset;
+ num_chunks++;
+ } /* end else */
+
+ /* Advance to next chunk in list */
+ chunk_node = H5D_CHUNK_GET_NEXT_NODE(fm, chunk_node);
+ } /* end while */
+
+ /* Issue selection I/O call (we can skip the page buffer because we've
+ * already verified it won't be used, and the metadata accumulator
+ * because this is raw data) */
+ if (num_chunks > 0 && H5F_shared_select_write(
+ H5F_SHARED(io_info->dset->oloc.file), (uint32_t)num_chunks, H5FD_MEM_DRAW,
+ chunk_mem_spaces, chunk_file_spaces, chunk_addrs, element_sizes, bufs) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunk selection read failed")
+
+ /* Clean up memory */
+ if (chunk_mem_spaces != chunk_mem_spaces_static) {
+ HDassert(chunk_mem_spaces);
+ HDassert(chunk_file_spaces != chunk_file_spaces_static);
+ HDassert(chunk_addrs != chunk_addrs_static);
+ H5MM_free(chunk_mem_spaces);
+ chunk_mem_spaces = NULL;
+ H5MM_free(chunk_file_spaces);
+ chunk_file_spaces = NULL;
+ H5MM_free(chunk_addrs);
+ chunk_addrs = NULL;
} /* end if */
- else {
- /* If the chunk hasn't been allocated on disk, do so now. */
- if (!H5F_addr_defined(udata.chunk_block.offset)) {
- /* Compose chunked index info struct */
- idx_info.f = io_info->dset->oloc.file;
- idx_info.pline = &(io_info->dset->shared->dcpl_cache.pline);
- idx_info.layout = &(io_info->dset->shared->layout.u.chunk);
- idx_info.storage = &(io_info->dset->shared->layout.storage.u.chunk);
-
- /* Set up the size of chunk for user data */
- udata.chunk_block.length = io_info->dset->shared->layout.u.chunk.size;
-
- /* Allocate the chunk */
- if (H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert,
- chunk_info->scaled) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL,
- "unable to insert/resize chunk on chunk level")
-
- /* Make sure the address of the chunk is returned. */
- if (!H5F_addr_defined(udata.chunk_block.offset))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk address isn't defined")
-
- /* Cache the new chunk information */
- H5D__chunk_cinfo_cache_update(&io_info->dset->shared->cache.chunk.last, &udata);
+ } /* end if */
+ else {
+ /* Iterate through nodes in chunk skip list */
+ chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm);
+ while (chunk_node) {
+ H5D_chunk_info_t * chunk_info; /* Chunk information */
+ H5D_chk_idx_info_t idx_info; /* Chunked index info */
+ H5D_io_info_t * chk_io_info; /* Pointer to I/O info object for this chunk */
+ void * chunk; /* Pointer to locked chunk buffer */
+ H5D_chunk_ud_t udata; /* Index pass-through */
+ htri_t cacheable; /* Whether the chunk is cacheable */
+ hbool_t need_insert = FALSE; /* Whether the chunk needs to be inserted into the index */
+
+ /* Get the actual chunk information from the skip list node */
+ chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node);
+
+ /* Look up the chunk */
+ if (H5D__chunk_lookup(io_info->dset, chunk_info->scaled, &udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+
+ /* Sanity check */
+ HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
+ (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0));
+
+ /* Set chunk's [scaled] coordinates */
+ io_info->store->chunk.scaled = chunk_info->scaled;
+
+ /* Determine if we should use the chunk cache */
+ if ((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, TRUE)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable")
+ if (cacheable) {
+ /* Load the chunk into cache. But if the whole chunk is written,
+ * simply allocate space instead of load the chunk. */
+ hbool_t entire_chunk = TRUE; /* Whether whole chunk is selected */
+
+ /* Compute # of bytes accessed in chunk */
+ H5_CHECK_OVERFLOW(type_info->dst_type_size, /*From:*/ size_t, /*To:*/ uint32_t);
+ dst_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->dst_type_size;
+
+ /* Determine if we will access all the data in the chunk */
+ if (dst_accessed_bytes != ctg_store.contig.dset_size ||
+ (chunk_info->chunk_points * type_info->src_type_size) != ctg_store.contig.dset_size ||
+ fm->fsel_type == H5S_SEL_POINTS)
+ entire_chunk = FALSE;
+
+ /* Lock the chunk into the cache */
+ if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, entire_chunk, FALSE)))
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
+
+ /* Set up the storage buffer information for this chunk */
+ cpt_store.compact.buf = chunk;
+
+ /* Point I/O info at main I/O info for this chunk */
+ chk_io_info = &cpt_io_info;
} /* end if */
+ else {
+ /* If the chunk hasn't been allocated on disk, do so now. */
+ if (!H5F_addr_defined(udata.chunk_block.offset)) {
+ /* Compose chunked index info struct */
+ idx_info.f = io_info->dset->oloc.file;
+ idx_info.pline = &(io_info->dset->shared->dcpl_cache.pline);
+ idx_info.layout = &(io_info->dset->shared->layout.u.chunk);
+ idx_info.storage = &(io_info->dset->shared->layout.storage.u.chunk);
+
+ /* Set up the size of chunk for user data */
+ udata.chunk_block.length = io_info->dset->shared->layout.u.chunk.size;
+
+ /* Allocate the chunk */
+ if (H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert,
+ chunk_info->scaled) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL,
+ "unable to insert/resize chunk on chunk level")
+
+ /* Make sure the address of the chunk is returned. */
+ if (!H5F_addr_defined(udata.chunk_block.offset))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk address isn't defined")
+
+ /* Cache the new chunk information */
+ H5D__chunk_cinfo_cache_update(&io_info->dset->shared->cache.chunk.last, &udata);
+ } /* end if */
- /* Set up the storage address information for this chunk */
- ctg_store.contig.dset_addr = udata.chunk_block.offset;
+ /* Set up the storage address information for this chunk */
+ ctg_store.contig.dset_addr = udata.chunk_block.offset;
- /* No chunk cached */
- chunk = NULL;
+ /* No chunk cached */
+ chunk = NULL;
- /* Point I/O info at temporary I/O info for this chunk */
- chk_io_info = &ctg_io_info;
- } /* end else */
+ /* Point I/O info at temporary I/O info for this chunk */
+ chk_io_info = &ctg_io_info;
+ } /* end else */
- /* Perform the actual write operation */
- if ((io_info->io_ops.single_write)(chk_io_info, type_info, (hsize_t)chunk_info->chunk_points,
- chunk_info->fspace, chunk_info->mspace) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked write failed")
+ /* Perform the actual write operation */
+ if ((io_info->io_ops.single_write)(chk_io_info, type_info, (hsize_t)chunk_info->chunk_points,
+ chunk_info->fspace, chunk_info->mspace) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked write failed")
- /* Release the cache lock on the chunk, or insert chunk into index. */
- if (chunk) {
- if (H5D__chunk_unlock(io_info, &udata, TRUE, chunk, dst_accessed_bytes) < 0)
- HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
- } /* end if */
- else {
- if (need_insert && io_info->dset->shared->layout.storage.u.chunk.ops->insert)
- if ((io_info->dset->shared->layout.storage.u.chunk.ops->insert)(&idx_info, &udata, NULL) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk addr into index")
- } /* end else */
+ /* Release the cache lock on the chunk, or insert chunk into index. */
+ if (chunk) {
+ if (H5D__chunk_unlock(io_info, &udata, TRUE, chunk, dst_accessed_bytes) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
+ } /* end if */
+ else {
+ if (need_insert && io_info->dset->shared->layout.storage.u.chunk.ops->insert)
+ if ((io_info->dset->shared->layout.storage.u.chunk.ops->insert)(&idx_info, &udata, NULL) <
+ 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL,
+ "unable to insert chunk addr into index")
+ } /* end else */
- /* Advance to next chunk in list */
- chunk_node = H5D_CHUNK_GET_NEXT_NODE(fm, chunk_node);
- } /* end while */
+ /* Advance to next chunk in list */
+ chunk_node = H5D_CHUNK_GET_NEXT_NODE(fm, chunk_node);
+ } /* end while */
+ } /* end else */
done:
+ /* Cleanup on failure */
+ if (ret_value < 0) {
+ if (chunk_mem_spaces != chunk_mem_spaces_static)
+ chunk_mem_spaces = H5MM_xfree(chunk_mem_spaces);
+ if (chunk_file_spaces != chunk_file_spaces_static)
+ chunk_file_spaces = H5MM_xfree(chunk_file_spaces);
+ if (chunk_addrs != chunk_addrs_static)
+ chunk_addrs = H5MM_xfree(chunk_addrs);
+ } /* end if */
+
+ /* Make sure we cleaned up */
+ HDassert(!chunk_mem_spaces || chunk_mem_spaces == chunk_mem_spaces_static);
+ HDassert(!chunk_file_spaces || chunk_file_spaces == chunk_file_spaces_static);
+ HDassert(!chunk_addrs || chunk_addrs == chunk_addrs_static);
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5D__chunk_write() */