author     Neil Fortner <nfortne2@hdfgroup.org>   2010-07-28 15:56:28 (GMT)
committer  Neil Fortner <nfortne2@hdfgroup.org>   2010-07-28 15:56:28 (GMT)
commit     9c218ea879de3f13464aed3c96d5298d1c934774 (patch)
tree       ad87e0a97a4435ce5da69da992c9c424e0db3469
parent     bcbf482347a633b07af9eb19ddaccf2bdfccdfa6 (diff)
download   hdf5-9c218ea879de3f13464aed3c96d5298d1c934774.zip
           hdf5-9c218ea879de3f13464aed3c96d5298d1c934774.tar.gz
           hdf5-9c218ea879de3f13464aed3c96d5298d1c934774.tar.bz2
[svn-r19137] Purpose: Add support for disabling filters on partial edge chunks.

Added two new API functions, H5Pset_chunk_opts and H5Pget_chunk_opts. When the set function is passed H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS, datasets created with that property list will not apply filters to partially used chunks on the edge of the dataspace. Datasets created in this manner will not be readable by HDF5 1.8 or earlier releases.

Tested: jam, linew, amani (h5committest)
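
A minimal usage sketch of the new API, based only on the commit message above; the file name "example.h5", dataset name "data", extents, and the gzip filter are illustrative assumptions, not part of this patch:

    /* Sketch only: create a chunked, compressed dataset whose partial
     * edge chunks are stored unfiltered. */
    #include "hdf5.h"

    int main(void)
    {
        hsize_t  dims[2]       = {100, 100};  /* dataset extent */
        hsize_t  chunk_dims[2] = {30, 30};    /* 30x30 chunks -> partial chunks at the edges */
        unsigned opts          = 0;
        hid_t    file, space, dcpl, dset;

        file  = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        space = H5Screate_simple(2, dims, NULL);

        dcpl = H5Pcreate(H5P_DATASET_CREATE);
        H5Pset_chunk(dcpl, 2, chunk_dims);
        H5Pset_deflate(dcpl, 6);              /* filter pipeline (gzip) */

        /* Do not run the filter pipeline on partially used edge chunks */
        H5Pset_chunk_opts(dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS);

        /* The setting can be read back from the property list;
         * (opts & H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) is nonzero here */
        H5Pget_chunk_opts(dcpl, &opts);

        /* Note: files containing such datasets are not readable by
         * HDF5 1.8 or earlier */
        dset = H5Dcreate2(file, "data", H5T_NATIVE_INT, space, H5P_DEFAULT,
                          dcpl, H5P_DEFAULT);

        H5Dclose(dset);
        H5Pclose(dcpl);
        H5Sclose(space);
        H5Fclose(file);
        return 0;
    }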
-rw-r--r--  src/H5Dchunk.c            870
-rw-r--r--  src/H5Dint.c               20
-rw-r--r--  src/H5Dmpio.c               4
-rw-r--r--  src/H5Dpkg.h               11
-rw-r--r--  src/H5Dpublic.h             3
-rw-r--r--  src/H5Olayout.c            29
-rw-r--r--  src/H5Oprivate.h           23
-rw-r--r--  src/H5Pdcpl.c             125
-rw-r--r--  src/H5Ppublic.h             2
-rw-r--r--  test/dsets.c              207
-rwxr-xr-x  test/objcopy.c            143
-rw-r--r--  test/set_extent.c         464
-rw-r--r--  testpar/t_dset.c          206
-rw-r--r--  testpar/t_filter_read.c   117
14 files changed, 1873 insertions, 351 deletions
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index 168c011..98dfff9 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -149,6 +149,8 @@ typedef struct H5D_chunk_it_ud3_t {
/* needed for compressed variable-length data */
const H5O_pline_t *pline; /* Filter pipeline */
+ unsigned dset_ndims; /* Number of dimensions in dataset */
+ const hsize_t *dset_dims; /* Dataset dimensions */
/* needed for copy object pointed by refs */
H5O_copy_t *cpy_info; /* Copy options */
@@ -207,6 +209,9 @@ static herr_t H5D_chunk_mem_cb(void *elem, hid_t type_id, unsigned ndims,
const hsize_t *coords, void *fm);
static herr_t H5D_chunk_cache_evict(const H5D_t *dset, hid_t dxpl_id,
const H5D_dxpl_cache_t *dxpl_cache, H5D_rdcc_ent_t *ent, hbool_t flush);
+static htri_t H5D_chunk_is_partial_edge_chunk(const hsize_t offset[],
+ const H5D_t *dset, unsigned dset_ndims, const hsize_t *dset_dims,
+ const uint32_t *chunk_dims);
/*********************/
@@ -643,7 +648,7 @@ H5D_chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info
#ifdef H5_HAVE_PARALLEL
&& !(io_info->using_mpi_vfd)
#endif /* H5_HAVE_PARALLEL */
- ) {
+ && H5S_SEL_ALL != H5S_GET_SELECT_TYPE(file_space)) {
/* Initialize skip list for chunk selections */
fm->sel_chunks = NULL;
fm->use_single = TRUE;
@@ -861,9 +866,8 @@ H5D_chunk_alloc(size_t size, const H5O_pline_t *pline)
FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_chunk_alloc)
HDassert(size);
- HDassert(pline);
- if(pline->nused > 0)
+ if(pline && pline->nused)
ret_value = H5MM_malloc(size);
else
ret_value = H5FL_BLK_MALLOC(chunk, size);
@@ -891,10 +895,8 @@ H5D_chunk_xfree(void *chk, const H5O_pline_t *pline)
{
FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_chunk_xfree)
- HDassert(pline);
-
if(chk) {
- if(pline->nused > 0)
+ if(pline && pline->nused)
H5MM_xfree(chk);
else
chk = H5FL_BLK_FREE(chunk, chk);
@@ -1721,7 +1723,8 @@ H5D_chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
src_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->src_type_size;
/* Lock the chunk into the cache */
- if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, FALSE)))
+ if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, FALSE,
+ FALSE)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
/* Set up the storage buffer information for this chunk */
@@ -1754,7 +1757,8 @@ H5D_chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked read failed")
/* Release the cache lock on the chunk. */
- if(chunk && H5D_chunk_unlock(io_info, &udata, FALSE, chunk, src_accessed_bytes) < 0)
+ if(chunk && H5D_chunk_unlock(io_info, &udata, FALSE,
+ chunk, src_accessed_bytes) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
} /* end if */
@@ -1859,7 +1863,8 @@ H5D_chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
entire_chunk = FALSE;
/* Lock the chunk into the cache */
- if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, entire_chunk)))
+ if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, entire_chunk,
+ FALSE)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
/* Set up the storage buffer information for this chunk */
@@ -1911,7 +1916,8 @@ H5D_chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked write failed")
/* Release the cache lock on the chunk. */
- if(chunk && H5D_chunk_unlock(io_info, &udata, TRUE, chunk, dst_accessed_bytes) < 0)
+ if(chunk && H5D_chunk_unlock(io_info, &udata, TRUE, chunk,
+ dst_accessed_bytes) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
/* Advance to next chunk in list */
@@ -2280,6 +2286,7 @@ H5D_chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *chunk_offset,
udata->nbytes = 0;
udata->filter_mask = 0;
udata->addr = HADDR_UNDEF;
+ udata->new_unfilt_chunk = FALSE;
/* Check for chunk in cache */
if(dset->shared->cache.chunk.nslots > 0) {
@@ -2377,7 +2384,8 @@ H5D_chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t *
udata.addr = ent->chunk_addr;
/* Should the chunk be filtered before writing it to disk? */
- if(dset->shared->dcpl_cache.pline.nused) {
+ if(dset->shared->dcpl_cache.pline.nused
+ && !(ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS)) {
size_t alloc = udata.nbytes; /* Bytes allocated for BUF */
size_t nbytes; /* Chunk size (in bytes) */
@@ -2417,10 +2425,27 @@ H5D_chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t *
/* Indicate that the chunk must go through 'insert' method */
must_insert = TRUE;
} /* end if */
- else if(!H5F_addr_defined(udata.addr))
+ else if(!H5F_addr_defined(udata.addr)) {
/* Indicate that the chunk must go through 'insert' method */
must_insert = TRUE;
+ /* This flag could be set for this chunk, just remove and ignore it
+ */
+ ent->edge_chunk_state &= ~H5D_RDCC_NEWLY_DISABLED_FILTERS;
+ } /* end else */
+ else if(ent->edge_chunk_state & H5D_RDCC_NEWLY_DISABLED_FILTERS) {
+ /* Chunk on disk is still filtered, must insert to allocate correct
+ * size */
+ must_insert = TRUE;
+
+ /* Set the disable filters field back to the standard disable
+ * filters setting, as it no longer needs to be inserted with every
+ * flush */
+ ent->edge_chunk_state &= ~H5D_RDCC_NEWLY_DISABLED_FILTERS;
+ } /* end else */
+
+ HDassert(!(ent->edge_chunk_state & H5D_RDCC_NEWLY_DISABLED_FILTERS));
+
/* Check if the chunk needs to be 'inserted' (could exist already and
* the 'insert' operation could resize it)
*/
@@ -2472,7 +2497,9 @@ H5D_chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t *
if(buf == ent->chunk)
buf = NULL;
if(ent->chunk != NULL)
- ent->chunk = (uint8_t *)H5D_chunk_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline));
+ ent->chunk = (uint8_t *)H5D_chunk_xfree(ent->chunk,
+ (ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS) ? NULL
+ : &(dset->shared->dcpl_cache.pline));
} /* end if */
done:
@@ -2488,7 +2515,9 @@ done:
*/
if(ret_value < 0 && point_of_no_return) {
if(ent->chunk)
- ent->chunk = (uint8_t *)H5D_chunk_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline));
+ ent->chunk = (uint8_t *)H5D_chunk_xfree(ent->chunk,
+ (ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS) ? NULL
+ : &(dset->shared->dcpl_cache.pline));
} /* end if */
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
@@ -2531,7 +2560,9 @@ H5D_chunk_cache_evict(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t *
else {
/* Don't flush, just free chunk */
if(ent->chunk != NULL)
- ent->chunk = (uint8_t *)H5D_chunk_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline));
+ ent->chunk = (uint8_t *)H5D_chunk_xfree(ent->chunk,
+ (ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS) ? NULL
+ : &(dset->shared->dcpl_cache.pline));
} /* end else */
/* Check for SWMR writes to the file */
@@ -2699,22 +2730,33 @@ done:
* Programmer: Robb Matzke
* Thursday, May 21, 1998
*
+ * Modifications: Neil Fortner
+ * Tuesday, December 15, 2009
+ * Added new_unfilt_chunk parameter - if true indicates that
+ * the chunk just became a partial edge chunk and the dataset
+ * is set to disable filters on partial chunks.
+ * Added prev_unfilt_chunk parameter - if true indicates that
+ * the chunk just had filters re-enabled after being disabled.
+ *
*-------------------------------------------------------------------------
*/
void *
H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
- hbool_t relax)
+ hbool_t relax, hbool_t prev_unfilt_chunk)
{
H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */
- const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info */
- const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
- const H5O_fill_t *fill = &(dset->shared->dcpl_cache.fill); /* Fill value info */
+ const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info - always equal to the pline passed to H5D_chunk_alloc */
+ const H5O_pline_t *old_pline = pline; /* Old pipeline, i.e. pipeline used to read the chunk */
+ const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
+ const H5O_fill_t *fill = &(dset->shared->dcpl_cache.fill); /* Fill value info */
H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */
hbool_t fb_info_init = FALSE; /* Whether the fill value buffer has been initialized */
- H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache*/
+ H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache*/
H5D_rdcc_ent_t *ent = NULL; /*cache entry */
haddr_t chunk_addr = HADDR_UNDEF; /* Address of chunk on disk */
size_t chunk_size; /*size of a chunk */
+ htri_t is_edge_chunk; /* Whether the chunk is an edge chunk */
+ hbool_t disable_filters = FALSE; /* Whether to disable filters (when adding to cache) */
void *chunk = NULL; /*the file chunk */
unsigned u; /*counters */
void *ret_value; /*return value */
@@ -2727,6 +2769,7 @@ H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
HDassert(udata);
HDassert(dset);
HDassert(TRUE == H5P_isa_class(io_info->dxpl_id, H5P_DATASET_XFER));
+ HDassert(!(udata->new_unfilt_chunk && prev_unfilt_chunk));
/* Get the chunk's size */
HDassert(layout->u.chunk.size > 0);
@@ -2751,97 +2794,211 @@ H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
* Already in the cache. Count a hit.
*/
rdcc->stats.nhits++;
- } /* end if */
- else if(relax) {
- /*
- * Not in the cache, but we're about to overwrite the whole thing
- * anyway, so just allocate a buffer for it but don't initialize that
- * buffer with the file contents. Count this as a hit instead of a
- * miss because we saved ourselves lots of work.
- */
- rdcc->stats.nhits++;
- /* Still save the chunk address so the cache stays consistent */
- chunk_addr = udata->addr;
+ /* Make adjustments if the edge chunk status changed recently */
+ if(pline->nused) {
+ /* If the chunk recently became an unfiltered partial edge chunk
+ * while in cache, we must make some changes to the entry */
+ if(udata->new_unfilt_chunk) {
+ /* If this flag is set then partial chunk filters must be
+ * disabled, and the chunk must not have previously been a
+ * partial chunk (with disabled filters) */
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+ HDassert(!(ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS));
+ HDassert(old_pline->nused);
+
+ /* Disable filters. Set pline to NULL instead of just the
+ * default pipeline to make a quick failure more likely if the
+ * code is changed in an inappropriate/incomplete way. */
+ pline = NULL;
+
+ /* Reallocate the chunk so H5D_chunk_xfree doesn't get confused
+ */
+ if(NULL == (chunk = H5D_chunk_alloc(chunk_size, pline)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
+ HDmemcpy(chunk, ent->chunk, chunk_size);
+ ent->chunk = (uint8_t *)H5D_chunk_xfree(ent->chunk, old_pline);
+ ent->chunk = (uint8_t *)chunk;
+ chunk = NULL;
+
+ /* Mark the chunk as having filters disabled as well as "newly
+ * disabled" so it is inserted on flush */
+ ent->edge_chunk_state |= H5D_RDCC_DISABLE_FILTERS;
+ ent->edge_chunk_state |= H5D_RDCC_NEWLY_DISABLED_FILTERS;
+ } /* end if */
+ else if(prev_unfilt_chunk) {
+ /* If this flag is set then partial chunk filters must be
+ * disabled, and the chunk must have previously been a partial
+ * chunk (with disabled filters) */
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+ HDassert((ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS));
+ HDassert(pline->nused);
+
+ /* Mark the old pipeline as having been disabled */
+ old_pline = NULL;
+
+ /* Reallocate the chunk so H5D_chunk_xfree doesn't get confused
+ */
+ if(NULL == (chunk = H5D_chunk_alloc(chunk_size, pline)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
+ HDmemcpy(chunk, ent->chunk, chunk_size);
- if(NULL == (chunk = H5D_chunk_alloc(chunk_size, pline)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
+ ent->chunk = (uint8_t *)H5D_chunk_xfree(ent->chunk, old_pline);
+ ent->chunk = (uint8_t *)chunk;
+ chunk = NULL;
- /* In the case that some dataset functions look through this data,
- * clear it to all 0s. */
- HDmemset(chunk, 0, chunk_size);
+ /* Mark the chunk as having filters enabled */
+ ent->edge_chunk_state &= ~(H5D_RDCC_DISABLE_FILTERS
+ | H5D_RDCC_NEWLY_DISABLED_FILTERS);
+ } /* end else */
+ } /* end if */
} /* end if */
else {
- /*
- * Not in the cache. Count this as a miss if it's in the file
- * or an init if it isn't.
- */
-
- /* Save the chunk address */
- chunk_addr = udata->addr;
-
- /* Check if the chunk exists on disk */
- if(H5F_addr_defined(chunk_addr)) {
- size_t chunk_alloc = 0; /*allocated chunk size */
-
- /* Chunk size on disk isn't [likely] the same size as the final chunk
- * size in memory, so allocate memory big enough. */
- H5_ASSIGN_OVERFLOW(chunk_alloc, udata->nbytes, uint32_t, size_t);
- if(NULL == (chunk = H5D_chunk_alloc(chunk_alloc, pline)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
- if(H5F_block_read(dset->oloc.file, H5FD_MEM_DRAW, chunk_addr, chunk_alloc, io_info->dxpl_id, chunk) < 0)
- HGOTO_ERROR(H5E_IO, H5E_READERROR, NULL, "unable to read raw data chunk")
-
- if(pline->nused) {
- if(H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &(udata->filter_mask), io_info->dxpl_cache->err_detect,
- io_info->dxpl_cache->filter_cb, &chunk_alloc, &chunk_alloc, &chunk) < 0)
- HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, NULL, "data pipeline read failed")
- H5_ASSIGN_OVERFLOW(udata->nbytes, chunk_alloc, size_t, uint32_t);
+ /* Check if we should disable filters on this chunk */
+ if(pline->nused) {
+ if(udata->new_unfilt_chunk) {
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+
+ /* Disable the filters for writing */
+ disable_filters = TRUE;
+ pline = NULL;
} /* end if */
+ else if(prev_unfilt_chunk) {
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
- /* Increment # of cache misses */
- rdcc->stats.nmisses++;
+ /* Mark the filters as having been previously disabled (for the
+ * chunk as currently on disk) - disable the filters for reading
+ */
+ old_pline = NULL;
+ } /* end if */
+ else if(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) {
+ /* Check if this is an edge chunk */
+ if((is_edge_chunk = H5D_chunk_is_partial_edge_chunk(
+ io_info->store->chunk.offset, io_info->dset, 0, NULL,
+ layout->u.chunk.dim)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to determine if chunk is edge chunk")
+
+ if(is_edge_chunk) {
+ /* Disable the filters for both writing and reading */
+ disable_filters = TRUE;
+ old_pline = NULL;
+ pline = NULL;
+ } /* end if */
+ } /* end if */
} /* end if */
- else {
- H5D_fill_value_t fill_status;
+ else
+ HDassert(!udata->new_unfilt_chunk && !prev_unfilt_chunk);
+
+ if(relax) {
+ /*
+ * Not in the cache, but we're about to overwrite the whole thing
+ * anyway, so just allocate a buffer for it but don't initialize that
+ * buffer with the file contents. Count this as a hit instead of a
+ * miss because we saved ourselves lots of work.
+ */
+ rdcc->stats.nhits++;
- /* Sanity check */
- HDassert(fill->alloc_time != H5D_ALLOC_TIME_EARLY);
+ /* Still save the chunk address so the cache stays consistent */
+ chunk_addr = udata->addr;
- /* Chunk size on disk isn't [likely] the same size as the final chunk
- * size in memory, so allocate memory big enough. */
if(NULL == (chunk = H5D_chunk_alloc(chunk_size, pline)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
- if(H5P_is_fill_value_defined(fill, &fill_status) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't tell if fill value defined")
+ /* In the case that some dataset functions look through this data,
+ * clear it to all 0s. */
+ HDmemset(chunk, 0, chunk_size);
+ } /* end if */
+ else {
+ /*
+ * Not in the cache. Count this as a miss if it's in the file
+ * or an init if it isn't.
+ */
- if(fill->fill_time == H5D_FILL_TIME_ALLOC ||
- (fill->fill_time == H5D_FILL_TIME_IFSET && fill_status == H5D_FILL_VALUE_USER_DEFINED)) {
- /*
- * The chunk doesn't exist in the file. Replicate the fill
- * value throughout the chunk, if the fill value is defined.
- */
+ /* Save the chunk address */
+ chunk_addr = udata->addr;
+
+ /* Check if the chunk exists on disk */
+ if(H5F_addr_defined(chunk_addr)) {
+ size_t chunk_alloc = 0; /*allocated chunk size */
+
+ /* Chunk size on disk isn't [likely] the same size as the final chunk
+ * size in memory, so allocate memory big enough. */
+ H5_ASSIGN_OVERFLOW(chunk_alloc, udata->nbytes, uint32_t, size_t);
+ if(NULL == (chunk = H5D_chunk_alloc(chunk_alloc,
+ udata->new_unfilt_chunk ? old_pline : pline)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
+ if(H5F_block_read(dset->oloc.file, H5FD_MEM_DRAW, chunk_addr, chunk_alloc, io_info->dxpl_id, chunk) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, NULL, "unable to read raw data chunk")
+
+ if(old_pline && old_pline->nused) {
+ if(H5Z_pipeline(old_pline, H5Z_FLAG_REVERSE,
+ &(udata->filter_mask),
+ io_info->dxpl_cache->err_detect,
+ io_info->dxpl_cache->filter_cb,
+ &chunk_alloc, &chunk_alloc, &chunk) < 0)
+ HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, NULL, "data pipeline read failed")
+ H5_ASSIGN_OVERFLOW(udata->nbytes, chunk_alloc, size_t, uint32_t);
+
+ /* Reallocate chunk if necessary */
+ if(udata->new_unfilt_chunk) {
+ void *tmp_chunk = chunk;
+
+ if(NULL == (chunk = H5D_chunk_alloc(chunk_alloc,
+ pline))) {
+ (void)H5D_chunk_xfree(tmp_chunk, old_pline);
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
+ } /* end if */
+ HDmemcpy(chunk, tmp_chunk, chunk_size);
+ (void)H5D_chunk_xfree(tmp_chunk, old_pline);
+ } /* end if */
+ } /* end if */
- /* Initialize the fill value buffer */
- /* (use the compact dataset storage buffer as the fill value buffer) */
- if(H5D_fill_init(&fb_info, chunk, NULL, NULL, NULL, NULL,
- &dset->shared->dcpl_cache.fill, dset->shared->type,
- dset->shared->type_id, (size_t)0, chunk_size, io_info->dxpl_id) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "can't initialize fill buffer info")
- fb_info_init = TRUE;
-
- /* Check for VL datatype & non-default fill value */
- if(fb_info.has_vlen_fill_type)
- /* Fill the buffer with VL datatype fill values */
- if(H5D_fill_refill_vl(&fb_info, fb_info.elmts_per_buf, io_info->dxpl_id) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, NULL, "can't refill fill value buffer")
+ /* Increment # of cache misses */
+ rdcc->stats.nmisses++;
} /* end if */
- else
- HDmemset(chunk, 0, chunk_size);
+ else {
+ H5D_fill_value_t fill_status;
+
+ /* Chunk size on disk isn't [likely] the same size as the final chunk
+ * size in memory, so allocate memory big enough. */
+ if(NULL == (chunk = H5D_chunk_alloc(chunk_size, pline)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
+
+ if(H5P_is_fill_value_defined(fill, &fill_status) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't tell if fill value defined")
+
+ if(fill->fill_time == H5D_FILL_TIME_ALLOC ||
+ (fill->fill_time == H5D_FILL_TIME_IFSET && fill_status == H5D_FILL_VALUE_USER_DEFINED)) {
+ /*
+ * The chunk doesn't exist in the file. Replicate the fill
+ * value throughout the chunk, if the fill value is defined.
+ */
+
+ /* Initialize the fill value buffer */
+ /* (use the compact dataset storage buffer as the fill value buffer) */
+ if(H5D_fill_init(&fb_info, chunk, NULL, NULL, NULL, NULL,
+ &dset->shared->dcpl_cache.fill, dset->shared->type,
+ dset->shared->type_id, (size_t)0, chunk_size, io_info->dxpl_id) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "can't initialize fill buffer info")
+ fb_info_init = TRUE;
+
+ /* Check for VL datatype & non-default fill value */
+ if(fb_info.has_vlen_fill_type)
+ /* Fill the buffer with VL datatype fill values */
+ if(H5D_fill_refill_vl(&fb_info, fb_info.elmts_per_buf, io_info->dxpl_id) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, NULL, "can't refill fill value buffer")
+ } /* end if */
+ else
+ HDmemset(chunk, 0, chunk_size);
- /* Increment # of creations */
- rdcc->stats.ninits++;
+ /* Increment # of creations */
+ rdcc->stats.ninits++;
+ } /* end else */
} /* end else */
} /* end else */
HDassert(chunk_size > 0);
@@ -2888,6 +3045,9 @@ H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate raw data chunk entry")
ent->locked = 0;
+ ent->edge_chunk_state = disable_filters ? H5D_RDCC_DISABLE_FILTERS : 0;
+ if(udata->new_unfilt_chunk)
+ ent->edge_chunk_state |= H5D_RDCC_NEWLY_DISABLED_FILTERS;
ent->dirty = FALSE;
ent->deleted = FALSE;
ent->chunk_addr = chunk_addr;
@@ -3008,11 +3168,33 @@ H5D_chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata,
* Note: we have to copy the layout and filter messages so we
* don't discard the `const' qualifier.
*/
+ htri_t is_unfiltered_edge_chunk = FALSE; /* Whether the chunk is an unfiltered edge chunk */
+
+ /* Check if we should disable filters on this chunk */
+ if(udata->new_unfilt_chunk) {
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+
+ is_unfiltered_edge_chunk = TRUE;
+ } /* end if */
+ else if(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) {
+ /* Check if the chunk is an edge chunk, and disable filters if so */
+ if((is_unfiltered_edge_chunk = H5D_chunk_is_partial_edge_chunk(
+ io_info->store->chunk.offset, io_info->dset, 0, NULL,
+ layout->u.chunk.dim)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to determine if chunk is edge chunk")
+ } /* end if */
+
if(dirty) {
H5D_rdcc_ent_t fake_ent; /* "fake" chunk cache entry */
HDmemset(&fake_ent, 0, sizeof(fake_ent));
fake_ent.dirty = TRUE;
+ if(is_unfiltered_edge_chunk)
+ fake_ent.edge_chunk_state = H5D_RDCC_DISABLE_FILTERS;
+ if(udata->new_unfilt_chunk)
+ fake_ent.edge_chunk_state |= H5D_RDCC_NEWLY_DISABLED_FILTERS;
HDmemcpy(fake_ent.offset, io_info->store->chunk.offset, layout->u.chunk.ndims * sizeof(fake_ent.offset[0]));
HDassert(layout->u.chunk.size > 0);
fake_ent.chunk_addr = udata->addr;
@@ -3023,7 +3205,8 @@ H5D_chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata,
} /* end if */
else {
if(chunk)
- chunk = H5D_chunk_xfree(chunk, &(io_info->dset->shared->dcpl_cache.pline));
+ chunk = H5D_chunk_xfree(chunk, is_unfiltered_edge_chunk ? NULL
+ : &(io_info->dset->shared->dcpl_cache.pline));
} /* end else */
} /* end if */
else {
@@ -3177,13 +3360,17 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
hsize_t min_unalloc[H5O_LAYOUT_NDIMS]; /* First chunk in each dimension that is unallocated */
hsize_t max_unalloc[H5O_LAYOUT_NDIMS]; /* Last chunk in each dimension that is unallocated */
hsize_t chunk_offset[H5O_LAYOUT_NDIMS]; /* Offset of current chunk */
+ size_t chunk_size; /* Size of current chunk in bytes, possibly filtered */
size_t orig_chunk_size; /* Original size of chunk in bytes */
unsigned filter_mask = 0; /* Filter mask for chunks that have them */
const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info */
+ const H5O_pline_t def_pline = H5O_CRT_PIPELINE_DEF; /* Default pipeline */
const H5O_fill_t *fill = &(dset->shared->dcpl_cache.fill); /* Fill value info */
H5D_fill_value_t fill_status; /* The fill value status */
hbool_t should_fill = FALSE; /* Whether fill values should be written */
+ void *unfilt_fill_buf = NULL; /* Unfiltered fill value buffer */
+ void **fill_buf = NULL; /* Pointer to the fill buffer to use for a chunk */
H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */
#ifdef H5_HAVE_PARALLEL
@@ -3200,6 +3387,10 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
int op_dim; /* Current operating dimension */
H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */
hbool_t fb_info_init = FALSE; /* Whether the fill value buffer has been initialized */
+ hbool_t has_unfilt_edge_chunks = FALSE; /* Whether there are partial edge chunks with disabled filters */
+ hbool_t unfilt_edge_chunk_dim[H5O_LAYOUT_NDIMS]; /* Whether there are unfiltered edge chunks at the edge of each dimension */
+ hsize_t edge_chunk_offset[H5O_LAYOUT_NDIMS]; /* Offset of the unfiltered edge chunks at the edge of each dimension */
+ unsigned nunfilt_edge_chunk_dims = 0; /* Number of dimensions on an edge */
hid_t data_dxpl_id; /* DXPL ID to use for raw data I/O operations */
herr_t ret_value = SUCCEED; /* Return value */
@@ -3262,6 +3453,31 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
if(H5D_get_dxpl_cache(data_dxpl_id, &dxpl_cache) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
+ /* Calculate the minimum and maximum chunk offsets in each dimension, and
+ * determine if there are any unfiltered partial edge chunks. Note that we
+ * assume here that all elements of space_dim are > 0. This is checked at
+ * the top of this function. */
+ for(op_dim=0; op_dim<space_ndims; op_dim++) {
+ min_unalloc[op_dim] = ((old_dim[op_dim] + chunk_dim[op_dim] - 1)
+ / chunk_dim[op_dim]) * chunk_dim[op_dim];
+ max_unalloc[op_dim] = ((space_dim[op_dim] - 1) / chunk_dim[op_dim])
+ * chunk_dim[op_dim];
+
+ /* Calculate if there are unfiltered edge chunks at the edge of this
+ * dimension. Note the edge_chunk_offset is uninitialized for
+ * dimensions where unfilt_edge_chunk_dim is FALSE. */
+ if((layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS)
+ && pline->nused > 0
+ && space_dim[op_dim] % chunk_dim[op_dim] != 0) {
+ has_unfilt_edge_chunks = TRUE;
+ unfilt_edge_chunk_dim[op_dim] = TRUE;
+ edge_chunk_offset[op_dim] = max_unalloc[op_dim];
+ } /* end if */
+ else
+ unfilt_edge_chunk_dim[op_dim] = FALSE;
+ } /* end for */
+
/* Get original chunk size */
H5_ASSIGN_OVERFLOW(orig_chunk_size, layout->u.chunk.size, uint32_t, size_t);
@@ -3291,12 +3507,25 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize fill buffer info")
fb_info_init = TRUE;
- /* Check if there are filters which need to be applied to the chunk */
- /* (only do this in advance when the chunk info can be re-used (i.e.
- * it doesn't contain any non-default VL datatype fill values)
- */
- if(!fb_info.has_vlen_fill_type && pline->nused > 0) {
- size_t buf_size = orig_chunk_size;
+ /* Initialize the fill_buf pointer to the buffer in fb_info. If edge
+ * chunk filters are disabled, we will switch the buffer as appropriate
+ * for each chunk. */
+ fill_buf = &fb_info.fill_buf;
+
+ /* Check if there are filters which need to be applied to the chunk */
+ /* (only do this in advance when the chunk info can be re-used (i.e.
+ * it doesn't contain any non-default VL datatype fill values)
+ */
+ if(!fb_info.has_vlen_fill_type && pline->nused > 0) {
+ size_t buf_size = orig_chunk_size;
+ /* If the dataset has disabled partial chunk filters, create a copy
+ * of the unfiltered fill_buf to use for partial chunks */
+ if(has_unfilt_edge_chunks) {
+ if(NULL == (unfilt_fill_buf = H5D_chunk_alloc(orig_chunk_size,
+ &def_pline)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for raw data chunk")
+ HDmemcpy(unfilt_fill_buf, fb_info.fill_buf, orig_chunk_size);
+ } /* end if */
/* Push the chunk through the filters */
if(H5Z_pipeline(pline, 0, &filter_mask, dxpl_cache->err_detect, dxpl_cache->filter_cb, &orig_chunk_size, &buf_size, &fb_info.fill_buf) < 0)
@@ -3316,16 +3545,6 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
idx_info.layout = &dset->shared->layout.u.chunk;
idx_info.storage = &dset->shared->layout.storage.u.chunk;
- /* Calculate the minimum and maximum chunk offsets in each dimension. Note
- * that we assume here that all elements of space_dim are > 0. This is
- * checked at the top of this function */
- for(op_dim=0; op_dim<space_ndims; op_dim++) {
- min_unalloc[op_dim] = ((old_dim[op_dim] + chunk_dim[op_dim] - 1)
- / chunk_dim[op_dim]) * chunk_dim[op_dim];
- max_unalloc[op_dim] = ((space_dim[op_dim] - 1) / chunk_dim[op_dim])
- * chunk_dim[op_dim];
- } /* end for */
-
/* Loop over all chunks */
/* The algorithm is:
* For each dimension:
@@ -3344,6 +3563,7 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
* certain dimension, max_unalloc is updated in order to avoid allocating
* those chunks again.
*/
+ chunk_size = orig_chunk_size;
for(op_dim=0; op_dim<space_ndims; op_dim++) {
H5D_chunk_ud_t udata; /* User data for querying chunk info */
int i; /* Local index variable */
@@ -3357,12 +3577,33 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
* sizeof(chunk_offset[0])));
chunk_offset[op_dim] = min_unalloc[op_dim];
+ if(has_unfilt_edge_chunks) {
+ /* Initialize nunfilt_edge_chunk_dims */
+ nunfilt_edge_chunk_dims = 0;
+ for(i=0; i<space_ndims; i++)
+ if(unfilt_edge_chunk_dim[i] && chunk_offset[i]
+ == edge_chunk_offset[i])
+ nunfilt_edge_chunk_dims++;
+
+ /* Initialize chunk_size and fill_buf */
+ if(should_fill && !fb_info.has_vlen_fill_type) {
+ HDassert(fb_info_init);
+ HDassert(unfilt_fill_buf);
+ if(nunfilt_edge_chunk_dims) {
+ fill_buf = &unfilt_fill_buf;
+ chunk_size = layout->u.chunk.size;
+ } /* end if */
+ else {
+ fill_buf = &fb_info.fill_buf;
+ chunk_size = orig_chunk_size;
+ } /* end else */
+ } /* end if */
+ } /* end if */
+
carry = FALSE;
} /* end if */
while(!carry) {
- size_t chunk_size = orig_chunk_size; /* Size of chunk in bytes, possibly filtered */
-
#ifndef NDEBUG
/* None of the chunks should be allocated */
{
@@ -3398,6 +3639,7 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
if(fb_info_init && fb_info.has_vlen_fill_type) {
/* Sanity check */
HDassert(should_fill);
+ HDassert(!unfilt_fill_buf);
/* Check to make sure the buffer is large enough. It is
* possible (though ill-advised) for the filter to shrink the
@@ -3414,7 +3656,7 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "can't refill fill value buffer")
/* Check if there are filters which need to be applied to the chunk */
- if(pline->nused > 0) {
+ if(!nunfilt_edge_chunk_dims) {
size_t nbytes = orig_chunk_size;
/* Push the chunk through the filters */
@@ -3430,6 +3672,10 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
/* Keep the number of bytes the chunk turned in to */
chunk_size = nbytes;
} /* end if */
+ else
+ chunk_size = layout->u.chunk.size;
+
+ HDassert(*fill_buf == fb_info.fill_buf);
} /* end if */
/* Initialize the chunk information */
@@ -3457,7 +3703,7 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
/* Write the chunks out from only one process */
/* !! Use the internal "independent" DXPL!! -QAK */
if(H5_PAR_META_WRITE == mpi_rank)
- if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size, data_dxpl_id, fb_info.fill_buf) < 0)
+ if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size, data_dxpl_id, *fill_buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
/* Indicate that blocks are being written */
@@ -3465,23 +3711,54 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
} /* end if */
else {
#endif /* H5_HAVE_PARALLEL */
- if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size, data_dxpl_id, fb_info.fill_buf) < 0)
+ if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size, data_dxpl_id, *fill_buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
#ifdef H5_HAVE_PARALLEL
} /* end else */
#endif /* H5_HAVE_PARALLEL */
} /* end if */
- /* Increment indices */
+ /* Increment indices and adjust the edge chunk state */
carry = TRUE;
- for(i = (int)(space_ndims - 1); i >= 0; --i) {
+ for(i = (space_ndims - 1); i >= 0; --i) {
chunk_offset[i] += chunk_dim[i];
- if(chunk_offset[i] > max_unalloc[i])
+ if(chunk_offset[i] > max_unalloc[i]) {
if(i == op_dim)
chunk_offset[i] = min_unalloc[i];
else
chunk_offset[i] = 0;
+
+ /* Check if we just left the edge in this dimension */
+ if(unfilt_edge_chunk_dim[i]
+ && edge_chunk_offset[i] == max_unalloc[i]
+ && chunk_offset[i] < edge_chunk_offset[i]) {
+ nunfilt_edge_chunk_dims--;
+ if(should_fill && nunfilt_edge_chunk_dims == 0
+ && !fb_info.has_vlen_fill_type) {
+ HDassert(!H5D_chunk_is_partial_edge_chunk(
+ chunk_offset, NULL, (unsigned)space_ndims,
+ space_dim, chunk_dim));
+ fill_buf = &fb_info.fill_buf;
+ chunk_size = orig_chunk_size;
+ } /* end if */
+ } /* end if */
+ } /* end if */
else {
+ /* Check if we just entered the edge in this dimension */
+ if(unfilt_edge_chunk_dim[i] && chunk_offset[i]
+ == edge_chunk_offset[i]) {
+ HDassert(edge_chunk_offset[i] == max_unalloc[i]);
+ nunfilt_edge_chunk_dims++;
+ if(should_fill && nunfilt_edge_chunk_dims == 1
+ && !fb_info.has_vlen_fill_type) {
+ HDassert(H5D_chunk_is_partial_edge_chunk(
+ chunk_offset, NULL, (unsigned)space_ndims,
+ space_dim, chunk_dim));
+ fill_buf = &unfilt_fill_buf;
+ chunk_size = layout->u.chunk.size;
+ } /* end if */
+ } /* end if */
+
carry = FALSE;
break;
} /* end else */
@@ -3518,11 +3795,213 @@ done:
if(fb_info_init && H5D_fill_term(&fb_info) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release fill buffer info")
+ /* Free the unfiltered fill value buffer */
+ unfilt_fill_buf = H5D_chunk_xfree(unfilt_fill_buf, &def_pline);
+
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
} /* end H5D_chunk_allocate() */
/*-------------------------------------------------------------------------
+ * Function: H5D_chunk_update_old_edge_chunks
+ *
+ * Purpose: Update all chunks which were previously partial edge
+ * chunks and are now complete. Determines exactly which
+ * chunks need to be updated and locks each into cache using
+ * the 'prev_unfilt_chunk' flag, then unlocks it, causing
+ * filters to be applied as necessary.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * April 14, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5D_chunk_update_old_edge_chunks(H5D_t *dset, hid_t dxpl_id, hsize_t old_dim[])
+{
+ hsize_t old_edge_chunk_off[H5O_LAYOUT_NDIMS]; /* Offset of first previously incomplete chunk in each dimension */
+ hsize_t max_edge_chunk_off[H5O_LAYOUT_NDIMS]; /* largest offset of chunks that might need to be modified in each dimension */
+ hbool_t new_full_dim[H5O_LAYOUT_NDIMS]; /* Whether the plane of chunks in this dimension needs to be modified */
+ const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
+ const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info */
+ hsize_t chunk_offset[H5O_LAYOUT_NDIMS]; /* Offset of current chunk */
+ const uint32_t *chunk_dim = layout->u.chunk.dim; /* Convenience pointer to chunk dimensions */
+ int space_ndims; /* Dataset's space rank */
+ hsize_t space_dim[H5O_LAYOUT_NDIMS]; /* Dataset's dataspace dimensions */
+ int op_dim; /* Current operating dimension */
+ H5D_io_info_t chk_io_info; /* Chunked I/O info object */
+ H5D_chunk_ud_t chk_udata; /* User data for locking chunk */
+ H5D_storage_t chk_store; /* Chunk storage information */
+ H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
+ H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */
+ void *chunk; /* The file chunk */
+ hbool_t carry; /* Flag to indicate that chunk increment carries to higher dimension (sorta) */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5D_chunk_update_old_edge_chunks, FAIL)
+
+ /* Check args */
+ HDassert(dset && H5D_CHUNKED == layout->type);
+ HDassert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
+ HDassert((H5D_CHUNK_IDX_EARRAY == layout->storage.u.chunk.idx_type &&
+ H5D_COPS_EARRAY == layout->storage.u.chunk.ops) ||
+ (H5D_CHUNK_IDX_FARRAY == layout->storage.u.chunk.idx_type &&
+ H5D_COPS_FARRAY == layout->storage.u.chunk.ops) ||
+ (H5D_CHUNK_IDX_BTREE == layout->storage.u.chunk.idx_type &&
+ H5D_COPS_BTREE == layout->storage.u.chunk.ops));
+ HDassert(TRUE == H5P_isa_class(dxpl_id, H5P_DATASET_XFER));
+ HDassert(pline->nused > 0);
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+
+ /* Retrieve the dataset dimensions */
+ if((space_ndims = H5S_get_simple_extent_dims(dset->shared->space, space_dim, NULL)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to get simple dataspace info")
+ space_dim[space_ndims] = layout->u.chunk.dim[space_ndims];
+
+ /* The last dimension in chunk_offset is always 0 */
+ chunk_offset[space_ndims] = (hsize_t)0;
+
+ /* Check if any current dimensions are smaller than the chunk size, or if
+ * any old dimensions are 0. If so we do not have to do anything. */
+ for(op_dim=0; op_dim<space_ndims; op_dim++)
+ if((space_dim[op_dim] < chunk_dim[op_dim]) || old_dim[op_dim] == 0) {
+ /* Reset any cached chunk info for this dataset */
+ H5D_chunk_cinfo_cache_reset(&dset->shared->cache.chunk.last);
+ HGOTO_DONE(SUCCEED)
+ } /* end if */
+
+ /*
+ * Initialize structures needed to lock chunks into cache
+ */
+ /* Fill the DXPL cache values for later use */
+ if(H5D_get_dxpl_cache(dxpl_id, &dxpl_cache) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
+
+ /* Set up chunked I/O info object, for operations on chunks (in callback).
+ * Note that we only need to set chunk_offset once, as the array's address
+ * will never change. */
+ chk_store.chunk.offset = chunk_offset;
+ H5D_BUILD_IO_INFO_RD(&chk_io_info, dset, dxpl_cache, dxpl_id, &chk_store, NULL);
+
+ /*
+ * Determine the edges of the dataset which need to be modified
+ */
+ for(op_dim=0; op_dim<space_ndims; op_dim++) {
+ /* Start off with this dimension marked as not needing to be modified */
+ new_full_dim[op_dim] = FALSE;
+
+ /* Calculate offset of first previously incomplete chunk in this
+ * dimension */
+ old_edge_chunk_off[op_dim] = (old_dim[op_dim] / chunk_dim[op_dim])
+ * chunk_dim[op_dim];
+
+ /* Calculate the largest offset of chunks that might need to be
+ * modified in this dimension */
+ max_edge_chunk_off[op_dim] = chunk_dim[op_dim]
+ * MIN((old_dim[op_dim] - 1) / chunk_dim[op_dim],
+ MAX((space_dim[op_dim] / chunk_dim[op_dim]), 1) - 1);
+
+ /* Check for old_dim aligned with chunk boundary in this dimension, if
+ * so we do not need to modify chunks along the edge in this dimension
+ */
+ if(old_dim[op_dim] % chunk_dim[op_dim] == 0)
+ continue;
+
+ /* Check if the dataspace expanded enough to cause the old edge chunks
+ * in this dimension to become full */
+ if(space_dim[op_dim] >= old_edge_chunk_off[op_dim] + chunk_dim[op_dim])
+ new_full_dim[op_dim] = TRUE;
+ } /* end for */
+
+ /* Main loop: fix old edge chunks */
+ for(op_dim=0; op_dim<space_ndims; op_dim++) {
+ int i; /* Local index variable */
+
+ /* Check if allocation along this dimension is really necessary */
+ if(!new_full_dim[op_dim])
+ continue;
+ else {
+ HDassert((hsize_t) max_edge_chunk_off[op_dim]
+ == old_edge_chunk_off[op_dim]);
+
+ /* Reset the chunk offset indices */
+ HDmemset(chunk_offset, 0, ((unsigned)space_ndims
+ * sizeof(chunk_offset[0])));
+ chunk_offset[op_dim] = old_edge_chunk_off[op_dim];
+
+ carry = FALSE;
+ } /* end if */
+
+ while(!carry) {
+ /* Make sure the chunk is really a former edge chunk */
+ HDassert(H5D_chunk_is_partial_edge_chunk(chunk_offset, NULL,
+ (unsigned)space_ndims, old_dim, chunk_dim)
+ && !H5D_chunk_is_partial_edge_chunk(chunk_offset, NULL,
+ (unsigned)space_ndims, space_dim, chunk_dim));
+
+ /* Calculate the index of this chunk */
+ if(H5V_chunk_index((unsigned)space_ndims, chunk_offset, chunk_dim,
+ layout->u.chunk.down_chunks,
+ &(chk_io_info.store->chunk.index)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't get chunk index")
+
+ /* Lookup the chunk */
+ if(H5D_chunk_lookup(dset, dxpl_id, chunk_offset,
+ chk_io_info.store->chunk.index, &chk_udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+
+ /* If this chunk does not exist in cache or on disk, no need to do
+ * anything */
+ if(H5F_addr_defined(chk_udata.addr)
+ || (UINT_MAX != chk_udata.idx_hint)) {
+ /* Lock the chunk into cache. H5D_chunk_lock will take care of
+ * updating the chunk to no longer be an edge chunk. */
+ if(NULL == (chunk = (void *)H5D_chunk_lock(&chk_io_info,
+ &chk_udata, FALSE, TRUE)))
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to lock raw data chunk")
+
+ /* Unlock the chunk */
+ if(H5D_chunk_unlock(&chk_io_info, &chk_udata, TRUE,
+ chunk, (uint32_t)0) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to unlock raw data chunk")
+ } /* end if */
+
+ /* Increment indices */
+ carry = TRUE;
+ for(i = (space_ndims - 1); i >= 0; --i) {
+ if(i != op_dim) {
+ chunk_offset[i] += chunk_dim[i];
+ if(chunk_offset[i] > (hsize_t) max_edge_chunk_off[i])
+ chunk_offset[i] = 0;
+ else {
+ carry = FALSE;
+ break;
+ } /* end else */
+ } /* end if */
+ } /* end for */
+ } /* end while(!carry) */
+
+ /* Adjust max_edge_chunk_off so we don't modify the same chunk twice.
+ * Also check if this dimension started from 0 (and hence modified all
+ * of the old edge chunks). */
+ if(old_edge_chunk_off[op_dim] == 0)
+ break;
+ else
+ --max_edge_chunk_off[op_dim];
+ } /* end for(op_dim=0...) */
+
+ /* Reset any cached chunk info for this dataset */
+ H5D_chunk_cinfo_cache_reset(&dset->shared->cache.chunk.last);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D_chunk_update_old_edge_chunks() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5D_chunk_prune_fill
*
* Purpose: Write the fill value to the parts of the chunk that are no
@@ -3536,7 +4015,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5D_chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
+H5D_chunk_prune_fill(H5D_chunk_it_ud1_t *udata, hbool_t new_unfilt_chunk)
{
const H5D_io_info_t *io_info = udata->io_info; /* Local pointer to I/O info */
H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */
@@ -3558,6 +4037,7 @@ H5D_chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
if(H5D_chunk_lookup(dset, io_info->dxpl_id, chunk_offset,
io_info->store->chunk.index, &chk_udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+ chk_udata.new_unfilt_chunk = new_unfilt_chunk;
/* If this chunk does not exist in cache or on disk, no need to do anything
*/
@@ -3591,7 +4071,8 @@ H5D_chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSELECT, FAIL, "unable to select hyperslab")
/* Lock the chunk into the cache, to get a pointer to the chunk buffer */
- if(NULL == (chunk = (void *)H5D_chunk_lock(io_info, &chk_udata, FALSE)))
+ if(NULL == (chunk = (void *)H5D_chunk_lock(io_info, &chk_udata, FALSE,
+ FALSE)))
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to lock raw data chunk")
@@ -3632,7 +4113,8 @@ H5D_chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
bytes_accessed = (uint32_t)sel_nelmts * layout->u.chunk.dim[rank];
/* Release lock on chunk */
- if(H5D_chunk_unlock(io_info, &chk_udata, TRUE, chunk, bytes_accessed) < 0)
+ if(H5D_chunk_unlock(io_info, &chk_udata, TRUE, chunk,
+ bytes_accessed) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to unlock raw data chunk")
done:
@@ -3756,6 +4238,8 @@ H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
hbool_t fill_dim[H5O_LAYOUT_NDIMS]; /* Whether the plane of edge chunks in this dimension needs to be filled */
hbool_t dims_outside_fill[H5O_LAYOUT_NDIMS]; /* Dimensions in chunk offset outside fill dimensions */
int ndims_outside_fill = 0; /* Number of dimensions in chunk offset outside fill dimensions */
+ hsize_t min_partial_chunk_off[H5O_LAYOUT_NDIMS]; /* Offset of first partial (or empty) chunk in each dimension */
+ hbool_t new_unfilt_dim[H5O_LAYOUT_NDIMS]; /* Whether the plane of edge chunks in this dimension are newly unfiltered */
hbool_t has_fill = FALSE; /* Whether there are chunks that must be filled */
H5D_chk_idx_info_t idx_info; /* Chunked index info */
H5D_io_info_t chk_io_info; /* Chunked I/O info object */
@@ -3778,6 +4262,8 @@ H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
hsize_t chunk_offset[H5O_LAYOUT_NDIMS]; /* Offset of current chunk */
hsize_t hyper_start[H5O_LAYOUT_NDIMS]; /* Starting location of hyperslab */
uint32_t elmts_per_chunk; /* Elements in chunk */
+ hbool_t disable_edge_filters = FALSE; /* Whether to disable filters on partial edge chunks */
+ hbool_t new_unfilt_chunk = FALSE; /* Whether the chunk is newly unfiltered */
hbool_t carry; /* Flag to indicate that chunk increment carries to higher dimension (sorta) */
int i; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
@@ -3867,6 +4353,11 @@ H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
idx_udata.layout = &layout->u.chunk;
idx_udata.storage = &layout->storage.u.chunk;
+ /* Determine if partial edge chunk filters are disabled */
+ disable_edge_filters = (layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS)
+ && (idx_info.pline->nused > 0);
+
/*
* Determine the chunks which need to be filled or removed
*/
@@ -3898,12 +4389,31 @@ H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
== max_fill_chunk_off[op_dim]) {
fill_dim[op_dim] = TRUE;
has_fill = TRUE;
+
+ /* If necessary, check if chunks in this dimension that need to
+ * be filled are new partial edge chunks */
+ if(disable_edge_filters && old_dim[op_dim]
+ >= (min_mod_chunk_off[op_dim] + chunk_dim[op_dim]))
+ new_unfilt_dim[op_dim] = TRUE;
+ else
+ new_unfilt_dim[op_dim] = FALSE;
} /* end if */
- else
+ else {
fill_dim[op_dim] = FALSE;
+ new_unfilt_dim[op_dim] = FALSE;
+ } /* end else */
} /* end if */
- else
+ else {
fill_dim[op_dim] = FALSE;
+ new_unfilt_dim[op_dim] = FALSE;
+ } /* end else */
+
+ /* If necessary, calculate the smallest offset of non-previously full
+ * chunks in this dimension, so we know these chunks were previously
+ * unfiltered */
+ if(disable_edge_filters)
+ min_partial_chunk_off[op_dim] = chunk_dim[op_dim] * (old_dim[op_dim]
+ / chunk_dim[op_dim]);
} /* end for */
/* Check the cache for any entries that are outside the bounds. Mark these
@@ -3957,8 +4467,29 @@ H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
HDassert(fill_dim[op_dim]);
HDassert(chunk_offset[op_dim] == min_mod_chunk_off[op_dim]);
+ /* Make sure this is an edge chunk */
+ HDassert(H5D_chunk_is_partial_edge_chunk(chunk_offset, NULL,
+ (unsigned)space_ndims, space_dim, layout->u.chunk.dim));
+
+ /* Determine if the chunk just became an unfiltered chunk */
+ if(new_unfilt_dim[op_dim]) {
+ new_unfilt_chunk = TRUE;
+ for(i=0; i<space_ndims; i++)
+ if(chunk_offset[i] == min_partial_chunk_off[i]) {
+ new_unfilt_chunk = FALSE;
+ break;
+ } /* end if */
+ } /* end if */
+
+ /* Make sure that, if we think this is a new unfiltered chunk,
+ * it was previously not an edge chunk */
+ HDassert(!new_unfilt_dim[op_dim] || (!new_unfilt_chunk !=
+ !H5D_chunk_is_partial_edge_chunk(chunk_offset, NULL,
+ (unsigned)space_ndims, old_dim, layout->u.chunk.dim)));
+ HDassert(!new_unfilt_chunk || new_unfilt_dim[op_dim]);
+
/* Fill the unused parts of the chunk */
- if(H5D_chunk_prune_fill(&udata) < 0)
+ if(H5D_chunk_prune_fill(&udata, new_unfilt_chunk) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write fill value")
} /* end if */
else {
@@ -4354,7 +4885,7 @@ H5D_chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
const H5O_pline_t *pline = udata->pline; /* I/O pipeline for applying filters */
/* needed for commpressed variable length data */
- hbool_t has_filters = FALSE; /* Whether chunk has filters */
+ hbool_t must_filter = FALSE; /* Whether chunk must be filtered during copy */
size_t nbytes; /* Size of chunk in file (in bytes) */
H5Z_cb_t cb_struct; /* Filter failure callback struct */
@@ -4376,9 +4907,23 @@ H5D_chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
} /* end if */
/* Check for filtered chunks */
- if(pline && pline->nused) {
- has_filters = TRUE;
- cb_struct.func = NULL; /* no callback function when failed */
+ if((is_vlen || fix_ref) && pline && pline->nused) {
+ /* Check if we should disable filters on this chunk */
+ if(udata->common.layout->flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) {
+ htri_t is_edge_chunk; /* Whether the chunk is an edge chunk */
+
+ /* Check if the chunk is an edge chunk, and disable filters if so */
+ if((is_edge_chunk = H5D_chunk_is_partial_edge_chunk(
+ chunk_rec->offset, NULL, udata->dset_ndims,
+ udata->dset_dims, udata->common.layout->dim)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, H5_ITER_ERROR, "unable to determine if chunk is edge chunk")
+
+ if(!is_edge_chunk)
+ must_filter = TRUE;
+ } /* end if */
+ else
+ must_filter = TRUE;
} /* end if */
/* Resize the buf if it is too small to hold the data */
@@ -4408,9 +4953,10 @@ H5D_chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
HGOTO_ERROR(H5E_IO, H5E_READERROR, H5_ITER_ERROR, "unable to read raw data chunk")
/* Need to uncompress variable-length & reference data elements */
- if(has_filters && (is_vlen || fix_ref)) {
+ if(must_filter) {
unsigned filter_mask = chunk_rec->filter_mask;
+ cb_struct.func = NULL; /* no callback function when failed */
if(H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &filter_mask, H5Z_NO_EDC, cb_struct, &nbytes, &buf_size, &buf) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, H5_ITER_ERROR, "data pipeline read failed")
} /* end if */
@@ -4472,7 +5018,7 @@ H5D_chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
udata_dst.addr = HADDR_UNDEF;
/* Need to compress variable-length & reference data elements before writing to file */
- if(has_filters && (is_vlen || fix_ref) ) {
+ if(must_filter) {
if(H5Z_pipeline(pline, 0, &(udata_dst.filter_mask), H5Z_NO_EDC, cb_struct, &nbytes, &buf_size, &buf) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, H5_ITER_ERROR, "output pipeline failed")
#if H5_SIZEOF_SIZE_T > 4
@@ -4527,6 +5073,8 @@ H5D_chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src,
H5D_chunk_it_ud3_t udata; /* User data for iteration callback */
H5D_chk_idx_info_t idx_info_dst; /* Dest. chunked index info */
H5D_chk_idx_info_t idx_info_src; /* Source chunked index info */
+ int sndims; /* Rank of dataspace */
+ hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /* Curr. size of dataset dimensions */
H5O_pline_t _pline; /* Temporary pipeline info */
const H5O_pline_t *pline; /* Pointer to pipeline info to use */
H5T_path_t *tpath_src_mem = NULL, *tpath_mem_dst = NULL; /* Datatype conversion paths */
@@ -4582,8 +5130,6 @@ H5D_chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src,
/* Initialize layout information */
{
- hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /* Curr. size of dataset dimensions */
- int sndims; /* Rank of dataspace */
unsigned ndims; /* Rank of dataspace */
/* Get the dim info for dataset */
@@ -4739,6 +5285,8 @@ H5D_chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src,
udata.buf_space = buf_space;
udata.nelmts = nelmts;
udata.pline = pline;
+ udata.dset_ndims = (unsigned)sndims;
+ udata.dset_dims = curr_dims;
udata.cpy_info = cpy_info;
/* Iterate over chunks to copy data */
@@ -5168,3 +5716,59 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5D_nonexistent_readvv() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5D_chunk_is_partial_edge_chunk
+ *
+ * Purpose: Checks to see if the chunk is a partial edge chunk.
+ * Either dset or (dset_dims and dset_ndims) must be
+ * provided.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * 19 Nov 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static htri_t
+H5D_chunk_is_partial_edge_chunk(const hsize_t offset[], const H5D_t *dset,
+ unsigned dset_ndims, const hsize_t *dset_dims, const uint32_t *chunk_dims)
+{
+ hsize_t _dset_dims[H5O_LAYOUT_NDIMS]; /* Dataset dimensions */
+ unsigned i; /* Local index variables */
+ htri_t ret_value = FALSE; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_is_partial_edge_chunk)
+
+ /* Check args */
+ HDassert(offset);
+ HDassert((chunk_dims && dset_dims) || dset);
+
+ /* Get chunk dimensions if not specified */
+ if(!chunk_dims)
+ chunk_dims = dset->shared->layout.u.chunk.dim;
+
+ /* Get dataset dimensions if not specified */
+ if(!dset_dims) {
+ int tmp_ndims;
+ if((tmp_ndims = H5S_get_simple_extent_dims(dset->shared->space,
+ _dset_dims, NULL)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
+ "unable to get simple dataspace info")
+
+ dset_dims = _dset_dims;
+ H5_ASSIGN_OVERFLOW(dset_ndims, tmp_ndims, int, unsigned);
+ } /* end if */
+
+ /* check if this is a partial edge chunk */
+ for(i=0; i<dset_ndims; i++)
+ if((offset[i] + chunk_dims[i]) > dset_dims[i]) {
+ ret_value = TRUE;
+ break;
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D_chunk_is_partial_edge_chunk() */
+
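
For illustration, the predicate above reduces to the following standalone sketch (simplified types and names; not part of the patch):

    /* Illustrative sketch: a chunk is a partial edge chunk iff it
     * extends past the dataset extent in at least one dimension. */
    static int is_partial_edge_chunk(const hsize_t offset[],
        const hsize_t dset_dims[], const hsize_t chunk_dims[], unsigned ndims)
    {
        unsigned i;

        for(i = 0; i < ndims; i++)
            if((offset[i] + chunk_dims[i]) > dset_dims[i])
                return 1;   /* chunk sticks out past the edge */
        return 0;
    }

    /* e.g. with dset_dims = {100, 100} and chunk_dims = {30, 30}:
     * the chunk at offset {90, 0} is partial (90 + 30 > 100),
     * while the chunk at offset {0, 0} is not. */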
diff --git a/src/H5Dint.c b/src/H5Dint.c
index 032006f..8c4d8cd 100644
--- a/src/H5Dint.c
+++ b/src/H5Dint.c
@@ -2306,11 +2306,21 @@ H5D_set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id)
* and if the chunks are written
*-------------------------------------------------------------------------
*/
- if(shrink && H5D_CHUNKED == dset->shared->layout.type &&
- (*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage)) {
- /* Remove excess chunks */
- if(H5D_chunk_prune_by_extent(dset, dxpl_id, curr_dims) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to remove chunks")
+ if(H5D_CHUNKED == dset->shared->layout.type) {
+ if(shrink && (*dset->shared->layout.ops->is_space_alloc)(
+ &dset->shared->layout.storage))
+ /* Remove excess chunks */
+ if(H5D_chunk_prune_by_extent(dset, dxpl_id, curr_dims) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to remove chunks")
+
+ /* Update chunks that are no longer edge chunks as a result of
+ * expansion */
+ if(expand && (dset->shared->layout.u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS)
+ && (dset->shared->dcpl_cache.pline.nused > 0))
+ if(H5D_chunk_update_old_edge_chunks(dset, dxpl_id, curr_dims)
+ < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to do update old edge chunks")
} /* end if */
/* Mark the dataspace as dirty, for later writing to the file */
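
A hedged sketch of the code path added above: extending a dataset that was created with H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS. The 100x100 starting extent, 30x30 chunks, and 120x120 new extent are assumed for illustration:

    /* Assume dset was created 100x100 with 30x30 chunks and
     * H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS set on its DCPL. */
    hsize_t new_dims[2] = {120, 120};

    if(H5Dset_extent(dset, new_dims) < 0)
        return -1;  /* error handling is application-specific */

    /* Chunks along the old edge (e.g. at offset {90, 0}) were stored
     * unfiltered because they were partial; after the extend they are
     * complete, so H5D_chunk_update_old_edge_chunks() above re-applies
     * the filter pipeline to them. */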
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index e646a7b..c32536f 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -1256,7 +1256,7 @@ if(H5DEBUG(D))
entire_chunk = FALSE;
/* Lock the chunk into the cache */
- if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, entire_chunk)))
+ if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, entire_chunk, FALSE)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
/* Set up the storage buffer information for this chunk */
@@ -1484,7 +1484,7 @@ if(H5DEBUG(D)) {
entire_chunk = FALSE;
/* Lock the chunk into the cache */
- if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, entire_chunk)))
+ if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, entire_chunk, FALSE)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
/* Set up the storage buffer information for this chunk */
diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h
index d5a7e45..63cc010 100644
--- a/src/H5Dpkg.h
+++ b/src/H5Dpkg.h
@@ -276,6 +276,7 @@ typedef struct H5D_chunk_ud_t {
uint32_t nbytes; /*size of stored data */
unsigned filter_mask; /*excluded filters */
haddr_t addr; /*file address of chunk */
+ hbool_t new_unfilt_chunk; /*whether the chunk just became unfiltered */
} H5D_chunk_ud_t;
/* Typedef for "generic" chunk callbacks */
@@ -505,11 +506,17 @@ typedef struct {
hsize_t size; /* Accumulated number of bytes for the selection */
} H5D_vlen_bufsize_t;
+/* Flags for the "edge_chunk_state" field below */
+#define H5D_RDCC_DISABLE_FILTERS 0x01u /* Disable filters on this chunk */
+#define H5D_RDCC_NEWLY_DISABLED_FILTERS 0x02u /* Filters have been disabled since
+ * the last flush */
+
/* Raw data chunks are cached. Each entry in the cache is: */
typedef struct H5D_rdcc_ent_t {
hbool_t locked; /*entry is locked in cache */
hbool_t dirty; /*needs to be written to disk? */
hbool_t deleted; /*chunk about to be deleted (do not flush) */
+ unsigned edge_chunk_state; /*states related to edge chunks (see above) */
hsize_t offset[H5O_LAYOUT_NDIMS]; /*chunk name */
uint32_t rd_count; /*bytes remaining to be read */
uint32_t wr_count; /*bytes remaining to be written */
@@ -640,7 +647,7 @@ H5_DLL hbool_t H5D_chunk_is_space_alloc(const H5O_storage_t *storage);
H5_DLL herr_t H5D_chunk_lookup(const H5D_t *dset, hid_t dxpl_id,
const hsize_t *chunk_offset, hsize_t chunk_idx, H5D_chunk_ud_t *udata);
H5_DLL void *H5D_chunk_lock(const H5D_io_info_t *io_info,
- H5D_chunk_ud_t *udata, hbool_t relax);
+ H5D_chunk_ud_t *udata, hbool_t relax, hbool_t prev_unfilt_chunk);
H5_DLL herr_t H5D_chunk_unlock(const H5D_io_info_t *io_info,
const H5D_chunk_ud_t *udata, hbool_t dirty, void *chunk,
uint32_t naccessed);
@@ -649,6 +656,8 @@ H5_DLL herr_t H5D_chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id,
H5_DLL herr_t H5D_chunk_allocated(H5D_t *dset, hid_t dxpl_id, hsize_t *nbytes);
H5_DLL herr_t H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id,
hbool_t full_overwrite, hsize_t old_dim[]);
+H5_DLL herr_t H5D_chunk_update_old_edge_chunks(H5D_t *dset, hid_t dxpl_id,
+ hsize_t old_dim[]);
H5_DLL herr_t H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id,
const hsize_t *old_dim);
#ifdef H5_HAVE_PARALLEL
diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h
index 056346e..972b33b 100644
--- a/src/H5Dpublic.h
+++ b/src/H5Dpublic.h
@@ -34,6 +34,9 @@
#define H5D_CHUNK_CACHE_NBYTES_DEFAULT ((size_t) -1)
#define H5D_CHUNK_CACHE_W0_DEFAULT -1.
+/* Bit flags for the H5Pset_chunk_opts() and H5Pget_chunk_opts() */
+#define H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS (0x0002u)
+
/*******************/
/* Public Typedefs */
/*******************/
diff --git a/src/H5Olayout.c b/src/H5Olayout.c
index 448e4bf..82227e6 100644
--- a/src/H5Olayout.c
+++ b/src/H5Olayout.c
@@ -35,24 +35,6 @@
/* Local macros */
-/* Flags for chunked layout feature encoding */
-#ifdef NOT_YET
-#define H5O_LAYOUT_CHUNK_STORE_ELEM_PHASE_CHANGE 0x01
-#define H5O_LAYOUT_CHUNK_STORE_CHUNK_PHASE_CHANGE 0x02
-#define H5O_LAYOUT_CHUNK_STORE_ELEM_CHUNK_TRANS 0x04
-#define H5O_LAYOUT_CHUNK_ABBREVIATE_PARTIAL_BOUND_CHUNKS 0x08
-#define H5O_LAYOUT_CHUNK_APPLY_FILTER_TO_PARTIAL_BOUND_CHUNKS 0x10
-#define H5O_LAYOUT_ALL_CHUNK_FLAGS ( \
- H5O_LAYOUT_CHUNK_STORE_ELEM_PHASE_CHANGE \
- | H5O_LAYOUT_CHUNK_STORE_CHUNK_PHASE_CHANGE \
- | H5O_LAYOUT_CHUNK_STORE_ELEM_CHUNK_TRANS \
- | H5O_LAYOUT_CHUNK_ABBREVIATE_PARTIAL_BOUND_CHUNKS \
- | H5O_LAYOUT_CHUNK_APPLY_FILTER_TO_PARTIAL_BOUND_CHUNKS \
- )
-#else /* NOT_YET */
-#define H5O_LAYOUT_ALL_CHUNK_FLAGS 0
-#endif /* NOT_YET */
-
/* PRIVATE PROTOTYPES */
static void *H5O_layout_decode(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh,
@@ -264,6 +246,9 @@ H5O_layout_decode(H5F_t *f, hid_t UNUSED dxpl_id, H5O_t UNUSED *open_oh,
/* Set the storage type */
mesg->storage.type = mesg->type;
+ /* Set the chunked layout flags */
+ mesg->u.chunk.flags = (uint8_t)0;
+
/* Dimensionality */
mesg->u.chunk.ndims = *p++;
if(mesg->u.chunk.ndims > H5O_LAYOUT_NDIMS)
@@ -286,16 +271,14 @@ H5O_layout_decode(H5F_t *f, hid_t UNUSED dxpl_id, H5O_t UNUSED *open_oh,
mesg->storage.u.chunk.ops = H5D_COPS_BTREE;
} /* end if */
else {
- unsigned char flags; /* Flags for encoding group info */
-
/* Get the chunked layout flags */
- flags = *p++;
+ mesg->u.chunk.flags = *p++;
/* Check for valid flags */
/* (Currently issues an error for all non-zero values,
* until features are added for the flags)
*/
- if(flags & ~H5O_LAYOUT_ALL_CHUNK_FLAGS)
+ if(mesg->u.chunk.flags & ~H5O_LAYOUT_ALL_CHUNK_FLAGS)
HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad flag value for message")
/* Dimensionality */
@@ -479,7 +462,7 @@ H5O_layout_encode(H5F_t *f, hbool_t UNUSED disable_shared, uint8_t *p, const voi
} /* end if */
else {
/* Chunk feature flags */
- *p++ = 0; /* (no features supported yet) */
+ *p++ = mesg->u.chunk.flags;
/* Number of dimensions */
HDassert(mesg->u.chunk.ndims > 0 && mesg->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h
index 1355582..0195561 100644
--- a/src/H5Oprivate.h
+++ b/src/H5Oprivate.h
@@ -334,6 +334,28 @@ typedef struct H5O_efl_t {
*/
#define H5O_LAYOUT_NDIMS (H5S_MAX_RANK+1)
+/* Flags for chunked layout feature encoding */
+#ifdef NOT_YET
+#define H5O_LAYOUT_CHUNK_STORE_ELEM_PHASE_CHANGE 0x01
+#define H5O_LAYOUT_CHUNK_STORE_CHUNK_PHASE_CHANGE 0x02
+#define H5O_LAYOUT_CHUNK_STORE_ELEM_CHUNK_TRANS 0x04
+#define H5O_LAYOUT_CHUNK_ABBREVIATE_PARTIAL_BOUND_CHUNKS 0x08
+#endif /* NOT_YET */
+#define H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS 0x10
+#ifdef NOT_YET
+#define H5O_LAYOUT_ALL_CHUNK_FLAGS ( \
+ H5O_LAYOUT_CHUNK_STORE_ELEM_PHASE_CHANGE \
+ | H5O_LAYOUT_CHUNK_STORE_CHUNK_PHASE_CHANGE \
+ | H5O_LAYOUT_CHUNK_STORE_ELEM_CHUNK_TRANS \
+ | H5O_LAYOUT_CHUNK_ABBREVIATE_PARTIAL_BOUND_CHUNKS \
+ | H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS \
+ )
+#else /* NOT_YET */
+#define H5O_LAYOUT_ALL_CHUNK_FLAGS ( \
+ H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS \
+ )
+#endif /* NOT_YET */
+
/* Initial version of the layout information. Used when space is allocated */
#define H5O_LAYOUT_VERSION_1 1
@@ -439,6 +461,7 @@ typedef struct H5O_layout_chunk_earray_t {
typedef struct H5O_layout_chunk_t {
H5D_chunk_index_t idx_type; /* Type of chunk index */
+ uint8_t flags; /* Chunk layout flags */
unsigned ndims; /* Num dimensions in chunk */
uint32_t dim[H5O_LAYOUT_NDIMS]; /* Size of chunk in elements */
unsigned enc_bytes_per_dim; /* Encoded # of bytes for storing each chunk dimension */
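Note that the public option bit (0x0002 in H5Dpublic.h, above) and the on-disk layout-message flag (0x10 here) are deliberately separate numbering spaces, so the file format does not depend on the public API values; the encode/decode changes in H5Olayout.c round-trip the on-disk byte, and H5Pset_chunk_opts() in H5Pdcpl.c (below) performs the translation:

    /* Translation as performed in H5Pset_chunk_opts() below: */
    if(options & H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS)
        layout_flags |= H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS;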
diff --git a/src/H5Pdcpl.c b/src/H5Pdcpl.c
index 9624989..e672ad0 100644
--- a/src/H5Pdcpl.c
+++ b/src/H5Pdcpl.c
@@ -54,7 +54,7 @@
#define H5D_DEF_STORAGE_COMPACT_INIT {(hbool_t)FALSE, (size_t)0, NULL}
#define H5D_DEF_STORAGE_CONTIG_INIT {HADDR_UNDEF, (hsize_t)0}
#define H5D_DEF_STORAGE_CHUNK_INIT {H5D_CHUNK_IDX_BTREE, HADDR_UNDEF, H5D_COPS_BTREE, {{NULL}}}
-#define H5D_DEF_LAYOUT_CHUNK_INIT {H5D_CHUNK_IDX_BTREE, (unsigned)1, {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, (unsigned)0, (uint32_t)0, (hsize_t)0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {{{(uint8_t)0}}}}
+#define H5D_DEF_LAYOUT_CHUNK_INIT {H5D_CHUNK_IDX_BTREE, (uint8_t)0, (unsigned)1, {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, (unsigned)0, (uint32_t)0, (hsize_t)0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {{{(uint8_t)0}}}}
#ifdef H5_HAVE_C99_DESIGNATED_INITIALIZER
#define H5D_DEF_STORAGE_COMPACT {H5D_COMPACT, { .compact = H5D_DEF_STORAGE_COMPACT_INIT }}
#define H5D_DEF_STORAGE_CONTIG {H5D_CONTIGUOUS, { .contig = H5D_DEF_STORAGE_CONTIG_INIT }}
@@ -953,6 +953,129 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5Pset_chunk_opts
+ *
+ * Purpose: Sets the options related to chunked storage for a dataset.
+ * The storage must already be set to chunked.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Thursday, January 21, 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pset_chunk_opts(hid_t plist_id, unsigned options)
+{
+ H5P_genplist_t *plist; /* Property list pointer */
+ H5O_layout_t layout; /* Layout information for setting chunk info */
+ uint8_t layout_flags = 0; /* "options" translated into layout message flags format */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(H5Pset_chunk_opts, FAIL)
+
+ /* Check arguments */
+ if(options & ~(H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "unknown chunk options")
+
+#ifndef H5_HAVE_C99_DESIGNATED_INITIALIZER
+ /* If the compiler doesn't support C99 designated initializers, check if
+ * the default layout structs have been initialized yet or not. *ick* -QAK
+ */
+ if(!H5P_dcrt_def_layout_init_g)
+ if(H5P_init_def_layout() < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINIT, FAIL, "can't initialize default layout info")
+#endif /* H5_HAVE_C99_DESIGNATED_INITIALIZER */
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_CREATE)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
+
+ /* Retrieve the layout property */
+ if(H5P_get(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, "can't get layout")
+ if(H5D_CHUNKED != layout.type)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a chunked storage layout")
+
+ /* Translate options into flags that can be used with the layout message */
+ if(options & H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS)
+ layout_flags |= H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS;
+
+ /* Update the layout message, including the version (if necessary) */
+ /* This probably isn't the right way to do this, and should be changed once
+ * this branch gets the "real" way to set the layout version */
+ layout.u.chunk.flags = layout_flags;
+ if(layout.version < H5O_LAYOUT_VERSION_4)
+ layout.version = H5O_LAYOUT_VERSION_4;
+
+ /* Set layout value */
+ if(H5P_set(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINIT, FAIL, "can't set layout")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5Pset_chunk_opts() */
+
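A minimal usage sketch for the new call (error checks omitted; variable names are illustrative). Note the ordering constraint enforced above: the layout must already be chunked, or the call fails with H5E_BADVALUE:

    hid_t   dcpl    = H5Pcreate(H5P_DATASET_CREATE);
    hsize_t cdim[2] = {64, 64};

    H5Pset_chunk(dcpl, 2, cdim);    /* must precede H5Pset_chunk_opts() */
    H5Pset_deflate(dcpl, 6);        /* full chunks are still filtered */
    H5Pset_chunk_opts(dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS);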
+
+/*-------------------------------------------------------------------------
+ * Function: H5Pget_chunk_opts
+ *
+ * Purpose: Gets the options related to chunked storage for a dataset.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Friday, January 22, 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pget_chunk_opts(hid_t plist_id, unsigned *options)
+{
+ H5P_genplist_t *plist; /* Property list pointer */
+ H5O_layout_t layout; /* Layout information for setting chunk info */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(H5Pget_chunk_opts, FAIL)
+
+#ifndef H5_HAVE_C99_DESIGNATED_INITIALIZER
+ /* If the compiler doesn't support C99 designated initializers, check if
+ * the default layout structs have been initialized yet or not. *ick* -QAK
+ */
+ if(!H5P_dcrt_def_layout_init_g)
+ if(H5P_init_def_layout() < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINIT, FAIL, "can't initialize default layout info")
+#endif /* H5_HAVE_C99_DESIGNATED_INITIALIZER */
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_CREATE)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
+
+ /* Retrieve the layout property */
+ if(H5P_get(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, "can't get layout")
+ if(H5D_CHUNKED != layout.type)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a chunked storage layout")
+
+ if(options) {
+ /* Translate options from flags that can be used with the layout message
+ * to those known to the public */
+ *options = 0;
+ if(layout.u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS)
+ *options |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
+ } /* end if */
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5Pget_chunk_opts() */
+
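And the matching query (sketch): the options pointer may be NULL, in which case the call still validates that the property list has a chunked layout:

    unsigned opts;

    if(H5Pget_chunk_opts(dcpl, &opts) >= 0
            && (opts & H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS))
        printf("partial edge chunks will be stored unfiltered\n");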
+
+/*-------------------------------------------------------------------------
* Function: H5Pset_external
*
* Purpose: Adds an external file to the list of external files. PLIST_ID
diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h
index 09eb796..acdac96 100644
--- a/src/H5Ppublic.h
+++ b/src/H5Ppublic.h
@@ -295,6 +295,8 @@ H5_DLL herr_t H5Pset_chunk(hid_t plist_id, int ndims, const hsize_t dim[/*ndims*
H5_DLL int H5Pget_chunk(hid_t plist_id, int max_ndims, hsize_t dim[]/*out*/);
H5_DLL herr_t H5Pset_external(hid_t plist_id, const char *name, off_t offset,
hsize_t size);
+H5_DLL herr_t H5Pset_chunk_opts(hid_t plist_id, unsigned opts);
+H5_DLL herr_t H5Pget_chunk_opts(hid_t plist_id, unsigned *opts);
H5_DLL int H5Pget_external_count(hid_t plist_id);
H5_DLL herr_t H5Pget_external(hid_t plist_id, unsigned idx, size_t name_size,
char *name/*out*/, off_t *offset/*out*/,
diff --git a/test/dsets.c b/test/dsets.c
index b7ba484..57b9989 100644
--- a/test/dsets.c
+++ b/test/dsets.c
@@ -58,7 +58,8 @@ const char *FILENAME[] = {
"chunk_fast", /* 10 */
"chunk_expand", /* 11 */
"chunk_fixed", /* 12 */
- "copy_dcpl_newfile",
+ "copy_dcpl_newfile",/* 13 */
+ "partial_chunks", /* 14 */
NULL
};
#define FILENAME_BUF_SIZE 1024
@@ -134,6 +135,7 @@ const char *FILENAME[] = {
#define H5Z_FILTER_SET_LOCAL_TEST 308
#define H5Z_FILTER_DEPREC 309
#define H5Z_FILTER_EXPAND 310
+#define H5Z_FILTER_COUNT 311
/* Flags for testing filters */
#define DISABLE_FLETCHER32 0
@@ -214,6 +216,8 @@ const char *FILENAME[] = {
#define DSET_DIM2 200
int points[DSET_DIM1][DSET_DIM2], check[DSET_DIM1][DSET_DIM2];
double points_dbl[DSET_DIM1][DSET_DIM2], check_dbl[DSET_DIM1][DSET_DIM2];
+size_t count_nbytes_read = 0;
+size_t count_nbytes_written = 0;
/* Declarations for test_idx_compatible() */
#define DSET "dset"
@@ -235,6 +239,51 @@ static size_t filter_corrupt(unsigned int flags, size_t cd_nelmts,
const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf);
static size_t filter_expand(unsigned int flags, size_t cd_nelmts,
const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf);
+static size_t filter_count(unsigned int flags, size_t cd_nelmts,
+ const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf);
+
+/* This message derives from H5Z */
+const H5Z_class2_t H5Z_COUNT[1] = {{
+ H5Z_CLASS_T_VERS, /* H5Z_class_t version */
+ H5Z_FILTER_COUNT, /* Filter id number */
+ 1, 1, /* Encoding and decoding enabled */
+ "count", /* Filter name for debugging */
+ NULL, /* The "can apply" callback */
+ NULL, /* The "set local" callback */
+ filter_count, /* The actual filter function */
+}};
+
+
+/*-------------------------------------------------------------------------
+ * Function: filter_count
+ *
+ * Purpose: This filter counts the number of bytes read and written,
+ * incrementing count_nbytes_read or count_nbytes_written as
+ * appropriate.
+ *
+ * Return: Success: Data chunk size
+ *
+ * Failure: 0
+ *
+ * Programmer: Neil Fortner
+ * Wednesday, March 17, 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static size_t
+filter_count(unsigned int flags, size_t UNUSED cd_nelmts,
+ const unsigned int UNUSED *cd_values, size_t nbytes,
+ size_t UNUSED *buf_size, void UNUSED **buf)
+{
+ if(flags & H5Z_FLAG_REVERSE)
+ count_nbytes_read += nbytes;
+ else
+ count_nbytes_written += nbytes;
+
+ return nbytes;
+}
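Because filter_count() passes the data through unchanged and returns nbytes in both directions, registering it with H5Zregister(H5Z_COUNT) lets the test below observe exactly how many bytes enter the filter pipeline on write (no H5Z_FLAG_REVERSE) and on read (H5Z_FLAG_REVERSE set), without perturbing the stored values.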
/*-------------------------------------------------------------------------
@@ -5678,7 +5727,7 @@ test_copy_dcpl(hid_t file, hid_t fapl)
/* Create a second file and create 2 datasets with the copies of the DCPLs in the first
* file. Test whether the copies of DCPLs work. */
- h5_fixname(FILENAME[11], fapl, filename, sizeof filename);
+ h5_fixname(FILENAME[13], fapl, filename, sizeof filename);
if((new_file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
TEST_ERROR
@@ -8275,12 +8324,9 @@ test_idx_compatible(void)
{
hid_t fid; /* File id */
hid_t did; /* Dataset id */
- ssize_t nread; /* Number of bytes read in */
- char *srcdir = HDgetenv("srcdir"); /* where the src code is located */
- char filename[FILENAME_BUF_SIZE] = ""; /* old test file name */
+ const char *filename = NULL; /* old test file name */
unsigned j; /* Local index variable */
H5D_chunk_index_t idx_type; /* Chunked dataset index type */
- herr_t ret; /* Return value */
/* Output message about test being performed */
TESTING("compatibility for 1.6/1.8 datasets that use B-tree indexing");
@@ -8288,15 +8334,11 @@ test_idx_compatible(void)
for(j = 0; j < NELMTS(OLD_FILENAME); j++) {
/* Generate correct name for test file by prepending the source path */
- if(srcdir && ((HDstrlen(srcdir) + HDstrlen(OLD_FILENAME[j]) + 1) < sizeof(filename))) {
- HDstrcpy(filename, srcdir);
- HDstrcat(filename, "/");
- }
- HDstrcat(filename, OLD_FILENAME[j]);
+ filename = H5_get_srcdir_filename(OLD_FILENAME[j]);
/* Open the file */
if((fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
- FAIL_STACK_ERROR
+ TEST_ERROR
/* Should be able to read the dataset w/o filter created under 1.8/1.6 */
if((did = H5Dopen2(fid, DSET, H5P_DEFAULT)) < 0)
@@ -8339,6 +8381,146 @@ error:
return -1;
} /* test_idx_compatible */
+/*-------------------------------------------------------------------------
+ *
+ * test_unfiltered_edge_chunks():
+ * Tests that partial edge chunks aren't filtered when the
+ * H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS option is set.
+ *
+ * Programmer: Neil Fortner; 17th March, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_unfiltered_edge_chunks(hid_t fapl)
+{
+ hid_t fid; /* File id */
+ hid_t did; /* Dataset id */
+ hid_t sid; /* Dataspace id */
+ hid_t dcpl; /* DCPL id */
+ hsize_t dim[2] = {4, 3}; /* Dataset dimensions */
+ hsize_t cdim[2] = {2, 2}; /* Chunk dimension */
+ char wbuf[4][3]; /* Write buffer */
+ char rbuf[4][3]; /* Read buffer */
+ char filename[FILENAME_BUF_SIZE] = ""; /* test file name */
+ unsigned opts; /* Chunk options */
+ unsigned i, j; /* Local index variables */
+
+ /* Output message about test being performed */
+ TESTING("disabled partial chunk filters");
+
+ h5_fixname(FILENAME[14], fapl, filename, sizeof filename);
+
+ /* Create the file */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ TEST_ERROR
+
+ /* Register byte-counting filter */
+ if(H5Zregister(H5Z_COUNT) < 0)
+ TEST_ERROR
+
+ /* Create dataspace */
+ if((sid = H5Screate_simple(2, dim, NULL)) < 0)
+ TEST_ERROR
+
+ /* Create DCPL */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR
+
+ /* Set chunk dimensions */
+ if(H5Pset_chunk(dcpl, 2, cdim) < 0)
+ TEST_ERROR
+
+ /* Add "count" filter */
+ if(H5Pset_filter(dcpl, H5Z_FILTER_COUNT, 0u, (size_t)0, NULL) < 0)
+ TEST_ERROR
+
+ /* Disable filters on partial chunks */
+ if(H5Pget_chunk_opts(dcpl, &opts) < 0)
+ TEST_ERROR
+ opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
+ if(H5Pset_chunk_opts(dcpl, opts) < 0)
+ TEST_ERROR
+
+ /* Initialize write buffer */
+ for(i=0; i<dim[0]; i++)
+ for(j=0; j<dim[1]; j++)
+ wbuf[i][j] = (char)(2 * i) - (char)j;
+
+ /* Reset byte counts */
+ count_nbytes_read = (size_t)0;
+ count_nbytes_written = (size_t)0;
+
+ /* Create dataset */
+ if((did = H5Dcreate2(fid, DSET_CHUNKED_NAME, H5T_NATIVE_CHAR, sid,
+ H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Nothing should have been written, as we are not using early allocation */
+ if(count_nbytes_read != (size_t)0)
+ TEST_ERROR
+ if(count_nbytes_written != (size_t)0)
+ TEST_ERROR
+
+ /* Write data */
+ if(H5Dwrite(did, H5T_NATIVE_CHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf) < 0)
+ TEST_ERROR
+
+ /* Close dataset */
+ if(H5Dclose(did) < 0)
+ TEST_ERROR
+
+ /* Make sure only 2 of the 4 chunks were written through the filter (4 bytes
+ * each) */
+ if(count_nbytes_read != (size_t)0)
+ TEST_ERROR
+ if(count_nbytes_written != (size_t)(2 * cdim[0] * cdim[1]))
+ TEST_ERROR
+
+ /* Reopen the dataset */
+ if((did = H5Dopen2(fid, DSET_CHUNKED_NAME, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Read the dataset */
+ if(H5Dread(did, H5T_NATIVE_CHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+ TEST_ERROR
+
+ /* Verify that data read == data written */
+ for(i=0; i<dim[0]; i++)
+ for(j=0; j<dim[1]; j++)
+ if(rbuf[i][j] != wbuf[i][j])
+ TEST_ERROR
+
+ /* Make sure only 2 of the 4 chunks were read through the filter (4 bytes
+ * each) */
+ if(count_nbytes_read != (size_t)(2 * cdim[0] * cdim[1]))
+ TEST_ERROR
+ if(count_nbytes_written != (size_t)(2 * cdim[0] * cdim[1]))
+ TEST_ERROR
+
+ /* Close IDs */
+ if(H5Dclose(did) < 0)
+ TEST_ERROR
+ if(H5Pclose(dcpl) < 0)
+ TEST_ERROR
+ if(H5Sclose(sid) < 0)
+ TEST_ERROR
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Dclose(did);
+ H5Pclose(dcpl);
+ H5Sclose(sid);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+ return -1;
+} /* test_unfiltered_edge_chunks */
+
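For the byte-count assertions above: the 4x3 dataset with 2x2 chunks has a 2x2 chunk grid, and the two chunks at column offset 2 extend past the 3-column extent (2 + 2 > 3), so they bypass the filter. Only the two full chunks pass through it, at cdim[0] * cdim[1] = 4 bytes of char data each, giving the expected 2 * cdim[0] * cdim[1] = 8 bytes on both the write and the read pass.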
/*-------------------------------------------------------------------------
* Function: main
@@ -8468,6 +8650,7 @@ main(void)
nerrors += (test_chunk_expand(my_fapl) < 0 ? 1 : 0);
nerrors += (test_fixed_array(my_fapl) < 0 ? 1 : 0);
nerrors += (test_idx_compatible() < 0 ? 1 : 0);
+ nerrors += (test_unfiltered_edge_chunks(my_fapl) < 0 ? 1 : 0);
if(H5Fclose(file) < 0)
goto error;
diff --git a/test/objcopy.c b/test/objcopy.c
index 2ab0059..4736af1 100755
--- a/test/objcopy.c
+++ b/test/objcopy.c
@@ -2951,6 +2951,148 @@ error:
/*-------------------------------------------------------------------------
+ * Function: test_copy_dataset_no_edge_filt
+ *
+ * Purpose: Create a compressed, chunked dataset with filters disabled on
+ * partial edge chunks in SRC file and copy it to DST file
+ *
+ * Return: Success: 0
+ * Failure: number of errors
+ *
+ * Programmer: Neil Fortner
+ * Tuesday, May 11, 2010
+ * Mostly copied from test_copy_dataset_compressed, by
+ * Quincey Koziol
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_copy_dataset_no_edge_filt(hid_t fcpl_src, hid_t fcpl_dst, hid_t fapl)
+{
+#ifdef H5_HAVE_FILTER_DEFLATE
+ hid_t fid_src = -1, fid_dst = -1; /* File IDs */
+ hid_t sid = -1; /* Dataspace ID */
+ hid_t pid = -1; /* Dataset creation property list ID */
+ hid_t did = -1, did2 = -1; /* Dataset IDs */
+ hsize_t dim2d[2]; /* Dataset dimensions */
+ hsize_t chunk_dim2d[2] ={CHUNK_SIZE_1, CHUNK_SIZE_2}; /* Chunk dimensions */
+ float buf[DIM_SIZE_1][DIM_SIZE_2]; /* Buffer for writing data */
+ int i, j; /* Local index variables */
+ char src_filename[NAME_BUF_SIZE];
+ char dst_filename[NAME_BUF_SIZE];
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+ TESTING("H5Ocopy(): compressed dataset with no edge filters");
+
+#ifndef H5_HAVE_FILTER_DEFLATE
+ SKIPPED();
+ puts(" Deflation filter not available");
+#else /* H5_HAVE_FILTER_DEFLATE */
+ /* set initial data values */
+ for (i=0; i<DIM_SIZE_1; i++)
+ for (j=0; j<DIM_SIZE_2; j++)
+ buf[i][j] = (float)(100.0); /* Something easy to compress */
+
+ /* Initialize the filenames */
+ h5_fixname(FILENAME[0], fapl, src_filename, sizeof src_filename);
+ h5_fixname(FILENAME[1], fapl, dst_filename, sizeof dst_filename);
+
+ /* Reset file address checking info */
+ addr_reset();
+
+ /* create source file */
+ if((fid_src = H5Fcreate(src_filename, H5F_ACC_TRUNC, fcpl_src, fapl)) < 0) TEST_ERROR
+
+ /* Set dataspace dimensions */
+ dim2d[0]=DIM_SIZE_1;
+ dim2d[1]=DIM_SIZE_2;
+
+ /* create dataspace */
+ if((sid = H5Screate_simple(2, dim2d, NULL)) < 0) TEST_ERROR
+
+ /* create and set comp & chunk plist, and disable partial chunk filters */
+ if((pid = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR
+ if(H5Pset_chunk(pid, 2, chunk_dim2d) < 0) TEST_ERROR
+ if(H5Pset_deflate(pid, 9) < 0) TEST_ERROR
+ if(H5Pset_chunk_opts(pid, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0) TEST_ERROR
+
+ /* create dataset */
+ if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* close chunk plist */
+ if(H5Pclose(pid) < 0) TEST_ERROR
+
+ /* write data into file */
+ if(H5Dwrite(did, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0) TEST_ERROR
+
+ /* close dataspace */
+ if(H5Sclose(sid) < 0) TEST_ERROR
+
+ /* attach attributes to the dataset */
+ if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR
+
+ /* close the dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /* close the SRC file */
+ if(H5Fclose(fid_src) < 0) TEST_ERROR
+
+
+ /* open the source file with read-only */
+ if((fid_src = H5Fopen(src_filename, H5F_ACC_RDONLY, fapl)) < 0) TEST_ERROR
+
+ /* create destination file */
+ if((fid_dst = H5Fcreate(dst_filename, H5F_ACC_TRUNC, fcpl_dst, fapl)) < 0) TEST_ERROR
+
+ /* Create an uncopied object in destination file so that addresses in source and destination files aren't the same */
+ if(H5Gclose(H5Gcreate2(fid_dst, NAME_GROUP_UNCOPIED, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* copy the dataset from SRC to DST */
+ if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED, fid_dst, NAME_DATASET_CHUNKED, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+
+ /* open the dataset for copy */
+ if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* open the destination dataset */
+ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ if(compare_idx_type(fapl, did2, H5D_CHUNK_IDX_FARRAY, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
+ /* Check if the datasets are equal */
+ if(compare_datasets(did, did2, H5P_DEFAULT, NULL) != TRUE) TEST_ERROR
+
+ /* close the destination dataset */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* close the source dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /* close the SRC file */
+ if(H5Fclose(fid_src) < 0) TEST_ERROR
+
+ /* close the DST file */
+ if(H5Fclose(fid_dst) < 0) TEST_ERROR
+
+ PASSED();
+#endif /* H5_HAVE_FILTER_DEFLATE */
+ return 0;
+
+#ifdef H5_HAVE_FILTER_DEFLATE
+error:
+ H5E_BEGIN_TRY {
+ H5Dclose(did2);
+ H5Dclose(did);
+ H5Pclose(pid);
+ H5Sclose(sid);
+ H5Fclose(fid_dst);
+ H5Fclose(fid_src);
+ } H5E_END_TRY;
+ return 1;
+#endif /* H5_HAVE_FILTER_DEFLATE */
+} /* end test_copy_dataset_no_edge_filt */
+
+
+/*-------------------------------------------------------------------------
* Function: test_copy_dataset_compact
*
* Purpose: Create a compact dataset in SRC file and copy it to DST file
@@ -8412,6 +8554,7 @@ main(void)
nerrors += test_copy_dataset_chunked_empty(fcpl_src, fcpl_dst, my_fapl);
nerrors += test_copy_dataset_chunked_sparse(fcpl_src, fcpl_dst, my_fapl);
nerrors += test_copy_dataset_compressed(fcpl_src, fcpl_dst, my_fapl);
+ nerrors += test_copy_dataset_no_edge_filt(fcpl_src, fcpl_dst, my_fapl);
nerrors += test_copy_dataset_compact(fcpl_src, fcpl_dst, my_fapl);
nerrors += test_copy_dataset_multi_ohdr_chunks(fcpl_src, fcpl_dst, my_fapl);
nerrors += test_copy_dataset_attr_named_dtype(fcpl_src, fcpl_dst, my_fapl);
diff --git a/test/set_extent.c b/test/set_extent.c
index 0582c41..39cc446 100644
--- a/test/set_extent.c
+++ b/test/set_extent.c
@@ -49,8 +49,9 @@ const char *FILENAME[] = {
#define CONFIG_COMPRESS 0x01u
#define CONFIG_FILL 0x02u
#define CONFIG_EARLY_ALLOC 0x04u
+#define CONFIG_UNFILT_EDGE 0x08u
#define CONFIG_ALL (CONFIG_COMPRESS + CONFIG_FILL \
- + CONFIG_EARLY_ALLOC)
+ + CONFIG_EARLY_ALLOC + CONFIG_UNFILT_EDGE)
#define FILL_VALUE -1
#define DO_RANKS_PRINT_CONFIG(TEST) { \
printf(" Config:\n"); \
@@ -59,6 +60,8 @@ const char *FILENAME[] = {
printf(" Fill value: %s\n", (do_fillvalue ? "yes" : "no")); \
printf(" Early allocation: %s\n", (config & CONFIG_EARLY_ALLOC ? "yes" \
: "no")); \
+ printf(" Edge chunk filters: %s\n", (config & CONFIG_UNFILT_EDGE \
+ ? "disabled" : "enabled")); \
} /* end DO_RANKS_PRINT_CONFIG */
#define RANK1 1
@@ -81,26 +84,46 @@ const char *FILENAME[] = {
test_random_rank4_dump(NDIM_SETS, dim_log, cdims, J, K, L, M); \
goto error; \
} /* end RAND4_FAIL_DUMP */
+#define RAND4_VL_NITER 40
+#define RAND4_VL_SPARSE_SWITCH 5
-static int do_ranks( hid_t fapl );
+typedef enum rank4_index_t {
+ RANK4_INDEX_BTREE = 0, /* Use b-tree (version 1 or 2) as chunk index */
+ RANK4_INDEX_FARRAY, /* Use fixed array as chunk index */
+ RANK4_INDEX_EARRAY, /* Use extensible array as chunk index */
+ RANK4_NINDICES, /* Must be last */
+} rank4_index_t;
+
+static int do_ranks( hid_t fapl, hbool_t new_format );
static int do_layouts( hid_t fapl );
static int test_rank1( hid_t fapl,
hid_t dcpl,
hbool_t do_fill_value,
+ hbool_t disable_edge_filters,
hbool_t set_istore_k);
static int test_rank2( hid_t fapl,
hid_t dcpl,
hbool_t do_fill_value,
+ hbool_t disable_edge_filters,
hbool_t set_istore_k);
static int test_rank3( hid_t fapl,
hid_t dcpl,
hbool_t do_fill_value,
+ hbool_t disable_edge_filters,
hbool_t set_istore_k);
static int test_random_rank4( hid_t fapl,
hid_t dcpl,
hbool_t do_fillvalue,
- hbool_t do_sparse);
+ hbool_t disable_edge_filters,
+ hbool_t do_sparse,
+ rank4_index_t index_type);
+static int test_random_rank4_vl( hid_t fapl,
+ hid_t dcpl,
+ hbool_t do_fillvalue,
+ hbool_t disable_edge_filters,
+ hbool_t do_sparse,
+ rank4_index_t index_type);
static int test_external( hid_t fapl );
static int test_layouts( H5D_layout_t layout, hid_t fapl );
@@ -174,7 +197,7 @@ int main( void )
H5F_LIBVER_LATEST) < 0) TEST_ERROR
/* Tests which use chunked datasets */
- nerrors += do_ranks( my_fapl ) < 0 ? 1 : 0;
+ nerrors += do_ranks( my_fapl, new_format ) < 0 ? 1 : 0;
} /* end for */
/* Tests which do not use chunked datasets */
@@ -208,10 +231,12 @@ error:
* test with several ranks
*-------------------------------------------------------------------------
*/
-static int do_ranks( hid_t fapl )
+static int do_ranks( hid_t fapl, hbool_t new_format )
{
- hbool_t do_fillvalue = 0;
+ hbool_t do_fillvalue = FALSE;
+ hbool_t disable_edge_filters = FALSE;
+ rank4_index_t index_type;
hid_t dcpl = -1;
int fillvalue = FILL_VALUE;
unsigned config;
@@ -247,6 +272,11 @@ static int do_ranks( hid_t fapl )
if(H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY) < 0)
TEST_ERROR
+ if(config & CONFIG_UNFILT_EDGE)
+ disable_edge_filters = TRUE;
+ else
+ disable_edge_filters = FALSE;
+
/* Run tests */
if(do_fillvalue) {
unsigned ifset;
@@ -261,29 +291,29 @@ static int do_ranks( hid_t fapl )
if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_ALLOC) < 0)
TEST_ERROR
- if(test_rank1(fapl, dcpl, do_fillvalue, FALSE) < 0)
- {
+ if(test_rank1(fapl, dcpl, do_fillvalue, disable_edge_filters,
+ FALSE) < 0) {
DO_RANKS_PRINT_CONFIG("Rank 1")
printf(" Fill time: %s\n", (ifset ? "H5D_FILL_TIME_IFSET"
: "H5D_FILL_TIME_ALLOC"));
goto error;
} /* end if */
- if(test_rank2(fapl, dcpl, do_fillvalue, FALSE) < 0)
- {
+ if(test_rank2(fapl, dcpl, do_fillvalue, disable_edge_filters,
+ FALSE) < 0) {
DO_RANKS_PRINT_CONFIG("Rank 2")
printf(" Fill time: %s\n", (ifset ? "H5D_FILL_TIME_IFSET"
: "H5D_FILL_TIME_ALLOC"));
goto error;
} /* end if */
- if(test_rank3(fapl, dcpl, do_fillvalue, FALSE) < 0)
- {
+ if(test_rank3(fapl, dcpl, do_fillvalue, disable_edge_filters,
+ FALSE) < 0) {
DO_RANKS_PRINT_CONFIG("Rank 3")
printf(" Fill time: %s\n", (ifset ? "H5D_FILL_TIME_IFSET"
: "H5D_FILL_TIME_ALLOC"));
goto error;
} /* end if */
- if(test_rank2(fapl, dcpl, do_fillvalue, TRUE) < 0)
- {
+ if(test_rank2(fapl, dcpl, do_fillvalue, disable_edge_filters,
+ TRUE) < 0) {
DO_RANKS_PRINT_CONFIG("Rank 2 with non-default indexed storage B-tree")
printf(" Fill time: %s\n", (ifset ? "H5D_FILL_TIME_IFSET"
: "H5D_FILL_TIME_ALLOC"));
@@ -297,23 +327,23 @@ static int do_ranks( hid_t fapl )
if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_ALLOC) < 0)
TEST_ERROR
- if(test_rank1(fapl, dcpl, do_fillvalue, FALSE) < 0)
- {
+ if(test_rank1(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE)
+ < 0) {
DO_RANKS_PRINT_CONFIG("Rank 1")
goto error;
} /* end if */
- if(test_rank2(fapl, dcpl, do_fillvalue, FALSE) < 0)
- {
+ if(test_rank2(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE)
+ < 0) {
DO_RANKS_PRINT_CONFIG("Rank 2")
goto error;
} /* end if */
- if(test_rank3(fapl, dcpl, do_fillvalue, FALSE) < 0)
- {
+ if(test_rank3(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE)
+ < 0) {
DO_RANKS_PRINT_CONFIG("Rank 3")
goto error;
} /* end if */
- if(test_rank2(fapl, dcpl, do_fillvalue, TRUE) < 0)
- {
+ if(test_rank2(fapl, dcpl, do_fillvalue, disable_edge_filters, TRUE)
+ < 0) {
DO_RANKS_PRINT_CONFIG("Rank 2 with non-default indexed storage B-tree")
goto error;
} /* end if */
@@ -324,17 +354,55 @@ static int do_ranks( hid_t fapl )
if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_IFSET) < 0)
TEST_ERROR
- if(test_random_rank4(fapl, dcpl, do_fillvalue, FALSE) < 0) {
- DO_RANKS_PRINT_CONFIG("Randomized rank 4")
- goto error;
- } /* end if */
+ /* Iterate over different index types, but only if using the new format
+ */
+ for(index_type = RANK4_INDEX_BTREE; index_type < RANK4_NINDICES;
+ index_type++) {
+ /* Standard test */
+ if(test_random_rank4(fapl, dcpl, do_fillvalue, disable_edge_filters,
+ FALSE, index_type) < 0) {
+ DO_RANKS_PRINT_CONFIG("Randomized rank 4")
+ printf(" Index: %s\n", index_type == RANK4_INDEX_BTREE
+ ? "btree" : (index_type == RANK4_INDEX_FARRAY ? "farray"
+ : "earray"));
+ goto error;
+ } /* end if */
- if(!(config & CONFIG_EARLY_ALLOC))
- if(test_random_rank4(fapl, dcpl, do_fillvalue, TRUE) < 0) {
- DO_RANKS_PRINT_CONFIG("Randomized rank 4 with sparse allocation")
+ /* VL test */
+ if(test_random_rank4_vl(fapl, dcpl, do_fillvalue,
+ disable_edge_filters, FALSE, index_type) < 0) {
+ DO_RANKS_PRINT_CONFIG("Randomized rank 4 variable length")
+ printf(" Index: %s\n", index_type == RANK4_INDEX_BTREE
+ ? "btree" : (index_type == RANK4_INDEX_FARRAY ? "farray"
+ : "earray"));
goto error;
} /* end if */
+ /* Sparse allocation test (regular and VL) */
+ if(!(config & CONFIG_EARLY_ALLOC)) {
+ if(test_random_rank4(fapl, dcpl, do_fillvalue,
+ disable_edge_filters, TRUE, index_type) < 0) {
+ DO_RANKS_PRINT_CONFIG("Randomized rank 4 with sparse allocation")
+ printf(" Index: %s\n", index_type == RANK4_INDEX_BTREE
+ ? "btree" : (index_type == RANK4_INDEX_FARRAY
+ ? "farray" : "earray"));
+ goto error;
+ } /* end if */
+ if(test_random_rank4_vl(fapl, dcpl, do_fillvalue,
+ disable_edge_filters, TRUE, index_type) < 0) {
+ DO_RANKS_PRINT_CONFIG("Randomized rank 4 variable length with sparse allocation")
+ printf(" Index: %s\n", index_type == RANK4_INDEX_BTREE
+ ? "btree" : (index_type == RANK4_INDEX_FARRAY
+ ? "farray" : "earray"));
+ goto error;
+ } /* end if */
+ } /* end if */
+
+ /* Break out if using the old format */
+ if(!new_format)
+ break;
+ } /* end for */
+
/* Close dcpl */
if(H5Pclose(dcpl) < 0)
TEST_ERROR
@@ -388,6 +456,7 @@ error:
static int test_rank1( hid_t fapl,
hid_t dcpl,
hbool_t do_fill_value,
+ hbool_t disable_edge_filters,
hbool_t set_istore_k)
{
@@ -469,6 +538,9 @@ static int test_rank1( hid_t fapl,
{
TEST_ERROR
}
+ if(disable_edge_filters)
+ if(H5Pset_chunk_opts(my_dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0)
+ TEST_ERROR
/*-------------------------------------------------------------------------
* create, write dataset
@@ -894,6 +966,7 @@ error:
static int test_rank2( hid_t fapl,
hid_t dcpl,
hbool_t do_fill_value,
+ hbool_t disable_edge_filters,
hbool_t set_istore_k)
{
@@ -974,6 +1047,9 @@ static int test_rank2( hid_t fapl,
{
TEST_ERROR
}
+ if(disable_edge_filters)
+ if(H5Pset_chunk_opts(my_dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0)
+ TEST_ERROR
/*-------------------------------------------------------------------------
* Procedure 1
@@ -1509,6 +1585,7 @@ error:
static int test_rank3( hid_t fapl,
hid_t dcpl,
hbool_t do_fill_value,
+ hbool_t disable_edge_filters,
hbool_t set_istore_k)
{
@@ -1595,6 +1672,9 @@ static int test_rank3( hid_t fapl,
{
TEST_ERROR
}
+ if(disable_edge_filters)
+ if(H5Pset_chunk_opts(my_dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0)
+ TEST_ERROR
/*-------------------------------------------------------------------------
* create, write array
@@ -2669,14 +2749,16 @@ error:
*-------------------------------------------------------------------------
*/
static int test_random_rank4( hid_t fapl, hid_t dcpl, hbool_t do_fillvalue,
- hbool_t do_sparse )
+ hbool_t disable_edge_filters, hbool_t do_sparse,
+ rank4_index_t index_type )
{
hid_t file = -1;
hid_t dset = -1;
hid_t fspace = -1;
hid_t mspace = -1;
hid_t my_dcpl = -1;
- hsize_t dims[4]; /* Dataset's dimensions */
+ hsize_t dims[4] = {10, 10, 10, 10}; /* Dataset's dimensions */
+ hsize_t max_dims[4] = {10, 10, 10, 10}; /* Maximum dimensions */
hsize_t old_dims[4]; /* Old dataset dimensions */
hsize_t min_unwritten_dims[4]; /* Minimum dimensions since last write */
hsize_t *valid_dims = old_dims; /* Dimensions of region still containing written data */
@@ -2688,31 +2770,53 @@ static int test_random_rank4( hid_t fapl, hid_t dcpl, hbool_t do_fillvalue,
static hsize_t dim_log[RAND4_NITER+1][4]; /* Log of dataset dimensions */
hbool_t zero_dim = FALSE; /* Whether a dimension is 0 */
hbool_t writing = TRUE; /* Whether we're writing to the dset */
+ unsigned scalar_iter; /* Iteration to shrink dset to 1x1x1x1 */
volatile unsigned i, j, k, l, m; /* Local indices */
char filename[NAME_BUF_SIZE];
+ /*!FIXME Skip the test if a fixed array index is requested, as resizing
+ * fixed arrays is broken now. Extensible arrays are also broken. Remove
+ * these lines as appropriate when these problems are fixed. */
+ if(index_type == RANK4_INDEX_FARRAY || index_type == RANK4_INDEX_EARRAY)
+ return 0;
+
/* create a new file */
h5_fixname(FILENAME[4], fapl, filename, sizeof filename);
if ((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
TEST_ERROR
+ /* Set maximum dimensions as appropriate for index type */
+ if(index_type == RANK4_INDEX_BTREE)
+ for(i=0; i<4; i++)
+ max_dims[i] = H5S_UNLIMITED;
+ else if(index_type == RANK4_INDEX_EARRAY)
+ max_dims[1] = H5S_UNLIMITED;
+
/* Generate random chunk dimensions, 2-4 */
for(i=0; i<4; i++)
cdims[i] = (hsize_t)((HDrandom() % 3) + 2);
- /* Generate initial dataset size, 1-10 */
+ /* Pick iteration to shrink dataset to 1x1x1x1 */
+ scalar_iter = (unsigned)(HDrandom() % RAND4_NITER);
+
+ /* Generate initial dataset size, 1-10, unless using fixed array index or
+ * scalar_iter is 0 */
for(i=0; i<4; i++) {
- dims[i] = (hsize_t)((HDrandom() % 10) + 1);
+ dims[i] = (hsize_t)(index_type != RANK4_INDEX_FARRAY
+ ? (0 == scalar_iter ? 1 : ((HDrandom() % 10) + 1)) : 10);
dim_log[0][i] = dims[i];
} /* end for */
/* Create dataset */
- if((fspace = H5Screate_simple(4, dims, mdims)) < 0)
+ if((fspace = H5Screate_simple(4, dims, max_dims)) < 0)
TEST_ERROR
if((my_dcpl = H5Pcopy(dcpl)) < 0)
TEST_ERROR
if(H5Pset_chunk(my_dcpl, 4, cdims) < 0)
TEST_ERROR
+ if(disable_edge_filters)
+ if(H5Pset_chunk_opts(my_dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0)
+ TEST_ERROR
if((dset = H5Dcreate2(file, "dset", H5T_NATIVE_INT, fspace, H5P_DEFAULT,
my_dcpl, H5P_DEFAULT)) < 0)
TEST_ERROR
@@ -2728,8 +2832,9 @@ static int test_random_rank4( hid_t fapl, hid_t dcpl, hbool_t do_fillvalue,
/* Main loop */
for(i=0; i<RAND4_NITER; i++) {
+
+ /* Generate random write buffer */
if(writing && !zero_dim) {
- /* Generate random write buffer */
for(j=0; j<dims[0]; j++)
for(k=0; k<dims[1]; k++)
for(l=0; l<dims[2]; l++)
@@ -2742,11 +2847,13 @@ static int test_random_rank4( hid_t fapl, hid_t dcpl, hbool_t do_fillvalue,
RAND4_FAIL_DUMP(i+1, -1, -1, -1, -1)
} /* end if */
- /* Generate new dataset size, 0-10 (0 much less likely) */
+ /* Generate new dataset size, 0-10 (0 much less likely). If i is
+ * scalar_iter, set all dims to 1. */
zero_dim = FALSE;
for(j=0; j<4; j++) {
old_dims[j] = dims[j];
- if((dims[j] = (hsize_t)(HDrandom() % 11)) == 0)
+ if((dims[j] = (hsize_t)(i == scalar_iter ? 1 : (HDrandom() % 11)))
+ == 0)
if((dims[j] = (hsize_t)(HDrandom() % 11)) == 0)
zero_dim = TRUE;
dim_log[i+1][j] = dims[j];
@@ -2833,6 +2940,289 @@ error:
return -1;
} /* end test_random_rank4 */
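The scalar_iter additions above guarantee that exactly one randomly chosen iteration shrinks every dimension to 1, so each run of test_random_rank4() now exercises the 1x1x1x1 minimum-extent path (and, when the chosen iteration is 0, dataset creation at that size) rather than relying on the random draw to hit it.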
+/*-------------------------------------------------------------------------
+ * Function: test_random_rank4_vl
+ *
+ * Purpose: Test expanding and shrinking a rank 4 dataset with
+ * variable length data in a randomized fashion. Verifies
+ * that data is preserved (and filled, if do_fillvalue is
+ * true) as expected.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Neil Fortner
+ * Tuesday, June 29, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static int test_random_rank4_vl( hid_t fapl, hid_t dcpl, hbool_t do_fillvalue,
+ hbool_t disable_edge_filters, hbool_t do_sparse,
+ rank4_index_t index_type )
+{
+ hid_t file = -1;
+ hid_t dset = -1;
+ hid_t type = -1;
+ hid_t fspace = -1;
+ hid_t mspace = -1;
+ hid_t my_dcpl = -1;
+ hsize_t dims[4] = {10, 10, 10, 10}; /* Dataset's dimensions */
+ hsize_t max_dims[4] = {10, 10, 10, 10}; /* Maximum dimensions */
+ hsize_t old_dims[4]; /* Old dataset dimensions */
+ hsize_t min_unwritten_dims[4]; /* Minimum dimensions since last write */
+ hsize_t *valid_dims = old_dims; /* Dimensions of region still containing written data */
+ hsize_t cdims[4]; /* Chunk dimensions */
+ const hsize_t mdims[4] = {10, 10, 10, 10}; /* Memory buffer dimensions */
+ const hsize_t start[4] = {0, 0, 0, 0}; /* Start for hyperslab operations on memory */
+ static hvl_t rbuf[10][10][10][10]; /* Read buffer */
+ static hvl_t wbuf[10][10][10][10]; /* Write buffer */
+ static hsize_t dim_log[RAND4_NITER+1][4]; /* Log of dataset dimensions */
+ hbool_t zero_dim = FALSE; /* Whether a dimension is 0 */
+ hbool_t writing = TRUE; /* Whether we're writing to the dset */
+ hvl_t fill_value; /* Fill value */
+ unsigned scalar_iter; /* Iteration to shrink dset to 1x1x1x1 */
+ volatile unsigned i, j, k, l, m; /* Local indices */
+ char filename[NAME_BUF_SIZE];
+
+ /*!FIXME Skip the test if a fixed array index is requested, as resizing
+ * fixed arrays is broken now. Extensible arrays are also broken. Remove
+ * these lines as appropriate when these problems are fixed. */
+ if(index_type == RANK4_INDEX_FARRAY || index_type == RANK4_INDEX_EARRAY)
+ return 0;
+
+ /* Initialize fill value and VL buffer pointers so stale pointers aren't freed in case of an error */
+ fill_value.len = 0;
+ fill_value.p = NULL;
+ for(i=0; i<dims[0]; i++)
+ for(j=0; j<dims[1]; j++)
+ for(k=0; k<dims[2]; k++)
+ for(l=0; l<dims[3]; l++) {
+ rbuf[i][j][k][l].len = 0;
+ rbuf[i][j][k][l].p = NULL;
+ wbuf[i][j][k][l].len = 0;
+ wbuf[i][j][k][l].p = NULL;
+ } /* end for */
+
+ /* Allocate space for VL write buffers, since these never need to be
+ * reallocated */
+ for(i=0; i<dims[0]; i++)
+ for(j=0; j<dims[1]; j++)
+ for(k=0; k<dims[2]; k++)
+ for(l=0; l<dims[3]; l++) {
+ wbuf[i][j][k][l].len = 2;
+ if(NULL == (wbuf[i][j][k][l].p = HDmalloc(2 * sizeof(int))))
+ TEST_ERROR;
+ } /* end for */
+
+ /* create a new file */
+ h5_fixname(FILENAME[4], fapl, filename, sizeof filename);
+ if ((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ TEST_ERROR
+
+ /* Create VL type */
+ if((type = H5Tvlen_create(H5T_NATIVE_INT)) < 0)
+ TEST_ERROR
+
+ /* Set maximum dimensions as appropriate for index type */
+ if(index_type == RANK4_INDEX_BTREE)
+ for(i=0; i<4; i++)
+ max_dims[i] = H5S_UNLIMITED;
+ else if(index_type == RANK4_INDEX_EARRAY)
+ max_dims[1] = H5S_UNLIMITED;
+
+ /* Generate random chunk dimensions, 2-4 */
+ for(i=0; i<4; i++)
+ cdims[i] = (hsize_t)((HDrandom() % 3) + 2);
+
+ /* Pick iteration to shrink dataset to 1x1x1x1 */
+ scalar_iter = (unsigned)(HDrandom() % RAND4_NITER);
+
+ /* Generate initial dataset size, 1-10, unless using fixed array index or
+ * scalar_iter is 0 */
+ for(i=0; i<4; i++) {
+ dims[i] = (hsize_t)(index_type != RANK4_INDEX_FARRAY
+ ? (0 == scalar_iter ? 1 : ((HDrandom() % 10) + 1)) : 10);
+ dim_log[0][i] = dims[i];
+ } /* end for */
+
+ /* Make a copy of the dcpl */
+ if((my_dcpl = H5Pcopy(dcpl)) < 0)
+ TEST_ERROR
+
+ /* Create VL fill value, if requested */
+ if(do_fillvalue) {
+ fill_value.len = 2;
+ if(NULL == (fill_value.p = HDmalloc(2 * sizeof(int))))
+ TEST_ERROR
+ ((int *)fill_value.p)[0] = 1;
+ ((int *)fill_value.p)[1] = 2;
+ if(H5Pset_fill_value(my_dcpl, type, &fill_value) < 0)
+ TEST_ERROR
+ } /* end if */
+
+ /* Create dataset */
+ if((fspace = H5Screate_simple(4, dims, max_dims)) < 0)
+ TEST_ERROR
+ if(H5Pset_chunk(my_dcpl, 4, cdims) < 0)
+ TEST_ERROR
+ if(disable_edge_filters)
+ if(H5Pset_chunk_opts(my_dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0)
+ TEST_ERROR
+ if((dset = H5Dcreate2(file, "dset", type, fspace, H5P_DEFAULT, my_dcpl,
+ H5P_DEFAULT)) < 0)
+ TEST_ERROR
+ if(H5Sclose(fspace) < 0)
+ TEST_ERROR
+
+ /* Create memory space, and set initial selection */
+ if((mspace = H5Screate_simple(4, mdims, NULL)) < 0)
+ TEST_ERROR
+ if(H5Sselect_hyperslab(mspace, H5S_SELECT_SET, start, NULL, dims, NULL)
+ < 0)
+ TEST_ERROR
+
+ /* Main loop */
+ for(i=0; i<RAND4_VL_NITER; i++) {
+
+ /* Generate random write buffer */
+ if(writing && !zero_dim) {
+ for(j=0; j<dims[0]; j++)
+ for(k=0; k<dims[1]; k++)
+ for(l=0; l<dims[2]; l++)
+ for(m=0; m<dims[3]; m++) {
+ ((int *)wbuf[j][k][l][m].p)[0] = HDrandom();
+ ((int *)wbuf[j][k][l][m].p)[1] = HDrandom();
+ } /* end for */
+
+ /* Write data */
+ if(H5Dwrite(dset, type, mspace, H5S_ALL, H5P_DEFAULT, wbuf) < 0)
+ RAND4_FAIL_DUMP(i+1, -1, -1, -1, -1)
+ } /* end if */
+
+ /* Generate new dataset size, 0-10 (0 much less likely). If i is
+ * scalar_iter, set all dims to 1. */
+ zero_dim = FALSE;
+ for(j=0; j<4; j++) {
+ old_dims[j] = dims[j];
+ if((dims[j] = (hsize_t)(i == scalar_iter ? 1 : (HDrandom() % 11)))
+ == 0)
+ if((dims[j] = (hsize_t)(HDrandom() % 11)) == 0)
+ zero_dim = TRUE;
+ dim_log[i+1][j] = dims[j];
+ } /* end for */
+
+ /* If writing is disabled, update min_unwritten_dims */
+ if(!writing)
+ for(j=0; j<4; j++)
+ if(old_dims[j] < min_unwritten_dims[j])
+ min_unwritten_dims[j] = old_dims[j];
+
+ /* Resize dataset */
+ if(H5Dset_extent(dset, dims) < 0)
+ RAND4_FAIL_DUMP(i+2, -1, -1, -1, -1)
+
+ if(!zero_dim) {
+ /* Read data from resized dataset */
+ if(H5Sselect_hyperslab(mspace, H5S_SELECT_SET, start, NULL, dims,
+ NULL) < 0)
+ RAND4_FAIL_DUMP(i+2, -1, -1, -1, -1)
+ if(H5Dread(dset, type, mspace, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+ RAND4_FAIL_DUMP(i+2, -1, -1, -1, -1)
+
+ /* Verify correctness of read data */
+ if(do_fillvalue) {
+ for(j=0; j<dims[0]; j++)
+ for(k=0; k<dims[1]; k++)
+ for(l=0; l<dims[2]; l++)
+ for(m=0; m<dims[3]; m++)
+ if(j >= valid_dims[0] || k >= valid_dims[1]
+ || l >= valid_dims[2]
+ || m >= valid_dims[3]) {
+ if(((int *)fill_value.p)[0]
+ != ((int *)rbuf[j][k][l][m].p)[0]
+ || ((int *)fill_value.p)[1]
+ != ((int *)rbuf[j][k][l][m].p)[1])
+ RAND4_FAIL_DUMP(i+2, (int)j, (int)k, (int)l, (int)m)
+ } /* end if */
+ else
+ if(((int *)wbuf[j][k][l][m].p)[0]
+ != ((int *)rbuf[j][k][l][m].p)[0]
+ || ((int *)wbuf[j][k][l][m].p)[1]
+ != ((int *)rbuf[j][k][l][m].p)[1])
+ RAND4_FAIL_DUMP(i+2, (int)j, (int)k, (int)l, (int)m)
+ } /* end if */
+ else {
+ for(j=0; j<MIN(dims[0],valid_dims[0]); j++)
+ for(k=0; k<MIN(dims[1],valid_dims[1]); k++)
+ for(l=0; l<MIN(dims[2],valid_dims[2]); l++)
+ for(m=0; m<MIN(dims[3],valid_dims[3]); m++)
+ if(((int *)wbuf[j][k][l][m].p)[0]
+ != ((int *)rbuf[j][k][l][m].p)[0]
+ || ((int *)wbuf[j][k][l][m].p)[1]
+ != ((int *)rbuf[j][k][l][m].p)[1])
+ RAND4_FAIL_DUMP(i+2, (int)j, (int)k, (int)l, (int)m)
+ } /* end else */
+
+ /* Free read buffer */
+ if(H5Dvlen_reclaim(type, mspace, H5P_DEFAULT, rbuf) < 0)
+ TEST_ERROR
+ } /* end if */
+
+ /* Handle the switch between writing and not writing */
+ if(do_sparse && !(i % RAND4_VL_SPARSE_SWITCH)) {
+ writing = !writing;
+ if(!writing) {
+ for(j=0; j<4; j++)
+ min_unwritten_dims[j] = old_dims[j];
+ valid_dims = min_unwritten_dims;
+ } /* end if */
+ else
+ valid_dims = old_dims;
+ } /* end if */
+ } /* end for */
+
+ /* Close */
+ if(H5Sselect_all(mspace) < 0)
+ TEST_ERROR
+ if(H5Dvlen_reclaim(type, mspace, H5P_DEFAULT, wbuf) < 0)
+ TEST_ERROR
+ HDfree(fill_value.p);
+ if(H5Sclose(mspace) < 0)
+ TEST_ERROR
+ if(H5Pclose(my_dcpl) < 0)
+ TEST_ERROR
+ if(H5Dclose(dset) < 0)
+ TEST_ERROR
+ if(H5Tclose(type) < 0)
+ TEST_ERROR
+ if(H5Fclose(file) < 0)
+ TEST_ERROR
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ for(i=0; i<dims[0]; i++)
+ for(j=0; j<dims[1]; j++)
+ for(k=0; k<dims[2]; k++)
+ for(l=0; l<dims[3]; l++) {
+ if(rbuf[i][j][k][l].p)
+ HDfree(rbuf[i][j][k][l].p);
+ if(wbuf[i][j][k][l].p)
+ HDfree(wbuf[i][j][k][l].p);
+ } /* end for */
+ if(fill_value.p)
+ HDfree(fill_value.p);
+ H5Sclose(fspace);
+ H5Sclose(mspace);
+ H5Pclose(my_dcpl);
+ H5Dclose(dset);
+ H5Tclose(type);
+ H5Fclose(file);
+ } H5E_END_TRY;
+ return -1;
+} /* end test_random_rank4_vl */
+
/*
* test_random_rank4_dump: Dump debugging info from test_random_rank4 to screen
* after failure.
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index 21e25b6..e4da0d3 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -2142,6 +2142,8 @@ compress_readAll(void)
int rank=1; /* Dataspace rank */
hsize_t dim=dim0; /* Dataspace dimensions */
unsigned u; /* Local index variable */
+ unsigned chunk_opts; /* Chunk options */
+ unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
hbool_t use_gpfs = FALSE; /* Use GPFS hints */
DATATYPE *data_read = NULL; /* data buffer */
DATATYPE *data_orig = NULL; /* expected data buffer */
@@ -2169,116 +2171,132 @@ compress_readAll(void)
for(u=0; u<dim;u++)
data_orig[u]=u;
- /* Process zero creates the file with a compressed, chunked dataset */
- if(mpi_rank==0) {
- hsize_t chunk_dim; /* Chunk dimensions */
-
- /* Create the file */
- fid = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((fid > 0), "H5Fcreate succeeded");
-
- /* Create property list for chunking and compression */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcpl > 0), "H5Pcreate succeeded");
-
- ret = H5Pset_layout(dcpl, H5D_CHUNKED);
- VRFY((ret >= 0), "H5Pset_layout succeeded");
-
- /* Use eight chunks */
- chunk_dim = dim / 8;
- ret = H5Pset_chunk(dcpl, rank, &chunk_dim);
- VRFY((ret >= 0), "H5Pset_chunk succeeded");
+ /* Run test both with and without filters disabled on partial chunks */
+ for(disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1;
+ disable_partial_chunk_filters++) {
+ /* Process zero creates the file with a compressed, chunked dataset */
+ if(mpi_rank==0) {
+ hsize_t chunk_dim; /* Chunk dimensions */
+
+ /* Create the file */
+ fid = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((fid > 0), "H5Fcreate succeeded");
+
+ /* Create property list for chunking and compression */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl > 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_layout(dcpl, H5D_CHUNKED);
+ VRFY((ret >= 0), "H5Pset_layout succeeded");
+
+ /* Use eight chunks */
+ chunk_dim = dim / 8;
+ ret = H5Pset_chunk(dcpl, rank, &chunk_dim);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+ /* Set chunk options appropriately */
+ if(disable_partial_chunk_filters) {
+ ret = H5Pget_chunk_opts(dcpl, &chunk_opts);
+ VRFY((ret>=0),"H5Pget_chunk_opts succeeded");
+
+ chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
+
+ ret = H5Pset_chunk_opts(dcpl, chunk_opts);
+ VRFY((ret>=0),"H5Pset_chunk_opts succeeded");
+ } /* end if */
+
+ ret = H5Pset_deflate(dcpl, 9);
+ VRFY((ret >= 0), "H5Pset_deflate succeeded");
+
+ /* Create dataspace */
+ dataspace = H5Screate_simple(rank, &dim, NULL);
+ VRFY((dataspace > 0), "H5Screate_simple succeeded");
+
+ /* Create dataset */
+ dataset = H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dataset > 0), "H5Dcreate2 succeeded");
+
+ /* Write compressed data */
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_orig);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* Close objects */
+ ret = H5Pclose(dcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Sclose(dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ }
- ret = H5Pset_deflate(dcpl, 9);
- VRFY((ret >= 0), "H5Pset_deflate succeeded");
+ /* Wait for file to be created */
+ MPI_Barrier(comm);
- /* Create dataspace */
- dataspace = H5Screate_simple(rank, &dim, NULL);
- VRFY((dataspace > 0), "H5Screate_simple succeeded");
+ /* -------------------
+ * OPEN AN HDF5 FILE
+ * -------------------*/
- /* Create dataset */
- dataset = H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- VRFY((dataset > 0), "H5Dcreate2 succeeded");
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type, use_gpfs);
+ VRFY((acc_tpl >= 0), "");
- /* Write compressed data */
- ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_orig);
- VRFY((ret >= 0), "H5Dwrite succeeded");
+ /* open the file collectively */
+ fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl);
+ VRFY((fid > 0), "H5Fopen succeeded");
- /* Close objects */
- ret = H5Pclose(dcpl);
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "H5Pclose succeeded");
- ret = H5Sclose(dataspace);
- VRFY((ret >= 0), "H5Sclose succeeded");
- ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose succeeded");
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
- }
-
- /* Wait for file to be created */
- MPI_Barrier(comm);
-
- /* -------------------
- * OPEN AN HDF5 FILE
- * -------------------*/
-
- /* setup file access template */
- acc_tpl = create_faccess_plist(comm, info, facc_type, use_gpfs);
- VRFY((acc_tpl >= 0), "");
-
- /* open the file collectively */
- fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl);
- VRFY((fid > 0), "H5Fopen succeeded");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "H5Pclose succeeded");
- /* Open dataset with compressed chunks */
- dataset = H5Dopen2(fid, "compressed_data", H5P_DEFAULT);
- VRFY((dataset > 0), "H5Dopen2 succeeded");
+ /* Open dataset with compressed chunks */
+ dataset = H5Dopen2(fid, "compressed_data", H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dopen2 succeeded");
- /* Try reading & writing data */
- if(dataset>0) {
- /* Create dataset transfer property list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist > 0), "H5Pcreate succeeded");
+ /* Try reading & writing data */
+ if(dataset>=0) {
+ /* Create dataset transfer property list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist > 0), "H5Pcreate succeeded");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
- }
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
- /* Try reading the data */
- ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ /* Try reading the data */
+ ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- /* Verify data read */
- for(u=0; u<dim; u++)
- if(data_orig[u]!=data_read[u]) {
- printf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n",__LINE__,
- (unsigned)u,data_orig[u],(unsigned)u,data_read[u]);
- nerrors++;
- }
+ /* Verify data read */
+ for(u=0; u<dim; u++)
+ if(data_orig[u]!=data_read[u]) {
+ printf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n",__LINE__,
+ (unsigned)u,data_orig[u],(unsigned)u,data_read[u]);
+ nerrors++;
+ }
- /* Writing to the compressed, chunked dataset in parallel should fail */
- H5E_BEGIN_TRY {
- ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
- } H5E_END_TRY;
- VRFY((ret < 0), "H5Dwrite failed");
+ /* Writing to the compressed, chunked dataset in parallel should fail */
+ H5E_BEGIN_TRY {
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
+ } H5E_END_TRY;
+ VRFY((ret < 0), "H5Dwrite failed");
- ret = H5Pclose(xfer_plist);
- VRFY((ret >= 0), "H5Pclose succeeded");
- ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose succeeded");
- } /* end if */
+ ret = H5Pclose(xfer_plist);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ } /* end if */
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
+ /* Close file */
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ } /* end for */
/* release data buffers */
if(data_read) HDfree(data_read);
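
The t_dset.c hunk above exercises a specific guarantee: a compressed, chunked dataset can be read collectively through the MPI-IO driver, while a parallel write to it is expected to fail. A minimal standalone sketch of that pattern follows; the function name, buffer size, and file name are illustrative assumptions (only the dataset name "compressed_data" comes from the test), and MPI is assumed to already be initialized by the caller:

    #include <assert.h>
    #include "mpi.h"
    #include "hdf5.h"

    /* Sketch of the pattern tested above: collective read of a
     * compressed, chunked dataset.  NELMTS and the function name are
     * illustrative; the dataset is assumed to hold at least NELMTS
     * ints and MPI_Init() is assumed to have been called. */
    #define NELMTS 1024
    static void
    read_compressed_collective(const char *filename)
    {
        hid_t  fapl, fid, dset, dxpl;
        int    buf[NELMTS];
        herr_t ret;

        /* File-access property list selecting the MPI-IO driver */
        fapl = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);

        fid = H5Fopen(filename, H5F_ACC_RDWR, fapl);
        H5Pclose(fapl);

        dset = H5Dopen2(fid, "compressed_data", H5P_DEFAULT);

        /* Transfer property list requesting collective I/O */
        dxpl = H5Pcreate(H5P_DATASET_XFER);
        H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

        /* Per the test above, the collective read should succeed... */
        ret = H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, buf);
        assert(ret >= 0);

        /* ...while a parallel write to the filtered dataset should fail */
        H5E_BEGIN_TRY {
            ret = H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, buf);
        } H5E_END_TRY;
        assert(ret < 0);

        H5Pclose(dxpl);
        H5Dclose(dset);
        H5Fclose(fid);
    }
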
diff --git a/testpar/t_filter_read.c b/testpar/t_filter_read.c
index f38b30e..8d6b5a3 100644
--- a/testpar/t_filter_read.c
+++ b/testpar/t_filter_read.c
@@ -213,6 +213,8 @@ test_filter_read(void)
hid_t dc; /* HDF5 IDs */
const hsize_t chunk_size[2] = {CHUNK_DIM1, CHUNK_DIM2}; /* Chunk dimensions */
hsize_t null_size; /* Size of dataset without filters */
+ unsigned chunk_opts; /* Chunk options */
+ unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
herr_t hrc;
const char *filename;
#ifdef H5_HAVE_FILTER_FLETCHER32
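
The hunk below wraps steps 1-3 of test_filter_read() in a loop over the new chunk option. In isolation, the new property-list calls compose as in the following sketch; the function name, chunk dimensions, and deflate level are illustrative assumptions, while the two API calls and the flag are the ones exercised by the test:

    #include <assert.h>
    #include "hdf5.h"

    /* Sketch: build a dataset-creation property list whose partial
     * edge chunks bypass the filter pipeline.  Chunk dimensions and
     * the deflate level are illustrative values. */
    static hid_t
    make_unfiltered_edge_dcpl(void)
    {
        hid_t    dcpl;
        hsize_t  chunk_dims[2] = {100, 100};
        unsigned opts;

        dcpl = H5Pcreate(H5P_DATASET_CREATE);
        H5Pset_chunk(dcpl, 2, chunk_dims);
        H5Pset_deflate(dcpl, 6);

        /* Skip filters on chunks that only partially intersect the
         * edge of the dataspace */
        H5Pset_chunk_opts(dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS);

        /* Read the option back to confirm it was stored */
        H5Pget_chunk_opts(dcpl, &opts);
        assert(opts & H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS);

        return dcpl;
    }
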
@@ -258,75 +260,104 @@ test_filter_read(void)
hrc = H5Pclose (dc);
VRFY(hrc>=0,"H5Pclose");
- /*----------------------------------------------------------
- * STEP 1: Test Fletcher32 Checksum by itself.
- *----------------------------------------------------------
- */
-#ifdef H5_HAVE_FILTER_FLETCHER32
+ /* Run steps 1-3 twice: once with filters applied to partial edge chunks, once with filters disabled on them */
+ for(disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1;
+ disable_partial_chunk_filters++) {
+ /* Set chunk options appropriately */
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc>=0,"H5Pcreate");
- dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc>=0,"H5Pset_filter");
+ hrc = H5Pset_chunk (dc, 2, chunk_size);
+ VRFY(hrc>=0,"H5Pset_filter");
- hrc = H5Pset_chunk (dc, 2, chunk_size);
- VRFY(hrc>=0,"H5Pset_filter");
+ hrc = H5Pget_chunk_opts(dc, &chunk_opts);
+ VRFY(hrc>=0,"H5Pget_chunk_opts");
- hrc = H5Pset_filter (dc,H5Z_FILTER_FLETCHER32,0,0,NULL);
- VRFY(hrc>=0,"H5Pset_filter");
+ if(disable_partial_chunk_filters)
+ chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
- filter_read_internal(filename,dc,&fletcher32_size);
- VRFY(fletcher32_size > null_size,"Size after checksumming is incorrect.");
+ hrc = H5Pclose (dc);
+ VRFY(hrc>=0,"H5Pclose");
- /* Clean up objects used for this test */
- hrc = H5Pclose (dc);
- VRFY(hrc>=0, "H5Pclose");
+ /*----------------------------------------------------------
+ * STEP 1: Test Fletcher32 Checksum by itself.
+ *----------------------------------------------------------
+ */
+#ifdef H5_HAVE_FILTER_FLETCHER32
-#endif /* H5_HAVE_FILTER_FLETCHER32 */
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc>=0,"H5Pset_filter");
- /*----------------------------------------------------------
- * STEP 2: Test deflation by itself.
- *----------------------------------------------------------
- */
-#ifdef H5_HAVE_FILTER_DEFLATE
+ hrc = H5Pset_chunk (dc, 2, chunk_size);
+ VRFY(hrc>=0,"H5Pset_filter");
- dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc>=0, "H5Pcreate");
+ hrc = H5Pset_chunk_opts (dc, chunk_opts);
+ VRFY(hrc>=0,"H5Pset_chunk_opts");
- hrc = H5Pset_chunk (dc, 2, chunk_size);
- VRFY(hrc>=0, "H5Pset_chunk");
+ hrc = H5Pset_filter (dc,H5Z_FILTER_FLETCHER32,0,0,NULL);
+ VRFY(hrc>=0,"H5Pset_filter");
- hrc = H5Pset_deflate (dc, 6);
- VRFY(hrc>=0, "H5Pset_deflate");
+ filter_read_internal(filename,dc,&fletcher32_size);
+ VRFY(fletcher32_size > null_size,"Size after checksumming is incorrect.");
- filter_read_internal(filename,dc,&deflate_size);
+ /* Clean up objects used for this test */
+ hrc = H5Pclose (dc);
+ VRFY(hrc>=0, "H5Pclose");
- /* Clean up objects used for this test */
- hrc = H5Pclose (dc);
- VRFY(hrc>=0, "H5Pclose");
+#endif /* H5_HAVE_FILTER_FLETCHER32 */
-#endif /* H5_HAVE_FILTER_DEFLATE */
+ /*----------------------------------------------------------
+ * STEP 2: Test deflation by itself.
+ *----------------------------------------------------------
+ */
+#ifdef H5_HAVE_FILTER_DEFLATE
- /*----------------------------------------------------------
- * STEP 3: Test szip compression by itself.
- *----------------------------------------------------------
- */
-#ifdef H5_HAVE_FILTER_SZIP
- if(h5_szip_can_encode() == 1) {
dc = H5Pcreate(H5P_DATASET_CREATE);
VRFY(dc>=0, "H5Pcreate");
hrc = H5Pset_chunk (dc, 2, chunk_size);
VRFY(hrc>=0, "H5Pset_chunk");
- hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
- VRFY(hrc>=0, "H5Pset_szip");
+ hrc = H5Pset_chunk_opts (dc, chunk_opts);
+ VRFY(hrc>=0,"H5Pset_chunk_opts");
- filter_read_internal(filename,dc,&szip_size);
+ hrc = H5Pset_deflate (dc, 6);
+ VRFY(hrc>=0, "H5Pset_deflate");
+
+ filter_read_internal(filename,dc,&deflate_size);
/* Clean up objects used for this test */
hrc = H5Pclose (dc);
VRFY(hrc>=0, "H5Pclose");
- }
+
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+ /*----------------------------------------------------------
+ * STEP 3: Test szip compression by itself.
+ *----------------------------------------------------------
+ */
+#ifdef H5_HAVE_FILTER_SZIP
+ if(h5_szip_can_encode() == 1) {
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc>=0, "H5Pcreate");
+
+ hrc = H5Pset_chunk (dc, 2, chunk_size);
+ VRFY(hrc>=0, "H5Pset_chunk");
+
+ hrc = H5Pset_chunk_opts (dc, chunk_opts);
+ VRFY(hrc>=0,"H5Pset_chunk_opts");
+
+ hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
+ VRFY(hrc>=0, "H5Pset_szip");
+
+ filter_read_internal(filename,dc,&szip_size);
+
+ /* Clean up objects used for this test */
+ hrc = H5Pclose (dc);
+ VRFY(hrc>=0, "H5Pclose");
+ }
#endif /* H5_HAVE_FILTER_SZIP */
+ } /* end for */
/*----------------------------------------------------------
* STEP 4: Test shuffling by itself.