-rw-r--r--  src/H5Dchunk.c           | 651
-rw-r--r--  src/H5Dint.c             |  18
-rw-r--r--  src/H5Dlayout.c          |  34
-rw-r--r--  src/H5Dpkg.h             |   3
-rw-r--r--  src/H5Dpublic.h          |   3
-rw-r--r--  src/H5Olayout.c          | 128
-rw-r--r--  src/H5Oprivate.h         |   7
-rw-r--r--  src/H5Pdcpl.c            | 123
-rw-r--r--  src/H5Ppublic.h          |   2
-rw-r--r--  test/dsets.c             | 227
-rw-r--r--  test/objcopy.c           | 141
-rw-r--r--  test/set_extent.c        |  54
-rw-r--r--  testpar/t_dset.c         | 206
-rw-r--r--  testpar/t_filter_read.c  | 116
14 files changed, 1452 insertions(+), 261 deletions(-)
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index d8e8eb5..309c7a8 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -103,6 +103,11 @@
/*#define H5D_CHUNK_DEBUG */
+/* Flags for the "edge_chunk_state" field below */
+#define H5D_RDCC_DISABLE_FILTERS 0x01u /* Disable filters on this chunk */
+#define H5D_RDCC_NEWLY_DISABLED_FILTERS 0x02u /* Filters have been disabled since
+ * the last flush */
+
/******************/
/* Local Typedefs */
@@ -113,6 +118,7 @@ typedef struct H5D_rdcc_ent_t {
hbool_t locked; /*entry is locked in cache */
hbool_t dirty; /*needs to be written to disk? */
hbool_t deleted; /*chunk about to be deleted */
+ unsigned edge_chunk_state; /*states related to edge chunks (see above) */
hsize_t scaled[H5O_LAYOUT_NDIMS]; /*scaled chunk 'name' (coordinates) */
uint32_t rd_count; /*bytes remaining to be read */
uint32_t wr_count; /*bytes remaining to be written */
@@ -174,6 +180,8 @@ typedef struct H5D_chunk_it_ud3_t {
/* needed for compressed variable-length data */
const H5O_pline_t *pline; /* Filter pipeline */
+ unsigned dset_ndims; /* Number of dimensions in dataset */
+ const hsize_t *dset_dims; /* Dataset dimensions */
/* needed for copy object pointed by refs */
H5O_copy_t *cpy_info; /* Copy options */
@@ -264,17 +272,18 @@ static herr_t H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id,
const H5D_dxpl_cache_t *dxpl_cache, H5D_rdcc_ent_t *ent, hbool_t reset);
static herr_t H5D__chunk_cache_evict(const H5D_t *dset, hid_t dxpl_id,
const H5D_dxpl_cache_t *dxpl_cache, H5D_rdcc_ent_t *ent, hbool_t flush);
+static hbool_t H5D__chunk_is_partial_edge_chunk(unsigned dset_ndims,
+ const uint32_t *chunk_dims, const hsize_t *chunk_scaled, const hsize_t *dset_dims);
static void *H5D__chunk_lock(const H5D_io_info_t *io_info,
- H5D_chunk_ud_t *udata, hbool_t relax);
+ H5D_chunk_ud_t *udata, hbool_t relax, hbool_t prev_unfilt_chunk);
static herr_t H5D__chunk_unlock(const H5D_io_info_t *io_info,
const H5D_chunk_ud_t *udata, hbool_t dirty, void *chunk,
uint32_t naccessed);
static herr_t H5D__chunk_cache_prune(const H5D_t *dset, hid_t dxpl_id,
const H5D_dxpl_cache_t *dxpl_cache, size_t size);
-static herr_t H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata);
+static herr_t H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata, hbool_t new_unfilt_chunk);
static herr_t H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info,
- const H5F_block_t *old_chunk, H5F_block_t *new_chunk, hbool_t *need_insert,
- hsize_t scaled[]);
+ const H5F_block_t *old_chunk, H5F_block_t *new_chunk, hbool_t *need_insert);
#ifdef H5_HAVE_PARALLEL
static herr_t H5D__chunk_collective_fill(const H5D_t *dset, hid_t dxpl_id,
H5D_chunk_coll_info_t *chunk_info, size_t chunk_size, const void *fill_buf);
@@ -420,7 +429,7 @@ H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters,
/* Create the chunk it if it doesn't exist, or reallocate the chunk
* if its size changed.
*/
- if(H5D__chunk_file_alloc(&idx_info, &old_chunk, &udata.chunk_block, &need_insert, scaled) < 0)
+ if(H5D__chunk_file_alloc(&idx_info, &old_chunk, &udata.chunk_block, &need_insert) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate chunk")
/* Make sure the address of the chunk is returned. */
@@ -1786,6 +1795,7 @@ htri_t
H5D__chunk_cacheable(const H5D_io_info_t *io_info, haddr_t caddr, hbool_t write_op)
{
const H5D_t *dataset = io_info->dset; /* Local pointer to dataset info */
+ hbool_t has_filters = FALSE; /* Whether there are filters on the chunk or not */
htri_t ret_value = FAIL; /* Return value */
FUNC_ENTER_PACKAGE
@@ -1794,8 +1804,23 @@ H5D__chunk_cacheable(const H5D_io_info_t *io_info, haddr_t caddr, hbool_t write_
HDassert(io_info);
HDassert(dataset);
- /* Must bring the whole chunk in if there are any filters */
- if(dataset->shared->dcpl_cache.pline.nused > 0)
+ /* Must bring the whole chunk in if there are any filters on the chunk.
+ * Make sure to check if filters are on the dataset but disabled for the
+ * chunk because it is a partial edge chunk. */
+ if(dataset->shared->dcpl_cache.pline.nused > 0) {
+ if(dataset->shared->layout.u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) {
+ has_filters = !H5D__chunk_is_partial_edge_chunk(
+ io_info->dset->shared->ndims,
+ io_info->dset->shared->layout.u.chunk.dim,
+ io_info->store->chunk.scaled,
+ io_info->dset->shared->curr_dims);
+ } /* end if */
+ else
+ has_filters = TRUE;
+ } /* end if */
+
+ if(has_filters)
ret_value = TRUE;
else {
#ifdef H5_HAVE_PARALLEL
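
The new check above only forces whole-chunk caching when the filter pipeline actually applies to the chunk at hand. A minimal standalone sketch of that rule (illustrative names, not the library's internal API):

#include <stdbool.h>

/* Sketch: filters force a chunk into the cache only when they apply to
 * that chunk. Partial edge chunks are exempt when the dataset was
 * created with the "don't filter partial edge chunks" option. */
static bool chunk_has_filters(bool dset_has_pipeline,
                              bool dont_filter_partial_chunks,
                              bool is_partial_edge_chunk)
{
    if (!dset_has_pipeline)
        return false;
    if (dont_filter_partial_chunks && is_partial_edge_chunk)
        return false;   /* filters disabled for this edge chunk */
    return true;
}
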
@@ -1957,7 +1982,7 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
src_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->src_type_size;
/* Lock the chunk into the cache */
- if(NULL == (chunk = H5D__chunk_lock(io_info, &udata, FALSE)))
+ if(NULL == (chunk = H5D__chunk_lock(io_info, &udata, FALSE, FALSE)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
/* Set up the storage buffer information for this chunk */
@@ -2091,7 +2116,7 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
entire_chunk = FALSE;
/* Lock the chunk into the cache */
- if(NULL == (chunk = H5D__chunk_lock(io_info, &udata, entire_chunk)))
+ if(NULL == (chunk = H5D__chunk_lock(io_info, &udata, entire_chunk, FALSE)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
/* Set up the storage buffer information for this chunk */
@@ -2114,7 +2139,7 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
udata.chunk_block.length = io_info->dset->shared->layout.u.chunk.size;
/* Allocate the chunk */
- if(H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert, chunk_info->scaled) < 0)
+ if(H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level")
/* Make sure the address of the chunk is returned. */
@@ -2614,6 +2639,7 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *scaled,
udata->chunk_block.offset = HADDR_UNDEF;
udata->chunk_block.length = 0;
udata->filter_mask = 0;
+ udata->new_unfilt_chunk = FALSE;
/* Check for chunk in cache */
if(dset->shared->cache.chunk.nslots > 0) {
@@ -2742,7 +2768,8 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
udata.chunk_idx = ent->chunk_idx;
/* Should the chunk be filtered before writing it to disk? */
- if(dset->shared->dcpl_cache.pline.nused) {
+ if(dset->shared->dcpl_cache.pline.nused
+ && !(ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS)) {
size_t alloc = udata.chunk_block.length; /* Bytes allocated for BUF */
size_t nbytes; /* Chunk size (in bytes) */
@@ -2781,10 +2808,27 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
/* Indicate that the chunk must be allocated */
must_alloc = TRUE;
} /* end if */
- else if(!H5F_addr_defined(udata.chunk_block.offset))
+ else if(!H5F_addr_defined(udata.chunk_block.offset)) {
/* Indicate that the chunk must be allocated */
must_alloc = TRUE;
+        /* This flag could still be set for this chunk; just clear and
+         * ignore it */
+ ent->edge_chunk_state &= ~H5D_RDCC_NEWLY_DISABLED_FILTERS;
+ } /* end else */
+ else if(ent->edge_chunk_state & H5D_RDCC_NEWLY_DISABLED_FILTERS) {
+ /* Chunk on disk is still filtered, must insert to allocate correct
+ * size */
+ must_alloc = TRUE;
+
+ /* Set the disable filters field back to the standard disable
+ * filters setting, as it no longer needs to be inserted with every
+ * flush */
+ ent->edge_chunk_state &= ~H5D_RDCC_NEWLY_DISABLED_FILTERS;
+ } /* end else */
+
+ HDassert(!(ent->edge_chunk_state & H5D_RDCC_NEWLY_DISABLED_FILTERS));
+
/* Check if the chunk needs to be allocated (it also could exist already
* and the chunk alloc operation could resize it)
*/
@@ -2799,7 +2843,7 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
/* Create the chunk it if it doesn't exist, or reallocate the chunk
* if its size changed.
*/
- if(H5D__chunk_file_alloc(&idx_info, &(ent->chunk_block), &udata.chunk_block, &need_insert, ent->scaled) < 0)
+ if(H5D__chunk_file_alloc(&idx_info, &(ent->chunk_block), &udata.chunk_block, &need_insert) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level")
/* Update the chunk entry's info, in case it was allocated or relocated */
@@ -2834,7 +2878,9 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
if(buf == ent->chunk)
buf = NULL;
if(ent->chunk != NULL)
- ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline));
+ ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk,
+ ((ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS) ? NULL
+ : &(dset->shared->dcpl_cache.pline)));
} /* end if */
done:
@@ -2850,7 +2896,9 @@ done:
*/
if(ret_value < 0 && point_of_no_return)
if(ent->chunk)
- ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline));
+ ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk,
+ ((ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS) ? NULL
+ : &(dset->shared->dcpl_cache.pline)));
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
} /* end H5D__chunk_flush_entry() */
@@ -2892,7 +2940,9 @@ H5D__chunk_cache_evict(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
else {
/* Don't flush, just free chunk */
if(ent->chunk != NULL)
- ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline));
+ ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk,
+ ((ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS) ? NULL
+ : &(dset->shared->dcpl_cache.pline)));
} /* end else */
/* Unlink from list */
@@ -3071,10 +3121,11 @@ done:
*/
static void *
H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
- hbool_t relax)
+ hbool_t relax, hbool_t prev_unfilt_chunk)
{
const H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */
const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info - always equal to the pline passed to H5D__chunk_mem_alloc */
+ const H5O_pline_t *old_pline = pline; /* Old pipeline, i.e. pipeline used to read the chunk */
const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
const H5O_fill_t *fill = &(dset->shared->dcpl_cache.fill); /* Fill value info */
H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */
@@ -3082,6 +3133,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache*/
H5D_rdcc_ent_t *ent; /*cache entry */
size_t chunk_size; /*size of a chunk */
+ hbool_t disable_filters = FALSE; /* Whether to disable filters (when adding to cache) */
void *chunk = NULL; /*the file chunk */
void *ret_value = NULL; /* Return value */
@@ -3094,6 +3146,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
HDassert(dset);
HDassert(TRUE == H5P_isa_class(io_info->md_dxpl_id, H5P_DATASET_XFER));
HDassert(TRUE == H5P_isa_class(io_info->raw_dxpl_id, H5P_DATASET_XFER));
+ HDassert(!(udata->new_unfilt_chunk && prev_unfilt_chunk));
HDassert(!rdcc->tmp_head);
/* Get the chunk's size */
@@ -3124,6 +3177,66 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
*/
rdcc->stats.nhits++;
+ /* Make adjustments if the edge chunk status changed recently */
+ if(pline->nused) {
+ /* If the chunk recently became an unfiltered partial edge chunk
+ * while in cache, we must make some changes to the entry */
+ if(udata->new_unfilt_chunk) {
+ /* If this flag is set then partial chunk filters must be
+ * disabled, and the chunk must not have previously been a
+ * partial chunk (with disabled filters) */
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+ HDassert(!(ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS));
+ HDassert(old_pline->nused);
+
+ /* Disable filters. Set pline to NULL instead of just the
+ * default pipeline to make a quick failure more likely if the
+ * code is changed in an inappropriate/incomplete way. */
+ pline = NULL;
+
+ /* Reallocate the chunk so H5D__chunk_mem_xfree doesn't get confused
+ */
+ if(NULL == (chunk = H5D__chunk_mem_alloc(chunk_size, pline)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
+ HDmemcpy(chunk, ent->chunk, chunk_size);
+ ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, old_pline);
+ ent->chunk = (uint8_t *)chunk;
+ chunk = NULL;
+
+ /* Mark the chunk as having filters disabled as well as "newly
+ * disabled" so it is inserted on flush */
+ ent->edge_chunk_state |= H5D_RDCC_DISABLE_FILTERS;
+ ent->edge_chunk_state |= H5D_RDCC_NEWLY_DISABLED_FILTERS;
+ } /* end if */
+ else if(prev_unfilt_chunk) {
+ /* If this flag is set then partial chunk filters must be
+ * disabled, and the chunk must have previously been a partial
+ * chunk (with disabled filters) */
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+ HDassert((ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS));
+ HDassert(pline->nused);
+
+ /* Mark the old pipeline as having been disabled */
+ old_pline = NULL;
+
+ /* Reallocate the chunk so H5D__chunk_mem_xfree doesn't get confused
+ */
+ if(NULL == (chunk = H5D__chunk_mem_alloc(chunk_size, pline)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
+ HDmemcpy(chunk, ent->chunk, chunk_size);
+
+ ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, old_pline);
+ ent->chunk = (uint8_t *)chunk;
+ chunk = NULL;
+
+ /* Mark the chunk as having filters enabled */
+ ent->edge_chunk_state &= ~(H5D_RDCC_DISABLE_FILTERS
+ | H5D_RDCC_NEWLY_DISABLED_FILTERS);
+ } /* end else */
+ } /* end if */
+
/*
* If the chunk is not at the beginning of the cache; move it backward
* by one slot. This is how we implement the LRU preemption
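
Both branches above perform the same buffer dance: the cached bytes are copied into a buffer allocated under the chunk's new pipeline state, and the old buffer is freed under the pipeline it was allocated with, so allocator and deallocator always match. A hedged sketch of that pattern, using the internal helpers that appear in this diff (signatures assumed from their uses here):

/* Reallocate a cached chunk buffer when its filter state flips.
 * old_pline: pipeline the buffer was allocated under (NULL = unfiltered);
 * new_pline: pipeline it must be allocated under from now on. */
static uint8_t *swap_chunk_buf(uint8_t *old_buf, size_t size,
                               const H5O_pline_t *old_pline,
                               const H5O_pline_t *new_pline)
{
    void *new_buf;

    if (NULL == (new_buf = H5D__chunk_mem_alloc(size, new_pline)))
        return NULL;                     /* caller reports the error */
    HDmemcpy(new_buf, old_buf, size);
    (void)H5D__chunk_mem_xfree(old_buf, old_pline);
    return (uint8_t *)new_buf;
}
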
@@ -3152,6 +3265,39 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
chunk_addr = udata->chunk_block.offset;
chunk_alloc = udata->chunk_block.length;
+ /* Check if we should disable filters on this chunk */
+ if(pline->nused) {
+ if(udata->new_unfilt_chunk) {
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+
+ /* Disable the filters for writing */
+ disable_filters = TRUE;
+ pline = NULL;
+ } /* end if */
+ else if(prev_unfilt_chunk) {
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+
+ /* Mark the filters as having been previously disabled (for the
+ * chunk as currently on disk) - disable the filters for reading
+ */
+ old_pline = NULL;
+ } /* end if */
+ else if(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) {
+ /* Check if this is an edge chunk */
+ if(H5D__chunk_is_partial_edge_chunk(io_info->dset->shared->ndims,
+ layout->u.chunk.dim, io_info->store->chunk.scaled,
+ io_info->dset->shared->curr_dims)) {
+ /* Disable the filters for both writing and reading */
+ disable_filters = TRUE;
+ old_pline = NULL;
+ pline = NULL;
+ } /* end if */
+ } /* end if */
+ } /* end if */
+
if(relax) {
/*
* Not in the cache, but we're about to overwrite the whole thing
@@ -3181,16 +3327,32 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
/* Chunk size on disk isn't [likely] the same size as the final chunk
* size in memory, so allocate memory big enough. */
- if(NULL == (chunk = H5D__chunk_mem_alloc(my_chunk_alloc, pline)))
+ if(NULL == (chunk = H5D__chunk_mem_alloc(my_chunk_alloc, (udata->new_unfilt_chunk ? old_pline : pline))))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
if(H5F_block_read(dset->oloc.file, H5FD_MEM_DRAW, chunk_addr, my_chunk_alloc, io_info->raw_dxpl_id, chunk) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, NULL, "unable to read raw data chunk")
- if(pline->nused)
- if(H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &(udata->filter_mask), io_info->dxpl_cache->err_detect,
- io_info->dxpl_cache->filter_cb, &my_chunk_alloc, &buf_alloc, &chunk) < 0)
+ if(old_pline && old_pline->nused) {
+ if(H5Z_pipeline(old_pline, H5Z_FLAG_REVERSE,
+ &(udata->filter_mask),
+ io_info->dxpl_cache->err_detect,
+ io_info->dxpl_cache->filter_cb,
+ &my_chunk_alloc, &buf_alloc, &chunk) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, NULL, "data pipeline read failed")
+ /* Reallocate chunk if necessary */
+ if(udata->new_unfilt_chunk) {
+ void *tmp_chunk = chunk;
+
+ if(NULL == (chunk = H5D__chunk_mem_alloc(my_chunk_alloc, pline))) {
+ (void)H5D__chunk_mem_xfree(tmp_chunk, old_pline);
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
+ } /* end if */
+ HDmemcpy(chunk, tmp_chunk, chunk_size);
+ (void)H5D__chunk_mem_xfree(tmp_chunk, old_pline);
+ } /* end if */
+ } /* end if */
+
/* Increment # of cache misses */
rdcc->stats.nmisses++;
} /* end if */
@@ -3259,6 +3421,10 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
if(NULL == (ent = H5FL_CALLOC(H5D_rdcc_ent_t)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate raw data chunk entry")
+ ent->edge_chunk_state = disable_filters ? H5D_RDCC_DISABLE_FILTERS : 0;
+ if(udata->new_unfilt_chunk)
+ ent->edge_chunk_state |= H5D_RDCC_NEWLY_DISABLED_FILTERS;
+
/* Initialize the new entry */
ent->chunk_block.offset = chunk_addr;
ent->chunk_block.length = chunk_alloc;
@@ -3367,12 +3533,30 @@ H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata,
* It's not in the cache, probably because it's too big. If it's
* dirty then flush it to disk. In any case, free the chunk.
*/
+ hbool_t is_unfiltered_edge_chunk = FALSE; /* Whether the chunk is an unfiltered edge chunk */
+
+ /* Check if we should disable filters on this chunk */
+ if(udata->new_unfilt_chunk) {
+ HDassert(layout->u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+
+ is_unfiltered_edge_chunk = TRUE;
+ } /* end if */
+ else if(layout->u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) {
+ /* Check if the chunk is an edge chunk, and disable filters if so */
+ is_unfiltered_edge_chunk = H5D__chunk_is_partial_edge_chunk(
+ io_info->dset->shared->ndims, layout->u.chunk.dim,
+ io_info->store->chunk.scaled, io_info->dset->shared->curr_dims);
+ } /* end if */
+
if(dirty) {
H5D_rdcc_ent_t fake_ent; /* "fake" chunk cache entry */
HDmemset(&fake_ent, 0, sizeof(fake_ent));
fake_ent.dirty = TRUE;
- HDmemcpy(fake_ent.scaled, udata->common.scaled, sizeof(hsize_t) * layout->u.chunk.ndims);
+ if(is_unfiltered_edge_chunk)
+ fake_ent.edge_chunk_state = H5D_RDCC_DISABLE_FILTERS;
+ if(udata->new_unfilt_chunk)
+ fake_ent.edge_chunk_state |= H5D_RDCC_NEWLY_DISABLED_FILTERS;
HDmemcpy(fake_ent.scaled, udata->common.scaled, sizeof(hsize_t) * layout->u.chunk.ndims);
HDassert(layout->u.chunk.size > 0);
fake_ent.chunk_idx = udata->chunk_idx;
@@ -3385,7 +3569,8 @@ H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata,
} /* end if */
else {
if(chunk)
- chunk = H5D__chunk_mem_xfree(chunk, &(io_info->dset->shared->dcpl_cache.pline));
+ chunk = H5D__chunk_mem_xfree(chunk, (is_unfiltered_edge_chunk ? NULL
+ : &(io_info->dset->shared->dcpl_cache.pline)));
} /* end else */
} /* end if */
else {
@@ -3531,9 +3716,12 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_
unsigned filter_mask = 0; /* Filter mask for chunks that have them */
const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info */
+ const H5O_pline_t def_pline = H5O_CRT_PIPELINE_DEF; /* Default pipeline */
const H5O_fill_t *fill = &(dset->shared->dcpl_cache.fill); /* Fill value info */
H5D_fill_value_t fill_status; /* The fill value status */
hbool_t should_fill = FALSE; /* Whether fill values should be written */
+ void *unfilt_fill_buf = NULL; /* Unfiltered fill value buffer */
+ void **fill_buf = NULL; /* Pointer to the fill buffer to use for a chunk */
H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */
#ifdef H5_HAVE_PARALLEL
@@ -3550,6 +3738,10 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_
unsigned op_dim; /* Current operating dimension */
H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */
hbool_t fb_info_init = FALSE; /* Whether the fill value buffer has been initialized */
+ hbool_t has_unfilt_edge_chunks = FALSE; /* Whether there are partial edge chunks with disabled filters */
+ hbool_t unfilt_edge_chunk_dim[H5O_LAYOUT_NDIMS]; /* Whether there are unfiltered edge chunks at the edge of each dimension */
+ hsize_t edge_chunk_scaled[H5O_LAYOUT_NDIMS]; /* Offset of the unfiltered edge chunks at the edge of each dimension */
+ unsigned nunfilt_edge_chunk_dims = 0; /* Number of dimensions on an edge */
const H5O_storage_chunk_t *sc = &(layout->storage.u.chunk);
herr_t ret_value = SUCCEED; /* Return value */
@@ -3594,6 +3786,29 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_
if(H5D__get_dxpl_cache(raw_dxpl_id, &dxpl_cache) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
+ /* Calculate the minimum and maximum chunk offsets in each dimension, and
+ * determine if there are any unfiltered partial edge chunks. Note that we
+ * assume here that all elements of space_dim are > 0. This is checked at
+ * the top of this function. */
+ for(op_dim=0; op_dim<space_ndims; op_dim++) {
+ min_unalloc[op_dim] = (old_dim[op_dim] + chunk_dim[op_dim] - 1) / chunk_dim[op_dim];
+ max_unalloc[op_dim] = (space_dim[op_dim] - 1) / chunk_dim[op_dim];
+
+ /* Calculate if there are unfiltered edge chunks at the edge of this
+ * dimension. Note the edge_chunk_scaled is uninitialized for
+         * dimensions where unfilt_edge_chunk_dim is FALSE. */
+ if((layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS)
+ && pline->nused > 0
+ && space_dim[op_dim] % chunk_dim[op_dim] != 0) {
+ has_unfilt_edge_chunks = TRUE;
+ unfilt_edge_chunk_dim[op_dim] = TRUE;
+ edge_chunk_scaled[op_dim] = max_unalloc[op_dim];
+ } /* end if */
+ else
+ unfilt_edge_chunk_dim[op_dim] = FALSE;
+ } /* end for */
+
/* Get original chunk size */
H5_CHECKED_ASSIGN(orig_chunk_size, size_t, layout->u.chunk.size, uint32_t);
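
To make the offset arithmetic above concrete, here is a worked one-dimensional example (assumed values: old_dim = 6, space_dim = 10, chunk_dim = 4):

#include <stdio.h>

int main(void)
{
    unsigned long long old_dim = 6, space_dim = 10, chunk_dim = 4;

    /* First chunk that still needs allocating: ceil(6/4) = 2 */
    unsigned long long min_unalloc = (old_dim + chunk_dim - 1) / chunk_dim;
    /* Last chunk in the new extent: floor((10-1)/4) = 2 */
    unsigned long long max_unalloc = (space_dim - 1) / chunk_dim;
    /* 10 % 4 != 0, so the chunk at scaled offset 2 is a partial edge
     * chunk and (with the option set) is written unfiltered */
    int has_unfilt_edge = (space_dim % chunk_dim) != 0;

    printf("allocate %llu..%llu, unfiltered edge: %d\n",
           min_unalloc, max_unalloc, has_unfilt_edge);
    return 0;
}
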
@@ -3625,6 +3840,11 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize fill buffer info")
fb_info_init = TRUE;
+ /* Initialize the fill_buf pointer to the buffer in fb_info. If edge
+ * chunk filters are disabled, we will switch the buffer as appropriate
+ * for each chunk. */
+ fill_buf = &fb_info.fill_buf;
+
/* Check if there are filters which need to be applied to the chunk */
/* (only do this in advance when the chunk info can be re-used (i.e.
* it doesn't contain any non-default VL datatype fill values)
@@ -3632,6 +3852,14 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_
if(!fb_info.has_vlen_fill_type && pline->nused > 0) {
size_t buf_size = orig_chunk_size;
+ /* If the dataset has disabled partial chunk filters, create a copy
+ * of the unfiltered fill_buf to use for partial chunks */
+ if(has_unfilt_edge_chunks) {
+ if(NULL == (unfilt_fill_buf = H5D__chunk_mem_alloc(orig_chunk_size, &def_pline)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for raw data chunk")
+ HDmemcpy(unfilt_fill_buf, fb_info.fill_buf, orig_chunk_size);
+ } /* end if */
+
/* Push the chunk through the filters */
if(H5Z_pipeline(pline, 0, &filter_mask, dxpl_cache->err_detect, dxpl_cache->filter_cb, &orig_chunk_size, &buf_size, &fb_info.fill_buf) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "output pipeline failed")
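
With the option enabled there are now two fill buffers in play: fb_info.fill_buf holds the filtered fill chunk for interior chunks, while unfilt_fill_buf keeps the raw copy taken before the pipeline ran, for partial edge chunks. A minimal sketch of the per-chunk selection (illustrative names):

#include <stdbool.h>
#include <stddef.h>

/* Pick the fill buffer and on-disk size for one chunk. Interior chunks
 * get the filtered buffer and its post-pipeline size; partial edge
 * chunks get the raw buffer and the full chunk size. */
static const void *select_fill(bool is_partial_edge,
                               const void *filt_buf, size_t filt_size,
                               const void *raw_buf, size_t raw_size,
                               size_t *write_size)
{
    if (is_partial_edge) {
        *write_size = raw_size;
        return raw_buf;
    }
    *write_size = filt_size;
    return filt_buf;
}
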
@@ -3650,14 +3878,6 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_
idx_info.layout = &dset->shared->layout.u.chunk;
idx_info.storage = &dset->shared->layout.storage.u.chunk;
- /* Calculate the minimum and maximum chunk offsets in each dimension. Note
- * that we assume here that all elements of space_dim are > 0. This is
- * checked at the top of this function. */
- for(op_dim = 0; op_dim < (unsigned)space_ndims; op_dim++) {
- min_unalloc[op_dim] = (old_dim[op_dim] + chunk_dim[op_dim] - 1) / chunk_dim[op_dim];
- max_unalloc[op_dim] = (space_dim[op_dim] - 1) / chunk_dim[op_dim];
- } /* end for */
-
/* Loop over all chunks */
/* The algorithm is:
* For each dimension:
@@ -3682,6 +3902,7 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_
chunk_size = orig_chunk_size;
for(op_dim = 0; op_dim < space_ndims; op_dim++) {
H5D_chunk_ud_t udata; /* User data for querying chunk info */
+ unsigned u; /* Local index variable */
int i; /* Local index variable */
/* Check if allocation along this dimension is really necessary */
@@ -3692,6 +3913,29 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_
HDmemset(scaled, 0, (space_ndims * sizeof(scaled[0])));
scaled[op_dim] = min_unalloc[op_dim];
+ if(has_unfilt_edge_chunks) {
+ /* Initialize nunfilt_edge_chunk_dims */
+ nunfilt_edge_chunk_dims = 0;
+ for(u = 0; u < space_ndims; u++)
+ if(unfilt_edge_chunk_dim[u] && scaled[u]
+ == edge_chunk_scaled[u])
+ nunfilt_edge_chunk_dims++;
+
+ /* Initialize chunk_size and fill_buf */
+ if(should_fill && !fb_info.has_vlen_fill_type) {
+ HDassert(fb_info_init);
+ HDassert(unfilt_fill_buf);
+ if(nunfilt_edge_chunk_dims) {
+ fill_buf = &unfilt_fill_buf;
+ chunk_size = layout->u.chunk.size;
+ } /* end if */
+ else {
+ fill_buf = &fb_info.fill_buf;
+ chunk_size = orig_chunk_size;
+ } /* end else */
+ } /* end if */
+ } /* end if */
+
carry = FALSE;
} /* end else */
@@ -3707,12 +3951,12 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_
/* Make sure the chunk is really in the dataset and outside the
* original dimensions */
{
- unsigned u; /* Local index variable */
+ unsigned v; /* Local index variable */
hbool_t outside_orig = FALSE;
- for(u = 0; u < space_ndims; u++) {
- HDassert((scaled[u] * chunk_dim[u]) < space_dim[u]);
- if((scaled[u] * chunk_dim[u]) >= old_dim[u])
+ for(v = 0; v < space_ndims; v++) {
+ HDassert((scaled[v] * chunk_dim[v]) < space_dim[v]);
+ if((scaled[v] * chunk_dim[v]) >= old_dim[v])
outside_orig = TRUE;
} /* end for */
HDassert(outside_orig);
@@ -3723,6 +3967,7 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_
if(fb_info_init && fb_info.has_vlen_fill_type) {
/* Sanity check */
HDassert(should_fill);
+ HDassert(!unfilt_fill_buf);
#ifdef H5_HAVE_PARALLEL
HDassert(!using_mpi); /* Can't write VL datatypes in parallel currently */
#endif
@@ -3741,7 +3986,7 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_
HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "can't refill fill value buffer")
/* Check if there are filters which need to be applied to the chunk */
- if(pline->nused > 0) {
+ if((pline->nused > 0) && !nunfilt_edge_chunk_dims) {
size_t nbytes = orig_chunk_size;
/* Push the chunk through the filters */
@@ -3759,6 +4004,8 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_
} /* end if */
else
chunk_size = layout->u.chunk.size;
+
+ HDassert(*fill_buf == fb_info.fill_buf);
} /* end if */
/* Initialize the chunk information */
@@ -3770,7 +4017,7 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_
udata.filter_mask = filter_mask;
/* Allocate the chunk (with all processes) */
- if(H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert, scaled) < 0)
+ if(H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level")
HDassert(H5F_addr_defined(udata.chunk_block.offset));
@@ -3786,6 +4033,9 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_
/* collect all chunk addresses to be written to
write collectively at the end */
/* allocate/resize address array if no more space left */
+ /* Note that if we add support for parallel filters we must
+ * also store an array of chunk sizes and pass it to the
+             * appropriate collective write function */
if(0 == chunk_info.num_io % 1024)
if(NULL == (chunk_info.addr = (haddr_t *)H5MM_realloc(chunk_info.addr, (chunk_info.num_io + 1024) * sizeof(haddr_t))))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "memory allocation failed for chunk addresses")
@@ -3799,7 +4049,7 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_
} /* end if */
else {
#endif /* H5_HAVE_PARALLEL */
- if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.chunk_block.offset, chunk_size, raw_dxpl_id, fb_info.fill_buf) < 0)
+ if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.chunk_block.offset, chunk_size, raw_dxpl_id, *fill_buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
#ifdef H5_HAVE_PARALLEL
} /* end else */
@@ -3820,8 +4070,31 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_
scaled[i] = min_unalloc[i];
else
scaled[i] = 0;
- } /* end if */
+
+ /* Check if we just left the edge in this dimension */
+ if(unfilt_edge_chunk_dim[i]
+ && edge_chunk_scaled[i] == max_unalloc[i]
+ && scaled[i] < edge_chunk_scaled[i]) {
+ nunfilt_edge_chunk_dims--;
+ if(should_fill && nunfilt_edge_chunk_dims == 0 && !fb_info.has_vlen_fill_type) {
+ HDassert(!H5D__chunk_is_partial_edge_chunk(space_ndims, chunk_dim, scaled, space_dim));
+ fill_buf = &fb_info.fill_buf;
+ chunk_size = orig_chunk_size;
+ } /* end if */
+ } /* end if */
+ } /* end if */
else {
+ /* Check if we just entered the edge in this dimension */
+ if(unfilt_edge_chunk_dim[i] && scaled[i] == edge_chunk_scaled[i]) {
+ HDassert(edge_chunk_scaled[i] == max_unalloc[i]);
+ nunfilt_edge_chunk_dims++;
+ if(should_fill && nunfilt_edge_chunk_dims == 1 && !fb_info.has_vlen_fill_type) {
+ HDassert(H5D__chunk_is_partial_edge_chunk(space_ndims, chunk_dim, scaled, space_dim));
+ fill_buf = &unfilt_fill_buf;
+ chunk_size = layout->u.chunk.size;
+ } /* end if */
+ } /* end if */
+
carry = FALSE;
break;
} /* end else */
@@ -3852,6 +4125,9 @@ done:
if(fb_info_init && H5D__fill_term(&fb_info) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release fill buffer info")
+ /* Free the unfiltered fill value buffer */
+ unfilt_fill_buf = H5D__chunk_mem_xfree(unfilt_fill_buf, &def_pline);
+
#ifdef H5_HAVE_PARALLEL
if(using_mpi && chunk_info.addr)
H5MM_free(chunk_info.addr);
@@ -3860,6 +4136,185 @@ done:
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
} /* end H5D__chunk_allocate() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__chunk_update_old_edge_chunks
+ *
+ * Purpose: Update all chunks which were previously partial edge
+ * chunks and are now complete. Determines exactly which
+ * chunks need to be updated and locks each into cache using
+ * the 'prev_unfilt_chunk' flag, then unlocks it, causing
+ * filters to be applied as necessary.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * April 14, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5D__chunk_update_old_edge_chunks(H5D_t *dset, hid_t dxpl_id, hsize_t old_dim[])
+{
+ hsize_t old_edge_chunk_sc[H5O_LAYOUT_NDIMS]; /* Offset of first previously incomplete chunk in each dimension */
+ hsize_t max_edge_chunk_sc[H5O_LAYOUT_NDIMS]; /* largest offset of chunks that might need to be modified in each dimension */
+ hbool_t new_full_dim[H5O_LAYOUT_NDIMS]; /* Whether the plane of chunks in this dimension needs to be modified */
+ const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
+ const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info */
+ hsize_t chunk_sc[H5O_LAYOUT_NDIMS]; /* Offset of current chunk */
+ const uint32_t *chunk_dim = layout->u.chunk.dim; /* Convenience pointer to chunk dimensions */
+ unsigned space_ndims; /* Dataset's space rank */
+ const hsize_t *space_dim; /* Dataset's dataspace dimensions */
+    unsigned op_dim;            /* Current operating dimension */
+ H5D_io_info_t chk_io_info; /* Chunked I/O info object */
+ H5D_chunk_ud_t chk_udata; /* User data for locking chunk */
+ H5D_storage_t chk_store; /* Chunk storage information */
+ H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
+ H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */
+ void *chunk; /* The file chunk */
+    hbool_t carry;              /* Flag to indicate that chunk increment carries to a higher dimension (sort of) */
+ const H5O_storage_chunk_t *sc = &(layout->storage.u.chunk);
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Check args */
+ HDassert(dset && H5D_CHUNKED == layout->type);
+ HDassert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
+ H5D_CHUNK_STORAGE_INDEX_CHK(sc);
+ HDassert(TRUE == H5P_isa_class(dxpl_id, H5P_DATASET_XFER));
+ HDassert(pline->nused > 0);
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+
+ /* Retrieve the dataset dimensions */
+ space_dim = dset->shared->curr_dims;
+ space_ndims = dset->shared->ndims;
+
+    /* The last dimension in chunk_sc is always 0 */
+ chunk_sc[space_ndims] = (hsize_t)0;
+
+ /* Check if any current dimensions are smaller than the chunk size, or if
+ * any old dimensions are 0. If so we do not have to do anything. */
+ for(op_dim=0; op_dim<space_ndims; op_dim++)
+ if((space_dim[op_dim] < chunk_dim[op_dim]) || old_dim[op_dim] == 0) {
+ /* Reset any cached chunk info for this dataset */
+ H5D__chunk_cinfo_cache_reset(&dset->shared->cache.chunk.last);
+ HGOTO_DONE(SUCCEED)
+ } /* end if */
+
+ /*
+ * Initialize structures needed to lock chunks into cache
+ */
+ /* Fill the DXPL cache values for later use */
+ if(H5D__get_dxpl_cache(dxpl_id, &dxpl_cache) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
+
+ /* Set up chunked I/O info object, for operations on chunks (in callback).
+     * Note that we only need to set chunk_sc once, as the array's address
+ * will never change. */
+ chk_store.chunk.scaled = chunk_sc;
+ H5D_BUILD_IO_INFO_RD(&chk_io_info, dset, dxpl_cache, dxpl_id, H5AC_rawdata_dxpl_id, &chk_store, NULL);
+
+ /*
+ * Determine the edges of the dataset which need to be modified
+ */
+ for(op_dim=0; op_dim<space_ndims; op_dim++) {
+ /* Start off with this dimension marked as not needing to be modified */
+ new_full_dim[op_dim] = FALSE;
+
+        /* Calculate offset of first previously incomplete chunk in this
+ * dimension */
+ old_edge_chunk_sc[op_dim] = (old_dim[op_dim] / chunk_dim[op_dim]);
+
+ /* Calculate the largest offset of chunks that might need to be
+ * modified in this dimension */
+ max_edge_chunk_sc[op_dim] = MIN((old_dim[op_dim] - 1) / chunk_dim[op_dim],
+ MAX((space_dim[op_dim] / chunk_dim[op_dim]), 1) - 1);
+
+ /* Check for old_dim aligned with chunk boundary in this dimension, if
+ * so we do not need to modify chunks along the edge in this dimension
+ */
+ if(old_dim[op_dim] % chunk_dim[op_dim] == 0)
+ continue;
+
+ /* Check if the dataspace expanded enough to cause the old edge chunks
+ * in this dimension to become full */
+ if((space_dim[op_dim]/chunk_dim[op_dim]) >= (old_edge_chunk_sc[op_dim] + 1))
+ new_full_dim[op_dim] = TRUE;
+ } /* end for */
+
+ /* Main loop: fix old edge chunks */
+ for(op_dim=0; op_dim<space_ndims; op_dim++) {
+ /* Check if allocation along this dimension is really necessary */
+ if(!new_full_dim[op_dim])
+ continue;
+ else {
+ HDassert(max_edge_chunk_sc[op_dim] == old_edge_chunk_sc[op_dim]);
+
+ /* Reset the chunk offset indices */
+ HDmemset(chunk_sc, 0, (space_ndims * sizeof(chunk_sc[0])));
+ chunk_sc[op_dim] = old_edge_chunk_sc[op_dim];
+
+ carry = FALSE;
+ } /* end if */
+
+ while(!carry) {
+ int i; /* Local index variable */
+
+ /* Make sure the chunk is really a former edge chunk */
+ HDassert(H5D__chunk_is_partial_edge_chunk(space_ndims, chunk_dim, chunk_sc, old_dim)
+ && !H5D__chunk_is_partial_edge_chunk(space_ndims, chunk_dim, chunk_sc, space_dim));
+
+ /* Lookup the chunk */
+ if(H5D__chunk_lookup(dset, dxpl_id, chunk_sc, &chk_udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+
+ /* If this chunk does not exist in cache or on disk, no need to do
+ * anything */
+ if(H5F_addr_defined(chk_udata.chunk_block.offset)
+ || (UINT_MAX != chk_udata.idx_hint)) {
+ /* Lock the chunk into cache. H5D__chunk_lock will take care of
+ * updating the chunk to no longer be an edge chunk. */
+ if(NULL == (chunk = (void *)H5D__chunk_lock(&chk_io_info, &chk_udata, FALSE, TRUE)))
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to lock raw data chunk")
+
+ /* Unlock the chunk */
+ if(H5D__chunk_unlock(&chk_io_info, &chk_udata, TRUE, chunk, (uint32_t)0) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to unlock raw data chunk")
+ } /* end if */
+
+ /* Increment indices */
+ carry = TRUE;
+ for(i = ((int)space_ndims - 1); i >= 0; --i) {
+ if((unsigned)i != op_dim) {
+ ++chunk_sc[i];
+ if(chunk_sc[i] > (hsize_t) max_edge_chunk_sc[i])
+ chunk_sc[i] = 0;
+ else {
+ carry = FALSE;
+ break;
+ } /* end else */
+ } /* end if */
+ } /* end for */
+ } /* end while(!carry) */
+
+ /* Adjust max_edge_chunk_sc so we don't modify the same chunk twice.
+ * Also check if this dimension started from 0 (and hence modified all
+     * of the old edge chunks). */
+ if(old_edge_chunk_sc[op_dim] == 0)
+ break;
+ else
+ --max_edge_chunk_sc[op_dim];
+ } /* end for(op_dim=0...) */
+
+ /* Reset any cached chunk info for this dataset */
+ H5D__chunk_cinfo_cache_reset(&dset->shared->cache.chunk.last);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__chunk_update_old_edge_chunks() */
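
A worked example of the "new full dimension" test above (assumed values): with old_dim = 10 and chunk_dim = 4, the row of chunks at scaled offset 10/4 = 2 was partial and therefore stored unfiltered. Extending to space_dim = 12 gives 12/4 = 3 >= 2 + 1, so new_full_dim fires and each chunk in that row is locked with prev_unfilt_chunk = TRUE and unlocked dirty, which pushes it through the filter pipeline on flush:

#include <stdio.h>

int main(void)
{
    unsigned long long old_dim = 10, space_dim = 12, chunk_dim = 4;

    unsigned long long old_edge_sc = old_dim / chunk_dim;           /* 2 */
    int was_partial  = (old_dim % chunk_dim) != 0;                  /* 1 */
    int now_complete = (space_dim / chunk_dim) >= old_edge_sc + 1;  /* 1 */

    printf("re-filter chunk row %llu: %d\n",
           old_edge_sc, was_partial && now_complete);
    return 0;
}
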
+
#ifdef H5_HAVE_PARALLEL
/*-------------------------------------------------------------------------
@@ -4030,7 +4485,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
+H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata, hbool_t new_unfilt_chunk)
{
const H5D_io_info_t *io_info = udata->io_info; /* Local pointer to I/O info */
const H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */
@@ -4057,6 +4512,7 @@ H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
/* Get the info for the chunk in the file */
if(H5D__chunk_lookup(dset, io_info->md_dxpl_id, scaled, &chk_udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+ chk_udata.new_unfilt_chunk = new_unfilt_chunk;
/* If this chunk does not exist in cache or on disk, no need to do anything */
if(!H5F_addr_defined(chk_udata.chunk_block.offset) && UINT_MAX == chk_udata.idx_hint)
@@ -4088,7 +4544,7 @@ H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSELECT, FAIL, "unable to select hyperslab")
/* Lock the chunk into the cache, to get a pointer to the chunk buffer */
- if(NULL == (chunk = (void *)H5D__chunk_lock(io_info, &chk_udata, FALSE)))
+ if(NULL == (chunk = (void *)H5D__chunk_lock(io_info, &chk_udata, FALSE, FALSE)))
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to lock raw data chunk")
@@ -4242,6 +4698,8 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
hsize_t max_mod_chunk_sc[H5O_LAYOUT_NDIMS]; /* Scaled offset of last chunk to modify in each dimension */
hssize_t max_fill_chunk_sc[H5O_LAYOUT_NDIMS]; /* Scaled offset of last chunk that might be filled in each dimension */
hbool_t fill_dim[H5O_LAYOUT_NDIMS]; /* Whether the plane of edge chunks in this dimension needs to be filled */
+ hsize_t min_partial_chunk_sc[H5O_LAYOUT_NDIMS]; /* Offset of first partial (or empty) chunk in each dimension */
+ hbool_t new_unfilt_dim[H5O_LAYOUT_NDIMS]; /* Whether the plane of edge chunks in this dimension are newly unfiltered */
H5D_chk_idx_info_t idx_info; /* Chunked index info */
H5D_io_info_t chk_io_info; /* Chunked I/O info object */
H5D_storage_t chk_store; /* Chunk storage information */
@@ -4261,6 +4719,8 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
hsize_t scaled[H5O_LAYOUT_NDIMS]; /* Scaled offset of current chunk */
hsize_t hyper_start[H5O_LAYOUT_NDIMS]; /* Starting location of hyperslab */
uint32_t elmts_per_chunk; /* Elements in chunk */
+ hbool_t disable_edge_filters = FALSE; /* Whether to disable filters on partial edge chunks */
+ hbool_t new_unfilt_chunk = FALSE; /* Whether the chunk is newly unfiltered */
unsigned u; /* Local index variable */
const H5O_storage_chunk_t *sc = &(layout->storage.u.chunk);
herr_t ret_value = SUCCEED; /* Return value */
@@ -4344,6 +4804,11 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
idx_udata.layout = &layout->u.chunk;
idx_udata.storage = &layout->storage.u.chunk;
+ /* Determine if partial edge chunk filters are disabled */
+ disable_edge_filters = (layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS)
+ && (idx_info.pline->nused > 0);
+
/*
* Determine the chunks which need to be filled or removed
*/
@@ -4370,13 +4835,31 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
min_mod_chunk_sc[op_dim] = space_dim[op_dim] / chunk_dim[op_dim];
/* Determine if we need to fill chunks in this dimension */
- if((hssize_t)min_mod_chunk_sc[op_dim] == max_fill_chunk_sc[op_dim])
+ if((hssize_t)min_mod_chunk_sc[op_dim] == max_fill_chunk_sc[op_dim]) {
fill_dim[op_dim] = TRUE;
- else
+
+ /* If necessary, check if chunks in this dimension that need to
+ * be filled are new partial edge chunks */
+ if(disable_edge_filters && old_dim[op_dim] >= (min_mod_chunk_sc[op_dim] + 1))
+ new_unfilt_dim[op_dim] = TRUE;
+ else
+ new_unfilt_dim[op_dim] = FALSE;
+ } /* end if */
+ else {
fill_dim[op_dim] = FALSE;
+ new_unfilt_dim[op_dim] = FALSE;
+ } /* end else */
} /* end if */
- else
+ else {
fill_dim[op_dim] = FALSE;
+ new_unfilt_dim[op_dim] = FALSE;
+ } /* end else */
+
+ /* If necessary, calculate the smallest offset of non-previously full
+ * chunks in this dimension, so we know these chunks were previously
+ * unfiltered */
+ if(disable_edge_filters)
+ min_partial_chunk_sc[op_dim] = old_dim[op_dim] / chunk_dim[op_dim];
} /* end for */
/* Main loop: fill or remove chunks */
@@ -4416,8 +4899,27 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
HDassert(fill_dim[op_dim]);
HDassert(scaled[op_dim] == min_mod_chunk_sc[op_dim]);
+ /* Make sure this is an edge chunk */
+ HDassert(H5D__chunk_is_partial_edge_chunk(space_ndims, layout->u.chunk.dim, scaled, space_dim));
+
+ /* Determine if the chunk just became an unfiltered chunk */
+ if(new_unfilt_dim[op_dim]) {
+ new_unfilt_chunk = TRUE;
+ for(u = 0; u < space_ndims; u++)
+ if(scaled[u] == min_partial_chunk_sc[u]) {
+ new_unfilt_chunk = FALSE;
+ break;
+ } /* end if */
+ } /* end if */
+
+ /* Make sure that, if we think this is a new unfiltered chunk,
+ * it was previously not an edge chunk */
+ HDassert(!new_unfilt_dim[op_dim] || (!new_unfilt_chunk !=
+ !H5D__chunk_is_partial_edge_chunk(space_ndims, layout->u.chunk.dim, scaled, old_dim)));
+ HDassert(!new_unfilt_chunk || new_unfilt_dim[op_dim]);
+
/* Fill the unused parts of the chunk */
- if(H5D__chunk_prune_fill(&udata) < 0)
+ if(H5D__chunk_prune_fill(&udata, new_unfilt_chunk) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write fill value")
} /* end if */
else {
@@ -4854,8 +5356,15 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
/* Check for filtered chunks */
if((is_vlen || fix_ref) && pline && pline->nused) {
- must_filter = TRUE;
- cb_struct.func = NULL; /* no callback function when failed */
+ /* Check if we should disable filters on this chunk */
+ if(udata->common.layout->flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) {
+ /* Check if the chunk is an edge chunk, and disable filters if so */
+ if(!H5D__chunk_is_partial_edge_chunk(udata->dset_ndims, udata->common.layout->dim, chunk_rec->scaled, udata->dset_dims))
+ must_filter = TRUE;
+ } /* end if */
+ else
+ must_filter = TRUE;
} /* end if */
/* Resize the buf if it is too small to hold the data */
@@ -4888,6 +5397,7 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
if(must_filter) {
unsigned filter_mask = chunk_rec->filter_mask;
+ cb_struct.func = NULL; /* no callback function when failed */
if(H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &filter_mask, H5Z_NO_EDC, cb_struct, &nbytes, &buf_size, &buf) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, H5_ITER_ERROR, "data pipeline read failed")
} /* end if */
@@ -4966,7 +5476,7 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
udata_dst.common.layout->down_chunks, udata_dst.common.scaled);
/* Allocate chunk in the file */
- if(H5D__chunk_file_alloc(udata->idx_info_dst, NULL, &udata_dst.chunk_block, &need_insert, udata_dst.common.scaled) < 0)
+ if(H5D__chunk_file_alloc(udata->idx_info_dst, NULL, &udata_dst.chunk_block, &need_insert) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level")
/* Write chunk data to destination file */
@@ -5603,6 +6113,45 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5D__chunk_is_partial_edge_chunk
+ *
+ * Purpose: Checks to see if the chunk is a partial edge chunk.
+ *
+ * Return:      TRUE if the chunk is a partial edge chunk,
+ *              FALSE otherwise
+ *
+ * Programmer: Neil Fortner
+ * 19 Nov 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static hbool_t
+H5D__chunk_is_partial_edge_chunk(unsigned dset_ndims, const uint32_t *chunk_dims,
+ const hsize_t scaled[], const hsize_t *dset_dims)
+{
+ unsigned u; /* Local index variable */
+ hbool_t ret_value = FALSE; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check args */
+ HDassert(scaled);
+ HDassert(dset_ndims > 0);
+ HDassert(dset_dims);
+ HDassert(chunk_dims);
+
+ /* check if this is a partial edge chunk */
+ for(u = 0; u < dset_ndims; u++)
+ if(((scaled[u] + 1) * chunk_dims[u]) > dset_dims[u])
+ HGOTO_DONE(TRUE);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__chunk_is_partial_edge_chunk() */
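
The predicate above can be restated in a few self-contained lines; for example, with a 10x8 dataset and 4x4 chunks, the chunk at scaled offset (2,1) spans rows 8-11 and sticks out past row 9, so it is a partial edge chunk:

#include <stdbool.h>
#include <stdio.h>

/* Standalone restatement: a chunk is a partial edge chunk iff it
 * extends past the dataset boundary in any dimension. */
static bool is_partial_edge_chunk(unsigned ndims,
                                  const unsigned long long *chunk_dims,
                                  const unsigned long long *scaled,
                                  const unsigned long long *dset_dims)
{
    for (unsigned u = 0; u < ndims; u++)
        if ((scaled[u] + 1) * chunk_dims[u] > dset_dims[u])
            return true;
    return false;
}

int main(void)
{
    unsigned long long chunk_dims[2] = {4, 4}, dset_dims[2] = {10, 8};
    unsigned long long interior[2] = {0, 0}, edge[2] = {2, 1};

    printf("%d %d\n",
           is_partial_edge_chunk(2, chunk_dims, interior, dset_dims), /* 0 */
           is_partial_edge_chunk(2, chunk_dims, edge, dset_dims));    /* 1 */
    return 0;
}
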
+
+
+/*-------------------------------------------------------------------------
* Function: H5D__chunk_file_alloc()
*
* Purpose: Chunk allocation:
@@ -5618,7 +6167,7 @@ done:
*/
static herr_t
H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old_chunk,
- H5F_block_t *new_chunk, hbool_t *need_insert, hsize_t scaled[])
+ H5F_block_t *new_chunk, hbool_t *need_insert)
{
hbool_t alloc_chunk = FALSE; /* Whether to allocate chunk */
herr_t ret_value = SUCCEED; /* Return value */
@@ -5635,6 +6184,8 @@ H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old
HDassert(new_chunk);
HDassert(need_insert);
+ *need_insert = FALSE;
+
/* Check for filters on chunks */
if(idx_info->pline->nused > 0) {
/* Sanity/error checking block */
@@ -5696,7 +6247,7 @@ H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old
new_chunk->offset = H5MF_alloc(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, (hsize_t)new_chunk->length);
if(!H5F_addr_defined(new_chunk->offset))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "file allocation failed")
- *need_insert = TRUE;
+ *need_insert = TRUE;
break;
case H5D_CHUNK_IDX_NTYPES:
diff --git a/src/H5Dint.c b/src/H5Dint.c
index d91693d..570ee99 100644
--- a/src/H5Dint.c
+++ b/src/H5Dint.c
@@ -2624,11 +2624,19 @@ H5D__set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id)
* and if the chunks are written
*-------------------------------------------------------------------------
*/
- if(shrink && H5D_CHUNKED == dset->shared->layout.type &&
- (*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage))
- /* Remove excess chunks */
- if(H5D__chunk_prune_by_extent(dset, dxpl_id, curr_dims) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to remove chunks")
+ if(H5D_CHUNKED == dset->shared->layout.type) {
+ if(shrink && (*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage))
+ /* Remove excess chunks */
+ if(H5D__chunk_prune_by_extent(dset, dxpl_id, curr_dims) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to remove chunks")
+
+ /* Update chunks that are no longer edge chunks as a result of
+ * expansion */
+ if(expand && (dset->shared->layout.u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS)
+ && (dset->shared->dcpl_cache.pline.nused > 0))
+ if(H5D__chunk_update_old_edge_chunks(dset, dxpl_id, curr_dims) < 0)
+                HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update old edge chunks")
+ } /* end if */
/* Mark the dataspace as dirty, for later writing to the file */
if(H5D__mark(dset, dxpl_id, H5D_MARK_SPACE) < 0)
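
At the API level, the expand branch above is reached by a sequence like the following (a minimal sketch, error checking omitted; H5Pset_chunk_opts is the public setter that the H5Dpublic.h change below documents):

#include "hdf5.h"

int main(void)
{
    hsize_t dims[1] = {10}, max_dims[1] = {H5S_UNLIMITED}, chunk[1] = {4};
    hsize_t new_dims[1] = {12};
    hid_t file, space, dcpl, dset;

    file  = H5Fcreate("edge.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    space = H5Screate_simple(1, dims, max_dims);
    dcpl  = H5Pcreate(H5P_DATASET_CREATE);

    H5Pset_chunk(dcpl, 1, chunk);
    H5Pset_deflate(dcpl, 6);    /* any filter pipeline will do */
    H5Pset_chunk_opts(dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS);

    dset = H5Dcreate2(file, "d", H5T_NATIVE_INT, space, H5P_DEFAULT, dcpl,
                      H5P_DEFAULT);

    /* The chunk covering elements 8..11 is partial (the dataset ends at
     * element 9), so it is stored unfiltered. Extending to 12 completes
     * it, and H5D__set_extent() calls H5D__chunk_update_old_edge_chunks()
     * to push it through the deflate filter. */
    H5Dset_extent(dset, new_dims);

    H5Dclose(dset); H5Pclose(dcpl); H5Sclose(space); H5Fclose(file);
    return 0;
}
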
diff --git a/src/H5Dlayout.c b/src/H5Dlayout.c
index bbaa9c5..19b1c95 100644
--- a/src/H5Dlayout.c
+++ b/src/H5Dlayout.c
@@ -164,15 +164,35 @@ H5D__layout_meta_size(const H5F_t *f, const H5O_layout_t *layout, hbool_t includ
break;
case H5D_CHUNKED:
- /* Number of dimensions (1 byte) */
- HDassert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
- ret_value++;
+ if(layout->version < H5O_LAYOUT_VERSION_4) {
+ /* Number of dimensions (1 byte) */
+ HDassert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
+ ret_value++;
- /* Dimension sizes */
- ret_value += layout->u.chunk.ndims * 4;
+ /* B-tree address */
+ ret_value += H5F_SIZEOF_ADDR(f); /* Address of data */
- /* B-tree address */
- ret_value += H5F_SIZEOF_ADDR(f); /* Address of data */
+ /* Dimension sizes */
+ ret_value += layout->u.chunk.ndims * 4;
+ } /* end if */
+ else {
+ /* Chunked layout feature flags */
+ ret_value++;
+
+ /* Number of dimensions (1 byte) */
+ HDassert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
+ ret_value++;
+
+ /* Encoded # of bytes for each chunk dimension */
+ HDassert(layout->u.chunk.enc_bytes_per_dim > 0 && layout->u.chunk.enc_bytes_per_dim <= 8);
+ ret_value++;
+
+ /* Dimension sizes */
+ ret_value += layout->u.chunk.ndims * layout->u.chunk.enc_bytes_per_dim;
+
+ /* B-tree address */
+ ret_value += H5F_SIZEOF_ADDR(f); /* Address of data */
+ } /* end else */
break;
case H5D_VIRTUAL:
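
For example, a version-4 message for a rank-3 chunked dataset with enc_bytes_per_dim = 2 in a file with 8-byte addresses encodes to 1 + 1 + 1 + 3*2 + 8 = 17 bytes, versus 1 + 8 + 3*4 = 21 bytes for the same layout in version 3. As a sketch of the version-4 accounting above:

#include <stddef.h>

/* Encoded size of the chunked-layout portion of a version-4 layout
 * message, following the accounting in H5D__layout_meta_size(). */
static size_t v4_chunk_msg_size(unsigned ndims, unsigned enc_bytes_per_dim,
                                size_t sizeof_addr)
{
    return 1                                   /* feature flags */
         + 1                                   /* ndims */
         + 1                                   /* enc bytes per dim */
         + (size_t)ndims * enc_bytes_per_dim   /* dimension sizes */
         + sizeof_addr;                        /* chunk index address */
}
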
diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h
index eabefc4..e208f8e 100644
--- a/src/H5Dpkg.h
+++ b/src/H5Dpkg.h
@@ -267,6 +267,7 @@ typedef struct H5D_chunk_ud_t {
unsigned idx_hint; /* Index of chunk in cache, if present */
H5F_block_t chunk_block; /* Offset/length of chunk in file */
unsigned filter_mask; /* Excluded filters */
+ hbool_t new_unfilt_chunk; /* Whether the chunk just became unfiltered */
hsize_t chunk_idx; /* Chunk index for EA, FA indexing */
} H5D_chunk_ud_t;
@@ -615,6 +616,8 @@ H5_DLL herr_t H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id,
const hsize_t *scaled, H5D_chunk_ud_t *udata);
H5_DLL herr_t H5D__chunk_allocated(H5D_t *dset, hid_t dxpl_id, hsize_t *nbytes);
H5_DLL herr_t H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_t old_dim[]);
+H5_DLL herr_t H5D__chunk_update_old_edge_chunks(H5D_t *dset, hid_t dxpl_id,
+ hsize_t old_dim[]);
H5_DLL herr_t H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id,
const hsize_t *old_dim);
#ifdef H5_HAVE_PARALLEL
diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h
index a1f87e3..39e6aa7 100644
--- a/src/H5Dpublic.h
+++ b/src/H5Dpublic.h
@@ -34,6 +34,9 @@
#define H5D_CHUNK_CACHE_NBYTES_DEFAULT ((size_t) -1)
#define H5D_CHUNK_CACHE_W0_DEFAULT (-1.0f)
+/* Bit flags for H5Pset_chunk_opts() and H5Pget_chunk_opts() */
+#define H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS (0x0002u)
+
/* Property names for H5LTDdirect_chunk_write */
#define H5D_XFER_DIRECT_CHUNK_WRITE_FLAG_NAME "direct_chunk_flag"
#define H5D_XFER_DIRECT_CHUNK_WRITE_FILTERS_NAME "direct_chunk_filters"
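
A minimal round-trip of the new flag through the property-list pair named in the comment above (a sketch; error checks omitted):

#include "hdf5.h"

int main(void)
{
    hsize_t chunk[2] = {16, 16};
    unsigned opts = 0;
    hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);

    H5Pset_chunk(dcpl, 2, chunk);   /* option requires chunked layout */
    H5Pset_chunk_opts(dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS);
    H5Pget_chunk_opts(dcpl, &opts);
    /* opts now contains H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS */
    H5Pclose(dcpl);
    return 0;
}
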
diff --git a/src/H5Olayout.c b/src/H5Olayout.c
index 31ddb88..f2af8ef 100644
--- a/src/H5Olayout.c
+++ b/src/H5Olayout.c
@@ -124,7 +124,7 @@ H5O__layout_decode(H5F_t *f, hid_t H5_ATTR_UNUSED dxpl_id, H5O_t H5_ATTR_UNUSED
HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad version number for layout message")
if(mesg->version < H5O_LAYOUT_VERSION_3) {
- unsigned ndims; /* Num dimensions in chunk */
+ unsigned ndims; /* Num dimensions in chunk */
/* Dimensionality */
ndims = *p++;
@@ -233,26 +233,67 @@ H5O__layout_decode(H5F_t *f, hid_t H5_ATTR_UNUSED dxpl_id, H5O_t H5_ATTR_UNUSED
break;
case H5D_CHUNKED:
- /* Dimensionality */
- mesg->u.chunk.ndims = *p++;
- if(mesg->u.chunk.ndims > H5O_LAYOUT_NDIMS)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "dimensionality is too large")
-
- /* B-tree address */
- H5F_addr_decode(f, &p, &(mesg->storage.u.chunk.idx_addr));
-
- /* Chunk dimensions */
- for(u = 0; u < mesg->u.chunk.ndims; u++)
- UINT32DECODE(p, mesg->u.chunk.dim[u]);
-
- /* Compute chunk size */
- for(u = 1, mesg->u.chunk.size = mesg->u.chunk.dim[0]; u < mesg->u.chunk.ndims; u++)
- mesg->u.chunk.size *= mesg->u.chunk.dim[u];
-
- /* Set the chunk operations */
- /* (Only "btree" indexing type supported with v3 of message format) */
- mesg->storage.u.chunk.idx_type = H5D_CHUNK_IDX_BTREE;
- mesg->storage.u.chunk.ops = H5D_COPS_BTREE;
+ if(mesg->version < H5O_LAYOUT_VERSION_4) {
+ /* Set the chunked layout flags */
+ mesg->u.chunk.flags = (uint8_t)0;
+
+ /* Dimensionality */
+ mesg->u.chunk.ndims = *p++;
+ if(mesg->u.chunk.ndims > H5O_LAYOUT_NDIMS)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "dimensionality is too large")
+
+ /* B-tree address */
+ H5F_addr_decode(f, &p, &(mesg->storage.u.chunk.idx_addr));
+
+ /* Chunk dimensions */
+ for(u = 0; u < mesg->u.chunk.ndims; u++)
+ UINT32DECODE(p, mesg->u.chunk.dim[u]);
+
+ /* Compute chunk size */
+ for(u = 1, mesg->u.chunk.size = mesg->u.chunk.dim[0]; u < mesg->u.chunk.ndims; u++)
+ mesg->u.chunk.size *= mesg->u.chunk.dim[u];
+
+ /* Set the chunk operations */
+ /* (Only "btree" indexing type supported with v3 of message format) */
+ mesg->storage.u.chunk.idx_type = H5D_CHUNK_IDX_BTREE;
+ mesg->storage.u.chunk.ops = H5D_COPS_BTREE;
+ } /* end if */
+ else {
+ /* Get the chunked layout flags */
+ mesg->u.chunk.flags = *p++;
+
+ /* Check for valid flags */
+                    /* (Any flag bits outside H5O_LAYOUT_ALL_CHUNK_FLAGS
+                     * cause an error)
+                     */
+ if(mesg->u.chunk.flags & ~H5O_LAYOUT_ALL_CHUNK_FLAGS)
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "bad flag value for message")
+
+ /* Dimensionality */
+ mesg->u.chunk.ndims = *p++;
+ if(mesg->u.chunk.ndims > H5O_LAYOUT_NDIMS)
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "dimensionality is too large")
+
+ /* Encoded # of bytes for each chunk dimension */
+ mesg->u.chunk.enc_bytes_per_dim = *p++;
+ if(mesg->u.chunk.enc_bytes_per_dim == 0 || mesg->u.chunk.enc_bytes_per_dim > 8)
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "encoded chunk dimension size is too large")
+
+ /* Chunk dimensions */
+ for(u = 0; u < mesg->u.chunk.ndims; u++)
+ UINT64DECODE_VAR(p, mesg->u.chunk.dim[u], mesg->u.chunk.enc_bytes_per_dim);
+
+ /* Compute chunk size */
+ for(u = 1, mesg->u.chunk.size = mesg->u.chunk.dim[0]; u < mesg->u.chunk.ndims; u++)
+ mesg->u.chunk.size *= mesg->u.chunk.dim[u];
+
+ /* Set the chunk operations */
+ mesg->storage.u.chunk.idx_type = H5D_CHUNK_IDX_BTREE;
+ mesg->storage.u.chunk.ops = H5D_COPS_BTREE;
+
+ /* Chunk index address */
+ H5F_addr_decode(f, &p, &(mesg->storage.u.chunk.idx_addr));
+ } /* end else */
/* Set the layout operations */
mesg->ops = H5D_LOPS_CHUNK;
@@ -457,8 +498,8 @@ H5O__layout_encode(H5F_t *f, hbool_t H5_ATTR_UNUSED disable_shared, uint8_t *p,
HDassert(p);
/* Message version */
- *p++ = mesg->type == H5D_VIRTUAL ? (uint8_t)H5O_LAYOUT_VERSION_4
- : (uint8_t)H5O_LAYOUT_VERSION_3;
+ *p++ = (uint8_t)((mesg->version < H5O_LAYOUT_VERSION_3) ?
+ H5O_LAYOUT_VERSION_3 : mesg->version);
/* Layout class */
*p++ = mesg->type;
@@ -488,16 +529,41 @@ H5O__layout_encode(H5F_t *f, hbool_t H5_ATTR_UNUSED disable_shared, uint8_t *p,
break;
case H5D_CHUNKED:
- /* Number of dimensions */
- HDassert(mesg->u.chunk.ndims > 0 && mesg->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
- *p++ = (uint8_t)mesg->u.chunk.ndims;
+ if(mesg->version < H5O_LAYOUT_VERSION_4) {
+ /* Number of dimensions */
+ HDassert(mesg->u.chunk.ndims > 0 && mesg->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
+ *p++ = (uint8_t)mesg->u.chunk.ndims;
- /* B-tree address */
- H5F_addr_encode(f, &p, mesg->storage.u.chunk.idx_addr);
+ /* B-tree address */
+ H5F_addr_encode(f, &p, mesg->storage.u.chunk.idx_addr);
- /* Dimension sizes */
- for(u = 0; u < mesg->u.chunk.ndims; u++)
- UINT32ENCODE(p, mesg->u.chunk.dim[u]);
+ /* Dimension sizes */
+ for(u = 0; u < mesg->u.chunk.ndims; u++)
+ UINT32ENCODE(p, mesg->u.chunk.dim[u]);
+ } /* end if */
+ else {
+ /* Chunk feature flags */
+ *p++ = mesg->u.chunk.flags;
+
+ /* Number of dimensions */
+ HDassert(mesg->u.chunk.ndims > 0 && mesg->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
+ *p++ = (uint8_t)mesg->u.chunk.ndims;
+
+ /* Encoded # of bytes for each chunk dimension */
+ HDassert(mesg->u.chunk.enc_bytes_per_dim > 0 && mesg->u.chunk.enc_bytes_per_dim <= 8);
+ *p++ = (uint8_t)mesg->u.chunk.enc_bytes_per_dim;
+
+ /* Dimension sizes */
+ for(u = 0; u < mesg->u.chunk.ndims; u++)
+ UINT64ENCODE_VAR(p, mesg->u.chunk.dim[u], mesg->u.chunk.enc_bytes_per_dim);
+
+ /*
+ * Implicit index: Address of the chunks
+ * Single chunk index: address of the single chunk
+ * Other indexes: chunk index address
+ */
+ H5F_addr_encode(f, &p, mesg->storage.u.chunk.idx_addr);
+ } /* end else */
break;
case H5D_VIRTUAL:
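
With a version 4 layout message, each chunk dimension is stored in enc_bytes_per_dim bytes rather than a fixed four, so the encoder can pick the smallest width that holds the largest chunk dimension. The following self-contained sketch shows that round trip; encode_var/decode_var and the little-endian byte order here are illustrative stand-ins for the UINT64ENCODE_VAR/UINT64DECODE_VAR macros, not their actual definitions:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative variable-width encode/decode helpers */
    static void
    encode_var(uint8_t **p, uint64_t v, unsigned nbytes)
    {
        unsigned u;

        for(u = 0; u < nbytes; u++) {
            *(*p)++ = (uint8_t)(v & 0xffu);
            v >>= 8;
        }
    }

    static uint64_t
    decode_var(const uint8_t **p, unsigned nbytes)
    {
        uint64_t v = 0;
        unsigned u;

        for(u = 0; u < nbytes; u++)
            v |= (uint64_t)*(*p)++ << (8 * u);
        return v;
    }

    int
    main(void)
    {
        uint64_t dim[3] = {100000, 50, 3}, out[3], max = 0;
        uint8_t buf[3 * 8], *wp = buf;
        const uint8_t *rp = buf;
        unsigned enc_bytes_per_dim = 1;
        int i;

        /* Pick the smallest width (1-8 bytes) that holds the largest dimension */
        for(i = 0; i < 3; i++)
            if(dim[i] > max)
                max = dim[i];
        while(enc_bytes_per_dim < 8 && (max >> (8 * enc_bytes_per_dim)))
            enc_bytes_per_dim++;

        /* Round-trip all dimensions at the chosen width */
        for(i = 0; i < 3; i++)
            encode_var(&wp, dim[i], enc_bytes_per_dim);
        for(i = 0; i < 3; i++)
            out[i] = decode_var(&rp, enc_bytes_per_dim);
        for(i = 0; i < 3; i++)
            assert(out[i] == dim[i]);

        printf("enc_bytes_per_dim = %u\n", enc_bytes_per_dim); /* prints 3 */
        return 0;
    }
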
diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h
index e117b8a..8175bd8 100644
--- a/src/H5Oprivate.h
+++ b/src/H5Oprivate.h
@@ -367,6 +367,12 @@ typedef struct H5O_efl_t {
*/
#define H5O_LAYOUT_NDIMS (H5S_MAX_RANK+1)
+/* Flags for chunked layout feature encoding */
+#define H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS 0x01
+#define H5O_LAYOUT_ALL_CHUNK_FLAGS ( \
+ H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS \
+ )
+
/* Initial version of the layout information. Used when space is allocated */
#define H5O_LAYOUT_VERSION_1 1
@@ -511,6 +517,7 @@ typedef struct H5O_storage_t {
} H5O_storage_t;
typedef struct H5O_layout_chunk_t {
+ uint8_t flags; /* Chunk layout flags */
unsigned ndims; /* Num dimensions in chunk */
uint32_t dim[H5O_LAYOUT_NDIMS]; /* Size of chunk in elements */
unsigned enc_bytes_per_dim; /* Encoded # of bytes for storing each chunk dimension */
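
The H5O_LAYOUT_ALL_CHUNK_FLAGS mask exists so the decoder can reject any flag bit it does not understand, rather than silently misreading a message written by a newer library. A minimal sketch of the mask-and-reject pattern, using hypothetical flag names:

    #include <stdio.h>

    /* Hypothetical flag names; the shape mirrors the defines above */
    #define FLAG_DONT_FILTER_PARTIAL 0x01u
    #define ALL_KNOWN_FLAGS (FLAG_DONT_FILTER_PARTIAL)

    /* Returns 0 if every set bit is a known feature flag, -1 otherwise */
    static int
    check_flags(unsigned flags)
    {
        if(flags & ~ALL_KNOWN_FLAGS)
            return -1;  /* unknown bit set: refuse to decode */
        return 0;
    }

    int
    main(void)
    {
        printf("%d\n", check_flags(0x01u));     /* 0: known flag */
        printf("%d\n", check_flags(0x02u));     /* -1: unknown bit */
        return 0;
    }
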
diff --git a/src/H5Pdcpl.c b/src/H5Pdcpl.c
index df88bee..8c9f792 100644
--- a/src/H5Pdcpl.c
+++ b/src/H5Pdcpl.c
@@ -57,7 +57,7 @@
#define H5D_DEF_STORAGE_COMPACT_INIT {(hbool_t)FALSE, (size_t)0, NULL}
#define H5D_DEF_STORAGE_CONTIG_INIT {HADDR_UNDEF, (hsize_t)0}
#define H5D_DEF_STORAGE_CHUNK_INIT {H5D_CHUNK_IDX_BTREE, HADDR_UNDEF, H5D_COPS_BTREE, {{HADDR_UNDEF, NULL}}}
-#define H5D_DEF_LAYOUT_CHUNK_INIT {(unsigned)0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, (unsigned)0, (uint32_t)0, (hsize_t)0, (hsize_t)0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}}
+#define H5D_DEF_LAYOUT_CHUNK_INIT {(uint8_t)0, (unsigned)0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, (unsigned)0, (uint32_t)0, (hsize_t)0, (hsize_t)0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}}
#define H5D_DEF_STORAGE_VIRTUAL_INIT {{HADDR_UNDEF, 0}, 0, NULL, 0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, H5D_VDS_ERROR, HSIZE_UNDEF, -1, -1, FALSE}
#ifdef H5_HAVE_C99_DESIGNATED_INITIALIZER
#define H5D_DEF_STORAGE_COMPACT {H5D_COMPACT, { .compact = H5D_DEF_STORAGE_COMPACT_INIT }}
@@ -2631,6 +2631,127 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5Pset_chunk_opts
+ *
+ * Purpose: Sets the options related to chunked storage for a dataset.
+ * The storage must already be set to chunked.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Thursday, January 21, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pset_chunk_opts(hid_t plist_id, unsigned options)
+{
+ H5P_genplist_t *plist; /* Property list pointer */
+ H5O_layout_t layout; /* Layout information for setting chunk info */
+ uint8_t layout_flags = 0; /* "options" translated into layout message flags format */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE2("e", "iIu", plist_id, options);
+
+ /* Check arguments */
+ if(options & ~(H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "unknown chunk options")
+
+#ifndef H5_HAVE_C99_DESIGNATED_INITIALIZER
+ /* If the compiler doesn't support C99 designated initializers, check if
+ * the default layout structs have been initialized yet or not. *ick* -QAK
+ */
+ if(!H5P_dcrt_def_layout_init_g)
+ if(H5P__init_def_layout() < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINIT, FAIL, "can't initialize default layout info")
+#endif /* H5_HAVE_C99_DESIGNATED_INITIALIZER */
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_CREATE)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
+
+ /* Retrieve the layout property */
+ if(H5P_peek(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, "can't get layout")
+ if(H5D_CHUNKED != layout.type)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a chunked storage layout")
+
+ /* Translate options into flags that can be used with the layout message */
+ if(options & H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS)
+ layout_flags |= H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS;
+
+ /* Update the layout message, including the version (if necessary) */
+ /* This probably isn't the right way to do this, and should be changed once
+ * this branch gets the "real" way to set the layout version */
+ layout.u.chunk.flags = layout_flags;
+ if(layout.version < H5O_LAYOUT_VERSION_4)
+ layout.version = H5O_LAYOUT_VERSION_4;
+
+ /* Set layout value */
+ if(H5P_poke(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINIT, FAIL, "can't set layout")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5Pset_chunk_opts() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Pget_chunk_opts
+ *
+ * Purpose: Gets the options related to chunked storage for a dataset.
+ * The storage must already be set to chunked.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Friday, January 22, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pget_chunk_opts(hid_t plist_id, unsigned *options)
+{
+ H5P_genplist_t *plist; /* Property list pointer */
+ H5O_layout_t layout; /* Layout information for setting chunk info */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE2("e", "i*Iu", plist_id, options);
+
+#ifndef H5_HAVE_C99_DESIGNATED_INITIALIZER
+ /* If the compiler doesn't support C99 designated initializers, check if
+ * the default layout structs have been initialized yet or not. *ick* -QAK
+ */
+ if(!H5P_dcrt_def_layout_init_g)
+ if(H5P__init_def_layout() < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINIT, FAIL, "can't initialize default layout info")
+#endif /* H5_HAVE_C99_DESIGNATED_INITIALIZER */
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_CREATE)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
+
+ /* Retrieve the layout property */
+ if(H5P_peek(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, "can't get layout")
+ if(H5D_CHUNKED != layout.type)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a chunked storage layout")
+
+ if(options) {
+ /* Translate options from flags that can be used with the layout message
+ * to those known to the public */
+ *options = 0;
+ if(layout.u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS)
+ *options |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
+ } /* end if */
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5Pget_chunk_opts() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5Pset_external
*
* Purpose: Adds an external file to the list of external files. PLIST_ID
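
A minimal usage sketch for the new API pair above, assuming a chunked DCPL and with error handling reduced to early returns:

    #include "hdf5.h"

    int
    main(void)
    {
        hsize_t chunk[2] = {64, 64};
        unsigned opts = 0;
        hid_t dcpl;

        if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
            return 1;

        /* The layout must be chunked before chunk options can be set */
        if(H5Pset_chunk(dcpl, 2, chunk) < 0)
            return 1;

        /* Skip the filter pipeline on partial boundary chunks */
        if(H5Pset_chunk_opts(dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0)
            return 1;

        /* Read the option back and verify */
        if(H5Pget_chunk_opts(dcpl, &opts) < 0)
            return 1;
        if(!(opts & H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS))
            return 1;

        return (H5Pclose(dcpl) < 0) ? 1 : 0;
    }

Note that H5Pset_chunk_opts fails on a non-chunked layout and, as the implementation above shows, bumps the layout message to version 4, which readers that only understand version 3 cannot decode.
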
diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h
index 894fc2c..5fdb8b8 100644
--- a/src/H5Ppublic.h
+++ b/src/H5Ppublic.h
@@ -374,6 +374,8 @@ H5_DLL ssize_t H5Pget_virtual_dsetname(hid_t dcpl_id, size_t index,
char *name/*out*/, size_t size);
H5_DLL herr_t H5Pset_external(hid_t plist_id, const char *name, off_t offset,
hsize_t size);
+H5_DLL herr_t H5Pset_chunk_opts(hid_t plist_id, unsigned opts);
+H5_DLL herr_t H5Pget_chunk_opts(hid_t plist_id, unsigned *opts);
H5_DLL int H5Pget_external_count(hid_t plist_id);
H5_DLL herr_t H5Pget_external(hid_t plist_id, unsigned idx, size_t name_size,
char *name/*out*/, off_t *offset/*out*/,
diff --git a/test/dsets.c b/test/dsets.c
index 2cb51d3..b3f11b8 100644
--- a/test/dsets.c
+++ b/test/dsets.c
@@ -44,10 +44,13 @@ const char *FILENAME[] = {
"huge_chunks", /* 7 */
"chunk_cache", /* 8 */
"big_chunk", /* 9 */
- "chunk_expand", /* 10 */
- "copy_dcpl_newfile",/* 11 */
- "layout_extend", /* 12 */
- "zero_chunk", /* 13 */
+ "chunk_fast", /* 10 */
+ "chunk_expand", /* 11 */
+ "chunk_fixed", /* 12 */
+ "copy_dcpl_newfile",/* 13 */
+ "partial_chunks", /* 14 */
+ "layout_extend", /* 15 */
+ "zero_chunk", /* 16 */
NULL
};
#define FILENAME_BUF_SIZE 1024
@@ -125,6 +128,7 @@ const char *FILENAME[] = {
#define H5Z_FILTER_DEPREC 309
#define H5Z_FILTER_EXPAND 310
#define H5Z_FILTER_CAN_APPLY_TEST2 311
+#define H5Z_FILTER_COUNT 312
/* Flags for testing filters */
#define DISABLE_FLETCHER32 0
@@ -198,6 +202,8 @@ const char *FILENAME[] = {
#define DSET_DIM2 200
int points[DSET_DIM1][DSET_DIM2], check[DSET_DIM1][DSET_DIM2];
double points_dbl[DSET_DIM1][DSET_DIM2], check_dbl[DSET_DIM1][DSET_DIM2];
+size_t count_nbytes_read = 0;
+size_t count_nbytes_written = 0;
/* Local prototypes for filter functions */
static size_t filter_bogus(unsigned int flags, size_t cd_nelmts,
@@ -212,6 +218,49 @@ static size_t filter_corrupt(unsigned int flags, size_t cd_nelmts,
const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf);
static size_t filter_expand(unsigned int flags, size_t cd_nelmts,
const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf);
+static size_t filter_count(unsigned int flags, size_t cd_nelmts,
+ const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf);
+
+/* This message derives from H5Z */
+const H5Z_class2_t H5Z_COUNT[1] = {{
+ H5Z_CLASS_T_VERS, /* H5Z_class_t version */
+ H5Z_FILTER_COUNT, /* Filter id number */
+ 1, 1, /* Encoding and decoding enabled */
+ "count", /* Filter name for debugging */
+ NULL, /* The "can apply" callback */
+ NULL, /* The "set local" callback */
+ filter_count, /* The actual filter function */
+}};
+
+
+/*-------------------------------------------------------------------------
+ * Function: filter_count
+ *
+ * Purpose: This filter counts the number of bytes read and written,
+ * incrementing count_nbytes_read or count_nbytes_written as
+ * appropriate.
+ *
+ * Return: Success: Data chunk size
+ *
+ * Failure: 0
+ *
+ * Programmer: Neil Fortner
+ * Wednesday, March 17, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static size_t
+filter_count(unsigned int flags, size_t H5_ATTR_UNUSED cd_nelmts,
+ const unsigned int H5_ATTR_UNUSED *cd_values, size_t nbytes,
+ size_t H5_ATTR_UNUSED *buf_size, void H5_ATTR_UNUSED **buf)
+{
+ if(flags & H5Z_FLAG_REVERSE)
+ count_nbytes_read += nbytes;
+ else
+ count_nbytes_written += nbytes;
+
+ return nbytes;
+}
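
For context on the H5Z_class2_t machinery used here: a filter callback receives the chunk buffer, must return the number of valid bytes on success or 0 on failure, and the H5Z_FLAG_REVERSE bit in flags marks the read (decode) path, which is what lets filter_count attribute bytes to the right counter. A generic pass-through skeleton under those rules, with a hypothetical filter id and name:

    #include "hdf5.h"

    #define FILTER_PASSTHRU_ID 313  /* hypothetical id in the test range */

    /* Pass-through filter: returning nbytes reports success and the
     * number of valid bytes in *buf; returning 0 reports failure. */
    static size_t
    filter_passthru(unsigned int flags, size_t cd_nelmts,
        const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf)
    {
        (void)cd_nelmts; (void)cd_values; (void)buf_size; (void)buf;

        if(flags & H5Z_FLAG_REVERSE) {
            /* decode path: chunk is being read back */
        } /* end if */
        else {
            /* encode path: chunk is being written out */
        } /* end else */

        return nbytes;
    }

    const H5Z_class2_t FILTER_PASSTHRU[1] = {{
        H5Z_CLASS_T_VERS,       /* H5Z_class_t version */
        FILTER_PASSTHRU_ID,     /* Filter id number */
        1, 1,                   /* Encoding and decoding enabled */
        "passthru",             /* Filter name for debugging */
        NULL,                   /* The "can apply" callback */
        NULL,                   /* The "set local" callback */
        filter_passthru,        /* The actual filter function */
    }};
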
/*-------------------------------------------------------------------------
@@ -905,7 +954,7 @@ test_layout_extend(hid_t fapl)
TESTING("extendible dataset with various layout");
/* Create a file */
- h5_fixname(FILENAME[12], fapl, filename, sizeof filename);
+ h5_fixname(FILENAME[15], fapl, filename, sizeof filename);
if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
FAIL_STACK_ERROR
@@ -2798,7 +2847,7 @@ test_nbit_int(hid_t file)
mask = ~((unsigned)~0 << (precision + offset)) & ((unsigned)~0 << offset);
for(i=0; i<(size_t)size[0]; i++) {
for(j=0; j<(size_t)size[1]; j++) {
- if((new_data[i][j] & mask) != (orig_data[i][j] & mask)) {
+ if(((unsigned)new_data[i][j] & mask) != ((unsigned)orig_data[i][j] & mask)) {
H5_FAILED();
printf(" Read different values than written.\n");
printf(" At index %lu,%lu\n", (unsigned long)i, (unsigned long)j);
@@ -3337,9 +3386,9 @@ test_nbit_compound(hid_t file)
s_mask = ~((unsigned)~0 << (precision[2] + offset[2])) & ((unsigned)~0 << offset[2]);
for(i=0; i<size[0]; i++) {
for(j=0; j<size[1]; j++) {
- if((new_data[i][j].i & i_mask) != (orig_data[i][j].i & i_mask) ||
- (new_data[i][j].c & c_mask) != (orig_data[i][j].c & c_mask) ||
- (new_data[i][j].s & s_mask) != (orig_data[i][j].s & s_mask) ||
+ if(((unsigned)new_data[i][j].i & i_mask) != ((unsigned)orig_data[i][j].i & i_mask) ||
+ ((unsigned)new_data[i][j].c & c_mask) != ((unsigned)orig_data[i][j].c & c_mask) ||
+ ((unsigned)new_data[i][j].s & s_mask) != ((unsigned)orig_data[i][j].s & s_mask) ||
(orig_data[i][j].f==orig_data[i][j].f && new_data[i][j].f != orig_data[i][j].f))
{
H5_FAILED();
@@ -3595,16 +3644,16 @@ test_nbit_compound_2(hid_t file)
for(m = 0; m < (size_t)array_dims[0]; m++)
for(n = 0; n < (size_t)array_dims[1]; n++)
- if((new_data[i][j].b[m][n]&b_mask)!=(orig_data[i][j].b[m][n]&b_mask)) {
+ if(((unsigned)new_data[i][j].b[m][n] & b_mask)!=((unsigned)orig_data[i][j].b[m][n] & b_mask)) {
b_failed = 1;
goto out;
}
for(m = 0; m < (size_t)array_dims[0]; m++)
for(n = 0; n < (size_t)array_dims[1]; n++)
- if((new_data[i][j].d[m][n].i & i_mask)!=(orig_data[i][j].d[m][n].i & i_mask)||
- (new_data[i][j].d[m][n].c & c_mask)!=(orig_data[i][j].d[m][n].c & c_mask)||
- (new_data[i][j].d[m][n].s & s_mask)!=(orig_data[i][j].d[m][n].s & s_mask)||
+ if(((unsigned)new_data[i][j].d[m][n].i & i_mask) != ((unsigned)orig_data[i][j].d[m][n].i & i_mask)||
+ ((unsigned)new_data[i][j].d[m][n].c & c_mask) != ((unsigned)orig_data[i][j].d[m][n].c & c_mask)||
+ ((unsigned)new_data[i][j].d[m][n].s & s_mask) != ((unsigned)orig_data[i][j].d[m][n].s & s_mask)||
(new_data[i][j].d[m][n].f==new_data[i][j].d[m][n].f &&
new_data[i][j].d[m][n].f != new_data[i][j].d[m][n].f)) {
d_failed = 1;
@@ -3612,9 +3661,9 @@ test_nbit_compound_2(hid_t file)
}
out:
- if((new_data[i][j].a.i & i_mask)!=(orig_data[i][j].a.i & i_mask)||
- (new_data[i][j].a.c & c_mask)!=(orig_data[i][j].a.c & c_mask)||
- (new_data[i][j].a.s & s_mask)!=(orig_data[i][j].a.s & s_mask)||
+ if(((unsigned)new_data[i][j].a.i & i_mask) != ((unsigned)orig_data[i][j].a.i & i_mask)||
+ ((unsigned)new_data[i][j].a.c & c_mask) != ((unsigned)orig_data[i][j].a.c & c_mask)||
+ ((unsigned)new_data[i][j].a.s & s_mask) != ((unsigned)orig_data[i][j].a.s & s_mask)||
(new_data[i][j].a.f==new_data[i][j].a.f &&
new_data[i][j].a.f != new_data[i][j].a.f)||
new_data[i][j].v != orig_data[i][j].v || b_failed || d_failed) {
@@ -6067,7 +6116,7 @@ test_copy_dcpl(hid_t file, hid_t fapl)
/* Create a second file and create 2 datasets with the copies of the DCPLs in the first
* file. Test whether the copies of DCPLs work. */
- h5_fixname(FILENAME[11], fapl, filename, sizeof filename);
+ h5_fixname(FILENAME[13], fapl, filename, sizeof filename);
if((new_file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
TEST_ERROR
@@ -7849,6 +7898,147 @@ error:
/*-------------------------------------------------------------------------
+ *
+ * test_unfiltered_edge_chunks():
+ * Tests that partial edge chunks aren't filtered when the
+ * H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS option is set.
+ *
+ * Programmer: Neil Fortner; 17th March, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_unfiltered_edge_chunks(hid_t fapl)
+{
+ hid_t fid = -1; /* File id */
+ hid_t did = -1; /* Dataset id */
+ hid_t sid = -1; /* Dataspace id */
+ hid_t dcpl = -1; /* DCPL id */
+ hsize_t dim[2] = {4, 3}; /* Dataset dimensions */
+ hsize_t cdim[2] = {2, 2}; /* Chunk dimension */
+ char wbuf[4][3]; /* Write buffer */
+ char rbuf[4][3]; /* Read buffer */
+ char filename[FILENAME_BUF_SIZE] = ""; /* File name */
+ unsigned opts; /* Chunk options */
+ unsigned i, j; /* Local index variables */
+
+ /* Output message about test being performed */
+ TESTING("disabled partial chunk filters");
+
+ h5_fixname(FILENAME[14], fapl, filename, sizeof filename);
+
+ /* Create the file */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ TEST_ERROR
+
+ /* Register byte-counting filter */
+ if(H5Zregister(H5Z_COUNT) < 0)
+ TEST_ERROR
+
+ /* Create dataspace */
+ if((sid = H5Screate_simple(2, dim, NULL)) < 0)
+ TEST_ERROR
+
+ /* Create DCPL */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR
+
+ /* Set chunk dimensions */
+ if(H5Pset_chunk(dcpl, 2, cdim) < 0)
+ TEST_ERROR
+
+ /* Add "count" filter */
+ if(H5Pset_filter(dcpl, H5Z_FILTER_COUNT, 0u, (size_t)0, NULL) < 0)
+ TEST_ERROR
+
+ /* Disable filters on partial chunks */
+ if(H5Pget_chunk_opts(dcpl, &opts) < 0)
+ TEST_ERROR
+ opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
+ if(H5Pset_chunk_opts(dcpl, opts) < 0)
+ TEST_ERROR
+
+ /* Initialize write buffer */
+ for(i=0; i<dim[0]; i++)
+ for(j=0; j<dim[1]; j++)
+ wbuf[i][j] = (char)((2 * i) - j);
+
+ /* Reset byte counts */
+ count_nbytes_read = (size_t)0;
+ count_nbytes_written = (size_t)0;
+
+ /* Create dataset */
+ if((did = H5Dcreate2(fid, DSET_CHUNKED_NAME, H5T_NATIVE_CHAR, sid,
+ H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Nothing should have been written, as we are not using early allocation */
+ if(count_nbytes_read != (size_t)0)
+ TEST_ERROR
+ if(count_nbytes_written != (size_t)0)
+ TEST_ERROR
+
+ /* Write data */
+ if(H5Dwrite(did, H5T_NATIVE_CHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf) < 0)
+ TEST_ERROR
+
+ /* Close dataset */
+ if(H5Dclose(did) < 0)
+ TEST_ERROR
+
+ /* Make sure only 2 of the 4 chunks were written through the filter (4 bytes
+ * each) */
+ if(count_nbytes_read != (size_t)0)
+ TEST_ERROR
+ if(count_nbytes_written != (size_t)(2 * cdim[0] * cdim[1]))
+ TEST_ERROR
+
+ /* Reopen the dataset */
+ if((did = H5Dopen2(fid, DSET_CHUNKED_NAME, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Read the dataset */
+ if(H5Dread(did, H5T_NATIVE_CHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+ TEST_ERROR
+
+ /* Verify that data read == data written */
+ for(i=0; i<dim[0]; i++)
+ for(j=0; j<dim[1]; j++)
+ if(rbuf[i][j] != wbuf[i][j])
+ TEST_ERROR
+
+ /* Make sure only 2 of the 4 chunks were read through the filter (4 bytes
+ * each) */
+ if(count_nbytes_read != (size_t)(2 * cdim[0] * cdim[1]))
+ TEST_ERROR
+ if(count_nbytes_written != (size_t)(2 * cdim[0] * cdim[1]))
+ TEST_ERROR
+
+ /* Close IDs */
+ if(H5Dclose(did) < 0)
+ TEST_ERROR
+ if(H5Pclose(dcpl) < 0)
+ TEST_ERROR
+ if(H5Sclose(sid) < 0)
+ TEST_ERROR
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Dclose(did);
+ H5Pclose(dcpl);
+ H5Sclose(sid);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+ return -1;
+} /* test_unfiltered_edge_chunks */
+
+
+/*-------------------------------------------------------------------------
* Function: test_large_chunk_shrink
*
* Purpose: Tests support for shrinking a chunk larger than 1 MB by a
@@ -7990,7 +8180,7 @@ test_zero_dim_dset(hid_t fapl)
TESTING("shrinking large chunk");
- h5_fixname(FILENAME[13], fapl, filename, sizeof filename);
+ h5_fixname(FILENAME[16], fapl, filename, sizeof filename);
/* Create file */
if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) FAIL_STACK_ERROR
@@ -9185,6 +9375,7 @@ main(void)
nerrors += (test_big_chunks_bypass_cache(my_fapl) < 0 ? 1 : 0);
nerrors += (test_chunk_expand(my_fapl) < 0 ? 1 : 0);
nerrors += (test_layout_extend(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_unfiltered_edge_chunks(my_fapl) < 0 ? 1 : 0);
nerrors += (test_large_chunk_shrink(my_fapl) < 0 ? 1 : 0);
nerrors += (test_zero_dim_dset(my_fapl) < 0 ? 1 : 0);
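
The byte-count expectations in test_unfiltered_edge_chunks are plain chunk-grid arithmetic: a 4x3 dataset with 2x2 chunks has a 2x2 chunk grid, the two chunks in the last column are partial because 3 is not a multiple of 2, so only the two full chunks pass through the filter, each carrying 2*2 = 4 bytes of char data, giving the 8 bytes written and 8 bytes read that the test checks for. A sketch of the general computation, where is_partial_edge_chunk is my own helper for illustration, not the library's internal routine:

    #include <stdio.h>

    /* A chunk is a partial edge chunk if, in any dimension, it extends
     * past the dataset boundary. */
    static int
    is_partial_edge_chunk(unsigned ndims, const unsigned long long *dset_dims,
        const unsigned long long *chunk_dims, const unsigned long long *chunk_offset)
    {
        unsigned u;

        for(u = 0; u < ndims; u++)
            if(chunk_offset[u] + chunk_dims[u] > dset_dims[u])
                return 1;
        return 0;
    }

    int
    main(void)
    {
        unsigned long long dset[2] = {4, 3}, chunk[2] = {2, 2};
        unsigned long long nfull = 0, npartial = 0;
        unsigned long long i, j;

        /* Walk the chunk grid and classify each chunk */
        for(i = 0; i < dset[0]; i += chunk[0])
            for(j = 0; j < dset[1]; j += chunk[1]) {
                unsigned long long off[2] = {i, j};

                if(is_partial_edge_chunk(2, dset, chunk, off))
                    npartial++;
                else
                    nfull++;
            }

        /* 2 full chunks * (2*2) bytes of char data = 8 bytes filtered */
        printf("full: %llu, partial: %llu, filtered bytes: %llu\n",
            nfull, npartial, nfull * chunk[0] * chunk[1]);
        return 0;
    }
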
diff --git a/test/objcopy.c b/test/objcopy.c
index 9bdb50d..a9791d5 100644
--- a/test/objcopy.c
+++ b/test/objcopy.c
@@ -3084,6 +3084,146 @@ error:
/*-------------------------------------------------------------------------
+ * Function: test_copy_dataset_no_edge_filt
+ *
+ * Purpose: Create a compressed, chunked dataset with filters disabled
+ * on partial edge chunks in SRC file and copy it to DST file
+ *
+ * Return: Success: 0
+ * Failure: number of errors
+ *
+ * Programmer: Neil Fortner
+ * Tuesday, May 11, 2010
+ * Mostly copied from test_copy_dataset_compressed, by
+ * Quincey Koziol
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_copy_dataset_no_edge_filt(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
+ hid_t dst_fapl)
+{
+#ifdef H5_HAVE_FILTER_DEFLATE
+ hid_t fid_src = -1, fid_dst = -1; /* File IDs */
+ hid_t sid = -1; /* Dataspace ID */
+ hid_t pid = -1; /* Dataset creation property list ID */
+ hid_t did = -1, did2 = -1; /* Dataset IDs */
+ hsize_t dim2d[2]; /* Dataset dimensions */
+ hsize_t chunk_dim2d[2] ={CHUNK_SIZE_1, CHUNK_SIZE_2}; /* Chunk dimensions */
+ float buf[DIM_SIZE_1][DIM_SIZE_2]; /* Buffer for writing data */
+ int i, j; /* Local index variables */
+ char src_filename[NAME_BUF_SIZE];
+ char dst_filename[NAME_BUF_SIZE];
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+ TESTING("H5Ocopy(): compressed dataset with no edge filters");
+
+#ifndef H5_HAVE_FILTER_DEFLATE
+ SKIPPED();
+ puts(" Deflation filter not available");
+#else /* H5_HAVE_FILTER_DEFLATE */
+ /* set initial data values */
+ for (i=0; i<DIM_SIZE_1; i++)
+ for (j=0; j<DIM_SIZE_2; j++)
+ buf[i][j] = 100.0F; /* Something easy to compress */
+
+ /* Initialize the filenames */
+ h5_fixname(FILENAME[0], src_fapl, src_filename, sizeof src_filename);
+ h5_fixname(FILENAME[1], dst_fapl, dst_filename, sizeof dst_filename);
+
+ /* Reset file address checking info */
+ addr_reset();
+
+ /* create source file */
+ if((fid_src = H5Fcreate(src_filename, H5F_ACC_TRUNC, fcpl_src, src_fapl)) < 0) TEST_ERROR
+
+ /* Set dataspace dimensions */
+ dim2d[0]=DIM_SIZE_1;
+ dim2d[1]=DIM_SIZE_2;
+
+ /* create dataspace */
+ if((sid = H5Screate_simple(2, dim2d, NULL)) < 0) TEST_ERROR
+
+ /* create and set comp & chunk plist, and disable partial chunk filters */
+ if((pid = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR
+ if(H5Pset_chunk(pid, 2, chunk_dim2d) < 0) TEST_ERROR
+ if(H5Pset_deflate(pid, 9) < 0) TEST_ERROR
+ if(H5Pset_chunk_opts(pid, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0) TEST_ERROR
+
+ /* create dataset */
+ if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* close chunk plist */
+ if(H5Pclose(pid) < 0) TEST_ERROR
+
+ /* write data into file */
+ if(H5Dwrite(did, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0) TEST_ERROR
+
+ /* close dataspace */
+ if(H5Sclose(sid) < 0) TEST_ERROR
+
+ /* attach attributes to the dataset */
+ if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR
+
+ /* close the dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /* close the SRC file */
+ if(H5Fclose(fid_src) < 0) TEST_ERROR
+
+
+ /* open the source file with read-only */
+ if((fid_src = H5Fopen(src_filename, H5F_ACC_RDONLY, src_fapl)) < 0) TEST_ERROR
+
+ /* create destination file */
+ if((fid_dst = H5Fcreate(dst_filename, H5F_ACC_TRUNC, fcpl_dst, dst_fapl)) < 0) TEST_ERROR
+
+ /* Create an uncopied object in destination file so that addresses in source and destination files aren't the same */
+ if(H5Gclose(H5Gcreate2(fid_dst, NAME_GROUP_UNCOPIED, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* copy the dataset from SRC to DST */
+ if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED, fid_dst, NAME_DATASET_CHUNKED, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+
+ /* open the dataset for copy */
+ if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* open the destination dataset */
+ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* Check if the datasets are equal */
+ if(compare_datasets(did, did2, H5P_DEFAULT, NULL) != TRUE) TEST_ERROR
+
+ /* close the destination dataset */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* close the source dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /* close the SRC file */
+ if(H5Fclose(fid_src) < 0) TEST_ERROR
+
+ /* close the DST file */
+ if(H5Fclose(fid_dst) < 0) TEST_ERROR
+
+ PASSED();
+#endif /* H5_HAVE_FILTER_DEFLATE */
+ return 0;
+
+#ifdef H5_HAVE_FILTER_DEFLATE
+error:
+ H5E_BEGIN_TRY {
+ H5Dclose(did2);
+ H5Dclose(did);
+ H5Pclose(pid);
+ H5Sclose(sid);
+ H5Fclose(fid_dst);
+ H5Fclose(fid_src);
+ } H5E_END_TRY;
+ return 1;
+#endif /* H5_HAVE_FILTER_DEFLATE */
+} /* end test_copy_dataset_no_edge_filt */
+
+
+/*-------------------------------------------------------------------------
* Function: test_copy_dataset_compact
*
* Purpose: Create a compact dataset in SRC file and copy it to DST file
@@ -12376,6 +12516,7 @@ main(void)
nerrors += test_copy_dataset_chunked_empty(fcpl_src, fcpl_dst, src_fapl, dst_fapl);
nerrors += test_copy_dataset_chunked_sparse(fcpl_src, fcpl_dst, src_fapl, dst_fapl);
nerrors += test_copy_dataset_compressed(fcpl_src, fcpl_dst, src_fapl, dst_fapl);
+ nerrors += test_copy_dataset_no_edge_filt(fcpl_src, fcpl_dst, src_fapl, dst_fapl);
nerrors += test_copy_dataset_compact(fcpl_src, fcpl_dst, src_fapl, dst_fapl);
nerrors += test_copy_dataset_multi_ohdr_chunks(fcpl_src, fcpl_dst, src_fapl, dst_fapl);
nerrors += test_copy_dataset_attr_named_dtype(fcpl_src, fcpl_dst, src_fapl, dst_fapl);
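
One further check the copy test could make, sketched here under the assumption that H5Ocopy carries the chunk option along in the copied layout message (which compare_datasets exercises indirectly): reopen the destination dataset's creation property list and read the option back.

    #include "hdf5.h"

    /* Returns 1 if the dataset's DCPL still carries the "don't filter
     * partial chunks" option, 0 if it doesn't, -1 on error. */
    static int
    chunk_opts_survived_copy(hid_t did)
    {
        unsigned opts = 0;
        hid_t dcpl;
        int ret;

        if((dcpl = H5Dget_create_plist(did)) < 0)
            return -1;
        if(H5Pget_chunk_opts(dcpl, &opts) < 0)
            ret = -1;
        else
            ret = (opts & H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) ? 1 : 0;
        H5Pclose(dcpl);
        return ret;
    }
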
diff --git a/test/set_extent.c b/test/set_extent.c
index acfdc5b..7fe8d75 100644
--- a/test/set_extent.c
+++ b/test/set_extent.c
@@ -46,8 +46,9 @@ const char *FILENAME[] = {
#define CONFIG_COMPRESS 0x01u
#define CONFIG_FILL 0x02u
#define CONFIG_EARLY_ALLOC 0x04u
+#define CONFIG_UNFILT_EDGE 0x08u
#define CONFIG_ALL (CONFIG_COMPRESS + CONFIG_FILL \
- + CONFIG_EARLY_ALLOC)
+ + CONFIG_EARLY_ALLOC + CONFIG_UNFILT_EDGE)
#define FILL_VALUE -1
#define DO_RANKS_PRINT_CONFIG(TEST) { \
printf(" Config:\n"); \
@@ -56,6 +57,8 @@ const char *FILENAME[] = {
printf(" Fill value: %s\n", (do_fillvalue ? "yes" : "no")); \
printf(" Early allocation: %s\n", (config & CONFIG_EARLY_ALLOC ? "yes" \
: "no")); \
+ printf(" Edge chunk filters: %s\n", (config & CONFIG_UNFILT_EDGE \
+ ? "disabled" : "enabled")); \
} /* end DO_RANKS_PRINT_CONFIG */
#define RANK1 1
@@ -85,18 +88,22 @@ static int do_layouts( hid_t fapl );
static int test_rank1( hid_t fapl,
hid_t dcpl,
hbool_t do_fill_value,
+ hbool_t disable_edge_filters,
hbool_t set_istore_k);
static int test_rank2( hid_t fapl,
hid_t dcpl,
hbool_t do_fill_value,
+ hbool_t disable_edge_filters,
hbool_t set_istore_k);
static int test_rank3( hid_t fapl,
hid_t dcpl,
hbool_t do_fill_value,
+ hbool_t disable_edge_filters,
hbool_t set_istore_k);
static int test_random_rank4( hid_t fapl,
hid_t dcpl,
hbool_t do_fillvalue,
+ hbool_t disable_edge_filters,
hbool_t do_sparse);
static int test_external( hid_t fapl );
@@ -211,7 +218,8 @@ error:
static int do_ranks( hid_t fapl )
{
- hbool_t do_fillvalue = 0;
+ hbool_t do_fillvalue = FALSE;
+ hbool_t disable_edge_filters = FALSE;
hid_t dcpl = -1;
int fillvalue = FILL_VALUE;
unsigned config;
@@ -247,6 +255,11 @@ static int do_ranks( hid_t fapl )
if(H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY) < 0)
TEST_ERROR
+ if(config & CONFIG_UNFILT_EDGE)
+ disable_edge_filters = TRUE;
+ else
+ disable_edge_filters = FALSE;
+
/* Run tests */
if(do_fillvalue) {
unsigned ifset;
@@ -261,25 +274,25 @@ static int do_ranks( hid_t fapl )
if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_ALLOC) < 0)
TEST_ERROR
- if(test_rank1(fapl, dcpl, do_fillvalue, FALSE) < 0) {
+ if(test_rank1(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE) < 0) {
DO_RANKS_PRINT_CONFIG("Rank 1")
printf(" Fill time: %s\n", (ifset ? "H5D_FILL_TIME_IFSET"
: "H5D_FILL_TIME_ALLOC"));
goto error;
} /* end if */
- if(test_rank2(fapl, dcpl, do_fillvalue, FALSE) < 0) {
+ if(test_rank2(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE) < 0) {
DO_RANKS_PRINT_CONFIG("Rank 2")
printf(" Fill time: %s\n", (ifset ? "H5D_FILL_TIME_IFSET"
: "H5D_FILL_TIME_ALLOC"));
goto error;
} /* end if */
- if(test_rank3(fapl, dcpl, do_fillvalue, FALSE) < 0) {
+ if(test_rank3(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE) < 0) {
DO_RANKS_PRINT_CONFIG("Rank 3")
printf(" Fill time: %s\n", (ifset ? "H5D_FILL_TIME_IFSET"
: "H5D_FILL_TIME_ALLOC"));
goto error;
} /* end if */
- if(test_rank2(fapl, dcpl, do_fillvalue, TRUE) < 0) {
+ if(test_rank2(fapl, dcpl, do_fillvalue, disable_edge_filters, TRUE) < 0) {
DO_RANKS_PRINT_CONFIG("Rank 2 with non-default indexed storage B-tree")
printf(" Fill time: %s\n", (ifset ? "H5D_FILL_TIME_IFSET"
: "H5D_FILL_TIME_ALLOC"));
@@ -293,19 +306,19 @@ static int do_ranks( hid_t fapl )
if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_ALLOC) < 0)
TEST_ERROR
- if(test_rank1(fapl, dcpl, do_fillvalue, FALSE) < 0) {
+ if(test_rank1(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE) < 0) {
DO_RANKS_PRINT_CONFIG("Rank 1")
goto error;
} /* end if */
- if(test_rank2(fapl, dcpl, do_fillvalue, FALSE) < 0) {
+ if(test_rank2(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE) < 0) {
DO_RANKS_PRINT_CONFIG("Rank 2")
goto error;
} /* end if */
- if(test_rank3(fapl, dcpl, do_fillvalue, FALSE) < 0) {
+ if(test_rank3(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE) < 0) {
DO_RANKS_PRINT_CONFIG("Rank 3")
goto error;
} /* end if */
- if(test_rank2(fapl, dcpl, do_fillvalue, TRUE) < 0) {
+ if(test_rank2(fapl, dcpl, do_fillvalue, disable_edge_filters, TRUE) < 0) {
DO_RANKS_PRINT_CONFIG("Rank 2 with non-default indexed storage B-tree")
goto error;
} /* end if */
@@ -316,13 +329,13 @@ static int do_ranks( hid_t fapl )
if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_IFSET) < 0)
TEST_ERROR
- if(test_random_rank4(fapl, dcpl, do_fillvalue, FALSE) < 0) {
+ if(test_random_rank4(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE) < 0) {
DO_RANKS_PRINT_CONFIG("Randomized rank 4")
goto error;
} /* end if */
if(!(config & CONFIG_EARLY_ALLOC))
- if(test_random_rank4(fapl, dcpl, do_fillvalue, TRUE) < 0) {
+ if(test_random_rank4(fapl, dcpl, do_fillvalue, disable_edge_filters, TRUE) < 0) {
DO_RANKS_PRINT_CONFIG("Randomized rank 4 with sparse allocation")
goto error;
} /* end if */
@@ -376,6 +389,7 @@ error:
static int test_rank1( hid_t fapl,
hid_t dcpl,
hbool_t do_fill_value,
+ hbool_t disable_edge_filters,
hbool_t set_istore_k)
{
@@ -433,6 +447,9 @@ static int test_rank1( hid_t fapl,
TEST_ERROR
if(H5Pset_chunk(my_dcpl, RANK1, dims_c) < 0)
TEST_ERROR
+ if(disable_edge_filters)
+ if(H5Pset_chunk_opts(my_dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0)
+ TEST_ERROR
/*-------------------------------------------------------------------------
* create, write dataset
@@ -713,6 +730,7 @@ error:
static int test_rank2( hid_t fapl,
hid_t dcpl,
hbool_t do_fill_value,
+ hbool_t disable_edge_filters,
hbool_t set_istore_k)
{
@@ -793,6 +811,9 @@ static int test_rank2( hid_t fapl,
{
TEST_ERROR
}
+ if(disable_edge_filters)
+ if(H5Pset_chunk_opts(my_dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0)
+ TEST_ERROR
/*-------------------------------------------------------------------------
* Procedure 1
@@ -1328,6 +1349,7 @@ error:
static int test_rank3( hid_t fapl,
hid_t dcpl,
hbool_t do_fill_value,
+ hbool_t disable_edge_filters,
hbool_t set_istore_k)
{
@@ -1414,6 +1436,9 @@ static int test_rank3( hid_t fapl,
{
TEST_ERROR
}
+ if(disable_edge_filters)
+ if(H5Pset_chunk_opts(my_dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0)
+ TEST_ERROR
/*-------------------------------------------------------------------------
* create, write array
@@ -2488,7 +2513,7 @@ error:
*-------------------------------------------------------------------------
*/
static int test_random_rank4( hid_t fapl, hid_t dcpl, hbool_t do_fillvalue,
- hbool_t do_sparse )
+ hbool_t disable_edge_filters, hbool_t do_sparse )
{
hid_t file = -1;
hid_t dset = -1;
@@ -2532,6 +2557,9 @@ static int test_random_rank4( hid_t fapl, hid_t dcpl, hbool_t do_fillvalue,
TEST_ERROR
if(H5Pset_chunk(my_dcpl, 4, cdims) < 0)
TEST_ERROR
+ if(disable_edge_filters)
+ if(H5Pset_chunk_opts(my_dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0)
+ TEST_ERROR
if((dset = H5Dcreate2(file, "dset", H5T_NATIVE_INT, fspace, H5P_DEFAULT,
my_dcpl, H5P_DEFAULT)) < 0)
TEST_ERROR
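
The CONFIG_* bits at the top of set_extent.c make the full test matrix cheap to walk: because the flags are disjoint powers of two and CONFIG_ALL is their sum, counting from 0 to CONFIG_ALL enumerates every combination of options exactly once. A standalone sketch of that pattern with the same four flags:

    #include <stdio.h>

    #define CONFIG_COMPRESS    0x01u
    #define CONFIG_FILL        0x02u
    #define CONFIG_EARLY_ALLOC 0x04u
    #define CONFIG_UNFILT_EDGE 0x08u
    #define CONFIG_ALL (CONFIG_COMPRESS + CONFIG_FILL \
            + CONFIG_EARLY_ALLOC + CONFIG_UNFILT_EDGE)

    int
    main(void)
    {
        unsigned config;

        /* Visits all 16 subsets of the four options exactly once */
        for(config = 0; config <= CONFIG_ALL; config++)
            printf("compress=%u fill=%u early=%u unfilt_edge=%u\n",
                !!(config & CONFIG_COMPRESS), !!(config & CONFIG_FILL),
                !!(config & CONFIG_EARLY_ALLOC), !!(config & CONFIG_UNFILT_EDGE));
        return 0;
    }
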
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index 2e1005c..b7f2fc0 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -2515,6 +2515,8 @@ compress_readAll(void)
int rank=1; /* Dataspace rank */
hsize_t dim=dim0; /* Dataspace dimensions */
unsigned u; /* Local index variable */
+ unsigned chunk_opts; /* Chunk options */
+ unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
DATATYPE *data_read = NULL; /* data buffer */
DATATYPE *data_orig = NULL; /* expected data buffer */
const char *filename;
@@ -2541,116 +2543,132 @@ compress_readAll(void)
for(u=0; u<dim;u++)
data_orig[u]=u;
- /* Process zero creates the file with a compressed, chunked dataset */
- if(mpi_rank==0) {
- hsize_t chunk_dim; /* Chunk dimensions */
-
- /* Create the file */
- fid = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((fid > 0), "H5Fcreate succeeded");
-
- /* Create property list for chunking and compression */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcpl > 0), "H5Pcreate succeeded");
-
- ret = H5Pset_layout(dcpl, H5D_CHUNKED);
- VRFY((ret >= 0), "H5Pset_layout succeeded");
-
- /* Use eight chunks */
- chunk_dim = dim / 8;
- ret = H5Pset_chunk(dcpl, rank, &chunk_dim);
- VRFY((ret >= 0), "H5Pset_chunk succeeded");
+ /* Run test both with and without filters disabled on partial chunks */
+ for(disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1;
+ disable_partial_chunk_filters++) {
+ /* Process zero creates the file with a compressed, chunked dataset */
+ if(mpi_rank==0) {
+ hsize_t chunk_dim; /* Chunk dimensions */
+
+ /* Create the file */
+ fid = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((fid > 0), "H5Fcreate succeeded");
+
+ /* Create property list for chunking and compression */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl > 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_layout(dcpl, H5D_CHUNKED);
+ VRFY((ret >= 0), "H5Pset_layout succeeded");
+
+ /* Use eight chunks */
+ chunk_dim = dim / 8;
+ ret = H5Pset_chunk(dcpl, rank, &chunk_dim);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+ /* Set chunk options appropriately */
+ if(disable_partial_chunk_filters) {
+ ret = H5Pget_chunk_opts(dcpl, &chunk_opts);
+ VRFY((ret>=0),"H5Pget_chunk_opts succeeded");
+
+ chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
+
+ ret = H5Pset_chunk_opts(dcpl, chunk_opts);
+ VRFY((ret>=0),"H5Pset_chunk_opts succeeded");
+ } /* end if */
+
+ ret = H5Pset_deflate(dcpl, 9);
+ VRFY((ret >= 0), "H5Pset_deflate succeeded");
+
+ /* Create dataspace */
+ dataspace = H5Screate_simple(rank, &dim, NULL);
+ VRFY((dataspace > 0), "H5Screate_simple succeeded");
+
+ /* Create dataset */
+ dataset = H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dataset > 0), "H5Dcreate2 succeeded");
+
+ /* Write compressed data */
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_orig);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* Close objects */
+ ret = H5Pclose(dcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Sclose(dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ }
- ret = H5Pset_deflate(dcpl, 9);
- VRFY((ret >= 0), "H5Pset_deflate succeeded");
+ /* Wait for file to be created */
+ MPI_Barrier(comm);
- /* Create dataspace */
- dataspace = H5Screate_simple(rank, &dim, NULL);
- VRFY((dataspace > 0), "H5Screate_simple succeeded");
+ /* -------------------
+ * OPEN AN HDF5 FILE
+ * -------------------*/
- /* Create dataset */
- dataset = H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- VRFY((dataset > 0), "H5Dcreate2 succeeded");
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "create_faccess_plist succeeded");
- /* Write compressed data */
- ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_orig);
- VRFY((ret >= 0), "H5Dwrite succeeded");
+ /* open the file collectively */
+ fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl);
+ VRFY((fid > 0), "H5Fopen succeeded");
- /* Close objects */
- ret = H5Pclose(dcpl);
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "H5Pclose succeeded");
- ret = H5Sclose(dataspace);
- VRFY((ret >= 0), "H5Sclose succeeded");
- ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose succeeded");
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
- }
- /* Wait for file to be created */
- MPI_Barrier(comm);
-
- /* -------------------
- * OPEN AN HDF5 FILE
- * -------------------*/
- /* setup file access template */
- acc_tpl = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_tpl >= 0), "");
+ /* Open dataset with compressed chunks */
+ dataset = H5Dopen2(fid, "compressed_data", H5P_DEFAULT);
+ VRFY((dataset > 0), "H5Dopen2 succeeded");
- /* open the file collectively */
- fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl);
- VRFY((fid > 0), "H5Fopen succeeded");
+ /* Try reading & writing data */
+ if(dataset>0) {
+ /* Create dataset transfer property list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist > 0), "H5Pcreate succeeded");
- /* Release file-access template */
- ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
-
- /* Open dataset with compressed chunks */
- dataset = H5Dopen2(fid, "compressed_data", H5P_DEFAULT);
- VRFY((dataset > 0), "H5Dopen2 succeeded");
-
- /* Try reading & writing data */
- if(dataset>0) {
- /* Create dataset transfer property list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist > 0), "H5Pcreate succeeded");
-
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
- }
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
- /* Try reading the data */
- ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ /* Try reading the data */
+ ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- /* Verify data read */
- for(u=0; u<dim; u++)
- if(data_orig[u]!=data_read[u]) {
- printf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n",__LINE__,
- (unsigned)u,data_orig[u],(unsigned)u,data_read[u]);
- nerrors++;
- }
+ /* Verify data read */
+ for(u=0; u<dim; u++)
+ if(data_orig[u]!=data_read[u]) {
+ printf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n",__LINE__,
+ (unsigned)u,data_orig[u],(unsigned)u,data_read[u]);
+ nerrors++;
+ }
- /* Writing to the compressed, chunked dataset in parallel should fail */
- H5E_BEGIN_TRY {
- ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
- } H5E_END_TRY;
- VRFY((ret < 0), "H5Dwrite failed");
+ /* Writing to the compressed, chunked dataset in parallel should fail */
+ H5E_BEGIN_TRY {
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
+ } H5E_END_TRY;
+ VRFY((ret < 0), "H5Dwrite failed");
- ret = H5Pclose(xfer_plist);
- VRFY((ret >= 0), "H5Pclose succeeded");
- ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose succeeded");
- } /* end if */
+ ret = H5Pclose(xfer_plist);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ } /* end if */
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
+ /* Close file */
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ } /* end for */
/* release data buffers */
if(data_read) HDfree(data_read);
diff --git a/testpar/t_filter_read.c b/testpar/t_filter_read.c
index 5e1cd04..44f3f11 100644
--- a/testpar/t_filter_read.c
+++ b/testpar/t_filter_read.c
@@ -213,6 +213,8 @@ test_filter_read(void)
hid_t dc; /* HDF5 IDs */
const hsize_t chunk_size[2] = {CHUNK_DIM1, CHUNK_DIM2}; /* Chunk dimensions */
hsize_t null_size; /* Size of dataset without filters */
+ unsigned chunk_opts; /* Chunk options */
+ unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
herr_t hrc;
const char *filename;
hsize_t fletcher32_size; /* Size of dataset with Fletcher32 checksum */
@@ -254,74 +256,104 @@ test_filter_read(void)
hrc = H5Pclose (dc);
VRFY(hrc>=0,"H5Pclose");
- /*----------------------------------------------------------
- * STEP 1: Test Fletcher32 Checksum by itself.
- *----------------------------------------------------------
- */
-
- dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc>=0,"H5Pset_filter");
+ /* Run steps 1-3 both with and without filters disabled on partial chunks */
+ for(disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1;
+ disable_partial_chunk_filters++) {
+ /* Set chunk options appropriately */
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc>=0,"H5Pcreate");
- hrc = H5Pset_chunk (dc, 2, chunk_size);
- VRFY(hrc>=0,"H5Pset_filter");
+ hrc = H5Pset_chunk (dc, 2, chunk_size);
+ VRFY(hrc>=0,"H5Pset_filter");
- hrc = H5Pset_filter (dc,H5Z_FILTER_FLETCHER32,0,0,NULL);
- VRFY(hrc>=0,"H5Pset_filter");
+ hrc = H5Pget_chunk_opts(dc, &chunk_opts);
+ VRFY(hrc>=0,"H5Pget_chunk_opts");
- filter_read_internal(filename,dc,&fletcher32_size);
- VRFY(fletcher32_size > null_size,"Size after checksumming is incorrect.");
+ if(disable_partial_chunk_filters)
+ chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
- /* Clean up objects used for this test */
- hrc = H5Pclose (dc);
- VRFY(hrc>=0, "H5Pclose");
+ hrc = H5Pclose (dc);
+ VRFY(hrc>=0,"H5Pclose");
+ /*----------------------------------------------------------
+ * STEP 1: Test Fletcher32 Checksum by itself.
+ *----------------------------------------------------------
+ */
+#ifdef H5_HAVE_FILTER_FLETCHER32
- /*----------------------------------------------------------
- * STEP 2: Test deflation by itself.
- *----------------------------------------------------------
- */
-#ifdef H5_HAVE_FILTER_DEFLATE
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc>=0,"H5Pset_filter");
- dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc>=0, "H5Pcreate");
+ hrc = H5Pset_chunk (dc, 2, chunk_size);
+ VRFY(hrc>=0,"H5Pset_filter");
- hrc = H5Pset_chunk (dc, 2, chunk_size);
- VRFY(hrc>=0, "H5Pset_chunk");
+ hrc = H5Pset_chunk_opts (dc, chunk_opts);
+ VRFY(hrc>=0,"H5Pset_chunk_opts");
- hrc = H5Pset_deflate (dc, 6);
- VRFY(hrc>=0, "H5Pset_deflate");
+ hrc = H5Pset_filter (dc,H5Z_FILTER_FLETCHER32,0,0,NULL);
+ VRFY(hrc>=0,"H5Pset_filter");
- filter_read_internal(filename,dc,&deflate_size);
+ filter_read_internal(filename,dc,&fletcher32_size);
+ VRFY(fletcher32_size > null_size,"Size after checksumming is incorrect.");
- /* Clean up objects used for this test */
- hrc = H5Pclose (dc);
- VRFY(hrc>=0, "H5Pclose");
+ /* Clean up objects used for this test */
+ hrc = H5Pclose (dc);
+ VRFY(hrc>=0, "H5Pclose");
-#endif /* H5_HAVE_FILTER_DEFLATE */
+#endif /* H5_HAVE_FILTER_FLETCHER32 */
+ /*----------------------------------------------------------
+ * STEP 2: Test deflation by itself.
+ *----------------------------------------------------------
+ */
+#ifdef H5_HAVE_FILTER_DEFLATE
- /*----------------------------------------------------------
- * STEP 3: Test szip compression by itself.
- *----------------------------------------------------------
- */
-#ifdef H5_HAVE_FILTER_SZIP
- if(h5_szip_can_encode() == 1) {
dc = H5Pcreate(H5P_DATASET_CREATE);
VRFY(dc>=0, "H5Pcreate");
hrc = H5Pset_chunk (dc, 2, chunk_size);
VRFY(hrc>=0, "H5Pset_chunk");
- hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
- VRFY(hrc>=0, "H5Pset_szip");
+ hrc = H5Pset_chunk_opts (dc, chunk_opts);
+ VRFY(hrc>=0,"H5Pset_chunk_opts");
+
+ hrc = H5Pset_deflate (dc, 6);
+ VRFY(hrc>=0, "H5Pset_deflate");
- filter_read_internal(filename,dc,&szip_size);
+ filter_read_internal(filename,dc,&deflate_size);
/* Clean up objects used for this test */
hrc = H5Pclose (dc);
VRFY(hrc>=0, "H5Pclose");
- }
+
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+ /*----------------------------------------------------------
+ * STEP 3: Test szip compression by itself.
+ *----------------------------------------------------------
+ */
+#ifdef H5_HAVE_FILTER_SZIP
+ if(h5_szip_can_encode() == 1) {
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc>=0, "H5Pcreate");
+
+ hrc = H5Pset_chunk (dc, 2, chunk_size);
+ VRFY(hrc>=0, "H5Pset_chunk");
+
+ hrc = H5Pset_chunk_opts (dc, chunk_opts);
+ VRFY(hrc>=0,"H5Pset_chunk_opts");
+
+ hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
+ VRFY(hrc>=0, "H5Pset_szip");
+
+ filter_read_internal(filename,dc,&szip_size);
+
+ /* Clean up objects used for this test */
+ hrc = H5Pclose (dc);
+ VRFY(hrc>=0, "H5Pclose");
+ }
#endif /* H5_HAVE_FILTER_SZIP */
+ } /* end for */
/*----------------------------------------------------------