author    Neil Fortner <nfortne2@hdfgroup.org>  2010-07-28 15:56:28 (GMT)
committer Neil Fortner <nfortne2@hdfgroup.org>  2010-07-28 15:56:28 (GMT)
commit    9c218ea879de3f13464aed3c96d5298d1c934774 (patch)
tree      ad87e0a97a4435ce5da69da992c9c424e0db3469 /src
parent    bcbf482347a633b07af9eb19ddaccf2bdfccdfa6 (diff)
[svn-r19137] Purpose: Add support for disabling filters on partial edge chunks.
Added two new API functions, H5Pset_chunk_opts and H5Pget_chunk_opts. When the set function is passed H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS, datasets created with that property list will not apply filters to partially used chunks on the edge of the dataspace. Datasets created in this manner will not be readable by HDF5 1.8 or earlier. Tested: jam, linew, amani (h5committest)
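As a rough usage sketch of the new API (not part of the commit; assumes an already-open file handle file_id and omits all error checking):

    hid_t    dcpl = H5Pcreate(H5P_DATASET_CREATE);
    hsize_t  chunk_dims[2] = {10, 10};
    hsize_t  dims[2] = {25, 25};
    hid_t    space, dset;
    unsigned opts;

    H5Pset_chunk(dcpl, 2, chunk_dims);
    H5Pset_deflate(dcpl, 6);

    /* New in this commit: leave partial edge chunks unfiltered */
    H5Pset_chunk_opts(dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS);

    /* A 25x25 dataset with 10x10 chunks has partial chunks along both
     * edges; those chunks are stored without the deflate filter */
    space = H5Screate_simple(2, dims, NULL);
    dset = H5Dcreate2(file_id, "dset", H5T_NATIVE_INT, space,
                      H5P_DEFAULT, dcpl, H5P_DEFAULT);

    /* The matching get call reads the option back */
    H5Pget_chunk_opts(dcpl, &opts);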
Diffstat (limited to 'src')
-rw-r--r--  src/H5Dchunk.c    870
-rw-r--r--  src/H5Dint.c       20
-rw-r--r--  src/H5Dmpio.c       4
-rw-r--r--  src/H5Dpkg.h       11
-rw-r--r--  src/H5Dpublic.h     3
-rw-r--r--  src/H5Olayout.c    29
-rw-r--r--  src/H5Oprivate.h   23
-rw-r--r--  src/H5Pdcpl.c     125
-rw-r--r--  src/H5Ppublic.h     2
9 files changed, 922 insertions, 165 deletions
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index 168c011..98dfff9 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -149,6 +149,8 @@ typedef struct H5D_chunk_it_ud3_t {
/* needed for compressed variable-length data */
const H5O_pline_t *pline; /* Filter pipeline */
+ unsigned dset_ndims; /* Number of dimensions in dataset */
+ const hsize_t *dset_dims; /* Dataset dimensions */
/* needed for copy object pointed by refs */
H5O_copy_t *cpy_info; /* Copy options */
@@ -207,6 +209,9 @@ static herr_t H5D_chunk_mem_cb(void *elem, hid_t type_id, unsigned ndims,
const hsize_t *coords, void *fm);
static herr_t H5D_chunk_cache_evict(const H5D_t *dset, hid_t dxpl_id,
const H5D_dxpl_cache_t *dxpl_cache, H5D_rdcc_ent_t *ent, hbool_t flush);
+static htri_t H5D_chunk_is_partial_edge_chunk(const hsize_t offset[],
+ const H5D_t *dset, unsigned dset_ndims, const hsize_t *dset_dims,
+ const uint32_t *chunk_dims);
/*********************/
@@ -643,7 +648,7 @@ H5D_chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info
#ifdef H5_HAVE_PARALLEL
&& !(io_info->using_mpi_vfd)
#endif /* H5_HAVE_PARALLEL */
- ) {
+ && H5S_SEL_ALL != H5S_GET_SELECT_TYPE(file_space)) {
/* Initialize skip list for chunk selections */
fm->sel_chunks = NULL;
fm->use_single = TRUE;
@@ -861,9 +866,8 @@ H5D_chunk_alloc(size_t size, const H5O_pline_t *pline)
FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_chunk_alloc)
HDassert(size);
- HDassert(pline);
- if(pline->nused > 0)
+ if(pline && pline->nused)
ret_value = H5MM_malloc(size);
else
ret_value = H5FL_BLK_MALLOC(chunk, size);
@@ -891,10 +895,8 @@ H5D_chunk_xfree(void *chk, const H5O_pline_t *pline)
{
FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_chunk_xfree)
- HDassert(pline);
-
if(chk) {
- if(pline->nused > 0)
+ if(pline && pline->nused)
H5MM_xfree(chk);
else
chk = H5FL_BLK_FREE(chunk, chk);
@@ -1721,7 +1723,8 @@ H5D_chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
src_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->src_type_size;
/* Lock the chunk into the cache */
- if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, FALSE)))
+ if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, FALSE,
+ FALSE)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
/* Set up the storage buffer information for this chunk */
@@ -1754,7 +1757,8 @@ H5D_chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked read failed")
/* Release the cache lock on the chunk. */
- if(chunk && H5D_chunk_unlock(io_info, &udata, FALSE, chunk, src_accessed_bytes) < 0)
+ if(chunk && H5D_chunk_unlock(io_info, &udata, FALSE,
+ chunk, src_accessed_bytes) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
} /* end if */
@@ -1859,7 +1863,8 @@ H5D_chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
entire_chunk = FALSE;
/* Lock the chunk into the cache */
- if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, entire_chunk)))
+ if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, entire_chunk,
+ FALSE)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
/* Set up the storage buffer information for this chunk */
@@ -1911,7 +1916,8 @@ H5D_chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked write failed")
/* Release the cache lock on the chunk. */
- if(chunk && H5D_chunk_unlock(io_info, &udata, TRUE, chunk, dst_accessed_bytes) < 0)
+ if(chunk && H5D_chunk_unlock(io_info, &udata, TRUE, chunk,
+ dst_accessed_bytes) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
/* Advance to next chunk in list */
@@ -2280,6 +2286,7 @@ H5D_chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *chunk_offset,
udata->nbytes = 0;
udata->filter_mask = 0;
udata->addr = HADDR_UNDEF;
+ udata->new_unfilt_chunk = FALSE;
/* Check for chunk in cache */
if(dset->shared->cache.chunk.nslots > 0) {
@@ -2377,7 +2384,8 @@ H5D_chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t *
udata.addr = ent->chunk_addr;
/* Should the chunk be filtered before writing it to disk? */
- if(dset->shared->dcpl_cache.pline.nused) {
+ if(dset->shared->dcpl_cache.pline.nused
+ && !(ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS)) {
size_t alloc = udata.nbytes; /* Bytes allocated for BUF */
size_t nbytes; /* Chunk size (in bytes) */
@@ -2417,10 +2425,27 @@ H5D_chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t *
/* Indicate that the chunk must go through 'insert' method */
must_insert = TRUE;
} /* end if */
- else if(!H5F_addr_defined(udata.addr))
+ else if(!H5F_addr_defined(udata.addr)) {
/* Indicate that the chunk must go through 'insert' method */
must_insert = TRUE;
+ /* This flag could be set for this chunk; just clear it and ignore
+ * it */
+ ent->edge_chunk_state &= ~H5D_RDCC_NEWLY_DISABLED_FILTERS;
+ } /* end else */
+ else if(ent->edge_chunk_state & H5D_RDCC_NEWLY_DISABLED_FILTERS) {
+ /* Chunk on disk is still filtered, must insert to allocate correct
+ * size */
+ must_insert = TRUE;
+
+ /* Set the disable filters field back to the standard disable
+ * filters setting, as it no longer needs to be inserted with every
+ * flush */
+ ent->edge_chunk_state &= ~H5D_RDCC_NEWLY_DISABLED_FILTERS;
+ } /* end else */
+
+ HDassert(!(ent->edge_chunk_state & H5D_RDCC_NEWLY_DISABLED_FILTERS));
+
/* Check if the chunk needs to be 'inserted' (could exist already and
* the 'insert' operation could resize it)
*/
@@ -2472,7 +2497,9 @@ H5D_chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t *
if(buf == ent->chunk)
buf = NULL;
if(ent->chunk != NULL)
- ent->chunk = (uint8_t *)H5D_chunk_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline));
+ ent->chunk = (uint8_t *)H5D_chunk_xfree(ent->chunk,
+ (ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS) ? NULL
+ : &(dset->shared->dcpl_cache.pline));
} /* end if */
done:
@@ -2488,7 +2515,9 @@ done:
*/
if(ret_value < 0 && point_of_no_return) {
if(ent->chunk)
- ent->chunk = (uint8_t *)H5D_chunk_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline));
+ ent->chunk = (uint8_t *)H5D_chunk_xfree(ent->chunk,
+ (ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS) ? NULL
+ : &(dset->shared->dcpl_cache.pline));
} /* end if */
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
@@ -2531,7 +2560,9 @@ H5D_chunk_cache_evict(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t *
else {
/* Don't flush, just free chunk */
if(ent->chunk != NULL)
- ent->chunk = (uint8_t *)H5D_chunk_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline));
+ ent->chunk = (uint8_t *)H5D_chunk_xfree(ent->chunk,
+ (ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS) ? NULL
+ : &(dset->shared->dcpl_cache.pline));
} /* end else */
/* Check for SWMR writes to the file */
@@ -2699,22 +2730,33 @@ done:
* Programmer: Robb Matzke
* Thursday, May 21, 1998
*
+ * Modifications: Neil Fortner
+ * Tuesday, December 15, 2009
+ * Added new_unfilt_chunk parameter - if true indicates that
+ * the chunk just became a partial edge chunk and the dataset
+ * is set to disable filters on partial chunks.
+ * Added prev_unfilt_chunk parameter - if true indicates that
+ * the chunk just had filters re-enabled after being disabled.
+ *
*-------------------------------------------------------------------------
*/
void *
H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
- hbool_t relax)
+ hbool_t relax, hbool_t prev_unfilt_chunk)
{
H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */
- const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info */
- const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
- const H5O_fill_t *fill = &(dset->shared->dcpl_cache.fill); /* Fill value info */
+ const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info - always equal to the pline passed to H5D_chunk_alloc */
+ const H5O_pline_t *old_pline = pline; /* Old pipeline, i.e. pipeline used to read the chunk */
+ const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
+ const H5O_fill_t *fill = &(dset->shared->dcpl_cache.fill); /* Fill value info */
H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */
hbool_t fb_info_init = FALSE; /* Whether the fill value buffer has been initialized */
- H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache*/
+ H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache*/
H5D_rdcc_ent_t *ent = NULL; /*cache entry */
haddr_t chunk_addr = HADDR_UNDEF; /* Address of chunk on disk */
size_t chunk_size; /*size of a chunk */
+ htri_t is_edge_chunk; /* Whether the chunk is an edge chunk */
+ hbool_t disable_filters = FALSE; /* Whether to disable filters (when adding to cache) */
void *chunk = NULL; /*the file chunk */
unsigned u; /*counters */
void *ret_value; /*return value */
@@ -2727,6 +2769,7 @@ H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
HDassert(udata);
HDassert(dset);
HDassert(TRUE == H5P_isa_class(io_info->dxpl_id, H5P_DATASET_XFER));
+ HDassert(!(udata->new_unfilt_chunk && prev_unfilt_chunk));
/* Get the chunk's size */
HDassert(layout->u.chunk.size > 0);
@@ -2751,97 +2794,211 @@ H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
* Already in the cache. Count a hit.
*/
rdcc->stats.nhits++;
- } /* end if */
- else if(relax) {
- /*
- * Not in the cache, but we're about to overwrite the whole thing
- * anyway, so just allocate a buffer for it but don't initialize that
- * buffer with the file contents. Count this as a hit instead of a
- * miss because we saved ourselves lots of work.
- */
- rdcc->stats.nhits++;
- /* Still save the chunk address so the cache stays consistent */
- chunk_addr = udata->addr;
+ /* Make adjustments if the edge chunk status changed recently */
+ if(pline->nused) {
+ /* If the chunk recently became an unfiltered partial edge chunk
+ * while in cache, we must make some changes to the entry */
+ if(udata->new_unfilt_chunk) {
+ /* If this flag is set then partial chunk filters must be
+ * disabled, and the chunk must not have previously been a
+ * partial chunk (with disabled filters) */
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+ HDassert(!(ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS));
+ HDassert(old_pline->nused);
+
+ /* Disable filters. Set pline to NULL instead of just the
+ * default pipeline to make a quick failure more likely if the
+ * code is changed in an inappropriate/incomplete way. */
+ pline = NULL;
+
+ /* Reallocate the chunk so H5D_chunk_xfree doesn't get confused
+ */
+ if(NULL == (chunk = H5D_chunk_alloc(chunk_size, pline)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
+ HDmemcpy(chunk, ent->chunk, chunk_size);
+ ent->chunk = (uint8_t *)H5D_chunk_xfree(ent->chunk, old_pline);
+ ent->chunk = (uint8_t *)chunk;
+ chunk = NULL;
+
+ /* Mark the chunk as having filters disabled as well as "newly
+ * disabled" so it is inserted on flush */
+ ent->edge_chunk_state |= H5D_RDCC_DISABLE_FILTERS;
+ ent->edge_chunk_state |= H5D_RDCC_NEWLY_DISABLED_FILTERS;
+ } /* end if */
+ else if(prev_unfilt_chunk) {
+ /* If this flag is set then partial chunk filters must be
+ * disabled, and the chunk must have previously been a partial
+ * chunk (with disabled filters) */
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+ HDassert((ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS));
+ HDassert(pline->nused);
+
+ /* Mark the old pipeline as having been disabled */
+ old_pline = NULL;
+
+ /* Reallocate the chunk so H5D_chunk_xfree doesn't get confused
+ */
+ if(NULL == (chunk = H5D_chunk_alloc(chunk_size, pline)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
+ HDmemcpy(chunk, ent->chunk, chunk_size);
- if(NULL == (chunk = H5D_chunk_alloc(chunk_size, pline)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
+ ent->chunk = (uint8_t *)H5D_chunk_xfree(ent->chunk, old_pline);
+ ent->chunk = (uint8_t *)chunk;
+ chunk = NULL;
- /* In the case that some dataset functions look through this data,
- * clear it to all 0s. */
- HDmemset(chunk, 0, chunk_size);
+ /* Mark the chunk as having filters enabled */
+ ent->edge_chunk_state &= ~(H5D_RDCC_DISABLE_FILTERS
+ | H5D_RDCC_NEWLY_DISABLED_FILTERS);
+ } /* end else */
+ } /* end if */
} /* end if */
else {
- /*
- * Not in the cache. Count this as a miss if it's in the file
- * or an init if it isn't.
- */
-
- /* Save the chunk address */
- chunk_addr = udata->addr;
-
- /* Check if the chunk exists on disk */
- if(H5F_addr_defined(chunk_addr)) {
- size_t chunk_alloc = 0; /*allocated chunk size */
-
- /* Chunk size on disk isn't [likely] the same size as the final chunk
- * size in memory, so allocate memory big enough. */
- H5_ASSIGN_OVERFLOW(chunk_alloc, udata->nbytes, uint32_t, size_t);
- if(NULL == (chunk = H5D_chunk_alloc(chunk_alloc, pline)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
- if(H5F_block_read(dset->oloc.file, H5FD_MEM_DRAW, chunk_addr, chunk_alloc, io_info->dxpl_id, chunk) < 0)
- HGOTO_ERROR(H5E_IO, H5E_READERROR, NULL, "unable to read raw data chunk")
-
- if(pline->nused) {
- if(H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &(udata->filter_mask), io_info->dxpl_cache->err_detect,
- io_info->dxpl_cache->filter_cb, &chunk_alloc, &chunk_alloc, &chunk) < 0)
- HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, NULL, "data pipeline read failed")
- H5_ASSIGN_OVERFLOW(udata->nbytes, chunk_alloc, size_t, uint32_t);
+ /* Check if we should disable filters on this chunk */
+ if(pline->nused) {
+ if(udata->new_unfilt_chunk) {
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+
+ /* Disable the filters for writing */
+ disable_filters = TRUE;
+ pline = NULL;
} /* end if */
+ else if(prev_unfilt_chunk) {
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
- /* Increment # of cache misses */
- rdcc->stats.nmisses++;
+ /* Mark the filters as having been previously disabled (for the
+ * chunk as currently on disk) - disable the filters for reading
+ */
+ old_pline = NULL;
+ } /* end if */
+ else if(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) {
+ /* Check if this is an edge chunk */
+ if((is_edge_chunk = H5D_chunk_is_partial_edge_chunk(
+ io_info->store->chunk.offset, io_info->dset, 0, NULL,
+ layout->u.chunk.dim)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to determine if chunk is edge chunk")
+
+ if(is_edge_chunk) {
+ /* Disable the filters for both writing and reading */
+ disable_filters = TRUE;
+ old_pline = NULL;
+ pline = NULL;
+ } /* end if */
+ } /* end if */
} /* end if */
- else {
- H5D_fill_value_t fill_status;
+ else
+ HDassert(!udata->new_unfilt_chunk && !prev_unfilt_chunk);
+
+ if(relax) {
+ /*
+ * Not in the cache, but we're about to overwrite the whole thing
+ * anyway, so just allocate a buffer for it but don't initialize that
+ * buffer with the file contents. Count this as a hit instead of a
+ * miss because we saved ourselves lots of work.
+ */
+ rdcc->stats.nhits++;
- /* Sanity check */
- HDassert(fill->alloc_time != H5D_ALLOC_TIME_EARLY);
+ /* Still save the chunk address so the cache stays consistent */
+ chunk_addr = udata->addr;
- /* Chunk size on disk isn't [likely] the same size as the final chunk
- * size in memory, so allocate memory big enough. */
if(NULL == (chunk = H5D_chunk_alloc(chunk_size, pline)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
- if(H5P_is_fill_value_defined(fill, &fill_status) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't tell if fill value defined")
+ /* In the case that some dataset functions look through this data,
+ * clear it to all 0s. */
+ HDmemset(chunk, 0, chunk_size);
+ } /* end if */
+ else {
+ /*
+ * Not in the cache. Count this as a miss if it's in the file
+ * or an init if it isn't.
+ */
- if(fill->fill_time == H5D_FILL_TIME_ALLOC ||
- (fill->fill_time == H5D_FILL_TIME_IFSET && fill_status == H5D_FILL_VALUE_USER_DEFINED)) {
- /*
- * The chunk doesn't exist in the file. Replicate the fill
- * value throughout the chunk, if the fill value is defined.
- */
+ /* Save the chunk address */
+ chunk_addr = udata->addr;
+
+ /* Check if the chunk exists on disk */
+ if(H5F_addr_defined(chunk_addr)) {
+ size_t chunk_alloc = 0; /*allocated chunk size */
+
+ /* Chunk size on disk isn't [likely] the same size as the final chunk
+ * size in memory, so allocate memory big enough. */
+ H5_ASSIGN_OVERFLOW(chunk_alloc, udata->nbytes, uint32_t, size_t);
+ if(NULL == (chunk = H5D_chunk_alloc(chunk_alloc,
+ udata->new_unfilt_chunk ? old_pline : pline)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
+ if(H5F_block_read(dset->oloc.file, H5FD_MEM_DRAW, chunk_addr, chunk_alloc, io_info->dxpl_id, chunk) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, NULL, "unable to read raw data chunk")
+
+ if(old_pline && old_pline->nused) {
+ if(H5Z_pipeline(old_pline, H5Z_FLAG_REVERSE,
+ &(udata->filter_mask),
+ io_info->dxpl_cache->err_detect,
+ io_info->dxpl_cache->filter_cb,
+ &chunk_alloc, &chunk_alloc, &chunk) < 0)
+ HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, NULL, "data pipeline read failed")
+ H5_ASSIGN_OVERFLOW(udata->nbytes, chunk_alloc, size_t, uint32_t);
+
+ /* Reallocate chunk if necessary */
+ if(udata->new_unfilt_chunk) {
+ void *tmp_chunk = chunk;
+
+ if(NULL == (chunk = H5D_chunk_alloc(chunk_alloc,
+ pline))) {
+ (void)H5D_chunk_xfree(tmp_chunk, old_pline);
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
+ } /* end if */
+ HDmemcpy(chunk, tmp_chunk, chunk_size);
+ (void)H5D_chunk_xfree(tmp_chunk, old_pline);
+ } /* end if */
+ } /* end if */
- /* Initialize the fill value buffer */
- /* (use the compact dataset storage buffer as the fill value buffer) */
- if(H5D_fill_init(&fb_info, chunk, NULL, NULL, NULL, NULL,
- &dset->shared->dcpl_cache.fill, dset->shared->type,
- dset->shared->type_id, (size_t)0, chunk_size, io_info->dxpl_id) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "can't initialize fill buffer info")
- fb_info_init = TRUE;
-
- /* Check for VL datatype & non-default fill value */
- if(fb_info.has_vlen_fill_type)
- /* Fill the buffer with VL datatype fill values */
- if(H5D_fill_refill_vl(&fb_info, fb_info.elmts_per_buf, io_info->dxpl_id) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, NULL, "can't refill fill value buffer")
+ /* Increment # of cache misses */
+ rdcc->stats.nmisses++;
} /* end if */
- else
- HDmemset(chunk, 0, chunk_size);
+ else {
+ H5D_fill_value_t fill_status;
+
+ /* Chunk size on disk isn't [likely] the same size as the final chunk
+ * size in memory, so allocate memory big enough. */
+ if(NULL == (chunk = H5D_chunk_alloc(chunk_size, pline)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
+
+ if(H5P_is_fill_value_defined(fill, &fill_status) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't tell if fill value defined")
+
+ if(fill->fill_time == H5D_FILL_TIME_ALLOC ||
+ (fill->fill_time == H5D_FILL_TIME_IFSET && fill_status == H5D_FILL_VALUE_USER_DEFINED)) {
+ /*
+ * The chunk doesn't exist in the file. Replicate the fill
+ * value throughout the chunk, if the fill value is defined.
+ */
+
+ /* Initialize the fill value buffer */
+ /* (use the compact dataset storage buffer as the fill value buffer) */
+ if(H5D_fill_init(&fb_info, chunk, NULL, NULL, NULL, NULL,
+ &dset->shared->dcpl_cache.fill, dset->shared->type,
+ dset->shared->type_id, (size_t)0, chunk_size, io_info->dxpl_id) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "can't initialize fill buffer info")
+ fb_info_init = TRUE;
+
+ /* Check for VL datatype & non-default fill value */
+ if(fb_info.has_vlen_fill_type)
+ /* Fill the buffer with VL datatype fill values */
+ if(H5D_fill_refill_vl(&fb_info, fb_info.elmts_per_buf, io_info->dxpl_id) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, NULL, "can't refill fill value buffer")
+ } /* end if */
+ else
+ HDmemset(chunk, 0, chunk_size);
- /* Increment # of creations */
- rdcc->stats.ninits++;
+ /* Increment # of creations */
+ rdcc->stats.ninits++;
+ } /* end else */
} /* end else */
} /* end else */
HDassert(chunk_size > 0);
@@ -2888,6 +3045,9 @@ H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate raw data chunk entry")
ent->locked = 0;
+ ent->edge_chunk_state = disable_filters ? H5D_RDCC_DISABLE_FILTERS : 0;
+ if(udata->new_unfilt_chunk)
+ ent->edge_chunk_state |= H5D_RDCC_NEWLY_DISABLED_FILTERS;
ent->dirty = FALSE;
ent->deleted = FALSE;
ent->chunk_addr = chunk_addr;
@@ -3008,11 +3168,33 @@ H5D_chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata,
* Note: we have to copy the layout and filter messages so we
* don't discard the `const' qualifier.
*/
+ htri_t is_unfiltered_edge_chunk = FALSE; /* Whether the chunk is an unfiltered edge chunk */
+
+ /* Check if we should disable filters on this chunk */
+ if(udata->new_unfilt_chunk) {
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+
+ is_unfiltered_edge_chunk = TRUE;
+ } /* end if */
+ else if(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) {
+ /* Check if the chunk is an edge chunk, and disable filters if so */
+ if((is_unfiltered_edge_chunk = H5D_chunk_is_partial_edge_chunk(
+ io_info->store->chunk.offset, io_info->dset, 0, NULL,
+ layout->u.chunk.dim)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to determine if chunk is edge chunk")
+ } /* end if */
+
if(dirty) {
H5D_rdcc_ent_t fake_ent; /* "fake" chunk cache entry */
HDmemset(&fake_ent, 0, sizeof(fake_ent));
fake_ent.dirty = TRUE;
+ if(is_unfiltered_edge_chunk)
+ fake_ent.edge_chunk_state = H5D_RDCC_DISABLE_FILTERS;
+ if(udata->new_unfilt_chunk)
+ fake_ent.edge_chunk_state |= H5D_RDCC_NEWLY_DISABLED_FILTERS;
HDmemcpy(fake_ent.offset, io_info->store->chunk.offset, layout->u.chunk.ndims * sizeof(fake_ent.offset[0]));
HDassert(layout->u.chunk.size > 0);
fake_ent.chunk_addr = udata->addr;
@@ -3023,7 +3205,8 @@ H5D_chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata,
} /* end if */
else {
if(chunk)
- chunk = H5D_chunk_xfree(chunk, &(io_info->dset->shared->dcpl_cache.pline));
+ chunk = H5D_chunk_xfree(chunk, is_unfiltered_edge_chunk ? NULL
+ : &(io_info->dset->shared->dcpl_cache.pline));
} /* end else */
} /* end if */
else {
@@ -3177,13 +3360,17 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
hsize_t min_unalloc[H5O_LAYOUT_NDIMS]; /* First chunk in each dimension that is unallocated */
hsize_t max_unalloc[H5O_LAYOUT_NDIMS]; /* Last chunk in each dimension that is unallocated */
hsize_t chunk_offset[H5O_LAYOUT_NDIMS]; /* Offset of current chunk */
+ size_t chunk_size; /* Size of current chunk in bytes, possibly filtered */
size_t orig_chunk_size; /* Original size of chunk in bytes */
unsigned filter_mask = 0; /* Filter mask for chunks that have them */
const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info */
+ const H5O_pline_t def_pline = H5O_CRT_PIPELINE_DEF; /* Default pipeline */
const H5O_fill_t *fill = &(dset->shared->dcpl_cache.fill); /* Fill value info */
H5D_fill_value_t fill_status; /* The fill value status */
hbool_t should_fill = FALSE; /* Whether fill values should be written */
+ void *unfilt_fill_buf = NULL; /* Unfiltered fill value buffer */
+ void **fill_buf = NULL; /* Pointer to the fill buffer to use for a chunk */
H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */
#ifdef H5_HAVE_PARALLEL
@@ -3200,6 +3387,10 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
int op_dim; /* Current operating dimension */
H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */
hbool_t fb_info_init = FALSE; /* Whether the fill value buffer has been initialized */
+ hbool_t has_unfilt_edge_chunks = FALSE; /* Whether there are partial edge chunks with disabled filters */
+ hbool_t unfilt_edge_chunk_dim[H5O_LAYOUT_NDIMS]; /* Whether there are unfiltered edge chunks at the edge of each dimension */
+ hsize_t edge_chunk_offset[H5O_LAYOUT_NDIMS]; /* Offset of the unfiltered edge chunks at the edge of each dimension */
+ unsigned nunfilt_edge_chunk_dims = 0; /* Number of dimensions for which the current chunk offset is on an unfiltered edge */
hid_t data_dxpl_id; /* DXPL ID to use for raw data I/O operations */
herr_t ret_value = SUCCEED; /* Return value */
@@ -3262,6 +3453,31 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
if(H5D_get_dxpl_cache(data_dxpl_id, &dxpl_cache) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
+ /* Calculate the minimum and maximum chunk offsets in each dimension, and
+ * determine if there are any unfiltered partial edge chunks. Note that we
+ * assume here that all elements of space_dim are > 0. This is checked at
+ * the top of this function. */
+ for(op_dim=0; op_dim<space_ndims; op_dim++) {
+ min_unalloc[op_dim] = ((old_dim[op_dim] + chunk_dim[op_dim] - 1)
+ / chunk_dim[op_dim]) * chunk_dim[op_dim];
+ max_unalloc[op_dim] = ((space_dim[op_dim] - 1) / chunk_dim[op_dim])
+ * chunk_dim[op_dim];
+
+ /* Calculate if there are unfiltered edge chunks at the edge of this
+ * dimension. Note the edge_chunk_offset is uninitialized for
+ * dimensions where unfilt_edge_chunk_dim is FALSE. */
+ if((layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS)
+ && pline->nused > 0
+ && space_dim[op_dim] % chunk_dim[op_dim] != 0) {
+ has_unfilt_edge_chunks = TRUE;
+ unfilt_edge_chunk_dim[op_dim] = TRUE;
+ edge_chunk_offset[op_dim] = max_unalloc[op_dim];
+ } /* end if */
+ else
+ unfilt_edge_chunk_dim[op_dim] = FALSE;
+ } /* end for */
+
/* Get original chunk size */
H5_ASSIGN_OVERFLOW(orig_chunk_size, layout->u.chunk.size, uint32_t, size_t);
@@ -3291,12 +3507,25 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize fill buffer info")
fb_info_init = TRUE;
- /* Check if there are filters which need to be applied to the chunk */
- /* (only do this in advance when the chunk info can be re-used (i.e.
- * it doesn't contain any non-default VL datatype fill values)
- */
- if(!fb_info.has_vlen_fill_type && pline->nused > 0) {
- size_t buf_size = orig_chunk_size;
+ /* Initialize the fill_buf pointer to the buffer in fb_info. If edge
+ * chunk filters are disabled, we will switch the buffer as appropriate
+ * for each chunk. */
+ fill_buf = &fb_info.fill_buf;
+
+ /* Check if there are filters which need to be applied to the chunk */
+ /* (only do this in advance when the chunk info can be re-used (i.e.
+ * it doesn't contain any non-default VL datatype fill values)
+ */
+ if(!fb_info.has_vlen_fill_type && pline->nused > 0) {
+ size_t buf_size = orig_chunk_size;
+ /* If the dataset has disabled partial chunk filters, create a copy
+ * of the unfiltered fill_buf to use for partial chunks */
+ if(has_unfilt_edge_chunks) {
+ if(NULL == (unfilt_fill_buf = H5D_chunk_alloc(orig_chunk_size,
+ &def_pline)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for raw data chunk")
+ HDmemcpy(unfilt_fill_buf, fb_info.fill_buf, orig_chunk_size);
+ } /* end if */
/* Push the chunk through the filters */
if(H5Z_pipeline(pline, 0, &filter_mask, dxpl_cache->err_detect, dxpl_cache->filter_cb, &orig_chunk_size, &buf_size, &fb_info.fill_buf) < 0)
@@ -3316,16 +3545,6 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
idx_info.layout = &dset->shared->layout.u.chunk;
idx_info.storage = &dset->shared->layout.storage.u.chunk;
- /* Calculate the minimum and maximum chunk offsets in each dimension. Note
- * that we assume here that all elements of space_dim are > 0. This is
- * checked at the top of this function */
- for(op_dim=0; op_dim<space_ndims; op_dim++) {
- min_unalloc[op_dim] = ((old_dim[op_dim] + chunk_dim[op_dim] - 1)
- / chunk_dim[op_dim]) * chunk_dim[op_dim];
- max_unalloc[op_dim] = ((space_dim[op_dim] - 1) / chunk_dim[op_dim])
- * chunk_dim[op_dim];
- } /* end for */
-
/* Loop over all chunks */
/* The algorithm is:
* For each dimension:
@@ -3344,6 +3563,7 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
* certain dimension, max_unalloc is updated in order to avoid allocating
* those chunks again.
*/
+ chunk_size = orig_chunk_size;
for(op_dim=0; op_dim<space_ndims; op_dim++) {
H5D_chunk_ud_t udata; /* User data for querying chunk info */
int i; /* Local index variable */
@@ -3357,12 +3577,33 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
* sizeof(chunk_offset[0])));
chunk_offset[op_dim] = min_unalloc[op_dim];
+ if(has_unfilt_edge_chunks) {
+ /* Initialize nunfilt_edge_chunk_dims */
+ nunfilt_edge_chunk_dims = 0;
+ for(i=0; i<space_ndims; i++)
+ if(unfilt_edge_chunk_dim[i] && chunk_offset[i]
+ == edge_chunk_offset[i])
+ nunfilt_edge_chunk_dims++;
+
+ /* Initialize chunk_size and fill_buf */
+ if(should_fill && !fb_info.has_vlen_fill_type) {
+ HDassert(fb_info_init);
+ HDassert(unfilt_fill_buf);
+ if(nunfilt_edge_chunk_dims) {
+ fill_buf = &unfilt_fill_buf;
+ chunk_size = layout->u.chunk.size;
+ } /* end if */
+ else {
+ fill_buf = &fb_info.fill_buf;
+ chunk_size = orig_chunk_size;
+ } /* end else */
+ } /* end if */
+ } /* end if */
+
carry = FALSE;
} /* end if */
while(!carry) {
- size_t chunk_size = orig_chunk_size; /* Size of chunk in bytes, possibly filtered */
-
#ifndef NDEBUG
/* None of the chunks should be allocated */
{
@@ -3398,6 +3639,7 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
if(fb_info_init && fb_info.has_vlen_fill_type) {
/* Sanity check */
HDassert(should_fill);
+ HDassert(!unfilt_fill_buf);
/* Check to make sure the buffer is large enough. It is
* possible (though ill-advised) for the filter to shrink the
@@ -3414,7 +3656,7 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "can't refill fill value buffer")
/* Check if there are filters which need to be applied to the chunk */
- if(pline->nused > 0) {
+ if(!nunfilt_edge_chunk_dims) {
size_t nbytes = orig_chunk_size;
/* Push the chunk through the filters */
@@ -3430,6 +3672,10 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
/* Keep the number of bytes the chunk turned in to */
chunk_size = nbytes;
} /* end if */
+ else
+ chunk_size = layout->u.chunk.size;
+
+ HDassert(*fill_buf == fb_info.fill_buf);
} /* end if */
/* Initialize the chunk information */
@@ -3457,7 +3703,7 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
/* Write the chunks out from only one process */
/* !! Use the internal "independent" DXPL!! -QAK */
if(H5_PAR_META_WRITE == mpi_rank)
- if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size, data_dxpl_id, fb_info.fill_buf) < 0)
+ if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size, data_dxpl_id, *fill_buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
/* Indicate that blocks are being written */
@@ -3465,23 +3711,54 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
} /* end if */
else {
#endif /* H5_HAVE_PARALLEL */
- if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size, data_dxpl_id, fb_info.fill_buf) < 0)
+ if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size, data_dxpl_id, *fill_buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
#ifdef H5_HAVE_PARALLEL
} /* end else */
#endif /* H5_HAVE_PARALLEL */
} /* end if */
- /* Increment indices */
+ /* Increment indices and adjust the edge chunk state */
carry = TRUE;
- for(i = (int)(space_ndims - 1); i >= 0; --i) {
+ for(i = (space_ndims - 1); i >= 0; --i) {
chunk_offset[i] += chunk_dim[i];
- if(chunk_offset[i] > max_unalloc[i])
+ if(chunk_offset[i] > max_unalloc[i]) {
if(i == op_dim)
chunk_offset[i] = min_unalloc[i];
else
chunk_offset[i] = 0;
+
+ /* Check if we just left the edge in this dimension */
+ if(unfilt_edge_chunk_dim[i]
+ && edge_chunk_offset[i] == max_unalloc[i]
+ && chunk_offset[i] < edge_chunk_offset[i]) {
+ nunfilt_edge_chunk_dims--;
+ if(should_fill && nunfilt_edge_chunk_dims == 0
+ && !fb_info.has_vlen_fill_type) {
+ HDassert(!H5D_chunk_is_partial_edge_chunk(
+ chunk_offset, NULL, (unsigned)space_ndims,
+ space_dim, chunk_dim));
+ fill_buf = &fb_info.fill_buf;
+ chunk_size = orig_chunk_size;
+ } /* end if */
+ } /* end if */
+ } /* end if */
else {
+ /* Check if we just entered the edge in this dimension */
+ if(unfilt_edge_chunk_dim[i] && chunk_offset[i]
+ == edge_chunk_offset[i]) {
+ HDassert(edge_chunk_offset[i] == max_unalloc[i]);
+ nunfilt_edge_chunk_dims++;
+ if(should_fill && nunfilt_edge_chunk_dims == 1
+ && !fb_info.has_vlen_fill_type) {
+ HDassert(H5D_chunk_is_partial_edge_chunk(
+ chunk_offset, NULL, (unsigned)space_ndims,
+ space_dim, chunk_dim));
+ fill_buf = &unfilt_fill_buf;
+ chunk_size = layout->u.chunk.size;
+ } /* end if */
+ } /* end if */
+
carry = FALSE;
break;
} /* end else */
@@ -3518,11 +3795,213 @@ done:
if(fb_info_init && H5D_fill_term(&fb_info) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release fill buffer info")
+ /* Free the unfiltered fill value buffer */
+ unfilt_fill_buf = H5D_chunk_xfree(unfilt_fill_buf, &def_pline);
+
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
} /* end H5D_chunk_allocate() */
/*-------------------------------------------------------------------------
+ * Function: H5D_chunk_update_old_edge_chunks
+ *
+ * Purpose: Update all chunks which were previously partial edge
+ * chunks and are now complete. Determines exactly which
+ * chunks need to be updated and locks each into cache using
+ * the 'prev_unfilt_chunk' flag, then unlocks it, causing
+ * filters to be applied as necessary.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * April 14, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5D_chunk_update_old_edge_chunks(H5D_t *dset, hid_t dxpl_id, hsize_t old_dim[])
+{
+ hsize_t old_edge_chunk_off[H5O_LAYOUT_NDIMS]; /* Offset of first previously incomplete chunk in each dimension */
+ hsize_t max_edge_chunk_off[H5O_LAYOUT_NDIMS]; /* largest offset of chunks that might need to be modified in each dimension */
+ hbool_t new_full_dim[H5O_LAYOUT_NDIMS]; /* Whether the plane of chunks in this dimension needs to be modified */
+ const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
+ const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info */
+ hsize_t chunk_offset[H5O_LAYOUT_NDIMS]; /* Offset of current chunk */
+ const uint32_t *chunk_dim = layout->u.chunk.dim; /* Convenience pointer to chunk dimensions */
+ int space_ndims; /* Dataset's space rank */
+ hsize_t space_dim[H5O_LAYOUT_NDIMS]; /* Dataset's dataspace dimensions */
+ int op_dim; /* Current operating dimension */
+ H5D_io_info_t chk_io_info; /* Chunked I/O info object */
+ H5D_chunk_ud_t chk_udata; /* User data for locking chunk */
+ H5D_storage_t chk_store; /* Chunk storage information */
+ H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
+ H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */
+ void *chunk; /* The file chunk */
+ hbool_t carry; /* Flag to indicate that chunk increment carries to higher dimension (sorta) */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5D_chunk_update_old_edge_chunks, FAIL)
+
+ /* Check args */
+ HDassert(dset && H5D_CHUNKED == layout->type);
+ HDassert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
+ HDassert((H5D_CHUNK_IDX_EARRAY == layout->storage.u.chunk.idx_type &&
+ H5D_COPS_EARRAY == layout->storage.u.chunk.ops) ||
+ (H5D_CHUNK_IDX_FARRAY == layout->storage.u.chunk.idx_type &&
+ H5D_COPS_FARRAY == layout->storage.u.chunk.ops) ||
+ (H5D_CHUNK_IDX_BTREE == layout->storage.u.chunk.idx_type &&
+ H5D_COPS_BTREE == layout->storage.u.chunk.ops));
+ HDassert(TRUE == H5P_isa_class(dxpl_id, H5P_DATASET_XFER));
+ HDassert(pline->nused > 0);
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+
+ /* Retrieve the dataset dimensions */
+ if((space_ndims = H5S_get_simple_extent_dims(dset->shared->space, space_dim, NULL)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to get simple dataspace info")
+ space_dim[space_ndims] = layout->u.chunk.dim[space_ndims];
+
+ /* The last dimension in chunk_offset is always 0 */
+ chunk_offset[space_ndims] = (hsize_t)0;
+
+ /* Check if any current dimensions are smaller than the chunk size, or if
+ * any old dimensions are 0. If so we do not have to do anything. */
+ for(op_dim=0; op_dim<space_ndims; op_dim++)
+ if((space_dim[op_dim] < chunk_dim[op_dim]) || old_dim[op_dim] == 0) {
+ /* Reset any cached chunk info for this dataset */
+ H5D_chunk_cinfo_cache_reset(&dset->shared->cache.chunk.last);
+ HGOTO_DONE(SUCCEED)
+ } /* end if */
+
+ /*
+ * Initialize structures needed to lock chunks into cache
+ */
+ /* Fill the DXPL cache values for later use */
+ if(H5D_get_dxpl_cache(dxpl_id, &dxpl_cache) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
+
+ /* Set up chunked I/O info object, for operations on chunks (in callback).
+ * Note that we only need to set chunk_offset once, as the array's address
+ * will never change. */
+ chk_store.chunk.offset = chunk_offset;
+ H5D_BUILD_IO_INFO_RD(&chk_io_info, dset, dxpl_cache, dxpl_id, &chk_store, NULL);
+
+ /*
+ * Determine the edges of the dataset which need to be modified
+ */
+ for(op_dim=0; op_dim<space_ndims; op_dim++) {
+ /* Start off with this dimension marked as not needing to be modified */
+ new_full_dim[op_dim] = FALSE;
+
+ /* Calculate offset of first previously incomplete chunk in this
+ * dimension */
+ old_edge_chunk_off[op_dim] = (old_dim[op_dim] / chunk_dim[op_dim])
+ * chunk_dim[op_dim];
+
+ /* Calculate the largest offset of chunks that might need to be
+ * modified in this dimension */
+ max_edge_chunk_off[op_dim] = chunk_dim[op_dim]
+ * MIN((old_dim[op_dim] - 1) / chunk_dim[op_dim],
+ MAX((space_dim[op_dim] / chunk_dim[op_dim]), 1) - 1);
+
+ /* Check for old_dim aligned with chunk boundary in this dimension, if
+ * so we do not need to modify chunks along the edge in this dimension
+ */
+ if(old_dim[op_dim] % chunk_dim[op_dim] == 0)
+ continue;
+
+ /* Check if the dataspace expanded enough to cause the old edge chunks
+ * in this dimension to become full */
+ if(space_dim[op_dim] >= old_edge_chunk_off[op_dim] + chunk_dim[op_dim])
+ new_full_dim[op_dim] = TRUE;
+ } /* end for */
+
+ /* Main loop: fix old edge chunks */
+ for(op_dim=0; op_dim<space_ndims; op_dim++) {
+ int i; /* Local index variable */
+
+ /* Check if allocation along this dimension is really necessary */
+ if(!new_full_dim[op_dim])
+ continue;
+ else {
+ HDassert((hsize_t) max_edge_chunk_off[op_dim]
+ == old_edge_chunk_off[op_dim]);
+
+ /* Reset the chunk offset indices */
+ HDmemset(chunk_offset, 0, ((unsigned)space_ndims
+ * sizeof(chunk_offset[0])));
+ chunk_offset[op_dim] = old_edge_chunk_off[op_dim];
+
+ carry = FALSE;
+ } /* end if */
+
+ while(!carry) {
+ /* Make sure the chunk is really a former edge chunk */
+ HDassert(H5D_chunk_is_partial_edge_chunk(chunk_offset, NULL,
+ (unsigned)space_ndims, old_dim, chunk_dim)
+ && !H5D_chunk_is_partial_edge_chunk(chunk_offset, NULL,
+ (unsigned)space_ndims, space_dim, chunk_dim));
+
+ /* Calculate the index of this chunk */
+ if(H5V_chunk_index((unsigned)space_ndims, chunk_offset, chunk_dim,
+ layout->u.chunk.down_chunks,
+ &(chk_io_info.store->chunk.index)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't get chunk index")
+
+ /* Lookup the chunk */
+ if(H5D_chunk_lookup(dset, dxpl_id, chunk_offset,
+ chk_io_info.store->chunk.index, &chk_udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+
+ /* If this chunk does not exist in cache or on disk, no need to do
+ * anything */
+ if(H5F_addr_defined(chk_udata.addr)
+ || (UINT_MAX != chk_udata.idx_hint)) {
+ /* Lock the chunk into cache. H5D_chunk_lock will take care of
+ * updating the chunk to no longer be an edge chunk. */
+ if(NULL == (chunk = (void *)H5D_chunk_lock(&chk_io_info,
+ &chk_udata, FALSE, TRUE)))
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to lock raw data chunk")
+
+ /* Unlock the chunk */
+ if(H5D_chunk_unlock(&chk_io_info, &chk_udata, TRUE,
+ chunk, (uint32_t)0) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to unlock raw data chunk")
+ } /* end if */
+
+ /* Increment indices */
+ carry = TRUE;
+ for(i = (space_ndims - 1); i >= 0; --i) {
+ if(i != op_dim) {
+ chunk_offset[i] += chunk_dim[i];
+ if(chunk_offset[i] > (hsize_t) max_edge_chunk_off[i])
+ chunk_offset[i] = 0;
+ else {
+ carry = FALSE;
+ break;
+ } /* end else */
+ } /* end if */
+ } /* end for */
+ } /* end while(!carry) */
+
+ /* Adjust max_edge_chunk_off so we don't modify the same chunk twice.
+ * Also check if this dimension started from 0 (and hence modified all
+ * of the old edge chunks). */
+ if(old_edge_chunk_off[op_dim] == 0)
+ break;
+ else
+ --max_edge_chunk_off[op_dim];
+ } /* end for(op_dim=0...) */
+
+ /* Reset any cached chunk info for this dataset */
+ H5D_chunk_cinfo_cache_reset(&dset->shared->cache.chunk.last);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D_chunk_update_old_edge_chunks() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5D_chunk_prune_fill
*
* Purpose: Write the fill value to the parts of the chunk that are no
@@ -3536,7 +4015,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5D_chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
+H5D_chunk_prune_fill(H5D_chunk_it_ud1_t *udata, hbool_t new_unfilt_chunk)
{
const H5D_io_info_t *io_info = udata->io_info; /* Local pointer to I/O info */
H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */
@@ -3558,6 +4037,7 @@ H5D_chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
if(H5D_chunk_lookup(dset, io_info->dxpl_id, chunk_offset,
io_info->store->chunk.index, &chk_udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+ chk_udata.new_unfilt_chunk = new_unfilt_chunk;
/* If this chunk does not exist in cache or on disk, no need to do anything
*/
@@ -3591,7 +4071,8 @@ H5D_chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSELECT, FAIL, "unable to select hyperslab")
/* Lock the chunk into the cache, to get a pointer to the chunk buffer */
- if(NULL == (chunk = (void *)H5D_chunk_lock(io_info, &chk_udata, FALSE)))
+ if(NULL == (chunk = (void *)H5D_chunk_lock(io_info, &chk_udata, FALSE,
+ FALSE)))
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to lock raw data chunk")
@@ -3632,7 +4113,8 @@ H5D_chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
bytes_accessed = (uint32_t)sel_nelmts * layout->u.chunk.dim[rank];
/* Release lock on chunk */
- if(H5D_chunk_unlock(io_info, &chk_udata, TRUE, chunk, bytes_accessed) < 0)
+ if(H5D_chunk_unlock(io_info, &chk_udata, TRUE, chunk,
+ bytes_accessed) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to unlock raw data chunk")
done:
@@ -3756,6 +4238,8 @@ H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
hbool_t fill_dim[H5O_LAYOUT_NDIMS]; /* Whether the plane of edge chunks in this dimension needs to be filled */
hbool_t dims_outside_fill[H5O_LAYOUT_NDIMS]; /* Dimensions in chunk offset outside fill dimensions */
int ndims_outside_fill = 0; /* Number of dimensions in chunk offset outside fill dimensions */
+ hsize_t min_partial_chunk_off[H5O_LAYOUT_NDIMS]; /* Offset of first partial (or empty) chunk in each dimension */
+ hbool_t new_unfilt_dim[H5O_LAYOUT_NDIMS]; /* Whether the plane of edge chunks in this dimension are newly unfiltered */
hbool_t has_fill = FALSE; /* Whether there are chunks that must be filled */
H5D_chk_idx_info_t idx_info; /* Chunked index info */
H5D_io_info_t chk_io_info; /* Chunked I/O info object */
@@ -3778,6 +4262,8 @@ H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
hsize_t chunk_offset[H5O_LAYOUT_NDIMS]; /* Offset of current chunk */
hsize_t hyper_start[H5O_LAYOUT_NDIMS]; /* Starting location of hyperslab */
uint32_t elmts_per_chunk; /* Elements in chunk */
+ hbool_t disable_edge_filters = FALSE; /* Whether to disable filters on partial edge chunks */
+ hbool_t new_unfilt_chunk = FALSE; /* Whether the chunk is newly unfiltered */
hbool_t carry; /* Flag to indicate that chunk increment carries to higher dimension (sorta) */
int i; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
@@ -3867,6 +4353,11 @@ H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
idx_udata.layout = &layout->u.chunk;
idx_udata.storage = &layout->storage.u.chunk;
+ /* Determine if partial edge chunk filters are disabled */
+ disable_edge_filters = (layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS)
+ && (idx_info.pline->nused > 0);
+
/*
* Determine the chunks which need to be filled or removed
*/
@@ -3898,12 +4389,31 @@ H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
== max_fill_chunk_off[op_dim]) {
fill_dim[op_dim] = TRUE;
has_fill = TRUE;
+
+ /* If necessary, check if chunks in this dimension that need to
+ * be filled are new partial edge chunks */
+ if(disable_edge_filters && old_dim[op_dim]
+ >= (min_mod_chunk_off[op_dim] + chunk_dim[op_dim]))
+ new_unfilt_dim[op_dim] = TRUE;
+ else
+ new_unfilt_dim[op_dim] = FALSE;
} /* end if */
- else
+ else {
fill_dim[op_dim] = FALSE;
+ new_unfilt_dim[op_dim] = FALSE;
+ } /* end else */
} /* end if */
- else
+ else {
fill_dim[op_dim] = FALSE;
+ new_unfilt_dim[op_dim] = FALSE;
+ } /* end else */
+
+ /* If necessary, calculate the smallest offset of non-previously full
+ * chunks in this dimension, so we know these chunks were previously
+ * unfiltered */
+ if(disable_edge_filters)
+ min_partial_chunk_off[op_dim] = chunk_dim[op_dim] * (old_dim[op_dim]
+ / chunk_dim[op_dim]);
} /* end for */
/* Check the cache for any entries that are outside the bounds. Mark these
@@ -3957,8 +4467,29 @@ H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
HDassert(fill_dim[op_dim]);
HDassert(chunk_offset[op_dim] == min_mod_chunk_off[op_dim]);
+ /* Make sure this is an edge chunk */
+ HDassert(H5D_chunk_is_partial_edge_chunk(chunk_offset, NULL,
+ (unsigned)space_ndims, space_dim, layout->u.chunk.dim));
+
+ /* Determine if the chunk just became an unfiltered chunk */
+ if(new_unfilt_dim[op_dim]) {
+ new_unfilt_chunk = TRUE;
+ for(i=0; i<space_ndims; i++)
+ if(chunk_offset[i] == min_partial_chunk_off[i]) {
+ new_unfilt_chunk = FALSE;
+ break;
+ } /* end if */
+ } /* end if */
+
+ /* Make sure that, if we think this is a new unfiltered chunk,
+ * it was previously not an edge chunk */
+ HDassert(!new_unfilt_dim[op_dim] || (!new_unfilt_chunk !=
+ !H5D_chunk_is_partial_edge_chunk(chunk_offset, NULL,
+ (unsigned)space_ndims, old_dim, layout->u.chunk.dim)));
+ HDassert(!new_unfilt_chunk || new_unfilt_dim[op_dim]);
+
/* Fill the unused parts of the chunk */
- if(H5D_chunk_prune_fill(&udata) < 0)
+ if(H5D_chunk_prune_fill(&udata, new_unfilt_chunk) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write fill value")
} /* end if */
else {
@@ -4354,7 +4885,7 @@ H5D_chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
const H5O_pline_t *pline = udata->pline; /* I/O pipeline for applying filters */
/* needed for compressed variable-length data */
- hbool_t has_filters = FALSE; /* Whether chunk has filters */
+ hbool_t must_filter = FALSE; /* Whether chunk must be filtered during copy */
size_t nbytes; /* Size of chunk in file (in bytes) */
H5Z_cb_t cb_struct; /* Filter failure callback struct */
@@ -4376,9 +4907,23 @@ H5D_chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
} /* end if */
/* Check for filtered chunks */
- if(pline && pline->nused) {
- has_filters = TRUE;
- cb_struct.func = NULL; /* no callback function when failed */
+ if((is_vlen || fix_ref) && pline && pline->nused) {
+ /* Check if we should disable filters on this chunk */
+ if(udata->common.layout->flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) {
+ htri_t is_edge_chunk; /* Whether the chunk is an edge chunk */
+
+ /* Check if the chunk is an edge chunk, and disable filters if so */
+ if((is_edge_chunk = H5D_chunk_is_partial_edge_chunk(
+ chunk_rec->offset, NULL, udata->dset_ndims,
+ udata->dset_dims, udata->common.layout->dim)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, H5_ITER_ERROR, "unable to determine if chunk is edge chunk")
+
+ if(!is_edge_chunk)
+ must_filter = TRUE;
+ } /* end if */
+ else
+ must_filter = TRUE;
} /* end if */
/* Resize the buf if it is too small to hold the data */
@@ -4408,9 +4953,10 @@ H5D_chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
HGOTO_ERROR(H5E_IO, H5E_READERROR, H5_ITER_ERROR, "unable to read raw data chunk")
/* Need to uncompress variable-length & reference data elements */
- if(has_filters && (is_vlen || fix_ref)) {
+ if(must_filter) {
unsigned filter_mask = chunk_rec->filter_mask;
+ cb_struct.func = NULL; /* no callback function when failed */
if(H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &filter_mask, H5Z_NO_EDC, cb_struct, &nbytes, &buf_size, &buf) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, H5_ITER_ERROR, "data pipeline read failed")
} /* end if */
@@ -4472,7 +5018,7 @@ H5D_chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
udata_dst.addr = HADDR_UNDEF;
/* Need to compress variable-length & reference data elements before writing to file */
- if(has_filters && (is_vlen || fix_ref) ) {
+ if(must_filter) {
if(H5Z_pipeline(pline, 0, &(udata_dst.filter_mask), H5Z_NO_EDC, cb_struct, &nbytes, &buf_size, &buf) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, H5_ITER_ERROR, "output pipeline failed")
#if H5_SIZEOF_SIZE_T > 4
@@ -4527,6 +5073,8 @@ H5D_chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src,
H5D_chunk_it_ud3_t udata; /* User data for iteration callback */
H5D_chk_idx_info_t idx_info_dst; /* Dest. chunked index info */
H5D_chk_idx_info_t idx_info_src; /* Source chunked index info */
+ int sndims; /* Rank of dataspace */
+ hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /* Curr. size of dataset dimensions */
H5O_pline_t _pline; /* Temporary pipeline info */
const H5O_pline_t *pline; /* Pointer to pipeline info to use */
H5T_path_t *tpath_src_mem = NULL, *tpath_mem_dst = NULL; /* Datatype conversion paths */
@@ -4582,8 +5130,6 @@ H5D_chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src,
/* Initialize layout information */
{
- hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /* Curr. size of dataset dimensions */
- int sndims; /* Rank of dataspace */
unsigned ndims; /* Rank of dataspace */
/* Get the dim info for dataset */
@@ -4739,6 +5285,8 @@ H5D_chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src,
udata.buf_space = buf_space;
udata.nelmts = nelmts;
udata.pline = pline;
+ udata.dset_ndims = (unsigned)sndims;
+ udata.dset_dims = curr_dims;
udata.cpy_info = cpy_info;
/* Iterate over chunks to copy data */
@@ -5168,3 +5716,59 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5D_nonexistent_readvv() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5D_chunk_is_partial_edge_chunk
+ *
+ * Purpose: Checks to see if the chunk is a partial edge chunk.
+ * Either dset or (dset_dims and dset_ndims) must be
+ * provided.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * 19 Nov 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static htri_t
+H5D_chunk_is_partial_edge_chunk(const hsize_t offset[], const H5D_t *dset,
+ unsigned dset_ndims, const hsize_t *dset_dims, const uint32_t *chunk_dims)
+{
+ hsize_t _dset_dims[H5O_LAYOUT_NDIMS]; /* Dataset dimensions */
+ unsigned i; /* Local index variables */
+ htri_t ret_value = FALSE; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_is_partial_edge_chunk)
+
+ /* Check args */
+ HDassert(offset);
+ HDassert((chunk_dims && dset_dims) || dset);
+
+ /* Get chunk dimensions if not specified */
+ if(!chunk_dims)
+ chunk_dims = dset->shared->layout.u.chunk.dim;
+
+ /* Get dataset dimensions if not specified */
+ if(!dset_dims) {
+ int tmp_ndims;
+ if((tmp_ndims = H5S_get_simple_extent_dims(dset->shared->space,
+ _dset_dims, NULL)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
+ "unable to get simple dataspace info")
+
+ dset_dims = _dset_dims;
+ H5_ASSIGN_OVERFLOW(dset_ndims, tmp_ndims, int, unsigned);
+ } /* end if */
+
+ /* check if this is a partial edge chunk */
+ for(i=0; i<dset_ndims; i++)
+ if((offset[i] + chunk_dims[i]) > dset_dims[i]) {
+ ret_value = TRUE;
+ break;
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D_chunk_is_partial_edge_chunk() */
+
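The new predicate reduces to one bounds test per dimension: a chunk is a partial edge chunk iff it extends past the dataset extent in any dimension. A self-contained restatement of that logic (hypothetical helper mirroring the function above; not library code):

    #include <stdbool.h>
    #include <stdint.h>

    static bool
    is_partial_edge_chunk(unsigned ndims, const uint64_t offset[],
        const uint32_t chunk_dims[], const uint64_t dset_dims[])
    {
        /* Partial iff the chunk sticks out past the dataset bounds in
         * any dimension */
        for(unsigned i = 0; i < ndims; i++)
            if((offset[i] + chunk_dims[i]) > dset_dims[i])
                return true;
        return false;
    }

For example, with a 25x25 dataset and 10x10 chunks, the chunk at offset (20, 0) covers rows 20..29 while the dataset ends at row 24, so it is a partial edge chunk and, with the new option set, stays unfiltered.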
diff --git a/src/H5Dint.c b/src/H5Dint.c
index 032006f..8c4d8cd 100644
--- a/src/H5Dint.c
+++ b/src/H5Dint.c
@@ -2306,11 +2306,21 @@ H5D_set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id)
* and if the chunks are written
*-------------------------------------------------------------------------
*/
- if(shrink && H5D_CHUNKED == dset->shared->layout.type &&
- (*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage)) {
- /* Remove excess chunks */
- if(H5D_chunk_prune_by_extent(dset, dxpl_id, curr_dims) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to remove chunks")
+ if(H5D_CHUNKED == dset->shared->layout.type) {
+ if(shrink && (*dset->shared->layout.ops->is_space_alloc)(
+ &dset->shared->layout.storage))
+ /* Remove excess chunks */
+ if(H5D_chunk_prune_by_extent(dset, dxpl_id, curr_dims) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to remove chunks")
+
+ /* Update chunks that are no longer edge chunks as a result of
+ * expansion */
+ if(expand && (dset->shared->layout.u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS)
+ && (dset->shared->dcpl_cache.pline.nused > 0))
+ if(H5D_chunk_update_old_edge_chunks(dset, dxpl_id, curr_dims)
+ < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to do update old edge chunks")
} /* end if */
/* Mark the dataspace as dirty, for later writing to the file */
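In API terms, the H5D_set_extent() hook above means that a later H5Dset_extent() call which completes formerly partial chunks triggers H5D_chunk_update_old_edge_chunks(), re-applying the filter pipeline to those chunks. A hedged sketch, reusing dset from the earlier example and assuming the dataset was created extendible (maxdims larger than dims):

    hsize_t new_dims[2] = {30, 30};

    /* Growing 25x25 to 30x30 completes the chunks at offsets (20,y)
     * and (x,20); the hook above re-filters them during this call */
    H5Dset_extent(dset, new_dims);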
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index e646a7b..c32536f 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -1256,7 +1256,7 @@ if(H5DEBUG(D))
entire_chunk = FALSE;
/* Lock the chunk into the cache */
- if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, entire_chunk)))
+ if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, entire_chunk, FALSE)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
/* Set up the storage buffer information for this chunk */
@@ -1484,7 +1484,7 @@ if(H5DEBUG(D)) {
entire_chunk = FALSE;
/* Lock the chunk into the cache */
- if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, entire_chunk)))
+ if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, entire_chunk, FALSE)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
/* Set up the storage buffer information for this chunk */
diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h
index d5a7e45..63cc010 100644
--- a/src/H5Dpkg.h
+++ b/src/H5Dpkg.h
@@ -276,6 +276,7 @@ typedef struct H5D_chunk_ud_t {
uint32_t nbytes; /*size of stored data */
unsigned filter_mask; /*excluded filters */
haddr_t addr; /*file address of chunk */
+ hbool_t new_unfilt_chunk; /*whether the chunk just became unfiltered */
} H5D_chunk_ud_t;
/* Typedef for "generic" chunk callbacks */
@@ -505,11 +506,17 @@ typedef struct {
hsize_t size; /* Accumulated number of bytes for the selection */
} H5D_vlen_bufsize_t;
+/* Flags for the "edge_chunk_state" field below */
+#define H5D_RDCC_DISABLE_FILTERS 0x01u /* Disable filters on this chunk */
+#define H5D_RDCC_NEWLY_DISABLED_FILTERS 0x02u /* Filters have been disabled since
+ * the last flush */
+
/* Raw data chunks are cached. Each entry in the cache is: */
typedef struct H5D_rdcc_ent_t {
hbool_t locked; /*entry is locked in cache */
hbool_t dirty; /*needs to be written to disk? */
hbool_t deleted; /*chunk about to be deleted (do not flush) */
+ unsigned edge_chunk_state; /*states related to edge chunks (see above) */
hsize_t offset[H5O_LAYOUT_NDIMS]; /*chunk name */
uint32_t rd_count; /*bytes remaining to be read */
uint32_t wr_count; /*bytes remaining to be written */
@@ -640,7 +647,7 @@ H5_DLL hbool_t H5D_chunk_is_space_alloc(const H5O_storage_t *storage);
H5_DLL herr_t H5D_chunk_lookup(const H5D_t *dset, hid_t dxpl_id,
const hsize_t *chunk_offset, hsize_t chunk_idx, H5D_chunk_ud_t *udata);
H5_DLL void *H5D_chunk_lock(const H5D_io_info_t *io_info,
- H5D_chunk_ud_t *udata, hbool_t relax);
+ H5D_chunk_ud_t *udata, hbool_t relax, hbool_t prev_unfilt_chunk);
H5_DLL herr_t H5D_chunk_unlock(const H5D_io_info_t *io_info,
const H5D_chunk_ud_t *udata, hbool_t dirty, void *chunk,
uint32_t naccessed);
@@ -649,6 +656,8 @@ H5_DLL herr_t H5D_chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id,
H5_DLL herr_t H5D_chunk_allocated(H5D_t *dset, hid_t dxpl_id, hsize_t *nbytes);
H5_DLL herr_t H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id,
hbool_t full_overwrite, hsize_t old_dim[]);
+H5_DLL herr_t H5D_chunk_update_old_edge_chunks(H5D_t *dset, hid_t dxpl_id,
+ hsize_t old_dim[]);
H5_DLL herr_t H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id,
const hsize_t *old_dim);
#ifdef H5_HAVE_PARALLEL
diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h
index 056346e..972b33b 100644
--- a/src/H5Dpublic.h
+++ b/src/H5Dpublic.h
@@ -34,6 +34,9 @@
#define H5D_CHUNK_CACHE_NBYTES_DEFAULT ((size_t) -1)
#define H5D_CHUNK_CACHE_W0_DEFAULT -1.
+/* Bit flags for H5Pset_chunk_opts() and H5Pget_chunk_opts() */
+#define H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS (0x0002u)
+
/*******************/
/* Public Typedefs */
/*******************/
diff --git a/src/H5Olayout.c b/src/H5Olayout.c
index 448e4bf..82227e6 100644
--- a/src/H5Olayout.c
+++ b/src/H5Olayout.c
@@ -35,24 +35,6 @@
/* Local macros */
-/* Flags for chunked layout feature encoding */
-#ifdef NOT_YET
-#define H5O_LAYOUT_CHUNK_STORE_ELEM_PHASE_CHANGE 0x01
-#define H5O_LAYOUT_CHUNK_STORE_CHUNK_PHASE_CHANGE 0x02
-#define H5O_LAYOUT_CHUNK_STORE_ELEM_CHUNK_TRANS 0x04
-#define H5O_LAYOUT_CHUNK_ABBREVIATE_PARTIAL_BOUND_CHUNKS 0x08
-#define H5O_LAYOUT_CHUNK_APPLY_FILTER_TO_PARTIAL_BOUND_CHUNKS 0x10
-#define H5O_LAYOUT_ALL_CHUNK_FLAGS ( \
- H5O_LAYOUT_CHUNK_STORE_ELEM_PHASE_CHANGE \
- | H5O_LAYOUT_CHUNK_STORE_CHUNK_PHASE_CHANGE \
- | H5O_LAYOUT_CHUNK_STORE_ELEM_CHUNK_TRANS \
- | H5O_LAYOUT_CHUNK_ABBREVIATE_PARTIAL_BOUND_CHUNKS \
- | H5O_LAYOUT_CHUNK_APPLY_FILTER_TO_PARTIAL_BOUND_CHUNKS \
- )
-#else /* NOT_YET */
-#define H5O_LAYOUT_ALL_CHUNK_FLAGS 0
-#endif /* NOT_YET */
-
/* PRIVATE PROTOTYPES */
static void *H5O_layout_decode(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh,
@@ -264,6 +246,9 @@ H5O_layout_decode(H5F_t *f, hid_t UNUSED dxpl_id, H5O_t UNUSED *open_oh,
/* Set the storage type */
mesg->storage.type = mesg->type;
+ /* Set the chunked layout flags */
+ mesg->u.chunk.flags = (uint8_t)0;
+
/* Dimensionality */
mesg->u.chunk.ndims = *p++;
if(mesg->u.chunk.ndims > H5O_LAYOUT_NDIMS)
@@ -286,16 +271,14 @@ H5O_layout_decode(H5F_t *f, hid_t UNUSED dxpl_id, H5O_t UNUSED *open_oh,
mesg->storage.u.chunk.ops = H5D_COPS_BTREE;
} /* end if */
else {
- unsigned char flags; /* Flags for encoding group info */
-
/* Get the chunked layout flags */
- flags = *p++;
+ mesg->u.chunk.flags = *p++;
/* Check for valid flags */
/* (Currently issues an error for all non-zero values,
* until features are added for the flags)
*/
- if(flags & ~H5O_LAYOUT_ALL_CHUNK_FLAGS)
+ if(mesg->u.chunk.flags & ~H5O_LAYOUT_ALL_CHUNK_FLAGS)
HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad flag value for message")
/* Dimensionality */
@@ -479,7 +462,7 @@ H5O_layout_encode(H5F_t *f, hbool_t UNUSED disable_shared, uint8_t *p, const voi
} /* end if */
else {
/* Chunk feature flags */
- *p++ = 0; /* (no features supported yet) */
+ *p++ = mesg->u.chunk.flags;
/* Number of dimensions */
HDassert(mesg->u.chunk.ndims > 0 && mesg->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
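One behavioral consequence of the flag check above, consistent with the commit's compatibility note: a version-4 layout message carrying the new 0x10 flag decodes cleanly here, while an older library, whose H5O_LAYOUT_ALL_CHUNK_FLAGS is still 0, takes the "bad flag value for message" error path. This is why datasets created with the option are unreadable by 1.8 and earlier.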
diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h
index 1355582..0195561 100644
--- a/src/H5Oprivate.h
+++ b/src/H5Oprivate.h
@@ -334,6 +334,28 @@ typedef struct H5O_efl_t {
*/
#define H5O_LAYOUT_NDIMS (H5S_MAX_RANK+1)
+/* Flags for chunked layout feature encoding */
+#ifdef NOT_YET
+#define H5O_LAYOUT_CHUNK_STORE_ELEM_PHASE_CHANGE 0x01
+#define H5O_LAYOUT_CHUNK_STORE_CHUNK_PHASE_CHANGE 0x02
+#define H5O_LAYOUT_CHUNK_STORE_ELEM_CHUNK_TRANS 0x04
+#define H5O_LAYOUT_CHUNK_ABBREVIATE_PARTIAL_BOUND_CHUNKS 0x08
+#endif /* NOT_YET */
+#define H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS 0x10
+#ifdef NOT_YET
+#define H5O_LAYOUT_ALL_CHUNK_FLAGS ( \
+ H5O_LAYOUT_CHUNK_STORE_ELEM_PHASE_CHANGE \
+ | H5O_LAYOUT_CHUNK_STORE_CHUNK_PHASE_CHANGE \
+ | H5O_LAYOUT_CHUNK_STORE_ELEM_CHUNK_TRANS \
+ | H5O_LAYOUT_CHUNK_ABBREVIATE_PARTIAL_BOUND_CHUNKS \
+ | H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS \
+ )
+#else /* NOT_YET */
+#define H5O_LAYOUT_ALL_CHUNK_FLAGS ( \
+ H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS \
+ )
+#endif /* NOT_YET */
+
/* Initial version of the layout information. Used when space is allocated */
#define H5O_LAYOUT_VERSION_1 1
@@ -439,6 +461,7 @@ typedef struct H5O_layout_chunk_earray_t {
typedef struct H5O_layout_chunk_t {
H5D_chunk_index_t idx_type; /* Type of chunk index */
+ uint8_t flags; /* Chunk layout flags */
unsigned ndims; /* Num dimensions in chunk */
uint32_t dim[H5O_LAYOUT_NDIMS]; /* Size of chunk in elements */
unsigned enc_bytes_per_dim; /* Encoded # of bytes for storing each chunk dimension */
diff --git a/src/H5Pdcpl.c b/src/H5Pdcpl.c
index 9624989..e672ad0 100644
--- a/src/H5Pdcpl.c
+++ b/src/H5Pdcpl.c
@@ -54,7 +54,7 @@
#define H5D_DEF_STORAGE_COMPACT_INIT {(hbool_t)FALSE, (size_t)0, NULL}
#define H5D_DEF_STORAGE_CONTIG_INIT {HADDR_UNDEF, (hsize_t)0}
#define H5D_DEF_STORAGE_CHUNK_INIT {H5D_CHUNK_IDX_BTREE, HADDR_UNDEF, H5D_COPS_BTREE, {{NULL}}}
-#define H5D_DEF_LAYOUT_CHUNK_INIT {H5D_CHUNK_IDX_BTREE, (unsigned)1, {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, (unsigned)0, (uint32_t)0, (hsize_t)0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {{{(uint8_t)0}}}}
+#define H5D_DEF_LAYOUT_CHUNK_INIT {H5D_CHUNK_IDX_BTREE, (uint8_t)0, (unsigned)1, {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, (unsigned)0, (uint32_t)0, (hsize_t)0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {{{(uint8_t)0}}}}
#ifdef H5_HAVE_C99_DESIGNATED_INITIALIZER
#define H5D_DEF_STORAGE_COMPACT {H5D_COMPACT, { .compact = H5D_DEF_STORAGE_COMPACT_INIT }}
#define H5D_DEF_STORAGE_CONTIG {H5D_CONTIGUOUS, { .contig = H5D_DEF_STORAGE_CONTIG_INIT }}
@@ -953,6 +953,129 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5Pset_chunk_opts
+ *
+ * Purpose: Sets the options related to chunked storage for a dataset.
+ * The storage must already be set to chunked.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Thursday, January 21, 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pset_chunk_opts(hid_t plist_id, unsigned options)
+{
+ H5P_genplist_t *plist; /* Property list pointer */
+ H5O_layout_t layout; /* Layout information for setting chunk info */
+ uint8_t layout_flags = 0; /* "options" translated into layout message flags format */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(H5Pset_chunk_opts, FAIL)
+
+ /* Check arguments */
+ if(options & ~(H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "unknown chunk options")
+
+#ifndef H5_HAVE_C99_DESIGNATED_INITIALIZER
+ /* If the compiler doesn't support C99 designated initializers, check if
+ * the default layout structs have been initialized yet or not. *ick* -QAK
+ */
+ if(!H5P_dcrt_def_layout_init_g)
+ if(H5P_init_def_layout() < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINIT, FAIL, "can't initialize default layout info")
+#endif /* H5_HAVE_C99_DESIGNATED_INITIALIZER */
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_CREATE)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
+
+ /* Retrieve the layout property */
+ if(H5P_get(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, "can't get layout")
+ if(H5D_CHUNKED != layout.type)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a chunked storage layout")
+
+ /* Translate options into flags that can be used with the layout message */
+ if(options & H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS)
+ layout_flags |= H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS;
+
+ /* Update the layout message, including the version (if necessary) */
+ /* This probably isn't the right way to do this, and should be changed once
+ * this branch gets the "real" way to set the layout version */
+ layout.u.chunk.flags = layout_flags;
+ if(layout.version < H5O_LAYOUT_VERSION_4)
+ layout.version = H5O_LAYOUT_VERSION_4;
+
+ /* Set layout value */
+ if(H5P_set(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINIT, FAIL, "can't set layout")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5Pset_chunk_opts() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Pget_chunk_opts
+ *
+ * Purpose: Gets the options related to chunked storage for a dataset.
+ * The storage must already be set to chunked.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Friday, January 22, 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pget_chunk_opts(hid_t plist_id, unsigned *options)
+{
+ H5P_genplist_t *plist; /* Property list pointer */
+ H5O_layout_t layout; /* Layout information for getting chunk info */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(H5Pget_chunk_opts, FAIL)
+
+#ifndef H5_HAVE_C99_DESIGNATED_INITIALIZER
+ /* If the compiler doesn't support C99 designated initializers, check if
+ * the default layout structs have been initialized yet or not. *ick* -QAK
+ */
+ if(!H5P_dcrt_def_layout_init_g)
+ if(H5P_init_def_layout() < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINIT, FAIL, "can't initialize default layout info")
+#endif /* H5_HAVE_C99_DESIGNATED_INITIALIZER */
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_CREATE)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
+
+ /* Retrieve the layout property */
+ if(H5P_get(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, "can't get layout")
+ if(H5D_CHUNKED != layout.type)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a chunked storage layout")
+
+ if(options) {
+ /* Translate options from flags that can be used with the layout message
+ * to those known to the public */
+ *options = 0;
+ if(layout.u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS)
+ *options |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
+ } /* end if */
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5Pget_chunk_opts() */
+
+
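A hedged usage sketch of the two new API calls (the file name, dataset name, and deflate level are hypothetical; error checking elided for brevity):

#include "hdf5.h"

int
main(void)
{
    hid_t    file, space, dcpl, dset;
    hsize_t  dims[2]       = {10, 10};
    hsize_t  chunk_dims[2] = {4, 4};
    unsigned opts          = 0;

    file  = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    space = H5Screate_simple(2, dims, NULL);

    /* Chunked layout with a deflate filter; with the option below, the
     * partial chunks along the dataspace edge are stored unfiltered */
    dcpl = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, 2, chunk_dims);
    H5Pset_deflate(dcpl, 6);
    H5Pset_chunk_opts(dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS);

    /* Read the setting back; opts now has
     * H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS set */
    H5Pget_chunk_opts(dcpl, &opts);

    dset = H5Dcreate2(file, "dset", H5T_NATIVE_INT, space, H5P_DEFAULT,
            dcpl, H5P_DEFAULT);

    H5Dclose(dset);
    H5Pclose(dcpl);
    H5Sclose(space);
    H5Fclose(file);
    return 0;
}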
+/*-------------------------------------------------------------------------
* Function: H5Pset_external
*
* Purpose: Adds an external file to the list of external files. PLIST_ID
diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h
index 09eb796..acdac96 100644
--- a/src/H5Ppublic.h
+++ b/src/H5Ppublic.h
@@ -295,6 +295,8 @@ H5_DLL herr_t H5Pset_chunk(hid_t plist_id, int ndims, const hsize_t dim[/*ndims*
H5_DLL int H5Pget_chunk(hid_t plist_id, int max_ndims, hsize_t dim[]/*out*/);
H5_DLL herr_t H5Pset_external(hid_t plist_id, const char *name, off_t offset,
hsize_t size);
+H5_DLL herr_t H5Pset_chunk_opts(hid_t plist_id, unsigned options);
+H5_DLL herr_t H5Pget_chunk_opts(hid_t plist_id, unsigned *options);
H5_DLL int H5Pget_external_count(hid_t plist_id);
H5_DLL herr_t H5Pget_external(hid_t plist_id, unsigned idx, size_t name_size,
char *name/*out*/, off_t *offset/*out*/,