author     Quincey Koziol <koziol@hdfgroup.org>    2007-11-27 21:19:42 (GMT)
committer  Quincey Koziol <koziol@hdfgroup.org>    2007-11-27 21:19:42 (GMT)
commit     6028cd41b8d81d8f099b42efc36a5f236868be5c (patch)
tree       2ba7cc4bace16a06baab493858672abaddf36470 /src
parent     df3aac7f009aeb8acf650964378251db0a02ca71 (diff)
[svn-r14295] Description:
- Eliminate some redundant calls to retrieve datatype sizes in chunk
  read/write routines.
- Change indexed storage "common" B-tree callback user data to avoid copying
  chunk offset and separate "downward" info from "upward" info.
- Cache chunk info (nbytes/filter_mask/address) for last chunk accessed.

Tested on:
        FreeBSD/32 6.2 (duty) in debug mode
        FreeBSD/64 6.2 (liberty) w/C++ & FORTRAN, in debug mode
        Linux/32 2.6 (kagiso) w/PGI compilers, w/C++ & FORTRAN, w/threadsafe,
                in debug mode
        Linux/64-amd64 2.6 (smirom) w/default API=1.6.x, w/C++ & FORTRAN,
                in production mode
        Linux/64-ia64 2.6 (cobalt) w/Intel compilers, w/C++ & FORTRAN,
                in production mode
        Solaris/32 2.10 (linew) w/deprecated symbols disabled, w/C++ & FORTRAN,
                w/szip filter, in production mode
        Mac OS X/32 10.4.10 (amazon) in debug mode
        Linux/64-ia64 2.4 (tg-login3) w/parallel, w/FORTRAN, in production mode
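As a quick standalone illustration of the last bullet, here is a minimal sketch in
plain C of the "cache the last chunk accessed" scheme. The rank, field types, and
function signatures are simplified stand-ins, not the real HDF5 internals; the
helper names mirror the H5D_istore_cinfo_cache_reset/_update/_found routines added
to H5Distore.c below:

    /* Simplified sketch of the "last chunk accessed" cache added by this
     * commit.  Types and HDF5 internals are reduced to plain C so the idea
     * stands alone; names loosely follow the diff below. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define NDIMS 2                      /* hypothetical rank, for illustration */

    typedef struct {
        bool          valid;             /* whether the cached info is usable */
        unsigned long offset[NDIMS];     /* logical offset of the cached chunk */
        size_t        nbytes;            /* size of stored (filtered) data */
        unsigned      filter_mask;       /* filters excluded for this chunk */
        unsigned long addr;              /* "file address" stand-in */
    } chunk_cached_t;

    /* Invalidate the cache (e.g. after pruning chunks or (re)initializing) */
    static void cinfo_cache_reset(chunk_cached_t *last) { last->valid = false; }

    /* Remember the chunk most recently located or written */
    static void cinfo_cache_update(chunk_cached_t *last, const unsigned long offset[NDIMS],
                                   size_t nbytes, unsigned filter_mask, unsigned long addr)
    {
        memcpy(last->offset, offset, sizeof(last->offset));
        last->nbytes = nbytes;
        last->filter_mask = filter_mask;
        last->addr = addr;
        last->valid = true;
    }

    /* Return true and fill in the outputs if the cached chunk matches OFFSET,
     * avoiding a (simulated) B-tree lookup when one chunk is hit repeatedly. */
    static bool cinfo_cache_found(const chunk_cached_t *last, const unsigned long offset[NDIMS],
                                  size_t *nbytes, unsigned *filter_mask, unsigned long *addr)
    {
        if(!last->valid)
            return false;
        for(unsigned u = 0; u < NDIMS; u++)
            if(last->offset[u] != offset[u])
                return false;
        *nbytes = last->nbytes;
        *filter_mask = last->filter_mask;
        *addr = last->addr;
        return true;
    }

    int main(void)
    {
        chunk_cached_t last;
        unsigned long off_a[NDIMS] = {0, 0}, off_b[NDIMS] = {0, 64};
        size_t nbytes; unsigned mask; unsigned long addr;

        cinfo_cache_reset(&last);
        cinfo_cache_update(&last, off_a, 4096, 0, 2048); /* pretend a B-tree search found this */

        printf("chunk A cached: %d\n", cinfo_cache_found(&last, off_a, &nbytes, &mask, &addr)); /* 1 */
        printf("chunk B cached: %d\n", cinfo_cache_found(&last, off_b, &nbytes, &mask, &addr)); /* 0 */
        return 0;
    }

In the diff below the real cache is also reset when chunks are pruned by a shrinking
dataset extent (H5D_istore_prune_by_extent), so a stale entry is never returned.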
Diffstat (limited to 'src')
-rw-r--r--   src/H5Dio.c       40
-rw-r--r--   src/H5Distore.c  359
-rw-r--r--   src/H5Dpkg.h      20
3 files changed, 284 insertions, 135 deletions
diff --git a/src/H5Dio.c b/src/H5Dio.c
index 66ccd66..5950c22 100644
--- a/src/H5Dio.c
+++ b/src/H5Dio.c
@@ -1377,6 +1377,11 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
/* Set dataset storage for I/O info */
io_info->store=&store;
+ /* Compute element sizes */
+ src_type_size = H5T_get_size(dataset->shared->type);
+ dst_type_size = H5T_get_size(mem_type);
+ max_type_size = MAX(src_type_size, dst_type_size);
+
/*
* If there is no type conversion then read directly into the
* application's buffer. This saves at least one mem-to-mem copy.
@@ -1429,13 +1434,13 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
/* Perform the actual read operation */
if((io_info->ops.read)(io_info, chunk_info->chunk_points,
- H5T_get_size(dataset->shared->type), chunk_info->fspace,
+ src_type_size, chunk_info->fspace,
chunk_info->mspace, chunk_addr, chunk, buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, " chunked read failed")
/* Release the cache lock on the chunk. */
if(chunk) {
- accessed_bytes = chunk_info->chunk_points * H5T_get_size(dataset->shared->type);
+ accessed_bytes = chunk_info->chunk_points * src_type_size;
if(H5D_istore_unlock(io_info, FALSE, idx_hint, chunk, accessed_bytes) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
} /* end if */
@@ -1450,7 +1455,7 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
#ifdef H5S_DEBUG
H5_timer_end(&(io_info->stats->stats[1].read_timer), &timer);
- io_info->stats->stats[1].read_nbytes += nelmts * H5T_get_size(dataset->shared->type);
+ io_info->stats->stats[1].read_nbytes += nelmts * src_type_size;
io_info->stats->stats[1].read_ncalls++;
#endif
@@ -1464,10 +1469,7 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
if(nelmts==0)
HGOTO_DONE(SUCCEED)
- /* Compute element sizes and other parameters */
- src_type_size = H5T_get_size(dataset->shared->type);
- dst_type_size = H5T_get_size(mem_type);
- max_type_size = MAX(src_type_size, dst_type_size);
+ /* Compute buffer sizes and other parameters */
target_size = dxpl_cache->max_temp_buf;
/* XXX: This could cause a problem if the user sets their buffer size
* to the same size as the default, and then the dataset elements are
@@ -1646,7 +1648,7 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
/* Release the cache lock on the chunk. */
if(chunk) {
- accessed_bytes = chunk_info->chunk_points * H5T_get_size(dataset->shared->type);
+ accessed_bytes = chunk_info->chunk_points * src_type_size;
if(H5D_istore_unlock(io_info, FALSE, idx_hint, chunk, accessed_bytes) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
} /* end if */
@@ -1778,6 +1780,11 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
} /* end if */
#endif /* H5_HAVE_PARALLEL */
+ /* Compute element sizes and other parameters */
+ src_type_size = H5T_get_size(mem_type);
+ dst_type_size = H5T_get_size(dataset->shared->type);
+ max_type_size = MAX(src_type_size, dst_type_size);
+
/*
* If there is no type conversion then write directly from the
* application's buffer. This saves at least one mem-to-mem copy.
@@ -1820,7 +1827,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
chunk_addr = H5D_istore_get_addr(io_info, &udata);
if(H5D_istore_if_load(io_info, chunk_addr)) {
- accessed_bytes = chunk_info->chunk_points * H5T_get_size(dataset->shared->type);
+ accessed_bytes = chunk_info->chunk_points * dst_type_size;
if(accessed_bytes != dataset->shared->layout.u.chunk.size)
relax = FALSE;
@@ -1831,7 +1838,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
/* Perform the actual read operation */
if((io_info->ops.write)(io_info, chunk_info->chunk_points,
- H5T_get_size(dataset->shared->type), chunk_info->fspace,
+ dst_type_size, chunk_info->fspace,
chunk_info->mspace, chunk_addr, chunk, buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, " chunked write failed")
@@ -1851,7 +1858,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
#ifdef H5S_DEBUG
H5_timer_end(&(io_info->stats->stats[0].write_timer), &timer);
- io_info->stats->stats[0].write_nbytes += nelmts * H5T_get_size(mem_type);
+ io_info->stats->stats[0].write_nbytes += nelmts * src_type_size;
io_info->stats->stats[0].write_ncalls++;
#endif
@@ -1865,10 +1872,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
if(nelmts==0)
HGOTO_DONE(SUCCEED)
- /* Compute element sizes and other parameters */
- src_type_size = H5T_get_size(mem_type);
- dst_type_size = H5T_get_size(dataset->shared->type);
- max_type_size = MAX(src_type_size, dst_type_size);
+ /* Compute buffer sizes and other parameters */
target_size = dxpl_cache->max_temp_buf;
/* XXX: This could cause a problem if the user sets their buffer size
* to the same size as the default, and then the dataset elements are
@@ -1953,11 +1957,11 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
chunk_addr = H5D_istore_get_addr(io_info, &udata);
if(H5D_istore_if_load(io_info, chunk_addr)) {
- accessed_bytes = chunk_info->chunk_points * H5T_get_size(dataset->shared->type);
+ accessed_bytes = chunk_info->chunk_points * dst_type_size;
if(accessed_bytes != dataset->shared->layout.u.chunk.size)
relax=FALSE;
if(relax) {
- accessed_bytes = H5S_GET_SELECT_NPOINTS(chunk_info->mspace)*H5T_get_size(mem_type);
+ accessed_bytes = H5S_GET_SELECT_NPOINTS(chunk_info->mspace) * src_type_size;
if(accessed_bytes != dataset->shared->layout.u.chunk.size)
relax = FALSE;
}
@@ -2055,7 +2059,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
/* Release the cache lock on the chunk. */
if(chunk) {
- accessed_bytes = chunk_info->chunk_points * H5T_get_size(dataset->shared->type);
+ accessed_bytes = chunk_info->chunk_points * dst_type_size;
if(H5D_istore_unlock(io_info, TRUE, idx_hint, chunk, accessed_bytes) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
} /* end if */
diff --git a/src/H5Distore.c b/src/H5Distore.c
index 848668a..c10e6bf 100644
--- a/src/H5Distore.c
+++ b/src/H5Distore.c
@@ -553,22 +553,20 @@ H5D_istore_cmp3(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, void *_lt_key, void *_uda
/* slightly odd way the library initializes the right-most node in the */
/* indexed storage B-tree... */
/* (Dump the B-tree with h5debug to look at it) -QAK */
- if(udata->mesg->u.chunk.ndims==2) {
- if(udata->key.offset[0]>rt_key->offset[0])
- ret_value=1;
- else if(udata->key.offset[0]==rt_key->offset[0] &&
- udata->key.offset[1]>=rt_key->offset[1])
- ret_value=1;
- else if(udata->key.offset[0]<lt_key->offset[0])
- ret_value=(-1);
+ if(udata->mesg->u.chunk.ndims == 2) {
+ if(udata->offset[0] > rt_key->offset[0])
+ ret_value = 1;
+ else if(udata->offset[0] == rt_key->offset[0] &&
+ udata->offset[1] >= rt_key->offset[1])
+ ret_value = 1;
+ else if(udata->offset[0] < lt_key->offset[0])
+ ret_value = (-1);
} /* end if */
else {
- if (H5V_vector_ge_u(udata->mesg->u.chunk.ndims, udata->key.offset,
- rt_key->offset))
+ if(H5V_vector_ge_u(udata->mesg->u.chunk.ndims, udata->offset, rt_key->offset))
ret_value = 1;
- else if (H5V_vector_lt_u(udata->mesg->u.chunk.ndims, udata->key.offset,
- lt_key->offset))
- ret_value = -1;
+ else if(H5V_vector_lt_u(udata->mesg->u.chunk.ndims, udata->offset, lt_key->offset))
+ ret_value = (-1);
} /* end else */
FUNC_LEAVE_NOAPI(ret_value)
@@ -615,9 +613,9 @@ H5D_istore_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op,
HDassert(addr_p);
/* Allocate new storage */
- HDassert(udata->common.key.nbytes > 0);
- H5_CHECK_OVERFLOW(udata->common.key.nbytes ,size_t, hsize_t);
- if(HADDR_UNDEF == (*addr_p = H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, (hsize_t)udata->common.key.nbytes)))
+ HDassert(udata->nbytes > 0);
+ H5_CHECK_OVERFLOW(udata->nbytes, size_t, hsize_t);
+ if(HADDR_UNDEF == (*addr_p = H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, (hsize_t)udata->nbytes)))
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "couldn't allocate new file storage")
udata->addr = *addr_p;
@@ -625,24 +623,24 @@ H5D_istore_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op,
* The left key describes the storage of the UDATA chunk being
* inserted into the tree.
*/
- lt_key->nbytes = udata->common.key.nbytes;
- lt_key->filter_mask = udata->common.key.filter_mask;
- for (u=0; u<udata->common.mesg->u.chunk.ndims; u++)
- lt_key->offset[u] = udata->common.key.offset[u];
+ lt_key->nbytes = udata->nbytes;
+ lt_key->filter_mask = udata->filter_mask;
+ for(u = 0; u < udata->common.mesg->u.chunk.ndims; u++)
+ lt_key->offset[u] = udata->common.offset[u];
/*
* The right key might already be present. If not, then add a zero-width
* chunk.
*/
- if (H5B_INS_LEFT != op) {
+ if(H5B_INS_LEFT != op) {
rt_key->nbytes = 0;
rt_key->filter_mask = 0;
- for (u=0; u<udata->common.mesg->u.chunk.ndims; u++) {
- HDassert(udata->common.key.offset[u]+udata->common.mesg->u.chunk.dim[u] >
- udata->common.key.offset[u]);
- rt_key->offset[u] = udata->common.key.offset[u] + udata->common.mesg->u.chunk.dim[u];
- }
- }
+ for(u = 0; u < udata->common.mesg->u.chunk.ndims; u++) {
+ HDassert(udata->common.offset[u] + udata->common.mesg->u.chunk.dim[u] >
+ udata->common.offset[u]);
+ rt_key->offset[u] = udata->common.offset[u] + udata->common.mesg->u.chunk.dim[u];
+ } /* end if */
+ } /* end if */
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -693,16 +691,14 @@ H5D_istore_found(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, haddr_t addr, const void
/* Is this *really* the requested chunk? */
for(u = 0; u < udata->common.mesg->u.chunk.ndims; u++)
- if(udata->common.key.offset[u] >= lt_key->offset[u] + udata->common.mesg->u.chunk.dim[u])
+ if(udata->common.offset[u] >= lt_key->offset[u] + udata->common.mesg->u.chunk.dim[u])
HGOTO_DONE(FAIL)
/* Initialize return values */
+ HDassert(lt_key->nbytes > 0);
udata->addr = addr;
- udata->common.key.nbytes = lt_key->nbytes;
- udata->common.key.filter_mask = lt_key->filter_mask;
- HDassert(lt_key->nbytes>0);
- for(u = 0; u < udata->common.mesg->u.chunk.ndims; u++)
- udata->common.key.offset[u] = lt_key->offset[u];
+ udata->nbytes = lt_key->nbytes;
+ udata->filter_mask = lt_key->filter_mask;
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -768,18 +764,18 @@ H5D_istore_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key,
cmp = H5D_istore_cmp3(f, dxpl_id, lt_key, udata, rt_key);
HDassert(cmp <= 0);
- if (cmp < 0) {
+ if(cmp < 0) {
/* Negative indices not supported yet */
HGOTO_ERROR(H5E_STORAGE, H5E_UNSUPPORTED, H5B_INS_ERROR, "internal error")
- } else if (H5V_vector_eq_u (udata->common.mesg->u.chunk.ndims,
- udata->common.key.offset, lt_key->offset) &&
- lt_key->nbytes>0) {
+ } else if(H5V_vector_eq_u(udata->common.mesg->u.chunk.ndims,
+ udata->common.offset, lt_key->offset) &&
+ lt_key->nbytes > 0) {
/*
* Already exists. If the new size is not the same as the old size
* then we should reallocate storage.
*/
- if (lt_key->nbytes != udata->common.key.nbytes) {
+ if(lt_key->nbytes != udata->nbytes) {
/* Currently, the old chunk data is "thrown away" after the space is reallocated,
* so avoid data copy in H5MF_realloc() call by just free'ing the space and
* allocating new space.
@@ -791,18 +787,18 @@ H5D_istore_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key,
*/
#ifdef OLD_WAY
if(HADDR_UNDEF == (*new_node_p = H5MF_realloc(f, H5FD_MEM_DRAW, addr,
- (hsize_t)lt_key->nbytes, (hsize_t)udata->common.key.nbytes)))
+ (hsize_t)lt_key->nbytes, (hsize_t)udata->nbytes)))
HGOTO_ERROR(H5E_STORAGE, H5E_NOSPACE, H5B_INS_ERROR, "unable to reallocate chunk storage")
#else /* OLD_WAY */
H5_CHECK_OVERFLOW( lt_key->nbytes ,size_t, hsize_t);
if(H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, addr, (hsize_t)lt_key->nbytes)<0)
HGOTO_ERROR(H5E_STORAGE, H5E_CANTFREE, H5B_INS_ERROR, "unable to free chunk")
- H5_CHECK_OVERFLOW(udata->common.key.nbytes ,size_t, hsize_t);
- if(HADDR_UNDEF == (*new_node_p = H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, (hsize_t)udata->common.key.nbytes)))
+ H5_CHECK_OVERFLOW(udata->nbytes ,size_t, hsize_t);
+ if(HADDR_UNDEF == (*new_node_p = H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, (hsize_t)udata->nbytes)))
HGOTO_ERROR(H5E_STORAGE, H5E_NOSPACE, H5B_INS_ERROR, "unable to reallocate chunk")
#endif /* OLD_WAY */
- lt_key->nbytes = udata->common.key.nbytes;
- lt_key->filter_mask = udata->common.key.filter_mask;
+ lt_key->nbytes = udata->nbytes;
+ lt_key->filter_mask = udata->filter_mask;
*lt_key_changed = TRUE;
udata->addr = *new_node_p;
ret_value = H5B_INS_CHANGE;
@@ -813,26 +809,26 @@ H5D_istore_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key,
} else if (H5V_hyper_disjointp(udata->common.mesg->u.chunk.ndims,
lt_key->offset, udata->common.mesg->u.chunk.dim,
- udata->common.key.offset, udata->common.mesg->u.chunk.dim)) {
+ udata->common.offset, udata->common.mesg->u.chunk.dim)) {
HDassert(H5V_hyper_disjointp(udata->common.mesg->u.chunk.ndims,
rt_key->offset, udata->common.mesg->u.chunk.dim,
- udata->common.key.offset, udata->common.mesg->u.chunk.dim));
+ udata->common.offset, udata->common.mesg->u.chunk.dim));
/*
* Split this node, inserting the new new node to the right of the
* current node. The MD_KEY is where the split occurs.
*/
- md_key->nbytes = udata->common.key.nbytes;
- md_key->filter_mask = udata->common.key.filter_mask;
+ md_key->nbytes = udata->nbytes;
+ md_key->filter_mask = udata->filter_mask;
for(u = 0; u < udata->common.mesg->u.chunk.ndims; u++) {
- HDassert(0 == udata->common.key.offset[u] % udata->common.mesg->u.chunk.dim[u]);
- md_key->offset[u] = udata->common.key.offset[u];
- }
+ HDassert(0 == udata->common.offset[u] % udata->common.mesg->u.chunk.dim[u]);
+ md_key->offset[u] = udata->common.offset[u];
+ } /* end for */
/*
* Allocate storage for the new chunk
*/
- H5_CHECK_OVERFLOW(udata->common.key.nbytes ,size_t, hsize_t);
- if(HADDR_UNDEF == (*new_node_p = H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, (hsize_t)udata->common.key.nbytes)))
+ H5_CHECK_OVERFLOW(udata->nbytes, size_t, hsize_t);
+ if(HADDR_UNDEF == (*new_node_p = H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, (hsize_t)udata->nbytes)))
HGOTO_ERROR(H5E_STORAGE, H5E_NOSPACE, H5B_INS_ERROR, "file allocation failed")
udata->addr = *new_node_p;
ret_value = H5B_INS_RIGHT;
@@ -911,7 +907,7 @@ H5D_istore_iter_chunkmap (H5F_t UNUSED *f, hid_t UNUSED dxpl_id, const void *_lt
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5D_istore_iter_allocated() */
+} /* H5D_istore_iter_chunkmap() */
/*-------------------------------------------------------------------------
@@ -1095,17 +1091,19 @@ H5D_istore_iter_copy(H5F_t *f_src, hid_t dxpl_id, const void *_lt_key,
HDmemcpy(buf, bkg, buf_size);
} /* end if */
- /* Copy source chunk callback information for insertion */
- HDmemset(&udata_dst, 0, sizeof(udata_dst));
- HDmemcpy(&(udata_dst.common.key), lt_key, sizeof(H5D_istore_key_t));
+ /* Set up destination chunk callback information for insertion */
udata_dst.common.mesg = udata->common.mesg; /* Share this pointer for a short while */
+ udata_dst.common.offset = lt_key->offset;
+ udata_dst.nbytes = lt_key->nbytes;
+ udata_dst.filter_mask = lt_key->filter_mask;
+ udata_dst.addr = HADDR_UNDEF;
/* Need to compress variable-length & reference data elements before writing to file */
if(is_compressed && (is_vlen || fix_ref) ) {
- if(H5Z_pipeline(pline, 0, &(udata_dst.common.key.filter_mask), edc_read,
+ if(H5Z_pipeline(pline, 0, &(udata_dst.filter_mask), edc_read,
cb_struct, &nbytes, &buf_size, &buf) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, H5_ITER_ERROR, "output pipeline failed")
- udata_dst.common.key.nbytes = nbytes;
+ udata_dst.nbytes = nbytes;
udata->buf = buf;
udata->buf_size = buf_size;
} /* end if */
@@ -1125,6 +1123,120 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5D_istore_cinfo_cache_reset
+ *
+ * Purpose: Reset the cached chunk info
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * November 27, 2007
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D_istore_cinfo_cache_reset(H5D_chunk_cached_t *last)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_istore_cinfo_cache_reset)
+
+ /* Sanity check */
+ HDassert(last);
+
+ /* Indicate that the cached info is not valid */
+ last->valid = FALSE;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5D_istore_cinfo_cache_reset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D_istore_cinfo_cache_update
+ *
+ * Purpose: Update the cached chunk info
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * November 27, 2007
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D_istore_cinfo_cache_update(H5D_chunk_cached_t *last, const H5D_istore_ud1_t *udata)
+{
+ unsigned u; /* Local index variable */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_istore_cinfo_cache_update)
+
+ /* Sanity check */
+ HDassert(last);
+ HDassert(udata);
+ HDassert(udata->common.mesg);
+ HDassert(udata->common.offset);
+
+ /* Stored the information to cache */
+ for(u = 0; u < udata->common.mesg->u.chunk.ndims; u++)
+ last->offset[u] = udata->common.offset[u];
+ last->nbytes = udata->nbytes;
+ last->filter_mask = udata->filter_mask;
+ last->addr = udata->addr;
+
+ /* Indicate that the cached info is valid */
+ last->valid = TRUE;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5D_istore_cinfo_cache_update() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D_istore_cinfo_cache_found
+ *
+ * Purpose: Look for chunk info in cache
+ *
+ * Return: TRUE/FALSE/FAIL
+ *
+ * Programmer: Quincey Koziol
+ * November 27, 2007
+ *
+ *-------------------------------------------------------------------------
+ */
+static hbool_t
+H5D_istore_cinfo_cache_found(const H5D_chunk_cached_t *last, H5D_istore_ud1_t *udata)
+{
+ hbool_t ret_value = FALSE; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_istore_cinfo_cache_found)
+
+ /* Sanity check */
+ HDassert(last);
+ HDassert(udata);
+ HDassert(udata->common.mesg);
+ HDassert(udata->common.offset);
+
+ /* Check if the cached information is what is desired */
+ if(last->valid) {
+ unsigned u; /* Local index variable */
+
+ /* Check that the offset is the same */
+ for(u = 0; u < udata->common.mesg->u.chunk.ndims; u++)
+ if(last->offset[u] != udata->common.offset[u])
+ HGOTO_DONE(FALSE)
+
+ /* Retrieve the information from the cache */
+ udata->nbytes = last->nbytes;
+ udata->filter_mask = last->filter_mask;
+ udata->addr = last->addr;
+
+ /* Indicate that the data was found */
+ HGOTO_DONE(TRUE)
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D_istore_cinfo_cache_found() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5D_istore_init
*
* Purpose: Initialize the raw data chunk cache for a dataset. This is
@@ -1138,23 +1250,26 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5D_istore_init (const H5F_t *f, const H5D_t *dset)
+H5D_istore_init(const H5F_t *f, const H5D_t *dset)
{
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk);
- herr_t ret_value=SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5D_istore_init, FAIL)
- if (H5F_RDCC_NBYTES(f)>0 && H5F_RDCC_NELMTS(f)>0) {
- rdcc->nbytes=H5F_RDCC_NBYTES(f);
+ if(H5F_RDCC_NBYTES(f) > 0 && H5F_RDCC_NELMTS(f) > 0) {
+ rdcc->nbytes = H5F_RDCC_NBYTES(f);
rdcc->nslots = H5F_RDCC_NELMTS(f);
rdcc->slot = H5FL_SEQ_CALLOC (H5D_rdcc_ent_ptr_t,rdcc->nslots);
- if (NULL==rdcc->slot)
+ if(NULL==rdcc->slot)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
+
+ /* Reset any cached chunk info for this dataset */
+ H5D_istore_cinfo_cache_reset(&(rdcc->last));
} /* end if */
/* Allocate the shared structure */
- if(H5D_istore_shared_create(f, &dset->shared->layout)<0)
+ if(H5D_istore_shared_create(f, &dset->shared->layout) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -1179,11 +1294,10 @@ done:
static herr_t
H5D_istore_flush_entry(const H5D_io_info_t *io_info, H5D_rdcc_ent_t *ent, hbool_t reset)
{
- herr_t ret_value=SUCCEED; /*return value */
- unsigned u; /*counters */
- void *buf=NULL; /*temporary buffer */
+ void *buf = NULL; /*temporary buffer */
size_t alloc; /*bytes allocated for BUF */
hbool_t point_of_no_return = FALSE;
+ herr_t ret_value = SUCCEED; /*return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_istore_flush_entry)
@@ -1193,30 +1307,33 @@ H5D_istore_flush_entry(const H5D_io_info_t *io_info, H5D_rdcc_ent_t *ent, hbool_
assert(!ent->locked);
buf = ent->chunk;
- if (ent->dirty) {
+ if(ent->dirty) {
H5D_istore_ud1_t udata; /*pass through B-tree */
+ /* Initial buffer size */
+ alloc = ent->alloc_size;
+
+ /* Set up user data for B-tree callbacks */
udata.common.mesg = &io_info->dset->shared->layout;
- udata.common.key.filter_mask = 0;
+ udata.common.offset = ent->offset;
+ udata.filter_mask = 0;
+ udata.nbytes = ent->chunk_size;
udata.addr = HADDR_UNDEF;
- udata.common.key.nbytes = ent->chunk_size;
- for (u=0; u<io_info->dset->shared->layout.u.chunk.ndims; u++)
- udata.common.key.offset[u] = ent->offset[u];
- alloc = ent->alloc_size;
/* Should the chunk be filtered before writing it to disk? */
- if (io_info->dset->shared->dcpl_cache.pline.nused) {
- if (!reset) {
+ if(io_info->dset->shared->dcpl_cache.pline.nused) {
+ if(!reset) {
/*
* Copy the chunk to a new buffer before running it through
* the pipeline because we'll want to save the original buffer
* for later.
*/
alloc = ent->chunk_size;
- if (NULL==(buf = H5MM_malloc(alloc)))
+ if(NULL == (buf = H5MM_malloc(alloc)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for pipeline")
HDmemcpy(buf, ent->chunk, ent->chunk_size);
- } else {
+ } /* end if */
+ else {
/*
* If we are reseting and something goes wrong after this
* point then it's too late to recover because we may have
@@ -1226,11 +1343,11 @@ H5D_istore_flush_entry(const H5D_io_info_t *io_info, H5D_rdcc_ent_t *ent, hbool_
*/
point_of_no_return = TRUE;
ent->chunk = NULL;
- }
- if (H5Z_pipeline(&(io_info->dset->shared->dcpl_cache.pline), 0, &(udata.common.key.filter_mask), io_info->dxpl_cache->err_detect,
- io_info->dxpl_cache->filter_cb, &(udata.common.key.nbytes), &alloc, &buf)<0)
+ } /* end else */
+ if(H5Z_pipeline(&(io_info->dset->shared->dcpl_cache.pline), 0, &(udata.filter_mask), io_info->dxpl_cache->err_detect,
+ io_info->dxpl_cache->filter_cb, &(udata.nbytes), &alloc, &buf) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, FAIL, "output pipeline failed")
- }
+ } /* end if */
/*
* Create the chunk it if it doesn't exist, or reallocate the chunk if
@@ -1238,9 +1355,12 @@ H5D_istore_flush_entry(const H5D_io_info_t *io_info, H5D_rdcc_ent_t *ent, hbool_
*/
if(H5B_insert(io_info->dset->oloc.file, io_info->dxpl_id, H5B_ISTORE, io_info->dset->shared->layout.u.chunk.addr, &udata)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk")
- if(H5F_block_write(io_info->dset->oloc.file, H5FD_MEM_DRAW, udata.addr, udata.common.key.nbytes, io_info->dxpl_id, buf) < 0)
+ if(H5F_block_write(io_info->dset->oloc.file, H5FD_MEM_DRAW, udata.addr, udata.nbytes, io_info->dxpl_id, buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
+ /* Cache the chunk's info, in case it's accessed again shortly */
+ H5D_istore_cinfo_cache_update(&io_info->dset->shared->cache.chunk.last, &udata);
+
/* Mark cache entry as clean */
ent->dirty = FALSE;
#ifdef H5D_ISTORE_DEBUG
@@ -1249,17 +1369,17 @@ H5D_istore_flush_entry(const H5D_io_info_t *io_info, H5D_rdcc_ent_t *ent, hbool_
} /* end if */
/* Reset, but do not free or removed from list */
- if (reset) {
+ if(reset) {
point_of_no_return = FALSE;
- if(buf==ent->chunk)
+ if(buf == ent->chunk)
buf = NULL;
- if(ent->chunk!=NULL)
+ if(ent->chunk != NULL)
ent->chunk = H5D_istore_chunk_xfree(ent->chunk,&(io_info->dset->shared->dcpl_cache.pline));
} /* end if */
done:
/* Free the temp buffer only if it's different than the entry chunk */
- if (buf!=ent->chunk)
+ if(buf != ent->chunk)
H5MM_xfree(buf);
/*
@@ -1268,9 +1388,9 @@ done:
* output pipeline failed. Do not free the entry or remove it from the
* list.
*/
- if (ret_value<0 && point_of_no_return) {
+ if(ret_value < 0 && point_of_no_return) {
if(ent->chunk)
- ent->chunk = H5D_istore_chunk_xfree(ent->chunk,&(io_info->dset->shared->dcpl_cache.pline));
+ ent->chunk = H5D_istore_chunk_xfree(ent->chunk, &(io_info->dset->shared->dcpl_cache.pline));
} /* end if */
FUNC_LEAVE_NOAPI(ret_value)
@@ -1779,15 +1899,15 @@ H5D_istore_lock(const H5D_io_info_t *io_info, H5D_istore_ud1_t *udata,
*/
/* Chunk size on disk isn't [likely] the same size as the final chunk
* size in memory, so allocate memory big enough. */
- chunk_alloc = udata->common.key.nbytes;
+ chunk_alloc = udata->nbytes;
if(NULL == (chunk = H5D_istore_chunk_alloc (chunk_alloc, pline)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
- if(H5F_block_read(dset->oloc.file, H5FD_MEM_DRAW, chunk_addr, udata->common.key.nbytes, io_info->dxpl_id, chunk) < 0)
+ if(H5F_block_read(dset->oloc.file, H5FD_MEM_DRAW, chunk_addr, udata->nbytes, io_info->dxpl_id, chunk) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, NULL, "unable to read raw data chunk")
if(pline->nused)
- if(H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &(udata->common.key.filter_mask), io_info->dxpl_cache->err_detect,
- io_info->dxpl_cache->filter_cb, &(udata->common.key.nbytes), &chunk_alloc, &chunk) < 0)
+ if(H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &(udata->filter_mask), io_info->dxpl_cache->err_detect,
+ io_info->dxpl_cache->filter_cb, &(udata->nbytes), &chunk_alloc, &chunk) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, NULL, "data pipeline read failed")
#ifdef H5D_ISTORE_DEBUG
rdcc->nmisses++;
@@ -2533,7 +2653,6 @@ H5D_istore_get_addr(const H5D_io_info_t *io_info, H5D_istore_ud1_t *_udata)
{
H5D_istore_ud1_t tmp_udata; /* Information about a chunk */
H5D_istore_ud1_t *udata; /* Pointer to information about a chunk */
- unsigned u;
haddr_t ret_value; /* Return value */
FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_istore_get_addr)
@@ -2547,28 +2666,39 @@ H5D_istore_get_addr(const H5D_io_info_t *io_info, H5D_istore_ud1_t *_udata)
udata = (_udata != NULL ? _udata : &tmp_udata);
/* Initialize the information about the chunk we are looking for */
- for(u = 0; u < io_info->dset->shared->layout.u.chunk.ndims; u++)
- udata->common.key.offset[u] = io_info->store->chunk.offset[u];
udata->common.mesg = &(io_info->dset->shared->layout);
+ udata->common.offset = io_info->store->chunk.offset;
+ udata->nbytes = 0;
+ udata->filter_mask = 0;
udata->addr = HADDR_UNDEF;
- /* Go get the chunk information */
- if (H5B_find (io_info->dset->oloc.file, io_info->dxpl_id, H5B_ISTORE, io_info->dset->shared->layout.u.chunk.addr, udata)<0) {
- /* Note: don't push error on stack, leave that to next higher level,
- * since many times the B-tree is searched in order to determine
- * if a chunk exists in the B-tree or not. -QAK
- */
+ /* Check for cached information */
+ if(!H5D_istore_cinfo_cache_found(&io_info->dset->shared->cache.chunk.last, udata)) {
+ /* Go get the chunk information */
+ if(H5B_find(io_info->dset->oloc.file, io_info->dxpl_id, H5B_ISTORE, io_info->dset->shared->layout.u.chunk.addr, udata) < 0) {
+ /* Note: don't push error on stack, leave that to next higher level,
+ * since many times the B-tree is searched in order to determine
+ * if a chunk exists in the B-tree or not. -QAK
+ */
#ifdef OLD_WAY
- H5E_clear_stack(NULL);
+ H5E_clear_stack(NULL);
- HGOTO_ERROR(H5E_BTREE,H5E_NOTFOUND,HADDR_UNDEF,"Can't locate chunk info")
+ HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, HADDR_UNDEF, "Can't locate chunk info")
#else /* OLD_WAY */
- HGOTO_DONE(HADDR_UNDEF)
+ /* Cache the fact that the chunk is not in the B-tree */
+ H5D_istore_cinfo_cache_update(&io_info->dset->shared->cache.chunk.last, udata);
+
+ HGOTO_DONE(HADDR_UNDEF)
#endif /* OLD_WAY */
- } /* end if */
+ } /* end if */
+
+ /* Cache the information retrieved */
+ HDassert(H5F_addr_defined(udata->addr));
+ H5D_istore_cinfo_cache_update(&io_info->dset->shared->cache.chunk.last, udata);
+ } /* end else */
/* Success! Set the return value */
- ret_value=udata->addr;
+ ret_value = udata->addr;
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -2843,11 +2973,10 @@ H5D_istore_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite)
/* Initialize the chunk information */
udata.common.mesg = layout;
- udata.common.key.filter_mask = filter_mask;
+ udata.common.offset = chunk_offset;
+ udata.nbytes = chunk_size;
+ udata.filter_mask = filter_mask;
udata.addr = HADDR_UNDEF;
- udata.common.key.nbytes = chunk_size;
- for(u = 0; u < layout->u.chunk.ndims; u++)
- udata.common.key.offset[u] = chunk_offset[u];
/* Allocate the chunk with all processes */
if(H5B_insert(dset->oloc.file, dxpl_id, H5B_ISTORE, layout->u.chunk.addr, &udata) < 0)
@@ -2864,7 +2993,7 @@ H5D_istore_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite)
/* Write the chunks out from only one process */
/* !! Use the internal "independent" DXPL!! -QAK */
if(H5_PAR_META_WRITE == mpi_rank)
- if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, udata.common.key.nbytes, data_dxpl_id, fb_info.fill_buf) < 0)
+ if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, udata.nbytes, data_dxpl_id, fb_info.fill_buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
/* Indicate that blocks are being written */
@@ -2872,7 +3001,7 @@ H5D_istore_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite)
} /* end if */
else {
#endif /* H5_HAVE_PARALLEL */
- if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, udata.common.key.nbytes, data_dxpl_id, fb_info.fill_buf) < 0)
+ if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, udata.nbytes, data_dxpl_id, fb_info.fill_buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
#ifdef H5_HAVE_PARALLEL
} /* end else */
@@ -3011,9 +3140,8 @@ H5D_istore_prune_remove(void *item, void UNUSED *key, void *op_data)
HDassert(rm_info);
/* Initialize the user data for the B-tree callback */
- HDmemset(&bt_udata, 0, sizeof bt_udata);
- bt_udata.key = sl_node->key;
bt_udata.mesg = rm_info->mesg;
+ bt_udata.offset = sl_node->key.offset;
/* Remove */
if(H5B_remove(rm_info->f, rm_info->dxpl_id, H5B_ISTORE, rm_info->mesg->u.chunk.addr, &bt_udata) < 0)
@@ -3221,6 +3349,9 @@ H5D_istore_prune_by_extent(const H5D_io_info_t *io_info, const hsize_t *old_dims
/* Destroy the skip list, deleting the chunks in the callback */
H5SL_destroy(udata.outside, H5D_istore_prune_remove, &rm_info);
+ /* Reset any cached chunk info for this dataset */
+ H5D_istore_cinfo_cache_reset(&dset->shared->cache.chunk.last);
+
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_istore_prune_by_extent() */
diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h
index 4fbec67..3967cf9 100644
--- a/src/H5Dpkg.h
+++ b/src/H5Dpkg.h
@@ -119,6 +119,15 @@ typedef struct H5D_io_info_t {
#endif /* H5S_DEBUG */
} H5D_io_info_t;
+/* Cached information about a particular chunk */
+typedef struct {
+ hbool_t valid; /*whether cache info is valid*/
+ hsize_t offset[H5O_LAYOUT_NDIMS]; /*logical offset to start*/
+ size_t nbytes; /*size of stored data */
+ unsigned filter_mask; /*excluded filters */
+ haddr_t addr; /*file address of chunk */
+} H5D_chunk_cached_t;
+
/* The raw data chunk cache */
typedef struct H5D_rdcc_t {
#ifdef H5D_ISTORE_DEBUG
@@ -132,6 +141,7 @@ typedef struct H5D_rdcc_t {
struct H5D_rdcc_ent_t *head; /* Head of doubly linked list */
struct H5D_rdcc_ent_t *tail; /* Tail of doubly linked list */
int nused; /* Number of chunk slots in use */
+ H5D_chunk_cached_t last; /* Cached copy of last chunk information */
struct H5D_rdcc_ent_t **slot; /* Chunk slots, each points to a chunk*/
} H5D_rdcc_t;
@@ -289,14 +299,18 @@ typedef struct H5D_istore_key_t {
*/
typedef struct H5D_istore_bt_ud_common_t {
/* downward */
- H5D_istore_key_t key; /*key values */
- const H5O_layout_t *mesg; /*layout message */
+ const H5O_layout_t *mesg; /*layout message */
+ const hsize_t *offset; /*logical offset of chunk*/
} H5D_istore_bt_ud_common_t;
/* B-tree callback info for various operations */
typedef struct H5D_istore_ud1_t {
H5D_istore_bt_ud_common_t common; /* Common info for B-tree user data (must be first) */
- haddr_t addr; /*file address of chunk */
+
+ /* Upward */
+ size_t nbytes; /*size of stored data */
+ unsigned filter_mask; /*excluded filters */
+ haddr_t addr; /*file address of chunk */
} H5D_istore_ud1_t;
/* Internal data structure for computing variable-length dataset's total size */
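
To round out the H5Dpkg.h change, a simplified sketch (plain C with illustrative
stand-in types, not the real HDF5 declarations) of how the reorganized user data is
meant to be used: the caller supplies the "downward" fields, i.e. the layout message
and a borrowed pointer to the chunk offset that is no longer copied into a key, and
the lookup fills in the "upward" fields, following the H5D_istore_get_addr() pattern
above:

    #include <stddef.h>

    typedef struct layout_t layout_t;          /* opaque stand-in for H5O_layout_t */

    typedef struct {
        /* downward (caller-supplied) */
        const layout_t *mesg;                  /* layout message */
        const unsigned long *offset;           /* logical offset of chunk (borrowed pointer) */
    } bt_ud_common_t;

    typedef struct {
        bt_ud_common_t common;                 /* common info (must be first) */
        /* upward (filled in by the lookup) */
        size_t        nbytes;                  /* size of stored data */
        unsigned      filter_mask;             /* excluded filters */
        unsigned long addr;                    /* file address of chunk */
    } bt_ud1_t;

    /* Caller pattern: point at the chunk offset, zero the upward fields,
     * then let the chunk-info cache or the B-tree search fill them in. */
    unsigned long get_chunk_addr(const layout_t *layout, const unsigned long *chunk_offset)
    {
        bt_ud1_t udata;

        udata.common.mesg   = layout;          /* downward: borrowed, not copied */
        udata.common.offset = chunk_offset;
        udata.nbytes        = 0;               /* upward: set by the lookup */
        udata.filter_mask   = 0;
        udata.addr          = (unsigned long)-1;

        /* ... check the cached last-chunk info first, otherwise search the
         *     B-tree, then cache whatever was found (see H5D_istore_get_addr) ... */

        return udata.addr;
    }

Holding the offset by pointer removes the per-call copy of the chunk offset into a
full H5D_istore_key_t, which is the point of splitting the user data this way.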