author     Quincey Koziol <koziol@hdfgroup.org>   2004-05-31 19:59:59 (GMT)
committer  Quincey Koziol <koziol@hdfgroup.org>   2004-05-31 19:59:59 (GMT)
commit     1ec351813bd999925e4d4ba2a93f28b3c84c405f (patch)
tree       596b980da63ba784a82c636b9a2b59e8754cf0f0
parent     f0efc265de355b89203bfde05d358999cb6ef47f (diff)
[svn-r8600] Purpose:
    Code optimization

Description:
    Don't recompute the internal index value for looking up the chunk in the
    hash table, just use the value already computed from iterating through
    the chunks.

Platforms tested:
    Solaris 2.7 (arabica)
    FreeBSD 4.9 (sleipnir) w/parallel
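The change itself is small: H5F_istore_lock() used to rebuild its hash key from the chunk's element offsets on every cache lookup, and now it reuses the linear chunk index that the chunked I/O path already computed while iterating the selection's chunks (carried in the reworked H5D_storage_t further down). Below is a minimal, self-contained sketch of the before/after; the toy types, the modulo hash, and the hard-coded chunk count are illustrative stand-ins for H5F_HASH() and the real chunk cache, not HDF5 code.

#include <stdio.h>

#define NSLOTS 521U                      /* toy cache size; the real value comes from the rdcc */

/* Toy stand-in for the chunk member this patch adds to H5D_storage_t */
typedef struct {
    unsigned long long index;            /* chunk's linear index, computed once while mapping chunks */
    const unsigned long long *offset;    /* chunk's coordinates in elements */
} toy_store_t;

/* Before (sketch): rebuild the hash key from the offsets on every lookup,
 * as in the loop removed from H5F_istore_lock() */
static unsigned
old_hash(const unsigned long long offset[], const unsigned long long dim[],
         unsigned ndims, unsigned long long btree_addr)
{
    unsigned long long temp_idx = 0;

    for (unsigned u = 0; u < ndims; u++) {
        temp_idx += offset[u];
        temp_idx *= dim[u];
    }
    temp_idx += btree_addr;
    return (unsigned)(temp_idx % NSLOTS);    /* stand-in for H5F_HASH() */
}

/* After (sketch): reuse the index already computed by the chunked I/O path */
static unsigned
new_hash(const toy_store_t *store, unsigned long long btree_addr)
{
    return (unsigned)((store->index + btree_addr) % NSLOTS);
}

int main(void)
{
    const unsigned long long dim[2]    = {10, 20};   /* chunk size per dimension */
    const unsigned long long offset[2] = {30, 40};   /* chunk corner, in elements */
    /* Linear index assuming 5 chunks along the second dimension: (30/10)*5 + (40/20) */
    toy_store_t store = { (30 / 10) * 5 + (40 / 20), offset };

    /* The two keys are generally different values; the point is that the new
     * one needs no per-dimension loop at lookup time. */
    printf("old slot = %u\n", old_hash(offset, dim, 2, 0x1000));
    printf("new slot = %u\n", new_hash(&store, 0x1000));
    return 0;
}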
-rw-r--r--   src/H5D.c         |  34
-rw-r--r--   src/H5Dio.c       |  62
-rw-r--r--   src/H5Distore.c   | 234
-rw-r--r--   src/H5Dprivate.h  |   5
-rw-r--r--   src/H5Dseq.c      |   4
-rw-r--r--   src/H5F.c         |   6
-rw-r--r--   src/H5Fistore.c   | 234
-rw-r--r--   src/H5Fpkg.h      |   5
-rw-r--r--   src/H5Fprivate.h  |   4
-rw-r--r--   src/H5Fseq.c      |   4
-rw-r--r--   src/H5Oprivate.h  |   8
-rw-r--r--   src/H5Pdcpl.c     |   4
-rw-r--r--   src/H5Z.c         |   1
-rw-r--r--   test/dsets.c      | 121
-rw-r--r--   test/set_extent.c | 177
-rw-r--r--   test/tmisc.c      |   4
16 files changed, 587 insertions, 320 deletions
diff --git a/src/H5D.c b/src/H5D.c
index 7b74f8c..f5793b6 100644
--- a/src/H5D.c
+++ b/src/H5D.c
@@ -199,7 +199,7 @@ H5D_init_interface(void)
H5P_genplist_t *def_dcpl; /* Default Dataset Creation Property list */
size_t nprops; /* Number of properties */
herr_t ret_value = SUCCEED; /* Return value */
-
+
FUNC_ENTER_NOAPI_NOINIT(H5D_init_interface)
/* Initialize the atom group for the dataset IDs */
@@ -2048,7 +2048,7 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space
assert (H5I_GENPROP_LST==H5I_get_type(dxpl_id));
/* Check if the filters in the DCPL can be applied to this dataset */
- if (H5Z_can_apply(dcpl_id,type_id)<0)
+ if(H5Z_can_apply(dcpl_id,type_id)<0)
HGOTO_ERROR(H5E_ARGS, H5E_CANTINIT, NULL, "I/O filters can't operate on this dataset")
/* Get the dataset's datatype */
@@ -2056,15 +2056,15 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a datatype")
/* Check if the datatype is "sensible" for use in a dataset */
- if (H5T_is_sensible(type)!=TRUE)
+ if(H5T_is_sensible(type)!=TRUE)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "datatype is not sensible")
/* Check if the datatype is/contains a VL-type */
- if (H5T_detect_class(type, H5T_VLEN))
+ if(H5T_detect_class(type, H5T_VLEN))
has_vl_type=TRUE;
/* Initialize the dataset object */
- if (NULL == (new_dset = H5D_new(dcpl_id,TRUE,has_vl_type)))
+ if(NULL == (new_dset = H5D_new(dcpl_id,TRUE,has_vl_type)))
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
/* Make the "set local" filter callbacks for this dataset */
@@ -2072,7 +2072,7 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to set local filter parameters")
/* What file is the dataset being added to? */
- if (NULL==(file=H5G_insertion_file(loc, name, dxpl_id)))
+ if(NULL==(file=H5G_insertion_file(loc, name, dxpl_id)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to locate insertion point")
/* Copy datatype for dataset */
@@ -2080,7 +2080,7 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCOPY, NULL, "can't copy datatype")
/* Mark any datatypes as being on disk now */
- if (H5T_set_loc(new_dset->type, file, H5T_LOC_DISK)<0)
+ if(H5T_set_loc(new_dset->type, file, H5T_LOC_DISK)<0)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "invalid datatype location")
/* Copy dataspace for dataset */
@@ -2216,7 +2216,7 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space
* Chunked storage allows any type of data space extension, so we
* don't even bother checking.
*/
- if(chunk_ndims != ndims)
+ if(chunk_ndims != (unsigned)ndims)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, NULL, "dimensionality of chunks doesn't match the data space")
if (new_dset->efl.nused>0)
HGOTO_ERROR (H5E_DATASET, H5E_BADVALUE, NULL, "external storage not supported with chunked layout")
@@ -2400,7 +2400,7 @@ H5D_open(H5G_entry_t *ent, hid_t dxpl_id)
/* Clear any errors from H5FO_opened() */
H5E_clear(NULL);
-
+
/* Open the dataset object */
if ((dataset=H5D_open_oid(ent, dxpl_id)) ==NULL)
HGOTO_ERROR(H5E_DATASET, H5E_NOTFOUND, FAIL, "not found")
@@ -2802,6 +2802,11 @@ H5D_extend (H5D_t *dataset, const hsize_t *size, hid_t dxpl_id)
if (H5S_modify (&(dataset->ent), space, TRUE, dxpl_id)<0)
HGOTO_ERROR (H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update file with new dataspace")
+ /* Update the index values for the cached chunks for this dataset */
+ if(H5D_CHUNKED == dataset->layout.type)
+ if(H5F_istore_update_cache(dataset->ent.file, dxpl_id, &dataset->layout, space) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update cached chunk indices")
+
/* Allocate space for the new parts of the dataset, if appropriate */
if(dataset->alloc_time==H5D_ALLOC_TIME_EARLY)
if (H5D_alloc_storage(dataset->ent.file, dxpl_id, dataset, H5D_ALLOC_EXTEND, TRUE, FALSE)<0)
@@ -3208,7 +3213,6 @@ done:
static hsize_t
H5D_get_storage_size(const H5D_t *dset, hid_t dxpl_id)
{
- unsigned u; /* Index variable */
hsize_t ret_value;
FUNC_ENTER_NOAPI(H5D_get_storage_size, 0)
@@ -3218,8 +3222,7 @@ H5D_get_storage_size(const H5D_t *dset, hid_t dxpl_id)
if(dset->layout.u.chunk.addr == HADDR_UNDEF)
ret_value=0;
else
- ret_value = H5F_istore_allocated(dset->ent.file, dxpl_id, dset->layout.u.chunk.ndims,
- dset->layout.u.chunk.addr);
+ ret_value = H5F_istore_allocated(dset->ent.file, dxpl_id, &dset->layout);
break;
case H5D_CONTIGUOUS:
@@ -3419,7 +3422,7 @@ H5Diterate(void *buf, hid_t type_id, hid_t space_id, H5D_operator_t op,
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid datatype")
if (NULL == (space = H5I_object_verify(space_id, H5I_DATASPACE)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid dataspace")
-
+
ret_value=H5S_select_iterate(buf,type_id,space,op,operator_data);
done:
@@ -3803,6 +3806,11 @@ H5D_set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id)
if(H5S_modify(&(dset->ent), space, TRUE, dxpl_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update file with new dataspace")
+ /* Update the index values for the cached chunks for this dataset */
+ if(H5D_CHUNKED == dset->layout.type)
+ if(H5F_istore_update_cache(dset->ent.file, dxpl_id, &dset->layout, space) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update cached chunk indices")
+
/* Allocate space for the new parts of the dataset, if appropriate */
if(expand && dset->alloc_time==H5D_ALLOC_TIME_EARLY)
if(H5D_alloc_storage(dset->ent.file, dxpl_id, dset, H5D_ALLOC_EXTEND, TRUE, FALSE) < 0)
diff --git a/src/H5Dio.c b/src/H5Dio.c
index a465413..2d6d016 100644
--- a/src/H5Dio.c
+++ b/src/H5Dio.c
@@ -64,7 +64,7 @@ typedef struct fm_map {
unsigned m_ndims; /* Number of dimensions for memory dataspace */
hsize_t chunks[H5O_LAYOUT_NDIMS]; /* Number of chunks in each dimension */
hsize_t chunk_dim[H5O_LAYOUT_NDIMS]; /* Size of chunk in each dimension */
- hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of chunks in each dimension */
+ hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of chunks in each dimension */
H5O_layout_t *layout; /* Dataset layout information*/
H5S_sel_type msel_type; /* Selection type in memory */
} fm_map;
@@ -130,7 +130,6 @@ H5FL_BLK_DEFINE(type_conv);
/* Declare a free list to manage the H5D_chunk_info_t struct */
H5FL_DEFINE_STATIC(H5D_chunk_info_t);
-
/*--------------------------------------------------------------------------
NAME
@@ -175,7 +174,7 @@ H5Dfill(const void *fill, hid_t fill_type_id, void *buf, hid_t buf_type_id, hid_
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, 0, "not a datatype")
if (NULL == (buf_type=H5I_object_verify(buf_type_id, H5I_DATATYPE)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, 0, "not a datatype")
-
+
/* Fill the selection in the memory buffer */
if(H5D_fill(fill,fill_type,buf,buf_type,space, H5AC_dxpl_id)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTENCODE, FAIL, "filling selection failed")
@@ -308,7 +307,7 @@ H5D_get_dxpl_cache_real(hid_t dxpl_id, H5D_dxpl_cache_t *cache)
/* Get the dataset transfer property list */
if (NULL == (dx_plist = H5I_object(dxpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset transfer property list")
-
+
/* Get maximum temporary buffer size */
if(H5P_get(dx_plist, H5D_XFER_MAX_TEMP_BUF_NAME, &cache->max_temp_buf)<0)
HGOTO_ERROR (H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve maximum temporary buffer size")
@@ -478,7 +477,7 @@ H5Dread(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id,
if(H5S_SELECT_VALID(file_space)!=TRUE)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "selection+offset not within extent")
}
-
+
/* Get the default dataset transfer property list if the user didn't provide one */
if (H5P_DEFAULT == plist_id)
plist_id= H5P_DATASET_XFER_DEFAULT;
@@ -1071,14 +1070,13 @@ H5D_contig_read(hsize_t nelmts, H5D_t *dataset, const H5T_t *mem_type,
hsize_t smine_start; /*strip mine start loc */
hsize_t n, smine_nelmts; /*elements per strip */
herr_t ret_value = SUCCEED; /*return value */
-
+
FUNC_ENTER_NOAPI_NOINIT(H5D_contig_read)
-
+
/*
* If there is no type conversion then read directly into the
* application's buffer. This saves at least one mem-to-mem copy.
*/
-
if ( H5Z_xform_noop(dxpl_cache->data_xform_prop) && H5T_path_noop(tpath)) {
#ifdef H5S_DEBUG
H5_timer_begin(&timer);
@@ -1219,7 +1217,6 @@ H5D_contig_read(hsize_t nelmts, H5D_t *dataset, const H5T_t *mem_type,
*/
if (H5T_convert(tpath, src_id, dst_id, smine_nelmts, 0, 0, tconv_buf, bkg_buf, dxpl_id)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "data type conversion failed")
-
/* Do the data transform after the conversion (since we're using type mem_type) */
if(!H5Z_xform_noop(dxpl_cache->data_xform_prop))
@@ -1229,7 +1226,6 @@ H5D_contig_read(hsize_t nelmts, H5D_t *dataset, const H5T_t *mem_type,
}
/* end of LA additions */
-
/*
* Scatter the data into memory.
*/
@@ -1238,8 +1234,6 @@ H5D_contig_read(hsize_t nelmts, H5D_t *dataset, const H5T_t *mem_type,
#endif
status = H5S_select_mscat(tconv_buf, mem_space,
&mem_iter, smine_nelmts, dxpl_cache, buf/*out*/);
-
-
#ifdef H5S_DEBUG
H5_timer_end(&(sconv->stats[1].scat_timer), &timer);
sconv->stats[1].scat_nbytes += smine_nelmts * dst_type_size;
@@ -1321,12 +1315,11 @@ H5D_contig_write(hsize_t nelmts, H5D_t *dataset, const H5T_t *mem_type, const H5
herr_t ret_value = SUCCEED; /*return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_contig_write)
-
+
/*
* If there is no type conversion then write directly from the
* application's buffer. This saves at least one mem-to-mem copy.
*/
-
if ( H5Z_xform_noop(dxpl_cache->data_xform_prop) && H5T_path_noop(tpath)) {
#ifdef H5S_DEBUG
H5_timer_begin(&timer);
@@ -1335,7 +1328,6 @@ H5D_contig_write(hsize_t nelmts, H5D_t *dataset, const H5T_t *mem_type, const H5
status = (sconv->write)(dataset->ent.file, &(dataset->layout),
&dataset->dcpl_cache, (H5D_storage_t *)&(dataset->efl), (size_t)nelmts, H5T_get_size(dataset->type),
file_space, mem_space, dxpl_cache, dxpl_id, buf);
-
#ifdef H5S_DEBUG
H5_timer_end(&(sconv->stats[0].write_timer), &timer);
sconv->stats[0].write_nbytes += nelmts * H5T_get_size(mem_type);
@@ -1456,8 +1448,6 @@ H5D_contig_write(hsize_t nelmts, H5D_t *dataset, const H5T_t *mem_type, const H5
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "file gather failed")
} /* end if */
-
-
/*
* Perform data type conversion.
*/
@@ -1472,19 +1462,15 @@ H5D_contig_write(hsize_t nelmts, H5D_t *dataset, const H5T_t *mem_type, const H5
}
/* End of LA modifications */
-
/*
* Scatter the data out to the file.
*/
#ifdef H5S_DEBUG
H5_timer_begin(&timer);
#endif
-
status = H5S_select_fscat(dataset->ent.file, &(dataset->layout),
&dataset->dcpl_cache, (H5D_storage_t *)&(dataset->efl), file_space, &file_iter,
smine_nelmts, dxpl_cache, dxpl_id, tconv_buf);
-
-
#ifdef H5S_DEBUG
H5_timer_end(&(sconv->stats[0].scat_timer), &timer);
sconv->stats[0].scat_nbytes += smine_nelmts * dst_type_size;
@@ -1534,7 +1520,6 @@ done:
* Leon Arber: 4/20/04
* Added support for data transforms.
*
-
*-------------------------------------------------------------------------
*/
/* ARGSUSED */
@@ -1571,7 +1556,7 @@ UNUSED
uint8_t *bkg_buf = NULL; /*background buffer */
H5D_storage_t store; /*union of EFL and chunk pointer in file space */
herr_t ret_value = SUCCEED; /*return value */
-
+
FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_read)
/* Map elements between file and memory for each chunk*/
@@ -1602,7 +1587,8 @@ UNUSED
chunk_info=chunk_node->data;
/* Pass in chunk's coordinates in a union. */
- store.chunk_coords = chunk_info->coords;
+ store.chunk.offset = chunk_info->coords;
+ store.chunk.index = chunk_info->index;
/* Perform the actual read operation */
status = (sconv->read)(dataset->ent.file, &(dataset->layout),
@@ -1701,7 +1687,8 @@ UNUSED
bkg_iter_init=1; /*file selection iteration info has been initialized */
/* Pass in chunk's coordinates in a union*/
- store.chunk_coords = chunk_info->coords;
+ store.chunk.offset = chunk_info->coords;
+ store.chunk.index = chunk_info->index;
for (smine_start=0; smine_start<chunk_info->chunk_points; smine_start+=smine_nelmts) {
/* Go figure out how many elements to read from the file */
@@ -1756,10 +1743,8 @@ UNUSED
/* Do the data transform after the conversion (since we're using type mem_type) */
if(!H5Z_xform_noop(dxpl_cache->data_xform_prop))
- {
if( H5Z_xform_eval(dxpl_cache->data_xform_prop, tconv_buf, smine_nelmts, mem_type) < 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Error performing data transform")
- }
/*
* Scatter the data into memory.
@@ -1804,7 +1789,7 @@ done:
if(file_iter_init) {
if(H5S_SELECT_ITER_RELEASE(&file_iter)<0)
HDONE_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator")
- } /* end if */
+ } /* end if */
if(mem_iter_init) {
if(H5S_SELECT_ITER_RELEASE(&mem_iter)<0)
HDONE_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator")
@@ -1932,7 +1917,8 @@ nelmts, H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
chunk_info=chunk_node->data;
/* Pass in chunk's coordinates in a union. */
- store.chunk_coords = chunk_info->coords;
+ store.chunk.offset = chunk_info->coords;
+ store.chunk.index = chunk_info->index;
/* Perform the actual write operation */
status = (sconv->write)(dataset->ent.file, &(dataset->layout),
@@ -2051,7 +2037,8 @@ nelmts, H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
bkg_iter_init=1; /*file selection iteration info has been initialized */
/*pass in chunk's coordinates in a union*/
- store.chunk_coords = chunk_info->coords;
+ store.chunk.offset = chunk_info->coords;
+ store.chunk.index = chunk_info->index;
for (smine_start=0; smine_start<chunk_info->chunk_points; smine_start+=smine_nelmts) {
/* Go figure out how many elements to read from the file */
@@ -2093,8 +2080,6 @@ nelmts, H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
if (n!=smine_nelmts)
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "file gather failed")
} /* end if */
-
-
/*
* Perform data type conversion.
@@ -2105,10 +2090,8 @@ nelmts, H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
/* Do the data transform after the type conversion (since we're using dataset->type) */
if(!H5Z_xform_noop(dxpl_cache->data_xform_prop))
- {
if( H5Z_xform_eval(dxpl_cache->data_xform_prop, tconv_buf, smine_nelmts, dataset->type) < 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Error performing data transform")
- }
/*
* Scatter the data out to the file.
@@ -2302,7 +2285,6 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_sp
hbool_t iter_init=0; /* Selection iteration info has been initialized */
unsigned f_ndims; /* The number of dimensions of the file's dataspace */
int sm_ndims; /* The number of dimensions of the memory buffer's dataspace (signed) */
- hsize_t nchunks, last_nchunks; /* Number of chunks in dataset */
H5TB_NODE *curr_node; /* Current node in TBBT */
H5S_sel_type fsel_type; /* Selection type on disk */
char bogus; /* "bogus" buffer to pass to selection iterator */
@@ -2358,22 +2340,12 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_sp
HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality")
/* Decide the number of chunks in each dimension*/
- last_nchunks=0;
- nchunks=1;
for(u=0; u<f_ndims; u++) {
/* Keep the size of the chunk dimensions as hsize_t for various routines */
fm->chunk_dim[u]=fm->layout->u.chunk.dim[u];
/* Round up to the next integer # of chunks, to accomodate partial chunks */
fm->chunks[u] = ((fm->f_dims[u]+dataset->layout.u.chunk.dim[u])-1) / dataset->layout.u.chunk.dim[u];
-
- /* Track total number of chunks in dataset */
- nchunks *= fm->chunks[u];
-
- /* Check if the chunk indices will overflow */
- if(nchunks<last_nchunks)
- HGOTO_ERROR (H5E_DATASET, H5E_OVERFLOW, FAIL, "too many chunks")
- last_nchunks=nchunks;
} /* end for */
/* Compute the "down" size of 'chunks' information */
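The hunk above drops the running total of chunks (and its overflow check); the chunk map keeps only the per-dimension chunk counts, rounded up so partial chunks are covered, plus their "down" products, from which each selected chunk's linear index is computed. The following is a self-contained sketch of that arithmetic with hand-rolled stand-ins for H5V_array_down() and H5V_chunk_index(); the dimensions are made-up example values.

#include <stdio.h>

#define RANK 2

int main(void)
{
    /* Illustrative numbers only */
    unsigned long long dims[RANK] = {95, 42};    /* current dataspace extent */
    unsigned long long cdim[RANK] = {10, 20};    /* chunk size in each dimension */
    unsigned long long chunks[RANK], down_chunks[RANK];

    /* Round up to the next whole number of chunks, so partial chunks count
     * (same expression as fm->chunks[u] in the hunk above) */
    for (int u = 0; u < RANK; u++)
        chunks[u] = ((dims[u] + cdim[u]) - 1) / cdim[u];

    /* "Down" products: chunks spanned by one step in dimension u,
     * in the spirit of H5V_array_down() */
    down_chunks[RANK - 1] = 1;
    for (int u = RANK - 2; u >= 0; u--)
        down_chunks[u] = down_chunks[u + 1] * chunks[u + 1];

    /* Linear chunk index for the chunk whose corner is at element offset[],
     * in the spirit of H5V_chunk_index() */
    unsigned long long offset[RANK] = {30, 40};
    unsigned long long index = 0;
    for (int u = 0; u < RANK; u++)
        index += (offset[u] / cdim[u]) * down_chunks[u];

    printf("chunks = {%llu, %llu}, index of chunk at (30,40) = %llu\n",
           chunks[0], chunks[1], index);
    return 0;
}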
diff --git a/src/H5Distore.c b/src/H5Distore.c
index 2434cc2..a5aeb70 100644
--- a/src/H5Distore.c
+++ b/src/H5Distore.c
@@ -1346,7 +1346,7 @@ static void *
H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, const H5O_layout_t *layout,
const H5O_pline_t *pline, const H5O_fill_t *fill, H5D_fill_time_t fill_time,
H5F_istore_ud1_t *udata,
- const hssize_t offset[], hbool_t relax,
+ const H5D_storage_t *store, hbool_t relax,
unsigned *idx_hint/*in,out*/)
{
int idx=0; /*hash index number */
@@ -1369,18 +1369,14 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con
/* Search for the chunk in the cache */
if (rdcc->nslots>0) {
- for (u=0, temp_idx=0; u<layout->u.chunk.ndims; u++) {
- temp_idx += offset[u];
- temp_idx *= layout->u.chunk.dim[u];
- }
- temp_idx += (hsize_t)(layout->u.chunk.addr);
+ temp_idx = store->chunk.index + (hsize_t)(layout->u.chunk.addr);
idx=H5F_HASH(f,temp_idx);
ent = rdcc->slot[idx];
if (ent && layout->u.chunk.ndims==ent->layout.u.chunk.ndims &&
H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) {
for (u=0, found=TRUE; u<ent->layout.u.chunk.ndims; u++) {
- if (offset[u]!=ent->offset[u]) {
+ if (store->chunk.offset[u]!=ent->offset[u]) {
found = FALSE;
break;
}
@@ -1423,7 +1419,7 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con
* Not in the cache. Read it from the file and count this as a miss
* if it's in the file or an init if it isn't.
*/
- chunk_addr = H5F_istore_get_addr(f, dxpl_id, layout, offset, udata);
+ chunk_addr = H5F_istore_get_addr(f, dxpl_id, layout, store->chunk.offset, udata);
} /* end else */
if (H5F_addr_defined(chunk_addr)) {
@@ -1508,7 +1504,7 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con
H5O_copy(H5O_LAYOUT_ID, layout, &ent->layout);
H5O_copy(H5O_PLINE_ID, pline, &ent->pline);
for (u=0; u<layout->u.chunk.ndims; u++)
- ent->offset[u] = offset[u];
+ ent->offset[u] = store->chunk.offset[u];
ent->rd_count = chunk_size;
ent->wr_count = chunk_size;
ent->chunk = chunk;
@@ -1612,7 +1608,7 @@ done:
static herr_t
H5F_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
const H5O_layout_t *layout, const H5O_pline_t *pline, hbool_t dirty,
- const hssize_t offset[], unsigned *idx_hint, uint8_t *chunk, size_t naccessed)
+ const H5D_storage_t *store, unsigned idx_hint, uint8_t *chunk, size_t naccessed)
{
H5F_rdcc_t *rdcc = &(f->shared->rdcc);
H5F_rdcc_ent_t *ent = NULL;
@@ -1621,13 +1617,13 @@ H5F_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5F_istore_unlock);
- if (UINT_MAX==*idx_hint) {
+ if (UINT_MAX==idx_hint) {
/*not in cache*/
} else {
- assert(*idx_hint<rdcc->nslots);
- assert(rdcc->slot[*idx_hint]);
- assert(rdcc->slot[*idx_hint]->chunk==chunk);
- found = *idx_hint;
+ assert(idx_hint<rdcc->nslots);
+ assert(rdcc->slot[idx_hint]);
+ assert(rdcc->slot[idx_hint]->chunk==chunk);
+ found = idx_hint;
}
if (found<0) {
@@ -1645,7 +1641,7 @@ H5F_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
H5O_copy (H5O_LAYOUT_ID, layout, &x.layout);
H5O_copy (H5O_PLINE_ID, pline, &x.pline);
for (u=0; u<layout->u.chunk.ndims; u++)
- x.offset[u] = offset[u];
+ x.offset[u] = store->chunk.offset[u];
assert(layout->u.chunk.size>0);
H5_ASSIGN_OVERFLOW(x.chunk_size,layout->u.chunk.size,hsize_t,size_t);
x.alloc_size = x.chunk_size;
@@ -1692,7 +1688,7 @@ H5F_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
*/
ssize_t
H5F_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
- const H5O_layout_t *layout, const struct H5D_dcpl_cache_t *dcpl_cache, hssize_t chunk_coords[],
+ const H5O_layout_t *layout, const struct H5D_dcpl_cache_t *dcpl_cache, const H5D_storage_t *store,
size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
void *buf)
@@ -1718,7 +1714,7 @@ H5F_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxp
#ifndef NDEBUG
for (u=0; u<layout->u.chunk.ndims; u++)
- assert(chunk_coords[u]>=0); /*negative coordinates not supported (yet) */
+ assert(store->chunk.offset[u]>=0); /*negative coordinates not supported (yet) */
#endif
/* Get the address of this chunk on disk */
@@ -1727,7 +1723,7 @@ HDfprintf(stderr,"%s: chunk_coords={",FUNC);
for(u=0; u<layout->u.chunk.ndims; u++)
HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->u.chunk.ndims-1) ? ", " : "}\n"));
#endif /* QAK */
- chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, chunk_coords, &udata);
+ chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, store->chunk.offset, &udata);
#ifdef QAK
HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->u.chunk.size);
HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]);
@@ -1757,7 +1753,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
* chunk.
*/
if (NULL==(chunk=H5F_istore_lock(f, dxpl_cache, dxpl_id, layout, &dcpl_cache->pline, &dcpl_cache->fill, dcpl_cache->fill_time,
- &udata, chunk_coords, FALSE, &idx_hint)))
+ &udata, store, FALSE, &idx_hint)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk");
/* Use the vectorized memory copy routine to do actual work */
@@ -1766,7 +1762,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
H5_CHECK_OVERFLOW(naccessed,ssize_t,size_t);
if (H5F_istore_unlock(f, dxpl_cache, dxpl_id, layout, &dcpl_cache->pline, FALSE,
- chunk_coords, &idx_hint, chunk, (size_t)naccessed)<0)
+ store, idx_hint, chunk, (size_t)naccessed)<0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk");
/* Set return value */
@@ -1796,7 +1792,7 @@ done:
ssize_t
H5F_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
hid_t dxpl_id, const H5O_layout_t *layout,
- const struct H5D_dcpl_cache_t *dcpl_cache, hssize_t chunk_coords[],
+ const struct H5D_dcpl_cache_t *dcpl_cache, const H5D_storage_t *store,
size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
const void *buf)
@@ -1822,7 +1818,7 @@ H5F_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
#ifndef NDEBUG
for (u=0; u<layout->u.chunk.ndims; u++)
- assert(chunk_coords[u]>=0); /*negative coordinates not supported (yet) */
+ assert(store->chunk.offset[u]>=0); /*negative coordinates not supported (yet) */
#endif
/* Get the address of this chunk on disk */
@@ -1831,7 +1827,7 @@ HDfprintf(stderr,"%s: chunk_coords={",FUNC);
for(u=0; u<layout->u.chunk.ndims; u++)
HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->u.chunk.ndims-1) ? ", " : "}\n"));
#endif /* QAK */
- chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, chunk_coords, &udata);
+ chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, store->chunk.offset, &udata);
#ifdef QAK
HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->u.chunk.size);
HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]);
@@ -1877,7 +1873,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
relax = FALSE;
if (NULL==(chunk=H5F_istore_lock(f, dxpl_cache, dxpl_id, layout, &dcpl_cache->pline, &dcpl_cache->fill, dcpl_cache->fill_time,
- &udata, chunk_coords, relax, &idx_hint)))
+ &udata, store, relax, &idx_hint)))
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to read raw data chunk");
/* Use the vectorized memory copy routine to do actual work */
@@ -1886,7 +1882,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
H5_CHECK_OVERFLOW(naccessed,ssize_t,size_t);
if (H5F_istore_unlock(f, dxpl_cache, dxpl_id, layout, &dcpl_cache->pline, TRUE,
- chunk_coords, &idx_hint, chunk, (size_t)naccessed)<0)
+ store, idx_hint, chunk, (size_t)naccessed)<0)
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "uanble to unlock raw data chunk");
/* Set return value */
@@ -1967,16 +1963,33 @@ done:
*-------------------------------------------------------------------------
*/
hsize_t
-H5F_istore_allocated(H5F_t *f, hid_t dxpl_id, unsigned ndims, haddr_t addr)
+H5F_istore_allocated(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout)
{
+ H5F_rdcc_t *rdcc = &(f->shared->rdcc); /*raw data chunk cache */
+ H5F_rdcc_ent_t *ent; /*cache entry */
+ H5D_dxpl_cache_t dxpl_cache; /* Cached data transfer properties */
H5F_istore_ud1_t udata;
hsize_t ret_value; /* Return value */
FUNC_ENTER_NOAPI(H5F_istore_allocated, 0);
+ /* Fill the DXPL cache values for later use */
+ if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, 0, "can't fill dxpl cache")
+
+ /* Search for cached chunks that haven't been written out */
+ for(ent = rdcc->head; ent; ent = ent->next) {
+ /* Make certain we are dealing with the correct B-tree, etc */
+ if (H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) {
+ /* Flush the chunk out to disk, to make certain the size is correct later */
+ if (H5F_istore_flush_entry(f, &dxpl_cache, dxpl_id, ent, FALSE)<0)
+ HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, 0, "cannot flush indexed storage buffer");
+ } /* end if */
+ } /* end for */
+
HDmemset(&udata, 0, sizeof udata);
- udata.mesg.u.chunk.ndims = ndims;
- if (H5B_iterate(f, dxpl_id, H5B_ISTORE, H5F_istore_iter_allocated, addr, &udata)<0)
+ udata.mesg.u.chunk.ndims = layout->u.chunk.ndims;
+ if (H5B_iterate(f, dxpl_id, H5B_ISTORE, H5F_istore_iter_allocated, layout->u.chunk.addr, &udata)<0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to iterate over chunk B-tree");
/* Set return value */
@@ -1984,7 +1997,7 @@ H5F_istore_allocated(H5F_t *f, hid_t dxpl_id, unsigned ndims, haddr_t addr)
done:
FUNC_LEAVE_NOAPI(ret_value);
-}
+} /* end H5F_istore_allocated() */
/*-------------------------------------------------------------------------
@@ -2487,7 +2500,7 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
H5F_rdcc_t *rdcc = &(f->shared->rdcc); /*raw data chunk cache */
H5F_rdcc_ent_t *ent = NULL, *next = NULL; /*cache entry */
unsigned u; /*counters */
- int found = 0; /*remove this entry */
+ int found; /*remove this entry */
H5F_istore_ud1_t udata; /*B-tree pass-through */
hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /*current dataspace dimensions */
herr_t ret_value=SUCCEED; /* Return value */
@@ -2510,13 +2523,12 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
* and release them from the linked list raw data cache
*-------------------------------------------------------------------------
*/
+ found = 0;
for(ent = rdcc->head; ent; ent = next) {
next = ent->next;
/* Make certain we are dealing with the correct B-tree, etc */
- if (layout->u.chunk.ndims==ent->layout.u.chunk.ndims &&
- H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) {
- found = 0;
+ if (H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) {
for(u = 0; u < ent->layout.u.chunk.ndims - 1; u++) {
if((hsize_t)ent->offset[u] > curr_dims[u]) {
found = 1;
@@ -2526,17 +2538,18 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
} /* end if */
if(found) {
-#if defined (H5F_ISTORE_DEBUG)
- HDfputs("cache:remove:[", stdout);
- for(u = 0; u < ent->layout.u.chunk.ndims - 1; u++) {
- HDfprintf(stdout, "%s%Hd", u ? ", " : "", ent->offset[u]);
- }
- HDfputs("]\n", stdout);
+#ifdef H5F_ISTORE_DEBUG
+ HDfputs("cache:remove:[", stderr);
+ for(u = 0; u < ent->layout.u.chunk.ndims - 1; u++)
+ HDfprintf(stderr, "%s%Hd", u ? ", " : "", ent->offset[u]);
+ HDfputs("]\n", stderr);
#endif
/* Preempt the entry from the cache, but do not flush it to disk */
if(H5F_istore_preempt(f, dxpl_cache, dxpl_id, ent, FALSE) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to preempt chunk");
+
+ found=0;
}
}
@@ -2597,12 +2610,10 @@ H5F_istore_prune_extent(H5F_t *f, hid_t dxpl_id, void *_lt_key, haddr_t UNUSED a
/* Figure out what chunks are no longer in use for the specified extent and release them */
for(u = 0; u < bt_udata->mesg.u.chunk.ndims - 1; u++)
if((hsize_t)lt_key->offset[u] > bt_udata->dims[u]) {
-#if defined (H5F_ISTORE_DEBUG)
+#ifdef H5F_ISTORE_DEBUG
HDfputs("b-tree:remove:[", bt_udata->stream);
- for(u = 0; u < bt_udata->mesg.u.chunk.ndims - 1; u++) {
- HDfprintf(bt_udata->stream, "%s%Hd", u ? ", " : "",
- lt_key->offset[u]);
- }
+ for(u = 0; u < bt_udata->mesg.u.chunk.ndims - 1; u++)
+ HDfprintf(bt_udata->stream, "%s%Hd", u ? ", " : "", lt_key->offset[u]);
HDfputs("]\n", bt_udata->stream);
#endif
@@ -2710,6 +2721,8 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
H5S_t *space_chunk = NULL; /*dataspace for a chunk */
hsize_t chunk_dims[H5O_LAYOUT_NDIMS]; /*current chunk dimensions */
hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /*current dataspace dimensions */
+ hsize_t chunks[H5O_LAYOUT_NDIMS]; /*current number of chunks in each dimension */
+ hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of elements in each dimension */
int srank; /*current # of dimensions (signed) */
unsigned rank; /*current # of dimensions */
int i, carry; /*counters */
@@ -2718,6 +2731,7 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
H5O_pline_t pline; /* I/O pipeline information */
H5O_fill_t fill; /* Fill value information */
H5D_fill_time_t fill_time; /* Fill time information */
+ H5D_storage_t store; /* Dataset storage information */
herr_t ret_value=SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5F_istore_initialize_by_extent, FAIL);
@@ -2747,10 +2761,18 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
H5_ASSIGN_OVERFLOW(rank,srank,int,unsigned);
/* Copy current dimensions */
- for(u = 0; u < rank; u++)
+ for(u = 0; u < rank; u++) {
size[u] = curr_dims[u];
+
+ /* Round up to the next integer # of chunks, to accomodate partial chunks */
+ chunks[u] = ((curr_dims[u]+layout->u.chunk.dim[u])-1) / layout->u.chunk.dim[u];
+ } /* end for */
size[u] = layout->u.chunk.dim[u];
+ /* Get the "down" sizes for each dimension */
+ if(H5V_array_down(rank,chunks,down_chunks)<0)
+ HGOTO_ERROR (H5E_INTERNAL, H5E_BADVALUE, FAIL, "can't compute 'down' sizes")
+
/* Create a data space for a chunk & set the extent */
for(u = 0; u < rank; u++)
chunk_dims[u] = layout->u.chunk.dim[u];
@@ -2792,8 +2814,13 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
if(found) {
+ /* Calculate the index of this chunk */
+ if(H5V_chunk_index(rank,chunk_offset,layout->u.chunk.dim,down_chunks,&store.chunk.index)<0)
+ HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
+
+ store.chunk.offset=chunk_offset;
if(NULL == (chunk = H5F_istore_lock(f, dxpl_cache, dxpl_id, layout, &pline, &fill, fill_time,
- NULL, chunk_offset, FALSE, &idx_hint)))
+ NULL, &store, FALSE, &idx_hint)))
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to read raw data chunk");
if(H5S_select_all(space_chunk,1) < 0)
@@ -2802,7 +2829,7 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
for(u = 0; u < rank; u++)
count[u] = MIN((idx_cur[u] + 1) * layout->u.chunk.dim[u], size[u] - chunk_offset[u]);
-#if defined (H5F_ISTORE_DEBUG)
+#ifdef H5F_ISTORE_DEBUG
HDfputs("cache:initialize:offset:[", stdout);
for(u = 0; u < layout->u.chunk.ndims - 1; u++)
HDfprintf(stdout, "%s%Hd", u ? ", " : "", chunk_offset[u]);
@@ -2826,7 +2853,7 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
HGOTO_ERROR(H5E_DATASET, H5E_CANTENCODE, FAIL, "filling selection failed");
if(H5F_istore_unlock(f, dxpl_cache, dxpl_id, layout, &pline, TRUE,
- chunk_offset, &idx_hint, chunk, (size_t)naccessed) < 0)
+ &store, idx_hint, chunk, (size_t)naccessed) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to unlock raw data chunk");
} /*found */
@@ -2887,8 +2914,8 @@ H5F_istore_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout)
/* Is the chunk to be deleted this cache entry? */
if(layout->u.chunk.addr==ent->layout.u.chunk.addr)
/* Remove entry without flushing */
- if (H5F_istore_preempt(f, &dxpl_cache, dxpl_id, ent, FALSE )<0)
- HGOTO_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks");
+ if (H5F_istore_preempt(f, &dxpl_cache, dxpl_id, ent, FALSE )<0)
+ HGOTO_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks");
} /* end for */
/* Set up user data for B-tree deletion */
@@ -2906,6 +2933,111 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5F_istore_update_cache
+ *
+ * Purpose: Update any cached chunks index values after the dataspace
+ * size has changed
+ *
+ * Return: Success: Non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Saturday, May 29, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_istore_update_cache(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout, const H5S_t * space)
+{
+ H5F_rdcc_t *rdcc = &(f->shared->rdcc); /*raw data chunk cache */
+ H5F_rdcc_ent_t *ent, *next; /*cache entry */
+ H5F_rdcc_ent_t *old_ent; /* Old cache entry */
+ H5D_dxpl_cache_t dxpl_cache; /* Cached data transfer properties */
+ int srank; /*current # of dimensions (signed) */
+ unsigned rank; /*current # of dimensions */
+ hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /*current dataspace dimensions */
+ hsize_t chunks[H5O_LAYOUT_NDIMS]; /*current number of chunks in each dimension */
+ hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of elements in each dimension */
+ hsize_t idx; /* Chunk index */
+ hsize_t temp_idx; /* temporary index number */
+ unsigned old_idx; /* Previous index number */
+ unsigned u; /*counters */
+ herr_t ret_value=SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5F_istore_update_cache, FAIL);
+
+ /* Check args */
+ assert(f);
+ assert(layout && H5D_CHUNKED == layout->type);
+ assert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
+ assert(space);
+
+ /* Go get the rank & dimensions */
+ if((srank = H5S_get_simple_extent_dims(space, curr_dims, NULL)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions");
+ H5_ASSIGN_OVERFLOW(rank,srank,int,unsigned);
+
+ /* Copy current dimensions */
+ for(u = 0; u < rank; u++) {
+ /* Round up to the next integer # of chunks, to accomodate partial chunks */
+ chunks[u] = ((curr_dims[u]+layout->u.chunk.dim[u])-1) / layout->u.chunk.dim[u];
+ } /* end for */
+
+ /* Get the "down" sizes for each dimension */
+ if(H5V_array_down(rank,chunks,down_chunks)<0)
+ HGOTO_ERROR (H5E_INTERNAL, H5E_BADVALUE, FAIL, "can't compute 'down' sizes")
+
+ /* Fill the DXPL cache values for later use */
+ if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
+
+ /* Recompute the index for each cached chunk that is in a dataset */
+ for(ent = rdcc->head; ent; ent = next) {
+ next=ent->next;
+
+ /* Make certain we are dealing with the correct B-tree, etc */
+ if (H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) {
+ /* Calculate the index of this chunk */
+ if(H5V_chunk_index(rank,ent->offset,layout->u.chunk.dim,down_chunks,&idx)<0)
+ HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
+
+ /* Compute the index for the chunk entry */
+ temp_idx = idx + (hsize_t)(layout->u.chunk.addr);
+ old_idx=ent->idx; /* Save for later */
+ ent->idx=H5F_HASH(f,temp_idx);
+
+ if(old_idx!=ent->idx) {
+ /* Check if there is already a chunk at this chunk's new location */
+ old_ent = rdcc->slot[ent->idx];
+ if(old_ent!=NULL) {
+ assert(old_ent->locked==0);
+
+ /* Check if we are removing the entry we would walk to next */
+ if(old_ent==next)
+ next=old_ent->next;
+
+ /* Remove the old entry from the cache */
+ if (H5F_istore_preempt(f, &dxpl_cache, dxpl_id, old_ent, TRUE )<0)
+ HGOTO_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks");
+ } /* end if */
+
+ /* Insert this chunk into correct location in hash table */
+ rdcc->slot[ent->idx]=ent;
+
+ /* Null out previous location */
+ rdcc->slot[old_idx]=NULL;
+ } /* end if */
+ } /* end if */
+ } /* end for */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value);
+} /* end H5F_istore_update_cache() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5F_istore_dump_btree
*
* Purpose: Prints information about the storage B-tree to the specified
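The new H5F_istore_update_cache() above walks the raw-data chunk cache after the dataspace has been resized, recomputes each entry's hash slot from the entry's (unchanged) chunk offset and the new per-dimension chunk counts, and moves the entry to its new slot, preempting any unlocked occupant already sitting there. A compact, self-contained sketch of that rehash loop follows; the toy entry type, the hash, and the preempt step are simplifications of the real rdcc structures, and each entry's linear index under the new extent (computed with H5V_chunk_index() in the patch) is taken as given here.

#include <stddef.h>
#include <assert.h>

#define NSLOTS 521U                      /* toy hash-table size */

/* Toy cache entry; the real H5F_rdcc_ent_t carries much more state */
typedef struct toy_ent {
    struct toy_ent    *next;             /* next entry on the cache's list */
    unsigned           idx;              /* current hash-table slot */
    unsigned long long index;            /* linear chunk index under the new extent */
    int                locked;
} toy_ent_t;

/* Stand-in for H5F_istore_preempt(): drop an entry from its slot
 * (the real routine also flushes and unlinks the entry) */
static void
toy_preempt(toy_ent_t *slots[], toy_ent_t *ent)
{
    slots[ent->idx] = NULL;
}

/* Rehash every cached entry after the chunk counts changed, mirroring the
 * loop added in H5F_istore_update_cache() */
static void
toy_update_cache(toy_ent_t *slots[], toy_ent_t *head, unsigned long long btree_addr)
{
    toy_ent_t *ent, *next;

    for (ent = head; ent; ent = next) {
        next = ent->next;

        unsigned old_idx = ent->idx;                       /* save the previous slot */
        ent->idx = (unsigned)((ent->index + btree_addr) % NSLOTS);

        if (old_idx != ent->idx) {
            toy_ent_t *old_ent = slots[ent->idx];          /* someone already in the new slot? */

            if (old_ent != NULL) {
                assert(!old_ent->locked);
                if (old_ent == next)                        /* don't walk onto an evicted entry */
                    next = old_ent->next;
                toy_preempt(slots, old_ent);                /* evict the clashing entry */
            }
            slots[ent->idx] = ent;                          /* move to the new slot */
            slots[old_idx]  = NULL;                         /* and vacate the old one */
        }
    }
}

int main(void)
{
    static toy_ent_t *slots[NSLOTS];
    toy_ent_t a = { NULL, 7, 17, 0 };
    toy_ent_t b = { &a,   3,  3, 0 };                       /* cache list: b -> a */

    slots[7] = &a;
    slots[3] = &b;
    toy_update_cache(slots, &b, 0x1000);
    return 0;
}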
diff --git a/src/H5Dprivate.h b/src/H5Dprivate.h
index 985d935..bdbe126 100644
--- a/src/H5Dprivate.h
+++ b/src/H5Dprivate.h
@@ -164,7 +164,10 @@ typedef struct H5D_t H5D_t;
/* Typedef for dataset storage information */
typedef union H5D_storage_t {
H5O_efl_t efl; /* External file list information for dataset */
- hssize_t *chunk_coords; /* chunk's coordinates in file chunks */
+ struct {
+ hsize_t index; /* "Index" of chunk in dataset (must be first for TBBT routines) */
+ hssize_t *offset; /* Chunk's coordinates in elements */
+ } chunk;
} H5D_storage_t;
/* Typedef for cached dataset transfer property list information */
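With chunk_coords replaced by the struct above, the chunked read/write loops in H5Dio.c fill both the coordinates and the precomputed index before each chunk's I/O and pass the whole union down to H5F_istore_readvv()/H5F_istore_writevv(). The sketch below shows that caller-side pattern with toy types standing in for H5D_storage_t and H5D_chunk_info_t; only the two field names come from the patch, everything else is simplified.

#include <stdio.h>

/* Toy mirror of the new H5D_storage_t chunk member; hsize_t/hssize_t are
 * replaced with plain integer types to keep the sketch self-contained */
typedef union {
    struct {
        unsigned long long index;    /* linear chunk index (first member, as in the real union) */
        const long long   *offset;   /* chunk coordinates in elements */
    } chunk;
} toy_storage_t;

/* Stand-in for the per-chunk info built while mapping the selection */
typedef struct {
    unsigned long long index;
    long long          coords[2];
} toy_chunk_info_t;

/* A chunked I/O loop fills both fields before each chunk's read/write,
 * mirroring the H5D_chunk_read()/H5D_chunk_write() hunks above */
static void
toy_do_chunk_io(const toy_storage_t *store)
{
    printf("chunk index %llu at (%lld, %lld)\n",
           store->chunk.index, store->chunk.offset[0], store->chunk.offset[1]);
}

int main(void)
{
    toy_chunk_info_t info = { 11, { 30, 40 } };
    toy_storage_t store;

    store.chunk.offset = info.coords;   /* was: store.chunk_coords = chunk_info->coords */
    store.chunk.index  = info.index;    /* new: reuse the index computed during mapping */
    toy_do_chunk_io(&store);
    return 0;
}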
diff --git a/src/H5Dseq.c b/src/H5Dseq.c
index 9bf10e7..f8ecf00 100644
--- a/src/H5Dseq.c
+++ b/src/H5Dseq.c
@@ -237,7 +237,7 @@ H5F_seq_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_i
case H5D_CHUNKED:
assert(store);
- if((ret_value=H5F_istore_readvv(f, dxpl_cache, dxpl_id, layout, dcpl_cache, store->chunk_coords,
+ if((ret_value=H5F_istore_readvv(f, dxpl_cache, dxpl_id, layout, dcpl_cache, store,
dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr,
mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr,
buf))<0)
@@ -346,7 +346,7 @@ H5F_seq_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
case H5D_CHUNKED:
assert(store);
- if((ret_value=H5F_istore_writevv(f, dxpl_cache, dxpl_id, layout, dcpl_cache, store->chunk_coords,
+ if((ret_value=H5F_istore_writevv(f, dxpl_cache, dxpl_id, layout, dcpl_cache, store,
dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr,
mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr,
buf))<0)
diff --git a/src/H5F.c b/src/H5F.c
index 63ebdde..41fb972 100644
--- a/src/H5F.c
+++ b/src/H5F.c
@@ -2148,7 +2148,7 @@ H5Fcreate(const char *filename, unsigned flags, hid_t fcpl_id, hid_t fapl_id)
/* Keep this ID in file object structure */
new_file->file_id = ret_value;
-
+
done:
if (ret_value<0 && new_file)
if(H5F_close(new_file)<0)
@@ -2225,7 +2225,7 @@ H5Fopen(const char *filename, unsigned flags, hid_t fapl_id)
/* Get an atom for the file */
if ((ret_value = H5I_register(H5I_FILE, new_file))<0)
HGOTO_ERROR(H5E_ATOM, H5E_CANTREGISTER, FAIL, "unable to atomize file handle")
-
+
/* Keep this ID in file object structure */
new_file->file_id = ret_value;
@@ -3333,7 +3333,7 @@ H5F_close(H5F_t *f)
/* Invalidate file ID */
f->file_id = -1;
-
+
/* Only flush at this point if the file will be closed */
assert(closing);
/* Dump debugging info */
diff --git a/src/H5Fistore.c b/src/H5Fistore.c
index 2434cc2..a5aeb70 100644
--- a/src/H5Fistore.c
+++ b/src/H5Fistore.c
@@ -1346,7 +1346,7 @@ static void *
H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, const H5O_layout_t *layout,
const H5O_pline_t *pline, const H5O_fill_t *fill, H5D_fill_time_t fill_time,
H5F_istore_ud1_t *udata,
- const hssize_t offset[], hbool_t relax,
+ const H5D_storage_t *store, hbool_t relax,
unsigned *idx_hint/*in,out*/)
{
int idx=0; /*hash index number */
@@ -1369,18 +1369,14 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con
/* Search for the chunk in the cache */
if (rdcc->nslots>0) {
- for (u=0, temp_idx=0; u<layout->u.chunk.ndims; u++) {
- temp_idx += offset[u];
- temp_idx *= layout->u.chunk.dim[u];
- }
- temp_idx += (hsize_t)(layout->u.chunk.addr);
+ temp_idx = store->chunk.index + (hsize_t)(layout->u.chunk.addr);
idx=H5F_HASH(f,temp_idx);
ent = rdcc->slot[idx];
if (ent && layout->u.chunk.ndims==ent->layout.u.chunk.ndims &&
H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) {
for (u=0, found=TRUE; u<ent->layout.u.chunk.ndims; u++) {
- if (offset[u]!=ent->offset[u]) {
+ if (store->chunk.offset[u]!=ent->offset[u]) {
found = FALSE;
break;
}
@@ -1423,7 +1419,7 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con
* Not in the cache. Read it from the file and count this as a miss
* if it's in the file or an init if it isn't.
*/
- chunk_addr = H5F_istore_get_addr(f, dxpl_id, layout, offset, udata);
+ chunk_addr = H5F_istore_get_addr(f, dxpl_id, layout, store->chunk.offset, udata);
} /* end else */
if (H5F_addr_defined(chunk_addr)) {
@@ -1508,7 +1504,7 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con
H5O_copy(H5O_LAYOUT_ID, layout, &ent->layout);
H5O_copy(H5O_PLINE_ID, pline, &ent->pline);
for (u=0; u<layout->u.chunk.ndims; u++)
- ent->offset[u] = offset[u];
+ ent->offset[u] = store->chunk.offset[u];
ent->rd_count = chunk_size;
ent->wr_count = chunk_size;
ent->chunk = chunk;
@@ -1612,7 +1608,7 @@ done:
static herr_t
H5F_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
const H5O_layout_t *layout, const H5O_pline_t *pline, hbool_t dirty,
- const hssize_t offset[], unsigned *idx_hint, uint8_t *chunk, size_t naccessed)
+ const H5D_storage_t *store, unsigned idx_hint, uint8_t *chunk, size_t naccessed)
{
H5F_rdcc_t *rdcc = &(f->shared->rdcc);
H5F_rdcc_ent_t *ent = NULL;
@@ -1621,13 +1617,13 @@ H5F_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5F_istore_unlock);
- if (UINT_MAX==*idx_hint) {
+ if (UINT_MAX==idx_hint) {
/*not in cache*/
} else {
- assert(*idx_hint<rdcc->nslots);
- assert(rdcc->slot[*idx_hint]);
- assert(rdcc->slot[*idx_hint]->chunk==chunk);
- found = *idx_hint;
+ assert(idx_hint<rdcc->nslots);
+ assert(rdcc->slot[idx_hint]);
+ assert(rdcc->slot[idx_hint]->chunk==chunk);
+ found = idx_hint;
}
if (found<0) {
@@ -1645,7 +1641,7 @@ H5F_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
H5O_copy (H5O_LAYOUT_ID, layout, &x.layout);
H5O_copy (H5O_PLINE_ID, pline, &x.pline);
for (u=0; u<layout->u.chunk.ndims; u++)
- x.offset[u] = offset[u];
+ x.offset[u] = store->chunk.offset[u];
assert(layout->u.chunk.size>0);
H5_ASSIGN_OVERFLOW(x.chunk_size,layout->u.chunk.size,hsize_t,size_t);
x.alloc_size = x.chunk_size;
@@ -1692,7 +1688,7 @@ H5F_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
*/
ssize_t
H5F_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
- const H5O_layout_t *layout, const struct H5D_dcpl_cache_t *dcpl_cache, hssize_t chunk_coords[],
+ const H5O_layout_t *layout, const struct H5D_dcpl_cache_t *dcpl_cache, const H5D_storage_t *store,
size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
void *buf)
@@ -1718,7 +1714,7 @@ H5F_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxp
#ifndef NDEBUG
for (u=0; u<layout->u.chunk.ndims; u++)
- assert(chunk_coords[u]>=0); /*negative coordinates not supported (yet) */
+ assert(store->chunk.offset[u]>=0); /*negative coordinates not supported (yet) */
#endif
/* Get the address of this chunk on disk */
@@ -1727,7 +1723,7 @@ HDfprintf(stderr,"%s: chunk_coords={",FUNC);
for(u=0; u<layout->u.chunk.ndims; u++)
HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->u.chunk.ndims-1) ? ", " : "}\n"));
#endif /* QAK */
- chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, chunk_coords, &udata);
+ chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, store->chunk.offset, &udata);
#ifdef QAK
HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->u.chunk.size);
HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]);
@@ -1757,7 +1753,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
* chunk.
*/
if (NULL==(chunk=H5F_istore_lock(f, dxpl_cache, dxpl_id, layout, &dcpl_cache->pline, &dcpl_cache->fill, dcpl_cache->fill_time,
- &udata, chunk_coords, FALSE, &idx_hint)))
+ &udata, store, FALSE, &idx_hint)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk");
/* Use the vectorized memory copy routine to do actual work */
@@ -1766,7 +1762,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
H5_CHECK_OVERFLOW(naccessed,ssize_t,size_t);
if (H5F_istore_unlock(f, dxpl_cache, dxpl_id, layout, &dcpl_cache->pline, FALSE,
- chunk_coords, &idx_hint, chunk, (size_t)naccessed)<0)
+ store, idx_hint, chunk, (size_t)naccessed)<0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk");
/* Set return value */
@@ -1796,7 +1792,7 @@ done:
ssize_t
H5F_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
hid_t dxpl_id, const H5O_layout_t *layout,
- const struct H5D_dcpl_cache_t *dcpl_cache, hssize_t chunk_coords[],
+ const struct H5D_dcpl_cache_t *dcpl_cache, const H5D_storage_t *store,
size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
const void *buf)
@@ -1822,7 +1818,7 @@ H5F_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
#ifndef NDEBUG
for (u=0; u<layout->u.chunk.ndims; u++)
- assert(chunk_coords[u]>=0); /*negative coordinates not supported (yet) */
+ assert(store->chunk.offset[u]>=0); /*negative coordinates not supported (yet) */
#endif
/* Get the address of this chunk on disk */
@@ -1831,7 +1827,7 @@ HDfprintf(stderr,"%s: chunk_coords={",FUNC);
for(u=0; u<layout->u.chunk.ndims; u++)
HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->u.chunk.ndims-1) ? ", " : "}\n"));
#endif /* QAK */
- chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, chunk_coords, &udata);
+ chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, store->chunk.offset, &udata);
#ifdef QAK
HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->u.chunk.size);
HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]);
@@ -1877,7 +1873,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
relax = FALSE;
if (NULL==(chunk=H5F_istore_lock(f, dxpl_cache, dxpl_id, layout, &dcpl_cache->pline, &dcpl_cache->fill, dcpl_cache->fill_time,
- &udata, chunk_coords, relax, &idx_hint)))
+ &udata, store, relax, &idx_hint)))
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to read raw data chunk");
/* Use the vectorized memory copy routine to do actual work */
@@ -1886,7 +1882,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
H5_CHECK_OVERFLOW(naccessed,ssize_t,size_t);
if (H5F_istore_unlock(f, dxpl_cache, dxpl_id, layout, &dcpl_cache->pline, TRUE,
- chunk_coords, &idx_hint, chunk, (size_t)naccessed)<0)
+ store, idx_hint, chunk, (size_t)naccessed)<0)
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "uanble to unlock raw data chunk");
/* Set return value */
@@ -1967,16 +1963,33 @@ done:
*-------------------------------------------------------------------------
*/
hsize_t
-H5F_istore_allocated(H5F_t *f, hid_t dxpl_id, unsigned ndims, haddr_t addr)
+H5F_istore_allocated(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout)
{
+ H5F_rdcc_t *rdcc = &(f->shared->rdcc); /*raw data chunk cache */
+ H5F_rdcc_ent_t *ent; /*cache entry */
+ H5D_dxpl_cache_t dxpl_cache; /* Cached data transfer properties */
H5F_istore_ud1_t udata;
hsize_t ret_value; /* Return value */
FUNC_ENTER_NOAPI(H5F_istore_allocated, 0);
+ /* Fill the DXPL cache values for later use */
+ if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, 0, "can't fill dxpl cache")
+
+ /* Search for cached chunks that haven't been written out */
+ for(ent = rdcc->head; ent; ent = ent->next) {
+ /* Make certain we are dealing with the correct B-tree, etc */
+ if (H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) {
+ /* Flush the chunk out to disk, to make certain the size is correct later */
+ if (H5F_istore_flush_entry(f, &dxpl_cache, dxpl_id, ent, FALSE)<0)
+ HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, 0, "cannot flush indexed storage buffer");
+ } /* end if */
+ } /* end for */
+
HDmemset(&udata, 0, sizeof udata);
- udata.mesg.u.chunk.ndims = ndims;
- if (H5B_iterate(f, dxpl_id, H5B_ISTORE, H5F_istore_iter_allocated, addr, &udata)<0)
+ udata.mesg.u.chunk.ndims = layout->u.chunk.ndims;
+ if (H5B_iterate(f, dxpl_id, H5B_ISTORE, H5F_istore_iter_allocated, layout->u.chunk.addr, &udata)<0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to iterate over chunk B-tree");
/* Set return value */
@@ -1984,7 +1997,7 @@ H5F_istore_allocated(H5F_t *f, hid_t dxpl_id, unsigned ndims, haddr_t addr)
done:
FUNC_LEAVE_NOAPI(ret_value);
-}
+} /* end H5F_istore_allocated() */
/*-------------------------------------------------------------------------
@@ -2487,7 +2500,7 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
H5F_rdcc_t *rdcc = &(f->shared->rdcc); /*raw data chunk cache */
H5F_rdcc_ent_t *ent = NULL, *next = NULL; /*cache entry */
unsigned u; /*counters */
- int found = 0; /*remove this entry */
+ int found; /*remove this entry */
H5F_istore_ud1_t udata; /*B-tree pass-through */
hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /*current dataspace dimensions */
herr_t ret_value=SUCCEED; /* Return value */
@@ -2510,13 +2523,12 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
* and release them from the linked list raw data cache
*-------------------------------------------------------------------------
*/
+ found = 0;
for(ent = rdcc->head; ent; ent = next) {
next = ent->next;
/* Make certain we are dealing with the correct B-tree, etc */
- if (layout->u.chunk.ndims==ent->layout.u.chunk.ndims &&
- H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) {
- found = 0;
+ if (H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) {
for(u = 0; u < ent->layout.u.chunk.ndims - 1; u++) {
if((hsize_t)ent->offset[u] > curr_dims[u]) {
found = 1;
@@ -2526,17 +2538,18 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
} /* end if */
if(found) {
-#if defined (H5F_ISTORE_DEBUG)
- HDfputs("cache:remove:[", stdout);
- for(u = 0; u < ent->layout.u.chunk.ndims - 1; u++) {
- HDfprintf(stdout, "%s%Hd", u ? ", " : "", ent->offset[u]);
- }
- HDfputs("]\n", stdout);
+#ifdef H5F_ISTORE_DEBUG
+ HDfputs("cache:remove:[", stderr);
+ for(u = 0; u < ent->layout.u.chunk.ndims - 1; u++)
+ HDfprintf(stderr, "%s%Hd", u ? ", " : "", ent->offset[u]);
+ HDfputs("]\n", stderr);
#endif
/* Preempt the entry from the cache, but do not flush it to disk */
if(H5F_istore_preempt(f, dxpl_cache, dxpl_id, ent, FALSE) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to preempt chunk");
+
+ found=0;
}
}
@@ -2597,12 +2610,10 @@ H5F_istore_prune_extent(H5F_t *f, hid_t dxpl_id, void *_lt_key, haddr_t UNUSED a
/* Figure out what chunks are no longer in use for the specified extent and release them */
for(u = 0; u < bt_udata->mesg.u.chunk.ndims - 1; u++)
if((hsize_t)lt_key->offset[u] > bt_udata->dims[u]) {
-#if defined (H5F_ISTORE_DEBUG)
+#ifdef H5F_ISTORE_DEBUG
HDfputs("b-tree:remove:[", bt_udata->stream);
- for(u = 0; u < bt_udata->mesg.u.chunk.ndims - 1; u++) {
- HDfprintf(bt_udata->stream, "%s%Hd", u ? ", " : "",
- lt_key->offset[u]);
- }
+ for(u = 0; u < bt_udata->mesg.u.chunk.ndims - 1; u++)
+ HDfprintf(bt_udata->stream, "%s%Hd", u ? ", " : "", lt_key->offset[u]);
HDfputs("]\n", bt_udata->stream);
#endif
@@ -2710,6 +2721,8 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
H5S_t *space_chunk = NULL; /*dataspace for a chunk */
hsize_t chunk_dims[H5O_LAYOUT_NDIMS]; /*current chunk dimensions */
hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /*current dataspace dimensions */
+ hsize_t chunks[H5O_LAYOUT_NDIMS]; /*current number of chunks in each dimension */
+ hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of elements in each dimension */
int srank; /*current # of dimensions (signed) */
unsigned rank; /*current # of dimensions */
int i, carry; /*counters */
@@ -2718,6 +2731,7 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
H5O_pline_t pline; /* I/O pipeline information */
H5O_fill_t fill; /* Fill value information */
H5D_fill_time_t fill_time; /* Fill time information */
+ H5D_storage_t store; /* Dataset storage information */
herr_t ret_value=SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5F_istore_initialize_by_extent, FAIL);
@@ -2747,10 +2761,18 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
H5_ASSIGN_OVERFLOW(rank,srank,int,unsigned);
/* Copy current dimensions */
- for(u = 0; u < rank; u++)
+ for(u = 0; u < rank; u++) {
size[u] = curr_dims[u];
+
+ /* Round up to the next integer # of chunks, to accomodate partial chunks */
+ chunks[u] = ((curr_dims[u]+layout->u.chunk.dim[u])-1) / layout->u.chunk.dim[u];
+ } /* end for */
size[u] = layout->u.chunk.dim[u];
+ /* Get the "down" sizes for each dimension */
+ if(H5V_array_down(rank,chunks,down_chunks)<0)
+ HGOTO_ERROR (H5E_INTERNAL, H5E_BADVALUE, FAIL, "can't compute 'down' sizes")
+
/* Create a data space for a chunk & set the extent */
for(u = 0; u < rank; u++)
chunk_dims[u] = layout->u.chunk.dim[u];
@@ -2792,8 +2814,13 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
if(found) {
+ /* Calculate the index of this chunk */
+ if(H5V_chunk_index(rank,chunk_offset,layout->u.chunk.dim,down_chunks,&store.chunk.index)<0)
+ HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
+
+ store.chunk.offset=chunk_offset;
if(NULL == (chunk = H5F_istore_lock(f, dxpl_cache, dxpl_id, layout, &pline, &fill, fill_time,
- NULL, chunk_offset, FALSE, &idx_hint)))
+ NULL, &store, FALSE, &idx_hint)))
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to read raw data chunk");
if(H5S_select_all(space_chunk,1) < 0)
@@ -2802,7 +2829,7 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
for(u = 0; u < rank; u++)
count[u] = MIN((idx_cur[u] + 1) * layout->u.chunk.dim[u], size[u] - chunk_offset[u]);
-#if defined (H5F_ISTORE_DEBUG)
+#ifdef H5F_ISTORE_DEBUG
HDfputs("cache:initialize:offset:[", stdout);
for(u = 0; u < layout->u.chunk.ndims - 1; u++)
HDfprintf(stdout, "%s%Hd", u ? ", " : "", chunk_offset[u]);
@@ -2826,7 +2853,7 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
HGOTO_ERROR(H5E_DATASET, H5E_CANTENCODE, FAIL, "filling selection failed");
if(H5F_istore_unlock(f, dxpl_cache, dxpl_id, layout, &pline, TRUE,
- chunk_offset, &idx_hint, chunk, (size_t)naccessed) < 0)
+ &store, idx_hint, chunk, (size_t)naccessed) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to unlock raw data chunk");
} /*found */
@@ -2887,8 +2914,8 @@ H5F_istore_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout)
/* Is the chunk to be deleted this cache entry? */
if(layout->u.chunk.addr==ent->layout.u.chunk.addr)
/* Remove entry without flushing */
- if (H5F_istore_preempt(f, &dxpl_cache, dxpl_id, ent, FALSE )<0)
- HGOTO_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks");
+ if (H5F_istore_preempt(f, &dxpl_cache, dxpl_id, ent, FALSE )<0)
+ HGOTO_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks");
} /* end for */
/* Set up user data for B-tree deletion */
@@ -2906,6 +2933,111 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5F_istore_update_cache
+ *
+ * Purpose: Update the index values of any cached chunks after the
+ * dataspace size has changed
+ *
+ * Return: Success: Non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Saturday, May 29, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_istore_update_cache(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout, const H5S_t * space)
+{
+ H5F_rdcc_t *rdcc = &(f->shared->rdcc); /*raw data chunk cache */
+ H5F_rdcc_ent_t *ent, *next; /*cache entry */
+ H5F_rdcc_ent_t *old_ent; /* Old cache entry */
+ H5D_dxpl_cache_t dxpl_cache; /* Cached data transfer properties */
+ int srank; /*current # of dimensions (signed) */
+ unsigned rank; /*current # of dimensions */
+ hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /*current dataspace dimensions */
+ hsize_t chunks[H5O_LAYOUT_NDIMS]; /*current number of chunks in each dimension */
+ hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of chunks in each dimension */
+ hsize_t idx; /* Chunk index */
+ hsize_t temp_idx; /* temporary index number */
+ unsigned old_idx; /* Previous index number */
+ unsigned u; /*counters */
+ herr_t ret_value=SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5F_istore_update_cache, FAIL);
+
+ /* Check args */
+ assert(f);
+ assert(layout && H5D_CHUNKED == layout->type);
+ assert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
+ assert(space);
+
+ /* Go get the rank & dimensions */
+ if((srank = H5S_get_simple_extent_dims(space, curr_dims, NULL)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions");
+ H5_ASSIGN_OVERFLOW(rank,srank,int,unsigned);
+
+ /* Copy current dimensions */
+ for(u = 0; u < rank; u++) {
+ /* Round up to the next integer # of chunks, to accommodate partial chunks */
+ chunks[u] = ((curr_dims[u]+layout->u.chunk.dim[u])-1) / layout->u.chunk.dim[u];
+ } /* end for */
+
+ /* Get the "down" sizes for each dimension */
+ if(H5V_array_down(rank,chunks,down_chunks)<0)
+ HGOTO_ERROR (H5E_INTERNAL, H5E_BADVALUE, FAIL, "can't compute 'down' sizes")
+
+ /* Fill the DXPL cache values for later use */
+ if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
+
+ /* Recompute the index for each cached chunk that belongs to this dataset */
+ for(ent = rdcc->head; ent; ent = next) {
+ next=ent->next;
+
+ /* Make certain we are dealing with the correct B-tree, etc */
+ if (H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) {
+ /* Calculate the index of this chunk */
+ if(H5V_chunk_index(rank,ent->offset,layout->u.chunk.dim,down_chunks,&idx)<0)
+ HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
+
+ /* Compute the index for the chunk entry */
+ temp_idx = idx + (hsize_t)(layout->u.chunk.addr);
+ old_idx=ent->idx; /* Save for later */
+ ent->idx=H5F_HASH(f,temp_idx);
+
+ if(old_idx!=ent->idx) {
+ /* Check if there is already a chunk at this chunk's new location */
+ old_ent = rdcc->slot[ent->idx];
+ if(old_ent!=NULL) {
+ assert(old_ent->locked==0);
+
+ /* Check if we are removing the entry we would walk to next */
+ if(old_ent==next)
+ next=old_ent->next;
+
+ /* Remove the old entry from the cache */
+ if (H5F_istore_preempt(f, &dxpl_cache, dxpl_id, old_ent, TRUE )<0)
+ HGOTO_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks");
+ } /* end if */
+
+ /* Insert this chunk into correct location in hash table */
+ rdcc->slot[ent->idx]=ent;
+
+ /* Null out previous location */
+ rdcc->slot[old_idx]=NULL;
+ } /* end if */
+ } /* end if */
+ } /* end for */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value);
+} /* end H5F_istore_update_cache() */
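To make the index computation in H5F_istore_update_cache() easier to follow, here is a small standalone sketch of the same arithmetic: round each dimension up to a whole number of chunks, form the "down" products, turn a chunk's coordinate offset into a linear index, and reduce that index to a hash slot. The function and variable names are invented for illustration and the hash step is simplified to a plain modulo; this is not the library's internal H5V_array_down()/H5V_chunk_index() code.

#include <stdint.h>
#include <stddef.h>

/* Illustrative only: assumes rank <= 8 for brevity. */
static uint64_t
sketch_chunk_hash_slot(unsigned rank, const uint64_t curr_dims[],
                       const uint64_t chunk_dim[], const uint64_t chunk_offset[],
                       size_t nslots)
{
    uint64_t chunks[8];    /* # of chunks in each dimension, rounded up */
    uint64_t down[8];      /* "down" products of the chunk counts */
    uint64_t idx = 0;      /* linear index of the chunk */
    unsigned u, v;

    /* Round up to the next whole number of chunks, to cover partial chunks */
    for (u = 0; u < rank; u++)
        chunks[u] = (curr_dims[u] + chunk_dim[u] - 1) / chunk_dim[u];

    /* down[u] = product of the chunk counts in all faster-varying dimensions */
    for (u = 0; u < rank; u++) {
        down[u] = 1;
        for (v = u + 1; v < rank; v++)
            down[u] *= chunks[v];
    }

    /* Convert the chunk's coordinate offset into a linear chunk index */
    for (u = 0; u < rank; u++)
        idx += (chunk_offset[u] / chunk_dim[u]) * down[u];

    /* The chunk cache then reduces this index to a hash-table slot */
    return (uint64_t)(idx % nslots);
}

For example, a 100x100 dataset with 30x30 chunks rounds up to 4x4 chunks, so the chunk starting at offset (60, 90) gets linear index 2*4 + 3 = 11.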
+
+
+/*-------------------------------------------------------------------------
* Function: H5F_istore_dump_btree
*
* Purpose: Prints information about the storage B-tree to the specified
diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h
index 8160a98..94655f9 100644
--- a/src/H5Fpkg.h
+++ b/src/H5Fpkg.h
@@ -189,6 +189,7 @@ struct H5F_t {
/* Forward declarations for prototype arguments */
struct H5D_dxpl_cache_t;
struct H5D_dcpl_cache_t;
+union H5D_storage_t;
/* Private functions, not part of the publicly documented API */
#ifdef NOT_YET
@@ -204,14 +205,14 @@ H5_DLL herr_t H5F_istore_dest (H5F_t *f, hid_t dxpl_id);
H5_DLL ssize_t H5F_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
hid_t dxpl_id,
const struct H5O_layout_t *layout, const struct H5D_dcpl_cache_t *dcpl_cache,
- hssize_t chunk_coords[],
+ const union H5D_storage_t *store,
size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
void *buf);
H5_DLL ssize_t H5F_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
hid_t dxpl_id,
const struct H5O_layout_t *layout, const struct H5D_dcpl_cache_t *dcpl_cache,
- hssize_t chunk_coords[],
+ const union H5D_storage_t *store,
size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
const void *buf);
diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h
index addb247..c16c9f1 100644
--- a/src/H5Fprivate.h
+++ b/src/H5Fprivate.h
@@ -447,7 +447,7 @@ H5_DLL herr_t H5F_istore_create(H5F_t *f, hid_t dxpl_id,
H5_DLL herr_t H5F_istore_allocate (H5F_t *f, hid_t dxpl_id,
const struct H5O_layout_t *layout, const hsize_t *space_dim,
struct H5P_genplist_t *dc_plist, hbool_t full_overwrite);
-H5_DLL hsize_t H5F_istore_allocated(H5F_t *f, hid_t dxpl_id, unsigned ndims, haddr_t addr);
+H5_DLL hsize_t H5F_istore_allocated(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout);
H5_DLL herr_t H5F_istore_dump_btree(H5F_t *f, hid_t dxpl_id, FILE *stream, unsigned ndims,
haddr_t addr);
H5_DLL herr_t H5F_istore_prune_by_extent( H5F_t *f,
@@ -459,6 +459,8 @@ H5_DLL herr_t H5F_istore_initialize_by_extent( H5F_t *f,
const struct H5S_t *space );
H5_DLL herr_t H5F_istore_delete(H5F_t *f, hid_t dxpl_id,
const struct H5O_layout_t *layout);
+H5_DLL herr_t H5F_istore_update_cache(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout,
+ const struct H5S_t * space);
/* Address-related functions */
H5_DLL void H5F_addr_encode(const H5F_t *, uint8_t** /*in,out*/, haddr_t);
diff --git a/src/H5Fseq.c b/src/H5Fseq.c
index 9bf10e7..f8ecf00 100644
--- a/src/H5Fseq.c
+++ b/src/H5Fseq.c
@@ -237,7 +237,7 @@ H5F_seq_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_i
case H5D_CHUNKED:
assert(store);
- if((ret_value=H5F_istore_readvv(f, dxpl_cache, dxpl_id, layout, dcpl_cache, store->chunk_coords,
+ if((ret_value=H5F_istore_readvv(f, dxpl_cache, dxpl_id, layout, dcpl_cache, store,
dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr,
mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr,
buf))<0)
@@ -346,7 +346,7 @@ H5F_seq_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
case H5D_CHUNKED:
assert(store);
- if((ret_value=H5F_istore_writevv(f, dxpl_cache, dxpl_id, layout, dcpl_cache, store->chunk_coords,
+ if((ret_value=H5F_istore_writevv(f, dxpl_cache, dxpl_id, layout, dcpl_cache, store,
dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr,
mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr,
buf))<0)
diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h
index 6520877..63cac3c 100644
--- a/src/H5Oprivate.h
+++ b/src/H5Oprivate.h
@@ -35,10 +35,10 @@
#include "H5Spublic.h" /* Dataspace functions */
/* Private headers needed by this file */
-#include "H5private.h" /* Generic functions */
-#include "H5HGprivate.h" /* Global heap functions */
-#include "H5Tprivate.h" /* Datatype functions */
-#include "H5Zprivate.h" /* I/O pipeline filters */
+#include "H5private.h" /* Generic functions */
+#include "H5HGprivate.h" /* Global heap functions */
+#include "H5Tprivate.h" /* Datatype functions */
+#include "H5Zprivate.h" /* I/O pipeline filters */
/* Object header macros */
#define H5O_MIN_SIZE H5O_ALIGN(32) /*min obj header data size */
diff --git a/src/H5Pdcpl.c b/src/H5Pdcpl.c
index d2b57df..ce2e227 100644
--- a/src/H5Pdcpl.c
+++ b/src/H5Pdcpl.c
@@ -747,8 +747,7 @@ H5Pget_filter(hid_t plist_id, unsigned idx, unsigned int *flags/*out*/,
#endif /* H5_WANT_H5_V1_6_COMPAT */
/* Check args */
- if (cd_nelmts || cd_values)
-{
+ if (cd_nelmts || cd_values) {
if (cd_nelmts && *cd_nelmts>256)
/*
* It's likely that users forget to initialize this on input, so
@@ -1641,4 +1640,3 @@ H5Premove_filter(hid_t plist_id, H5Z_filter_t filter)
done:
FUNC_LEAVE_API(ret_value);
}
-
diff --git a/src/H5Z.c b/src/H5Z.c
index 9de1966..cc3e3f5 100644
--- a/src/H5Z.c
+++ b/src/H5Z.c
@@ -1159,4 +1159,3 @@ H5Z_delete(H5O_pline_t *pline, H5Z_filter_t filter)
done:
FUNC_LEAVE_NOAPI(ret_value)
}
-
diff --git a/test/dsets.c b/test/dsets.c
index 79ff80b..0c9c18d 100644
--- a/test/dsets.c
+++ b/test/dsets.c
@@ -1272,6 +1272,7 @@ test_filter_internal(hid_t fid, const char *name, hid_t dcpl, int if_fletcher32,
{
hid_t dataset; /* Dataset ID */
hid_t dxpl; /* Dataset xfer property list ID */
+ hid_t write_dxpl; /* Dataset xfer property list ID for writing */
hid_t sid; /* Dataspace ID */
const hsize_t size[2] = {DSET_DIM1, DSET_DIM2}; /* Dataspace dimensions */
const hssize_t hs_offset[2] = {FILTER_HS_OFFSET1, FILTER_HS_OFFSET2}; /* Hyperslab offset */
@@ -1290,6 +1291,7 @@ test_filter_internal(hid_t fid, const char *name, hid_t dcpl, int if_fletcher32,
if ((dxpl = H5Pcreate (H5P_DATASET_XFER))<0) goto error;
tconv_buf = malloc (1000);
if (H5Pset_buffer (dxpl, (size_t)1000, tconv_buf, NULL)<0) goto error;
+ if ((write_dxpl = H5Pcopy (dxpl))<0) TEST_ERROR;
if (if_fletcher32==DISABLE_FLETCHER32) {
if(H5Pset_edc_check(dxpl, H5Z_DISABLE_EDC)<0)
@@ -1347,10 +1349,10 @@ test_filter_internal(hid_t fid, const char *name, hid_t dcpl, int if_fletcher32,
}
}
- if (H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, points)<0)
- goto error;
+ if (H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, write_dxpl, points)<0)
+ TEST_ERROR;
- if((*dset_size=H5Dget_storage_size(dataset))==0) goto error;
+ if((*dset_size=H5Dget_storage_size(dataset))==0) TEST_ERROR;
PASSED();
@@ -1363,25 +1365,27 @@ test_filter_internal(hid_t fid, const char *name, hid_t dcpl, int if_fletcher32,
/* Read the dataset back */
if(corrupted) {
/* Default behavior is failure when data is corrupted. */
+ /* (Use the "write" DXPL in order to make certain corruption is seen) */
H5E_BEGIN_TRY {
- status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check);
+ status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, write_dxpl, check);
} H5E_END_TRY;
- if(status>=0) goto error;
+ if(status>=0) TEST_ERROR;
    /* Callback decides to continue despite the data being corrupted. */
- if(H5Pset_filter_callback(dxpl, filter_cb_cont, NULL)<0) goto error;
+ if(H5Pset_filter_callback(dxpl, filter_cb_cont, NULL)<0) TEST_ERROR;
if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check)<0)
- goto error;
+ TEST_ERROR;
/* Callback decides to fail when data is corrupted. */
- if(H5Pset_filter_callback(dxpl, filter_cb_fail, NULL)<0) goto error;
+ if(H5Pset_filter_callback(write_dxpl, filter_cb_fail, NULL)<0) TEST_ERROR;
+ /* (Use the "write" DXPL in order to make certain corruption is seen) */
H5E_BEGIN_TRY {
- status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check);
+ status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, write_dxpl, check);
} H5E_END_TRY;
- if(status>=0) goto error;
+ if(status>=0) TEST_ERROR;
} else {
if (H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check)<0)
- goto error;
+ TEST_ERROR;
/* Check that the values read are the same as the values written */
for (i=0; i<size[0]; i++) {
@@ -1414,31 +1418,33 @@ test_filter_internal(hid_t fid, const char *name, hid_t dcpl, int if_fletcher32,
points[i][j] = (int)HDrandom ();
}
}
- if (H5Dwrite (dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, points)<0)
- goto error;
+ if (H5Dwrite (dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, write_dxpl, points)<0)
+ TEST_ERROR;
if(corrupted) {
/* Default behavior is failure when data is corrupted. */
+ /* (Use the "write" DXPL in order to make certain corruption is seen) */
H5E_BEGIN_TRY {
- status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check);
+ status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, write_dxpl, check);
} H5E_END_TRY;
- if(status>=0) goto error;
+ if(status>=0) TEST_ERROR;
    /* Callback decides to continue despite the data being corrupted. */
- if(H5Pset_filter_callback(dxpl, filter_cb_cont, NULL)<0) goto error;
+ if(H5Pset_filter_callback(dxpl, filter_cb_cont, NULL)<0) TEST_ERROR;
if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check)<0)
- goto error;
+ TEST_ERROR;
/* Callback decides to fail when data is corrupted. */
- if(H5Pset_filter_callback(dxpl, filter_cb_fail, NULL)<0) goto error;
+ if(H5Pset_filter_callback(write_dxpl, filter_cb_fail, NULL)<0) TEST_ERROR;
+ /* (Use the "write" DXPL in order to make certain corruption is seen) */
H5E_BEGIN_TRY {
- status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check);
+ status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, write_dxpl, check);
} H5E_END_TRY;
- if(status>=0) goto error;
+ if(status>=0) TEST_ERROR;
} else {
/* Read the dataset back and check it */
if (H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check)<0)
- goto error;
+ TEST_ERROR;
/* Check that the values read are the same as the values written */
for (i=0; i<size[0]; i++) {
@@ -1454,7 +1460,7 @@ test_filter_internal(hid_t fid, const char *name, hid_t dcpl, int if_fletcher32,
}
}
- if((*dset_size=H5Dget_storage_size(dataset))==0) goto error;
+ if((*dset_size=H5Dget_storage_size(dataset))==0) TEST_ERROR;
PASSED();
/*----------------------------------------------------------------------
@@ -1465,30 +1471,32 @@ test_filter_internal(hid_t fid, const char *name, hid_t dcpl, int if_fletcher32,
*/
TESTING(" filters (re-open)");
- if (H5Dclose (dataset)<0) goto error;
- if ((dataset = H5Dopen (fid, name))<0) goto error;
+ if (H5Dclose (dataset)<0) TEST_ERROR;
+ if ((dataset = H5Dopen (fid, name))<0) TEST_ERROR;
if(corrupted) {
/* Default behavior is failure when data is corrupted. */
+ /* (Use the "write" DXPL in order to make certain corruption is seen) */
H5E_BEGIN_TRY {
- status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check);
+ status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, write_dxpl, check);
} H5E_END_TRY;
- if(status>=0) goto error;
+ if(status>=0) TEST_ERROR;
    /* Callback decides to continue despite the data being corrupted. */
- if(H5Pset_filter_callback(dxpl, filter_cb_cont, NULL)<0) goto error;
+ if(H5Pset_filter_callback(dxpl, filter_cb_cont, NULL)<0) TEST_ERROR;
if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check)<0)
- goto error;
+ TEST_ERROR;
/* Callback decides to fail when data is corrupted. */
- if(H5Pset_filter_callback(dxpl, filter_cb_fail, NULL)<0) goto error;
+ if(H5Pset_filter_callback(write_dxpl, filter_cb_fail, NULL)<0) TEST_ERROR;
+ /* (Use the "write" DXPL in order to make certain corruption is seen) */
H5E_BEGIN_TRY {
- status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check);
+ status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, write_dxpl, check);
} H5E_END_TRY;
- if(status>=0) goto error;
+ if(status>=0) TEST_ERROR;
} else {
if (H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check)<0)
- goto error;
+ TEST_ERROR;
/* Check that the values read are the same as the values written */
for (i=0; i<size[0]; i++) {
@@ -1521,31 +1529,34 @@ test_filter_internal(hid_t fid, const char *name, hid_t dcpl, int if_fletcher32,
}
}
if (H5Sselect_hyperslab(sid, H5S_SELECT_SET, hs_offset, NULL, hs_size,
- NULL)<0) goto error;
+ NULL)<0) TEST_ERROR;
+ /* (Use the "read" DXPL because the partial I/O on corrupted data test needs to ignore errors during writing) */
if (H5Dwrite (dataset, H5T_NATIVE_INT, sid, sid, dxpl, points)<0)
- goto error;
+ TEST_ERROR;
if(corrupted) {
/* Default behavior is failure when data is corrupted. */
+ /* (Use the "write" DXPL in order to make certain corruption is seen) */
H5E_BEGIN_TRY {
- status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check);
+ status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, write_dxpl, check);
} H5E_END_TRY;
- if(status>=0) goto error;
+ if(status>=0) TEST_ERROR;
    /* Callback decides to continue despite the data being corrupted. */
- if(H5Pset_filter_callback(dxpl, filter_cb_cont, NULL)<0) goto error;
+ if(H5Pset_filter_callback(dxpl, filter_cb_cont, NULL)<0) TEST_ERROR;
if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check)<0)
- goto error;
+ TEST_ERROR;
/* Callback decides to fail when data is corrupted. */
- if(H5Pset_filter_callback(dxpl, filter_cb_fail, NULL)<0) goto error;
+ if(H5Pset_filter_callback(write_dxpl, filter_cb_fail, NULL)<0) TEST_ERROR;
+ /* (Use the "write" DXPL in order to make certain corruption is seen) */
H5E_BEGIN_TRY {
- status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check);
+ status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, write_dxpl, check);
} H5E_END_TRY;
- if(status>=0) goto error;
+ if(status>=0) TEST_ERROR;
} else {
if (H5Dread (dataset, H5T_NATIVE_INT, sid, sid, dxpl, check)<0)
- goto error;
+ TEST_ERROR;
/* Check that the values read are the same as the values written */
for (i=0; i<hs_size[0]; i++) {
@@ -1687,7 +1698,7 @@ test_filters(hid_t file)
if (H5Zregister (H5Z_CORRUPT)<0) goto error;
if (H5Pset_filter (dc, H5Z_FILTER_CORRUPT, 0, 3, data_corrupt)<0) goto error;
- if(test_filter_internal(file,DSET_FLETCHER32_NAME_3,dc,ENABLE_FLETCHER32,DATA_CORRUPTED,&fletcher32_size)<0) goto error;
+ if(test_filter_internal(file,DSET_FLETCHER32_NAME_3,dc,DISABLE_FLETCHER32,DATA_CORRUPTED,&fletcher32_size)<0) goto error;
if(fletcher32_size<=null_size) {
H5_FAILED();
puts(" Size after checksumming is incorrect.");
@@ -1954,7 +1965,7 @@ test_missing_filter(hid_t file)
/* Query the dataset's size on disk */
if((dset_size=H5Dget_storage_size(dsid))==0) {
H5_FAILED();
- printf(" Line %d: Error querying dataset size\n",__LINE__);
+ printf(" Line %d: Error querying dataset size, dset_size=%lu\n",__LINE__,(unsigned long)dset_size);
goto error;
} /* end if */
@@ -3348,6 +3359,7 @@ test_filters_endianess(void)
if (H5Dclose (dsid)<0) goto error;
if (H5Sclose (sid)<0) goto error;
if (H5Fclose (fid)<0) goto error;
+
/*-------------------------------------------------------------------------
* step 2: open a file written on a little-endian machine in step 1
*-------------------------------------------------------------------------
@@ -3371,6 +3383,7 @@ test_filters_endianess(void)
/* close */
if (H5Fclose(fid)<0) goto error;
+
/*-------------------------------------------------------------------------
* step 3: open a file written on a big-endian machine in step 1
*-------------------------------------------------------------------------
@@ -3432,25 +3445,26 @@ int
main(void)
{
hid_t file, grp, fapl;
+ int mdc_nelmts;
+ size_t rdcc_nelmts;
+ size_t rdcc_nbytes;
+ double rdcc_w0;
int nerrors=0;
char filename[1024];
h5_reset();
fapl = h5_fileaccess();
-#if 0
- {
- /* Turn off raw data cache */
- int mdc_nelmts;
- if (H5Pget_cache(fapl, &mdc_nelmts, NULL, NULL, NULL)<0) goto error;
- if (H5Pset_cache(fapl, mdc_nelmts, 0, 0, 0.0)<0) goto error;
- }
-#endif
-
/* Set the random # seed */
HDsrandom((unsigned long)HDtime(NULL));
h5_fixname(FILENAME[0], fapl, filename, sizeof filename);
+
+ /* Turn off the chunk cache, so all the chunks are immediately written to disk */
+ if(H5Pget_cache(fapl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0)<0) goto error;
+ rdcc_nbytes=0;
+ if(H5Pset_cache(fapl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0)<0) goto error;
+
if ((file=H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl))<0) {
goto error;
}
@@ -3460,6 +3474,7 @@ main(void)
if (H5Gset_comment(grp, ".", "Causes diagnostic messages to be emitted")<0)
goto error;
if (H5Gclose (grp)<0) goto error;
+
nerrors += test_create(file)<0 ?1:0;
nerrors += test_simple_io(fapl)<0 ?1:0;
nerrors += test_compact_io(fapl)<0 ?1:0;
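The main() change above works by setting the raw data chunk cache to zero bytes so every chunk is written straight to disk. A minimal sketch of the same idea, factored into a helper, is shown below; it mirrors the H5Pget_cache()/H5Pset_cache() calls used in the test, and the helper name disable_chunk_cache is invented for illustration.

#include "hdf5.h"

static herr_t
disable_chunk_cache(hid_t fapl)
{
    int    mdc_nelmts;   /* metadata cache size (left unchanged) */
    size_t rdcc_nelmts;  /* raw data chunk cache: # of slots */
    size_t rdcc_nbytes;  /* raw data chunk cache: total bytes */
    double rdcc_w0;      /* preemption policy (left unchanged) */

    /* Read the current cache settings from the file access property list */
    if (H5Pget_cache(fapl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0) < 0)
        return -1;

    /* A zero-byte raw data cache forces chunks straight to disk */
    rdcc_nbytes = 0;
    return H5Pset_cache(fapl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);
}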
diff --git a/test/set_extent.c b/test/set_extent.c
index 067dabc..845d621 100644
--- a/test/set_extent.c
+++ b/test/set_extent.c
@@ -67,17 +67,17 @@ int main( void )
/* Create a new file using default properties. */
- if ((file_id = H5Fcreate( "set_extent_create.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT ))<0) goto out;
+ if ((file_id = H5Fcreate( "set_extent_create.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT ))<0) TEST_ERROR;
TESTING("extend dataset create with fill value");
/* Create the data space with unlimited dimensions. */
- if ((space_id = H5Screate_simple( RANK, dims, maxdims ))<0) goto out;
+ if ((space_id = H5Screate_simple( RANK, dims, maxdims ))<0) TEST_ERROR;
/* Modify dataset creation properties, i.e. enable chunking. */
- if ((plist_id = H5Pcreate (H5P_DATASET_CREATE ))<0) goto out;
- if (H5Pset_chunk( plist_id, RANK, dims_chunk )<0) goto out;
- if (H5Pset_fill_value( plist_id, H5T_NATIVE_INT, &fillvalue )<0) goto out;
+ if ((plist_id = H5Pcreate (H5P_DATASET_CREATE ))<0) TEST_ERROR;
+ if (H5Pset_chunk( plist_id, RANK, dims_chunk )<0) TEST_ERROR;
+ if (H5Pset_fill_value( plist_id, H5T_NATIVE_INT, &fillvalue )<0) TEST_ERROR;
/*-------------------------------------------------------------------------
@@ -86,10 +86,10 @@ int main( void )
*/
/* Create a new dataset */
- if ((dataset_id = H5Dcreate( file_id , "Dataset1", H5T_NATIVE_INT, space_id, plist_id ))<0) goto out;
+ if ((dataset_id = H5Dcreate( file_id , "Dataset1", H5T_NATIVE_INT, space_id, plist_id ))<0) TEST_ERROR;
/* Write the data. */
- if (H5Dwrite( dataset_id , H5T_NATIVE_INT, space_id, H5S_ALL, H5P_DEFAULT, data )<0) goto out;
+ if (H5Dwrite( dataset_id , H5T_NATIVE_INT, space_id, H5S_ALL, H5P_DEFAULT, data )<0) TEST_ERROR;
/*-------------------------------------------------------------------------
* Set new dimensions for the array; shrink it
@@ -97,15 +97,16 @@ int main( void )
*/
/* Set new dimensions for the array. */
- if (H5Dset_extent( dataset_id , dims_new )<0) goto out;
+ if (H5Dset_extent( dataset_id , dims_new )<0) TEST_ERROR;
/* Get the space. */
- if ((space_id = H5Dget_space( dataset_id ))<0) goto out;
+ if ((space_id = H5Dget_space( dataset_id ))<0) TEST_ERROR;
/* Get dimensions. */
- if (H5Sget_simple_extent_dims( space_id, dims_out, NULL )<0) goto out;
+ if (H5Sget_simple_extent_dims( space_id, dims_out, NULL )<0) TEST_ERROR;
- if ( dims_out[0] != dims_new[0] ) goto out;
+ if ( dims_out[0] != dims_new[0] ) TEST_ERROR;
+ if ( dims_out[1] != dims_new[1] ) TEST_ERROR;
/*-------------------------------------------------------------------------
@@ -114,13 +115,17 @@ int main( void )
*/
/* Read the new dataset. */
- if (H5Dread( dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf1 )<0) goto out;
+ if (H5Dread( dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf1 )<0) TEST_ERROR;
/* Compare the read array with the original array */
for( i = 0; i < (int)dims_out[0]; i++ )
for( j = 0; j < (int)dims_out[1]; j++ )
- if ( buf1[i][j] != data[i][j] ) goto out;
+ if ( buf1[i][j] != data[i][j] ) {
+ printf("buf1[%d][%d]=%d\n",i,j,buf1[i][j]);
+ printf("data[%d][%d]=%d\n",i,j,data[i][j]);
+ TEST_ERROR;
+ } /* end if */
/*-------------------------------------------------------------------------
@@ -129,15 +134,15 @@ int main( void )
*/
/* Set new dimensions for the array. */
- if (H5Dset_extent( dataset_id , dims )<0) goto out;
+ if (H5Dset_extent( dataset_id , dims )<0) TEST_ERROR;
/* Get the space. */
- if ((space_id = H5Dget_space( dataset_id ))<0) goto out;
+ if ((space_id = H5Dget_space( dataset_id ))<0) TEST_ERROR;
/* Get dimensions. */
- if (H5Sget_simple_extent_dims( space_id, dims_out, NULL )<0) goto out;
+ if (H5Sget_simple_extent_dims( space_id, dims_out, NULL )<0) TEST_ERROR;
- if ( dims_out[0] != dims[0] ) goto out;
+ if ( dims_out[0] != dims[0] ) TEST_ERROR;
/*-------------------------------------------------------------------------
@@ -146,16 +151,16 @@ int main( void )
*/
/* Read the new dataset. */
- if (H5Dread( dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf2 )<0) goto out;
+ if (H5Dread( dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf2 )<0) TEST_ERROR;
/* Compare the read array with the original array */
for( i = 0; i < (int)dims_out[0]; i++ ) {
for( j = 0; j < (int)dims_out[1]; j++ ) {
if ( i >= 70 || j >= 70 ) {
- if ( buf2[i][j] != fillvalue ) goto out;
+ if ( buf2[i][j] != fillvalue ) TEST_ERROR;
}
else {
- if ( buf2[i][j] != data[i][j] ) goto out;
+ if ( buf2[i][j] != data[i][j] ) TEST_ERROR;
}
}
}
@@ -174,12 +179,12 @@ int main( void )
TESTING("extend dataset create without fill value");
/* Create the data space with unlimited dimensions. */
- if ((space_id = H5Screate_simple( RANK, dims, maxdims ))<0) goto out;
+ if ((space_id = H5Screate_simple( RANK, dims, maxdims ))<0) TEST_ERROR;
/* Modify dataset creation properties, i.e. enable chunking. */
- if ((plist_id = H5Pcreate (H5P_DATASET_CREATE ))<0) goto out;
- if (H5Pset_chunk( plist_id, RANK, dims_chunk )<0) goto out;
- if (H5Pset_fill_time( plist_id, H5D_FILL_TIME_ALLOC)<0) goto out;
+ if ((plist_id = H5Pcreate (H5P_DATASET_CREATE ))<0) TEST_ERROR;
+ if (H5Pset_chunk( plist_id, RANK, dims_chunk )<0) TEST_ERROR;
+ if (H5Pset_fill_time( plist_id, H5D_FILL_TIME_ALLOC)<0) TEST_ERROR;
/*-------------------------------------------------------------------------
* Create and write one dataset
@@ -187,10 +192,10 @@ int main( void )
*/
/* Create a new dataset */
- if ((dataset_id = H5Dcreate( file_id , "Dataset2", H5T_NATIVE_INT, space_id, plist_id ))<0) goto out;
+ if ((dataset_id = H5Dcreate( file_id , "Dataset2", H5T_NATIVE_INT, space_id, plist_id ))<0) TEST_ERROR;
/* Write the data. */
- if (H5Dwrite( dataset_id , H5T_NATIVE_INT, space_id, H5S_ALL, H5P_DEFAULT, data )<0) goto out;
+ if (H5Dwrite( dataset_id , H5T_NATIVE_INT, space_id, H5S_ALL, H5P_DEFAULT, data )<0) TEST_ERROR;
/*-------------------------------------------------------------------------
* Set new dimensions for the array; shrink it
@@ -198,15 +203,15 @@ int main( void )
*/
/* Set new dimensions for the array. */
- if (H5Dset_extent( dataset_id , dims_new )<0) goto out;
+ if (H5Dset_extent( dataset_id , dims_new )<0) TEST_ERROR;
/* Get the space. */
- if ((space_id = H5Dget_space( dataset_id ))<0) goto out;
+ if ((space_id = H5Dget_space( dataset_id ))<0) TEST_ERROR;
/* Get dimensions. */
- if (H5Sget_simple_extent_dims( space_id, dims_out, NULL )<0) goto out;
+ if (H5Sget_simple_extent_dims( space_id, dims_out, NULL )<0) TEST_ERROR;
- if ( dims_out[0] != dims_new[0] ) goto out;
+ if ( dims_out[0] != dims_new[0] ) TEST_ERROR;
/*-------------------------------------------------------------------------
@@ -215,13 +220,13 @@ int main( void )
*/
/* Read the new dataset. */
- if (H5Dread( dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf1 )<0) goto out;
+ if (H5Dread( dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf1 )<0) TEST_ERROR;
/* Compare the read array with the original array */
for( i = 0; i < (int)dims_out[0]; i++ )
for( j = 0; j < (int)dims_out[1]; j++ )
- if ( buf1[i][j] != data[i][j] ) goto out;
+ if ( buf1[i][j] != data[i][j] ) TEST_ERROR;
/*-------------------------------------------------------------------------
@@ -230,15 +235,15 @@ int main( void )
*/
/* Set new dimensions for the array. */
- if (H5Dset_extent( dataset_id , dims )<0) goto out;
+ if (H5Dset_extent( dataset_id , dims )<0) TEST_ERROR;
/* Get the space. */
- if ((space_id = H5Dget_space( dataset_id ))<0) goto out;
+ if ((space_id = H5Dget_space( dataset_id ))<0) TEST_ERROR;
/* Get dimensions. */
- if (H5Sget_simple_extent_dims( space_id, dims_out, NULL )<0) goto out;
+ if (H5Sget_simple_extent_dims( space_id, dims_out, NULL )<0) TEST_ERROR;
- if ( dims_out[0] != dims[0] ) goto out;
+ if ( dims_out[0] != dims[0] ) TEST_ERROR;
/*-------------------------------------------------------------------------
@@ -247,16 +252,16 @@ int main( void )
*/
/* Read the new dataset. */
- if (H5Dread( dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf2 )<0) goto out;
+ if (H5Dread( dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf2 )<0) TEST_ERROR;
/* Compare the read array with the original array */
for( i = 0; i < (int)dims_out[0]; i++ ) {
for( j = 0; j < (int)dims_out[1]; j++ ) {
if ( i >= 70 || j >= 70 ) {
- if ( buf2[i][j] != 0 ) goto out;
+ if ( buf2[i][j] != 0 ) TEST_ERROR;
}
else {
- if ( buf2[i][j] != data[i][j] ) goto out;
+ if ( buf2[i][j] != data[i][j] ) TEST_ERROR;
}
}
}
@@ -283,32 +288,32 @@ int main( void )
*/
/* Create a file creation property list */
- if((fcpl = H5Pcreate(H5P_FILE_CREATE))<0) goto out;
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE))<0) TEST_ERROR;
/* Set non-default indexed storage B-tree internal 'K' value */
- if(H5Pset_istore_k(fcpl,ISTORE_IK)<0) goto out;
+ if(H5Pset_istore_k(fcpl,ISTORE_IK)<0) TEST_ERROR;
/* Create a new file using properties. */
- if ((file_id = H5Fcreate( "set_extent_read.h5", H5F_ACC_TRUNC, fcpl, H5P_DEFAULT ))<0) goto out;
+ if ((file_id = H5Fcreate( "set_extent_read.h5", H5F_ACC_TRUNC, fcpl, H5P_DEFAULT ))<0) TEST_ERROR;
/* Close property list */
- if(H5Pclose(fcpl)<0) goto out;
+ if(H5Pclose(fcpl)<0) TEST_ERROR;
TESTING("extend dataset read with fill value");
/* Create the data space with unlimited dimensions. */
- if ((space_id = H5Screate_simple( RANK, dims, maxdims ))<0) goto out;
+ if ((space_id = H5Screate_simple( RANK, dims, maxdims ))<0) TEST_ERROR;
/* Modify dataset creation properties, i.e. enable chunking. */
- if ((plist_id = H5Pcreate (H5P_DATASET_CREATE ))<0) goto out;
- if (H5Pset_chunk( plist_id, RANK, dims_chunk )<0) goto out;
- if (H5Pset_fill_value( plist_id, H5T_NATIVE_INT, &fillvalue )<0) goto out;
+ if ((plist_id = H5Pcreate (H5P_DATASET_CREATE ))<0) TEST_ERROR;
+ if (H5Pset_chunk( plist_id, RANK, dims_chunk )<0) TEST_ERROR;
+ if (H5Pset_fill_value( plist_id, H5T_NATIVE_INT, &fillvalue )<0) TEST_ERROR;
/* Create a new dataset within the file using cparms creation properties. */
- if ((dataset_id = H5Dcreate( file_id , "Dataset1", H5T_NATIVE_INT, space_id, plist_id ))<0) goto out;
+ if ((dataset_id = H5Dcreate( file_id , "Dataset1", H5T_NATIVE_INT, space_id, plist_id ))<0) TEST_ERROR;
/* Write the data. */
- if (H5Dwrite( dataset_id , H5T_NATIVE_INT, space_id, H5S_ALL, H5P_DEFAULT, data )<0) goto out;
+ if (H5Dwrite( dataset_id , H5T_NATIVE_INT, space_id, H5S_ALL, H5P_DEFAULT, data )<0) TEST_ERROR;
/* Close/release resources. */
H5Dclose( dataset_id );
@@ -318,29 +323,29 @@ int main( void )
/* Open the file */
- if ((file_id = H5Fopen( "set_extent_read.h5", H5F_ACC_RDWR, H5P_DEFAULT ))<0) goto out;
+ if ((file_id = H5Fopen( "set_extent_read.h5", H5F_ACC_RDWR, H5P_DEFAULT ))<0) TEST_ERROR;
/* Open the dataset */
- if ((dataset_id = H5Dopen( file_id , "Dataset1" ))<0) goto out;
+ if ((dataset_id = H5Dopen( file_id , "Dataset1" ))<0) TEST_ERROR;
/* Set new dimensions for the array. */
- if (H5Dset_extent( dataset_id, dims_new )<0) goto out;
+ if (H5Dset_extent( dataset_id, dims_new )<0) TEST_ERROR;
/* Get the space. */
- if ((space_id = H5Dget_space( dataset_id ))<0) goto out;
+ if ((space_id = H5Dget_space( dataset_id ))<0) TEST_ERROR;
/* Get dimensions. */
- if (H5Sget_simple_extent_dims( space_id, dims_out, NULL )<0) goto out;
+ if (H5Sget_simple_extent_dims( space_id, dims_out, NULL )<0) TEST_ERROR;
- if ( dims_out[0] != dims_new[0] ) goto out;
+ if ( dims_out[0] != dims_new[0] ) TEST_ERROR;
/* Read the new dataset. */
- if (H5Dread( dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf1 )<0) goto out;
+ if (H5Dread( dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf1 )<0) TEST_ERROR;
/* Compare the read array with the original array */
for( i = 0; i < (int)dims_out[0]; i++ )
for( j = 0; j < (int)dims_out[1]; j++ )
- if ( buf1[i][j] != data[i][j] ) goto out;
+ if ( buf1[i][j] != data[i][j] ) TEST_ERROR;
/*-------------------------------------------------------------------------
* Set new dimensions for the array; expand it again
@@ -348,27 +353,27 @@ int main( void )
*/
/* Set new dimensions for the array. */
- if (H5Dset_extent( dataset_id , dims )<0) goto out;
+ if (H5Dset_extent( dataset_id , dims )<0) TEST_ERROR;
/* Get the space. */
- if ((space_id = H5Dget_space( dataset_id ))<0) goto out;
+ if ((space_id = H5Dget_space( dataset_id ))<0) TEST_ERROR;
/* Get dimensions. */
- if (H5Sget_simple_extent_dims( space_id, dims_out, NULL )<0) goto out;
+ if (H5Sget_simple_extent_dims( space_id, dims_out, NULL )<0) TEST_ERROR;
- if ( dims_out[0] != dims[0] ) goto out;
+ if ( dims_out[0] != dims[0] ) TEST_ERROR;
/* Read the new dataset. */
- if (H5Dread( dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf2 )<0) goto out;
+ if (H5Dread( dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf2 )<0) TEST_ERROR;
/* Compare the read array with the original array */
for( i = 0; i < (int)dims_out[0]; i++ ) {
for( j = 0; j < (int)dims_out[1]; j++ ) {
if ( i >= 70 || j >= 70 ) {
- if ( buf2[i][j] != fillvalue ) goto out;
+ if ( buf2[i][j] != fillvalue ) TEST_ERROR;
}
else {
- if ( buf2[i][j] != data[i][j] ) goto out;
+ if ( buf2[i][j] != data[i][j] ) TEST_ERROR;
}
}
}
@@ -388,18 +393,18 @@ int main( void )
TESTING("extend dataset read without fill value");
/* Create the data space with unlimited dimensions. */
- if ((space_id = H5Screate_simple( RANK, dims, maxdims ))<0) goto out;
+ if ((space_id = H5Screate_simple( RANK, dims, maxdims ))<0) TEST_ERROR;
/* Modify dataset creation properties, i.e. enable chunking. */
- if ((plist_id = H5Pcreate (H5P_DATASET_CREATE ))<0) goto out;
- if (H5Pset_chunk( plist_id, RANK, dims_chunk )<0) goto out;
- if (H5Pset_fill_time( plist_id, H5D_FILL_TIME_ALLOC)<0) goto out;
+ if ((plist_id = H5Pcreate (H5P_DATASET_CREATE ))<0) TEST_ERROR;
+ if (H5Pset_chunk( plist_id, RANK, dims_chunk )<0) TEST_ERROR;
+ if (H5Pset_fill_time( plist_id, H5D_FILL_TIME_ALLOC)<0) TEST_ERROR;
/* Create a new dataset within the file using cparms creation properties. */
- if ((dataset_id = H5Dcreate( file_id , "Dataset2", H5T_NATIVE_INT, space_id, plist_id ))<0) goto out;
+ if ((dataset_id = H5Dcreate( file_id , "Dataset2", H5T_NATIVE_INT, space_id, plist_id ))<0) TEST_ERROR;
/* Write the data. */
- if (H5Dwrite( dataset_id , H5T_NATIVE_INT, space_id, H5S_ALL, H5P_DEFAULT, data )<0) goto out;
+ if (H5Dwrite( dataset_id , H5T_NATIVE_INT, space_id, H5S_ALL, H5P_DEFAULT, data )<0) TEST_ERROR;
/* Close/release resources. */
H5Dclose( dataset_id );
@@ -409,29 +414,29 @@ int main( void )
/* Open the file */
- if ((file_id = H5Fopen( "set_extent_read.h5", H5F_ACC_RDWR, H5P_DEFAULT ))<0) goto out;
+ if ((file_id = H5Fopen( "set_extent_read.h5", H5F_ACC_RDWR, H5P_DEFAULT ))<0) TEST_ERROR;
/* Open the dataset */
- if ((dataset_id = H5Dopen( file_id , "Dataset2" ))<0) goto out;
+ if ((dataset_id = H5Dopen( file_id , "Dataset2" ))<0) TEST_ERROR;
/* Set new dimensions for the array. */
- if (H5Dset_extent( dataset_id, dims_new )<0) goto out;
+ if (H5Dset_extent( dataset_id, dims_new )<0) TEST_ERROR;
/* Get the space. */
- if ((space_id = H5Dget_space( dataset_id ))<0) goto out;
+ if ((space_id = H5Dget_space( dataset_id ))<0) TEST_ERROR;
/* Get dimensions. */
- if (H5Sget_simple_extent_dims( space_id, dims_out, NULL )<0) goto out;
+ if (H5Sget_simple_extent_dims( space_id, dims_out, NULL )<0) TEST_ERROR;
- if ( dims_out[0] != dims_new[0] ) goto out;
+ if ( dims_out[0] != dims_new[0] ) TEST_ERROR;
/* Read the new dataset. */
- if (H5Dread( dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf1 )<0) goto out;
+ if (H5Dread( dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf1 )<0) TEST_ERROR;
/* Compare the read array with the original array */
for( i = 0; i < (int)dims_out[0]; i++ )
for( j = 0; j < (int)dims_out[1]; j++ )
- if ( buf1[i][j] != data[i][j] ) goto out;
+ if ( buf1[i][j] != data[i][j] ) TEST_ERROR;
/*-------------------------------------------------------------------------
* Set new dimensions for the array; expand it again
@@ -439,27 +444,27 @@ int main( void )
*/
/* Set new dimensions for the array. */
- if (H5Dset_extent( dataset_id , dims )<0) goto out;
+ if (H5Dset_extent( dataset_id , dims )<0) TEST_ERROR;
/* Get the space. */
- if ((space_id = H5Dget_space( dataset_id ))<0) goto out;
+ if ((space_id = H5Dget_space( dataset_id ))<0) TEST_ERROR;
/* Get dimensions. */
- if (H5Sget_simple_extent_dims( space_id, dims_out, NULL )<0) goto out;
+ if (H5Sget_simple_extent_dims( space_id, dims_out, NULL )<0) TEST_ERROR;
- if ( dims_out[0] != dims[0] ) goto out;
+ if ( dims_out[0] != dims[0] ) TEST_ERROR;
/* Read the new dataset. */
- if (H5Dread( dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf2 )<0) goto out;
+ if (H5Dread( dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf2 )<0) TEST_ERROR;
/* Compare the read array with the original array */
for( i = 0; i < (int)dims_out[0]; i++ ) {
for( j = 0; j < (int)dims_out[1]; j++ ) {
if ( i >= 70 || j >= 70 ) {
- if ( buf2[i][j] != 0 ) goto out;
+ if ( buf2[i][j] != 0 ) TEST_ERROR;
}
else {
- if ( buf2[i][j] != data[i][j] ) goto out;
+ if ( buf2[i][j] != data[i][j] ) TEST_ERROR;
}
}
}
@@ -482,7 +487,7 @@ int main( void )
return 0;
-out:
+error:
H5Dclose( dataset_id );
H5Sclose( space_id );
H5Pclose( plist_id );
diff --git a/test/tmisc.c b/test/tmisc.c
index b1d1691..a6823d0 100644
--- a/test/tmisc.c
+++ b/test/tmisc.c
@@ -229,8 +229,8 @@ unsigned m13_rdata[MISC13_DIM1][MISC13_DIM2]; /* Data read from dataset
#define MISC20_DSET_NAME "Dataset"
#define MISC20_DSET2_NAME "Dataset2"
#define MISC20_SPACE_RANK 2
-#define MISC20_SPACE_DIM0 (8*1024*1024*1024ULL)
-#define MISC20_SPACE_DIM1 ((4*1024*1024*1024ULL)+1ULL)
+#define MISC20_SPACE_DIM0 (8*1024*1024*(uint64_t)1024)
+#define MISC20_SPACE_DIM1 ((4*1024*1024*(uint64_t)1024)+1)
#define MISC20_SPACE2_DIM0 8
#define MISC20_SPACE2_DIM1 4
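The MISC20 change above swaps the ULL suffix for a cast to uint64_t; either form makes one operand 64 bits wide, which is what keeps the product from being computed in plain int arithmetic. A tiny standalone sketch of that point (not part of the test):

#include <stdint.h>
#include <assert.h>

int main(void)
{
    /* One 64-bit factor promotes the whole product to 64-bit arithmetic */
    uint64_t dim0 = 8 * 1024 * 1024 * (uint64_t)1024;

    assert(dim0 == ((uint64_t)1 << 33));  /* 8 * 2^30 == 2^33 */
    return 0;
}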