author     Robb Matzke <matzke@llnl.gov>  1998-10-05 21:01:10 (GMT)
committer  Robb Matzke <matzke@llnl.gov>  1998-10-05 21:01:10 (GMT)
commit     21943f5d35f98b35c3c0182d56f81b4854269dd6 (patch)
tree       a5024e9eb5ca6a40f07ae224fcf49d3f4782785c
parent     58ee080e72f4aa1e10bc7c3fcf5f6499aaad18e7 (diff)
[svn-r735] Changes since 19981002
----------------------
./src/H5D.c
    Fill values now work for contiguous datasets, except for two things
    that need more support from the data space layer -- specifically the
    ability to form a selection from the difference of two selections:
    (1) extending an external contiguous dataset, and (2) an optimization
    that delays the fill until after the first H5Dwrite().

    Renamed H5D_allocate() to H5D_init_storage() since allocation is only
    part of the story.  Added a data space argument so it doesn't have to
    query the space from the object header -- the space is always
    available in the caller anyway.

    Removed `#ifdef HAVE_PARALLEL' from a few places where it wasn't
    necessary.  We don't need it around code that doesn't compile anything
    from mpi.h or mpio.h.

./src/H5Fistore.c
    Uncommented H5F_istore_allocate() for non-parallel builds and moved the
    `#ifdef HAVE_PARALLEL' to just around Kim's barrier.

./src/H5Fmpio.c
    Wrapped a couple of long lines.  Got rid of two signed vs. unsigned
    comparison warnings.

./MANIFEST
./test/Makefile.in
./test/fillval.c [NEW]
    Added tests for fill values.  The contiguous dataset extend test is
    disabled until H5S_SELECT_DIFF is implemented.

./tools/Makefile.in
    Fixed a bug where `make test' didn't build the executables first.  This
    should let the snapshots start up again.

./Makefile.in
    Changed to build the `test' directory before the `tools' directory.  We
    want the library tests to pass before we even start considering the
    tools.  You can still build and/or test the tools independently of
    whether the library tests pass.
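The new test/fillval.c exercises the public fill-value API.  Below is a
minimal standalone sketch (not part of the commit) of that usage, assuming an
illustrative file name (fill_example.h5) and mirroring the five-argument
H5Dcreate() call the test itself makes (later HDF5 releases changed this
signature):

    #include <hdf5.h>

    int
    main(void)
    {
        hid_t   file, space, dcpl, dset;
        hsize_t dims[2] = {32, 16};
        int     fillval = 0x4c70f1cd;  /* same bit pattern test/fillval.c uses */
        int     fillval_rd = 0;

        if ((file = H5Fcreate("fill_example.h5", H5F_ACC_TRUNC,
                              H5P_DEFAULT, H5P_DEFAULT)) < 0) goto error;
        if ((space = H5Screate_simple(2, dims, dims)) < 0) goto error;

        /* The fill value is a dataset creation property. */
        if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) goto error;
        if (H5Pset_fill_value(dcpl, H5T_NATIVE_INT, &fillval) < 0) goto error;

        if ((dset = H5Dcreate(file, "dset", H5T_NATIVE_INT, space, dcpl)) < 0)
            goto error;

        /* Unwritten elements now read back as the fill value, and the value
         * can be recovered from the dataset's creation property list. */
        if (H5Pclose(dcpl) < 0) goto error;
        if ((dcpl = H5Dget_create_plist(dset)) < 0) goto error;
        if (H5Pget_fill_value(dcpl, H5T_NATIVE_INT, &fillval_rd) < 0) goto error;

        H5Pclose(dcpl);
        H5Dclose(dset);
        H5Sclose(space);
        H5Fclose(file);
        return (fillval_rd == fillval) ? 0 : 1;

     error:
        return 1;
    }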
-rw-r--r--  MANIFEST              1
-rw-r--r--  Makefile.in           2
-rw-r--r--  README                2
-rw-r--r--  src/H5D.c           254
-rw-r--r--  src/H5Distore.c      16
-rw-r--r--  src/H5Fistore.c      16
-rw-r--r--  src/H5Fmpio.c        13
-rw-r--r--  test/Makefile.in     10
-rw-r--r--  test/fillval.c      731
-rw-r--r--  tools/Makefile.in     3
10 files changed, 920 insertions, 128 deletions
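For contiguous storage, the new H5D_init_storage() in the diff below avoids
per-element writes by replicating the fill value into a buffer of roughly
8 kB and writing that buffer repeatedly.  The following is a simplified,
standalone sketch of that strategy; stdio's fwrite() stands in for the
library's internal block-write routine (the real code uses H5V_array_fill()
together with H5F_block_write() or H5O_efl_write()):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Write NPOINTS copies of the FILL_SIZE-byte fill value to F.
     * Assumes fill_size > 0. */
    static int
    fill_contiguous(FILE *f, const void *fill, size_t fill_size, size_t npoints)
    {
        size_t         bufsize   = 8 * 1024;
        size_t         ptsperbuf = (fill_size >= bufsize) ? 1 : bufsize / fill_size;
        unsigned char *buf;
        size_t         i;

        bufsize = ptsperbuf * fill_size;
        if (NULL == (buf = malloc(bufsize))) return -1;

        /* Replicate the fill value throughout the buffer. */
        for (i = 0; i < ptsperbuf; i++)
            memcpy(buf + i * fill_size, fill, fill_size);

        /* Write the buffer as many times as needed to cover every element. */
        while (npoints > 0) {
            size_t n = (npoints < ptsperbuf) ? npoints : ptsperbuf;
            if (fwrite(buf, fill_size, n, f) != n) {
                free(buf);
                return -1;
            }
            npoints -= n;
        }
        free(buf);
        return 0;
    }

With a 4-byte fill value this issues one 8 kB write per 2048 elements rather
than 2048 separate writes.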
diff --git a/MANIFEST b/MANIFEST
index 86ae68f..0f09500 100644
--- a/MANIFEST
+++ b/MANIFEST
@@ -289,6 +289,7 @@
./test/dtypes.c
./test/extend.c
./test/external.c
+./test/fillval.c
./test/gheap.c
./test/hyperslab.c
./test/iopipe.c
diff --git a/Makefile.in b/Makefile.in
index 17757e8..bb94c74 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -11,7 +11,7 @@
@COMMENCE@
# Subdirectories in build-order
-SUBDIRS=src tools test
+SUBDIRS=src test tools
##############################################################################
# T A R G E T S
diff --git a/README b/README
index 6387d9d..baf5150 100644
--- a/README
+++ b/README
@@ -1,4 +1,4 @@
-This is hdf5-1.1.10 released on Wed Sep 30 13:33:34 CDT 1998
+This is hdf5-1.1.10 released on Mon Oct 5 15:55:59 CDT 1998
Please refer to the INSTALL file for installation instructions.
------------------------------------------------------------------------------
diff --git a/src/H5D.c b/src/H5D.c
index d8c2a01..dd97c27 100644
--- a/src/H5D.c
+++ b/src/H5D.c
@@ -28,7 +28,8 @@ static char RcsId[] = "@(#)$Revision$";
#include <H5Oprivate.h> /* Object headers */
#include <H5Pprivate.h> /* Property lists */
#include <H5Sprivate.h> /* Dataspace functions rky 980813 */
-#include <H5TBprivate.h> /* Temporary buffers */
+#include <H5TBprivate.h> /* Temporary buffers */
+#include <H5Vprivate.h> /* Vector and array functions */
#include <H5Zprivate.h> /* Data filters */
#ifdef QAK
@@ -98,9 +99,7 @@ static hbool_t interface_initialize_g = FALSE;
#define INTERFACE_INIT H5D_init_interface
static herr_t H5D_init_interface(void);
static void H5D_term_interface(void);
-#ifdef HAVE_PARALLEL
-static herr_t H5D_allocate (H5D_t *dataset, const H5D_xfer_t *xfer);
-#endif
+static herr_t H5D_init_storage(H5D_t *dataset, const H5S_t *space);
/*--------------------------------------------------------------------------
@@ -932,7 +931,8 @@ H5D_create(H5G_entry_t *loc, const char *name, const H5T_t *type,
* Chunked storage allows any type of data space extension, so we
* don't even bother checking.
*/
- if (new_dset->create_parms->chunk_ndims != H5S_get_simple_extent_ndims(space)) {
+ if (new_dset->create_parms->chunk_ndims !=
+ H5S_get_simple_extent_ndims(space)) {
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, NULL,
"dimensionality of chunks doesn't match the data space");
}
@@ -1030,25 +1030,17 @@ H5D_create(H5G_entry_t *loc, const char *name, const H5T_t *type,
}
}
+ /* Initialize the raw data */
+ if (H5D_init_storage(new_dset, space)<0) {
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL,
+ "unable to initialize storage");
+ }
+
/* Give the dataset a name */
if (H5G_insert(loc, name, &(new_dset->ent)) < 0) {
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to name dataset");
}
-#ifdef HAVE_PARALLEL
- /*
- * If the dataset uses chunk storage and is accessed via
- * parallel I/O, allocate file space for all chunks now.
- */
- if (new_dset->ent.file->shared->access_parms->driver == H5F_LOW_MPIO &&
- new_dset->layout.type == H5D_CHUNKED){
- if (H5D_allocate(new_dset, &H5D_xfer_dflt)<0) {
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL,
- "fail in file space allocation for chunks");
- }
- }
-#endif /* HAVE_PARALLEL */
-
/* Success */
ret_value = new_dset;
@@ -1193,22 +1185,18 @@ H5D_open(H5G_entry_t *loc, const char *name)
"storage address is undefined an no external file list");
}
-#ifdef HAVE_PARALLEL
/*
- * If the dataset uses chunk storage and is accessed via
- * parallel I/O, and file is open writable,
- * allocate file space for chunks that have not been
- * allocated in its "previous access".
+ * Make sure all storage is properly initialized for chunked datasets.
+ * This is especially important for parallel I/O where the B-tree must
+ * be fully populated before I/O can happen.
*/
- if (dataset->ent.file->shared->access_parms->driver==H5F_LOW_MPIO &&
- dataset->layout.type == H5D_CHUNKED &&
- (dataset->ent.file->intent & H5F_ACC_RDWR)){
- if (H5D_allocate(dataset, &H5D_xfer_dflt)<0) {
+ if ((dataset->ent.file->intent & H5F_ACC_RDWR) &&
+ H5D_CHUNKED==dataset->layout.type) {
+ if (H5D_init_storage(dataset, space)<0) {
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL,
- "fail in file space allocation dataset");
+ "unable to initialize file storage");
}
}
-#endif /* HAVE_PARALLEL */
/* Success */
ret_value = dataset;
@@ -1505,14 +1493,16 @@ H5D_read(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
}
if (NULL==(tconv_buf=xfer_parms->tconv_buf)) {
/* Allocate temporary buffer */
- if (FAIL==(tconv_id = H5TB_get_buf (target_size,1,(void **)&tconv_buf))) {
+ if (FAIL==(tconv_id = H5TB_get_buf(target_size, 1,
+ (void **)&tconv_buf))) {
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL,
"memory allocation failed for type conversion");
}
}
if (need_bkg && NULL==(bkg_buf=xfer_parms->bkg_buf)) {
/* Allocate temporary buffer */
- if (FAIL==(bkg_id = H5TB_get_buf (request_nelmts*dst_type_size,1,(void **)&bkg_buf))) {
+ if (FAIL==(bkg_id = H5TB_get_buf(request_nelmts*dst_type_size, 1,
+ (void **)&bkg_buf))) {
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL,
"memory allocation failed for type conversion");
}
@@ -1873,14 +1863,16 @@ H5D_write(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
}
if (NULL==(tconv_buf=xfer_parms->tconv_buf)) {
/* Allocate temporary buffer */
- if (FAIL==(tconv_id = H5TB_get_buf (target_size,1,(void **)&tconv_buf))) {
+ if (FAIL==(tconv_id = H5TB_get_buf(target_size, 1,
+ (void **)&tconv_buf))) {
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL,
"memory allocation failed for type conversion");
}
}
if (need_bkg && NULL==(bkg_buf=xfer_parms->bkg_buf)) {
/* Allocate temporary buffer */
- if (FAIL==(bkg_id = H5TB_get_buf (request_nelmts*dst_type_size,1,(void **)&bkg_buf))) {
+ if (FAIL==(bkg_id = H5TB_get_buf(request_nelmts*dst_type_size, 1,
+ (void **)&bkg_buf))) {
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL,
"memory allocation failed for type conversion");
}
@@ -2069,7 +2061,6 @@ H5D_extend (H5D_t *dataset, const hsize_t *size)
* able to muck things up.
*/
-
/* Increase the size of the data space */
if (NULL==(space=H5S_read (&(dataset->ent)))) {
HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL,
@@ -2080,26 +2071,37 @@ H5D_extend (H5D_t *dataset, const hsize_t *size)
"unable to increase size of data space");
}
- /* Save the new dataspace in the file if necessary */
if (changed>0){
+ /* Save the new dataspace in the file if necessary */
if (H5S_modify (&(dataset->ent), space)<0) {
HGOTO_ERROR (H5E_DATASET, H5E_WRITEERROR, FAIL,
"unable to update file with new dataspace");
}
-#ifdef HAVE_PARALLEL
+
+ /* Initialize the new parts of the dataset */
+#ifdef LATER
+ if (H5S_select_all(space)<0 ||
+ H5S_select_hyperslab(space, H5S_SELECT_DIFF, zero, NULL,
+ old_dims, NULL)<0) {
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
+ "unable to select new extents for fill value");
+ }
+#else
/*
- * If the dataset uses chunk storage and is accessed via
- * parallel I/O, need to allocate file space for all extended
- * chunks now.
+ * We don't have the H5S_SELECT_DIFF operator yet. We really only
+ * need it for contiguous datasets because the chunked datasets will
+ * either fill on demand during I/O or attempt a fill of all chunks.
*/
- if (dataset->ent.file->shared->access_parms->driver==H5F_LOW_MPIO &&
- dataset->layout.type==H5D_CHUNKED){
- if (H5D_allocate(dataset, &H5D_xfer_dflt)<0) {
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
- "fail in file space allocation for chunks");
- }
+ if (H5D_CONTIGUOUS==dataset->layout.type &&
+ dataset->create_parms->fill.buf) {
+ HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL,
+ "unable to select fill value region");
+ }
+#endif
+ if (H5D_init_storage(dataset, space)<0) {
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
+ "unable to initialize dataset with fill value");
}
-#endif /* HAVE_PARALLEL */
}
@@ -2161,86 +2163,134 @@ H5D_typeof (H5D_t *dset)
}
-#ifdef HAVE_PARALLEL
/*-------------------------------------------------------------------------
- * Function: H5D_allocate
+ * Function: H5D_init_storage
*
- * Purpose: Allocate file space for the data storage of the dataset.
- * Return SUCCEED if all needed allocation succeed, otherwise
- * FAIL.
+ * Purpose: Initialize the data for a new dataset. If a selection is
+ * defined for SPACE then initialize only that part of the
+ * dataset.
*
* Return: Success: SUCCEED
*
* Failure: FAIL
*
- * Note: Current implementation allocates chunked dataset only.
- *
- * Programmer: Albert Cheng
- * July 9, 1998
+ * Programmer: Robb Matzke
+ * Monday, October 5, 1998
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5D_allocate (H5D_t *dataset, const H5D_xfer_t *xfer)
+H5D_init_storage(H5D_t *dset, const H5S_t *space)
{
- H5S_t *space = NULL;
+ intn ndims;
+ hsize_t dim[H5O_LAYOUT_NDIMS];
+ hsize_t npoints, ptsperbuf;
+ size_t i, size, bufsize=8*1024;
+ hbool_t all_zero;
+ hid_t buf_id = -1;
+ haddr_t addr;
herr_t ret_value = FAIL;
- hsize_t space_dim[H5O_LAYOUT_NDIMS];
- intn space_ndims;
- H5O_layout_t *layout;
+ void *buf = NULL;
- FUNC_ENTER(H5D_allocate, FAIL);
-#ifdef AKC
- printf("Enter %s:\n", FUNC);
-#endif
-
- /* Check args */
- assert(dataset);
- assert(&(dataset->layout));
- layout = &(dataset->layout);
- assert(layout->ndims>0 && layout->ndims<=H5O_LAYOUT_NDIMS);
- assert(H5F_addr_defined(&(layout->addr)));
-
+ FUNC_ENTER(H5D_init_storage, FAIL);
+ assert(dset);
+ assert(space);
- switch (layout->type) {
+ switch (dset->layout.type) {
case H5D_CONTIGUOUS:
- HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "not implemented yet");
-
- case H5D_CHUNKED:
- if (NULL==(space=H5S_read (&(dataset->ent)))) {
- HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL,
- "unable to read data space info from dataset header");
+ /*
+ * If the fill value is non-zero then write the fill value to the
+ * specified selection.
+ */
+ for (i=0, all_zero=TRUE; i<dset->create_parms->fill.size; i++) {
+ if (((uint8*)(dset->create_parms->fill.buf))[i]) {
+ all_zero = FALSE;
+ break;
+ }
}
- /* get current dims of dataset */
- if ((space_ndims=H5S_get_simple_extent_dims(space, space_dim,
- NULL)) <= 0 ||
- space_ndims+1 != layout->ndims){
- HRETURN_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
- "unable to allocate chunk storage");
+ npoints = H5S_get_simple_extent_npoints(space);
+
+ if (!all_zero && npoints==H5S_get_select_npoints(space)) {
+ /*
+ * Fill the entire current extent with the fill value. We can do
+ * this quite efficiently by making sure we copy the fill value
+ * in relatively large pieces.
+ */
+ ptsperbuf = MAX(1, bufsize/dset->create_parms->fill.size);
+ bufsize = ptsperbuf * dset->create_parms->fill.size;
+ if ((buf_id=H5TB_get_buf(bufsize, TRUE, &buf))<0) {
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
+ "unable to get buffer for fill value");
+ }
+ H5V_array_fill(buf, dset->create_parms->fill.buf,
+ dset->create_parms->fill.size, ptsperbuf);
+ if (dset->create_parms->efl.nused) {
+ H5F_addr_reset(&addr);
+ } else {
+ addr = dset->layout.addr;
+ }
+
+ while (npoints>0) {
+ size = MIN(ptsperbuf, npoints) * dset->create_parms->fill.size;
+ if (dset->create_parms->efl.nused) {
+ if (H5O_efl_write(dset->ent.file,
+ &(dset->create_parms->efl),
+ &addr, size, buf)<0) {
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
+ "unable to write fill value to dataset");
+ }
+ } else {
+ if (H5F_block_write(dset->ent.file, &addr, size,
+ H5D_XFER_DFLT, buf)<0) {
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
+ "unable to write fill value to dataset");
+ }
+ }
+ npoints -= MIN(ptsperbuf, npoints);
+ H5F_addr_inc(&addr, size);
+ }
+ } else if (!all_zero) {
+ /*
+ * Fill the specified selection with the fill value.
+ */
+ HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL,
+ "unable to initialize dataset with fill value");
}
- /* copy the element size over */
- space_dim[space_ndims] = layout->dim[space_ndims];
+ break;
- if (H5F_istore_allocate(dataset->ent.file,
- (layout), space_dim, xfer->split_ratios,
- &(dataset->create_parms->pline),
- &(dataset->create_parms->fill))<0) {
- HRETURN(FAIL);
+ case H5D_CHUNKED:
+ /*
+ * If the dataset is accessed via parallel I/O, allocate file space
+ * for all chunks now and initialize each chunk with the fill value.
+ */
+ if (H5F_LOW_MPIO==dset->ent.file->shared->access_parms->driver) {
+ /* We only handle simple data spaces so far */
+ if ((ndims=H5S_get_simple_extent_dims(space, dim, NULL))<0) {
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
+ "unable to get simple data space info");
+ }
+ dim[ndims] = dset->layout.dim[ndims];
+ ndims++;
+
+ if (H5F_istore_allocate(dset->ent.file, &(dset->layout),
+ dim, H5D_xfer_dflt.split_ratios,
+ &(dset->create_parms->pline),
+ &(dset->create_parms->fill))<0) {
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
+ "unable to allocate all chunks of dataset");
+ }
}
break;
-
- default:
- HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "not implemented yet");
}
-
ret_value = SUCCEED;
- done:
- if (space)
- H5S_close(space);
-
+ done:
+ if (buf_id>=0 && H5TB_release_buf(buf_id)<0) {
+ HRETURN_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
+ "unable to release fill value temporary buffer");
+ }
FUNC_LEAVE(ret_value);
}
-#endif
+
diff --git a/src/H5Distore.c b/src/H5Distore.c
index 8c9dd1b..ed0659e 100644
--- a/src/H5Distore.c
+++ b/src/H5Distore.c
@@ -2001,7 +2001,6 @@ H5F_istore_debug(H5F_t *f, const haddr_t *addr, FILE * stream, intn indent,
}
-#ifdef HAVE_PARALLEL
/*-------------------------------------------------------------------------
* Function: H5F_istore_get_addr
*
@@ -2172,20 +2171,21 @@ H5F_istore_allocate (H5F_t *f, const H5O_layout_t *layout,
if (carry) break;
}
+#ifdef HAVE_PARALLEL
/*
* rky 980923
- * The following barrier is a temporary fix to prevent overwriting
- * real data caused by a race between one proc's call of H5F_istore_allocate
- * (from H5D_allocate, ultimately from H5Dcreate and H5Dextend)
- * and another proc's call of H5Dwrite.
- * Eventually, this barrier should be removed,
- * when H5D_allocate is changed to call H5MF_alloc directly
+ *
+ * The following barrier is a temporary fix to prevent overwriting real
+ * data caused by a race between one proc's call of H5F_istore_allocate
+ * (from H5D_init_storage, ultimately from H5Dcreate and H5Dextend) and
+ * another proc's call of H5Dwrite. Eventually, this barrier should be
+ * removed, when H5D_init_storage is changed to call H5MF_alloc directly
* to allocate space, instead of calling H5F_istore_unlock.
*/
if (MPI_Barrier( f->shared->access_parms->u.mpio.comm )) {
HRETURN_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "MPI_Barrier failed");
}
+#endif
FUNC_LEAVE(SUCCEED);
}
-#endif
diff --git a/src/H5Fistore.c b/src/H5Fistore.c
index 8c9dd1b..ed0659e 100644
--- a/src/H5Fistore.c
+++ b/src/H5Fistore.c
@@ -2001,7 +2001,6 @@ H5F_istore_debug(H5F_t *f, const haddr_t *addr, FILE * stream, intn indent,
}
-#ifdef HAVE_PARALLEL
/*-------------------------------------------------------------------------
* Function: H5F_istore_get_addr
*
@@ -2172,20 +2171,21 @@ H5F_istore_allocate (H5F_t *f, const H5O_layout_t *layout,
if (carry) break;
}
+#ifdef HAVE_PARALLEL
/*
* rky 980923
- * The following barrier is a temporary fix to prevent overwriting
- * real data caused by a race between one proc's call of H5F_istore_allocate
- * (from H5D_allocate, ultimately from H5Dcreate and H5Dextend)
- * and another proc's call of H5Dwrite.
- * Eventually, this barrier should be removed,
- * when H5D_allocate is changed to call H5MF_alloc directly
+ *
+ * The following barrier is a temporary fix to prevent overwriting real
+ * data caused by a race between one proc's call of H5F_istore_allocate
+ * (from H5D_init_storage, ultimately from H5Dcreate and H5Dextend) and
+ * another proc's call of H5Dwrite. Eventually, this barrier should be
+ * removed, when H5D_init_storage is changed to call H5MF_alloc directly
* to allocate space, instead of calling H5F_istore_unlock.
*/
if (MPI_Barrier( f->shared->access_parms->u.mpio.comm )) {
HRETURN_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "MPI_Barrier failed");
}
+#endif
FUNC_LEAVE(SUCCEED);
}
-#endif
diff --git a/src/H5Fmpio.c b/src/H5Fmpio.c
index 8ca50d8..2ebe5c8 100644
--- a/src/H5Fmpio.c
+++ b/src/H5Fmpio.c
@@ -335,7 +335,8 @@ H5F_mpio_open(const char *name, const H5F_access_t *access_parms, uintn flags,
}
#endif
- mpierr = MPI_File_open(access_parms->u.mpio.comm, (char*)name, mpi_amode, access_parms->u.mpio.info, &fh);
+ mpierr = MPI_File_open(access_parms->u.mpio.comm, (char*)name, mpi_amode,
+ access_parms->u.mpio.info, &fh);
if (MPI_SUCCESS != mpierr) {
MPI_Error_string( mpierr, mpierrmsg, &msglen );
HRETURN_ERROR(H5E_IO, H5E_CANTOPENFILE, NULL, mpierrmsg );
@@ -511,7 +512,7 @@ H5F_mpio_read(H5F_low_t *lf, H5F_access_t *access_parms,
"couldn't convert addr to MPIOffset" );
}
size_i = (int)size;
- if (size_i != size) { /* check type conversion */
+ if ((size_t)size_i != size) { /* check type conversion */
HRETURN_ERROR(H5E_IO, H5E_BADTYPE, FAIL,
"couldn't convert size to int" );
}
@@ -764,7 +765,7 @@ H5F_mpio_write(H5F_low_t *lf, H5F_access_t *access_parms,
"couldn't convert addr to MPIOffset" );
}
size_i = (int)size;
- if (size_i != size) { /* check type conversion */
+ if ((size_t)size_i != size) { /* check type conversion */
HRETURN_ERROR(H5E_IO, H5E_BADTYPE, FAIL,
"couldn't convert size to int" );
}
@@ -975,9 +976,11 @@ H5F_MPIOff_to_haddr( MPI_Offset mpi_off, haddr_t *addr )
* Problems and limitations:
*
* Return: Success: return value is SUCCEED
- * and the MPIOffset contains the converted value
+ * and the MPIOffset contains the converted
+ * value.
*
- * Failure: return value is FAIL, the MPIOffset is undefined
+ * Failure: return value is FAIL, the MPIOffset is
+ * undefined.
*
* Programmer:
* January 30, 1998
diff --git a/test/Makefile.in b/test/Makefile.in
index fc671a4..10923c0 100644
--- a/test/Makefile.in
+++ b/test/Makefile.in
@@ -12,7 +12,7 @@ CPPFLAGS=-I. -I../src @CPPFLAGS@
# These are our main targets. They should be listed in the order to be
# executed, generally most specific tests to least specific tests.
TESTS=testhdf5 gheap hyperslab istore bittests dtypes dsets cmpd_dset extend \
- external shtype links unlink big mtime
+ external shtype links unlink big mtime fillval
TIMINGS=iopipe chunk ragged overhead
# Temporary files
@@ -24,7 +24,8 @@ MOSTLYCLEAN=cmpd_dset.h5 dataset.h5 extend.h5 istore.h5 tfile1.h5 tfile2.h5 \
gheap1.h5 gheap2.h5 gheap3.h5 gheap4.h5 shtype0.h5 shtype1.h5 \
shtype2a.h5 shtype2b.h5 shtype3.h5 links.h5 chunk.h5 big.data \
big[0-9][0-9][0-9][0-9][0-9].h5 dtypes1.h5 dtypes2.h5 tattr.h5 \
- tselect.h5 mtime.h5 ragged.h5 grptime.h5 unlink.h5 overhead.h5
+ tselect.h5 mtime.h5 ragged.h5 grptime.h5 unlink.h5 overhead.h5 \
+ fillval_[0-9].h5
CLEAN=$(TIMINGS)
# Source and object files for programs... The TEST_SRC list contains all the
@@ -34,7 +35,7 @@ CLEAN=$(TIMINGS)
TEST_SRC=testhdf5.c tattr.c tfile.c theap.c tmeta.c tohdr.c tselect.c tstab.c \
th5s.c dtypes.c hyperslab.c istore.c dsets.c cmpd_dset.c extend.c \
external.c iopipe.c gheap.c shtype.c big.c links.c chunk.c bittests.c \
- mtime.c ragged.c unlink.c overhead.c
+ mtime.c ragged.c unlink.c overhead.c fillval.c
TEST_OBJ=$(TEST_SRC:.c=.o)
# Private header files (not to be installed)...
@@ -112,4 +113,7 @@ unlink: unlink.o ../src/libhdf5.a
overhead: overhead.o ../src/libhdf5.a
$(CC) $(CFLAGS) -o $@ overhead.o ../src/libhdf5.a $(LIBS)
+fillval: fillval.o ../src/libhdf5.a
+ $(CC) $(CFLAGS) -o $@ fillval.o ../src/libhdf5.a $(LIBS)
+
@CONCLUDE@
diff --git a/test/fillval.c b/test/fillval.c
new file mode 100644
index 0000000..3880e23
--- /dev/null
+++ b/test/fillval.c
@@ -0,0 +1,731 @@
+/*
+ * Copyright (C) 1998 NCSA
+ * All rights reserved.
+ *
+ * Programmer: Robb Matzke <robb@arborea.spizella.com>
+ * Thursday, October 1, 1998
+ *
+ * Purpose: Tests dataset fill values.
+ */
+#include <fcntl.h>
+#include <hdf5.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+/*
+ * Define NO_FILLING if you want to compare how this test works when there is
+ * no fill value (that is, when the fill value is zero).
+ */
+/* #define NO_FILLING */
+
+
+#define FILE_NAME_1 "fillval_1.h5"
+#define FILE_NAME_2 "fillval_2.h5"
+#define FILE_NAME_3 "fillval_3.h5"
+#define FILE_NAME_4 "fillval_4.h5"
+#define FILE_NAME_5 "fillval_5.h5"
+#define FILE_NAME_6 "fillval_6.h5"
+#define FILE_NAME_RAW "fillval.raw"
+
+#include <H5config.h>
+#ifndef HAVE_ATTRIBUTE
+# undef __attribute__
+# define __attribute__(X) /*void*/
+# define __unused__ /*void*/
+#else
+# define __unused__ __attribute__((unused))
+#endif
+
+
+/*-------------------------------------------------------------------------
+ * Function: cleanup
+ *
+ * Purpose: Removes test files
+ *
+ * Return: void
+ *
+ * Programmer: Robb Matzke
+ * Thursday, June 4, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+cleanup(void)
+{
+ if (!getenv("HDF5_NOCLEANUP")) {
+ remove(FILE_NAME_1);
+ remove(FILE_NAME_2);
+ remove(FILE_NAME_3);
+ remove(FILE_NAME_4);
+ remove(FILE_NAME_5);
+ remove(FILE_NAME_6);
+ remove(FILE_NAME_RAW);
+ }
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: display_error_cb
+ *
+ * Purpose: Displays the error stack after printing "*FAILED*".
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Robb Matzke
+ * Wednesday, March 4, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+display_error_cb(void __unused__ *client_data)
+{
+ puts("*FAILED*");
+ H5Eprint(stdout);
+ return 0;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_getset
+ *
+ * Purpose: Tests the H5Pget_fill_value() and H5Pset_fill_value()
+ * functions.
+ *
+ * Return: Success: 0
+ *
+ * Failure: number of errors
+ *
+ * Programmer: Robb Matzke
+ * Thursday, October 1, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_getset(void)
+{
+ herr_t status;
+ hid_t dcpl;
+ int fill_i;
+ hid_t type_ss, type_si;
+ struct fill_si {
+ int v1, v2;
+ } fill_si;
+ struct fill_ss {
+ short v1, v2;
+ } fill_ss, fill_ss_rd;
+
+ printf("%-70s", "Testing property lists");
+ fflush(stdout);
+
+ /*
+ * Create the dataset creation property list and the data types that will
+ * be used during this test.
+ */
+ if ((dcpl=H5Pcreate(H5P_DATASET_CREATE))<0) goto error;
+ if ((type_ss=H5Tcreate(H5T_COMPOUND, sizeof fill_ss))<0 ||
+ H5Tinsert(type_ss, "v1", HOFFSET(struct fill_ss, v1),
+ H5T_NATIVE_SHORT)<0 ||
+ H5Tinsert(type_ss, "v2", HOFFSET(struct fill_ss, v2),
+ H5T_NATIVE_SHORT)<0) {
+ goto error;
+ }
+ if ((type_si=H5Tcreate(H5T_COMPOUND, sizeof fill_si))<0 ||
+ H5Tinsert(type_si, "v1", HOFFSET(struct fill_si, v1),
+ H5T_NATIVE_INT)<0 ||
+ H5Tinsert(type_si, "v2", HOFFSET(struct fill_si, v2),
+ H5T_NATIVE_INT)<0) {
+ goto error;
+ }
+
+ /*
+ * Reading the fill value from a dataset creation property list that has
+ * no fill value should result in a failure.
+ */
+ H5E_BEGIN_TRY {
+ status = H5Pget_fill_value(dcpl, H5T_NATIVE_INT, &fill_i);
+ } H5E_END_TRY;
+ if (status>=0) {
+ puts("*FAILED*");
+ puts(" H5Pget_fill_value() should have been negative");
+ goto error;
+ }
+
+ /*
+ * Set the fill value using a struct as the data type.
+ */
+ fill_ss.v1 = 1111;
+ fill_ss.v2 = 2222;
+ if (H5Pset_fill_value(dcpl, type_ss, &fill_ss)<0) goto error;
+
+ /*
+ * Get the fill value using the same data type that was used to set it.
+ */
+ if (H5Pget_fill_value(dcpl, type_ss, &fill_ss_rd)<0) goto error;
+ if (fill_ss.v1!=fill_ss_rd.v1 || fill_ss.v2!=fill_ss_rd.v2) {
+ puts("*FAILED*");
+ puts(" Failed to get fill value using same data type that was used");
+ puts(" to set the fill value.");
+ goto error;
+ }
+
+ /*
+ * Get the fill value using some other data type.
+ */
+ if (H5Pget_fill_value(dcpl, type_si, &fill_si)<0) goto error;
+ if (fill_ss.v1!=fill_si.v1 || fill_ss.v2!=fill_si.v2) {
+ puts("*FAILED*");
+ puts(" Failed to get fill value using a data type other than what");
+ puts(" was used to set the fill value.");
+ goto error;
+ }
+
+ /*
+ * Reset the fill value
+ */
+ if (H5Pset_fill_value(dcpl, type_si, &fill_si)<0) goto error;
+ if (H5Pget_fill_value(dcpl, type_ss, &fill_ss)<0) goto error;
+ if (fill_si.v1!=fill_ss.v1 || fill_si.v2!=fill_ss.v2) {
+ puts("*FAILED*");
+ puts(" Resetting the fill value was unsuccessful.");
+ goto error;
+ }
+
+ /* Success */
+ if (H5Pclose(dcpl)<0) goto error;
+ puts(" PASSED");
+ return 0;
+
+ error:
+ H5Pclose(dcpl);
+ return 1;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_create
+ *
+ * Purpose: Tests creating datasets that have fill values.
+ *
+ * Return: Success: 0
+ *
+ * Failure: number of errors
+ *
+ * Programmer: Robb Matzke
+ * Thursday, October 1, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_create(const char *filename, H5D_layout_t layout)
+{
+ hid_t file, space, dcpl, dset;
+ hsize_t cur_size[5] = {32, 16, 8, 4, 2};
+ hsize_t ch_size[5] = {1, 1, 1, 4, 2};
+ int fillval = 0x4c70f1cd, fillval_rd=0;
+ char test[256];
+
+ if (H5D_CHUNKED==layout) {
+ strcpy(test, "Testing chunked dataset creation");
+ } else {
+ strcpy(test, "Testing contiguous dataset creation");
+ }
+ printf("%-70s", test);
+ fflush(stdout);
+
+ /* Create a file and dataset */
+ if ((file=H5Fcreate(filename, H5F_ACC_TRUNC,
+ H5P_DEFAULT, H5P_DEFAULT))<0) goto error;
+ if ((space=H5Screate_simple(5, cur_size, cur_size))<0) goto error;
+ if ((dcpl=H5Pcreate(H5P_DATASET_CREATE))<0) goto error;
+ if (H5D_CHUNKED==layout) {
+ if (H5Pset_chunk(dcpl, 5, ch_size)<0) goto error;
+ }
+#ifndef NO_FILLING
+ if (H5Pset_fill_value(dcpl, H5T_NATIVE_INT, &fillval)<0) goto error;
+#endif
+ if ((dset=H5Dcreate(file, "dset", H5T_NATIVE_INT, space, dcpl))<0)
+ goto error;
+ if (H5Dclose(dset)<0) goto error;
+ if (H5Sclose(space)<0) goto error;
+ if (H5Pclose(dcpl)<0) goto error;
+ if (H5Fclose(file)<0) goto error;
+
+ /* Open the file and get the dataset fill value */
+ if ((file=H5Fopen(FILE_NAME_1, H5F_ACC_RDONLY, H5P_DEFAULT))<0)
+ goto error;
+ if ((dset=H5Dopen(file, "dset"))<0) goto error;
+ if ((dcpl=H5Dget_create_plist(dset))<0) goto error;
+#ifndef NO_FILLING
+ if (H5Pget_fill_value(dcpl, H5T_NATIVE_INT, &fillval_rd)<0) goto error;
+ if (fillval_rd!=fillval) {
+ puts("*FAILED*");
+ puts(" Got a different fill value than what was set.");
+ goto error;
+ }
+#endif
+ if (H5Pclose(dcpl)<0) goto error;
+ if (H5Dclose(dset)<0) goto error;
+ if (H5Fclose(file)<0) goto error;
+
+ puts(" PASSED");
+ return 0;
+
+ error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dcpl);
+ H5Sclose(space);
+ H5Dclose(dset);
+ H5Fclose(file);
+ } H5E_END_TRY;
+ return 1;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_rdwr
+ *
+ * Purpose: Tests fill values for chunked datasets.
+ *
+ * Return: Success: 0
+ *
+ * Failure: number of errors
+ *
+ * Programmer: Robb Matzke
+ * Thursday, October 1, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_rdwr(const char *filename, H5D_layout_t layout)
+{
+ hid_t file, fspace, mspace, dcpl, dset;
+ hsize_t cur_size[5] = {32, 16, 8, 4, 2};
+ hsize_t ch_size[5] = {1, 16, 8, 4, 2};
+ hsize_t one[5] = {1, 1, 1, 1, 1};
+ hssize_t hs_size[5], hs_offset[5], hs_stride[5], nelmts;
+#ifdef NO_FILLING
+ int fillval = 0;
+#else
+ int fillval = 0x4c70f1cd;
+#endif
+ int val_rd, should_be;
+ int i, j, *buf=NULL, odd;
+ char test[256];
+
+ if (H5D_CHUNKED==layout) {
+ strcpy(test, "Testing chunked dataset I/O");
+ } else {
+ strcpy(test, "Testing contiguous dataset I/O");
+ }
+ printf("%-70s", test);
+ fflush(stdout);
+
+ /* Create a file and dataset */
+ if ((file=H5Fcreate(filename, H5F_ACC_TRUNC,
+ H5P_DEFAULT, H5P_DEFAULT))<0) goto error;
+ if ((fspace=H5Screate_simple(5, cur_size, cur_size))<0) goto error;
+ if ((dcpl=H5Pcreate(H5P_DATASET_CREATE))<0) goto error;
+ if (H5D_CHUNKED==layout) {
+ if (H5Pset_chunk(dcpl, 5, ch_size)<0) goto error;
+ }
+#ifndef NO_FILLING
+ if (H5Pset_fill_value(dcpl, H5T_NATIVE_INT, &fillval)<0) goto error;
+#endif
+ if ((dset=H5Dcreate(file, "dset", H5T_NATIVE_INT, fspace, dcpl))<0)
+ goto error;
+
+ /* Read some data and make sure it's the fill value */
+ if ((mspace=H5Screate_simple(5, one, NULL))<0) goto error;
+ for (i=0; i<1000; i++) {
+ for (j=0; j<5; j++) {
+ hs_offset[j] = rand() % cur_size[j];
+ }
+ if (H5Sselect_hyperslab(fspace, H5S_SELECT_SET, hs_offset, NULL,
+ one, NULL)<0) goto error;
+ if (H5Dread(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT,
+ &val_rd)<0) goto error;
+ if (val_rd!=fillval) {
+ puts("*FAILED*");
+ puts(" Value read was not a fill value.");
+ printf(" Elmt={%ld,%ld,%ld,%ld,%ld}, read: %u, "
+ "Fill value: %u\n",
+ (long)hs_offset[0], (long)hs_offset[1],
+ (long)hs_offset[2], (long)hs_offset[3],
+ (long)hs_offset[4], val_rd, fillval);
+ goto error;
+ }
+ }
+ if (H5Sclose(mspace)<0) goto error;
+
+ /* Write to all odd data locations */
+ for (i=0, nelmts=1; i<5; i++) {
+ hs_size[i] = cur_size[i]/2;
+ hs_offset[i] = 0;
+ hs_stride[i] = 2;
+ nelmts *= hs_size[i];
+ }
+ if ((mspace=H5Screate_simple(5, hs_size, hs_size))<0) goto error;
+ buf = malloc(nelmts*sizeof(int));
+ for (i=0; i<nelmts; i++) buf[i] = 9999;
+ if (H5Sselect_hyperslab(fspace, H5S_SELECT_SET, hs_offset, hs_stride,
+ hs_size, NULL)<0) goto error;
+ if (H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT,
+ buf)<0) goto error;
+ free(buf);
+ buf = NULL;
+ H5Sclose(mspace);
+
+ /* Read some data and make sure it's the right value */
+ if ((mspace=H5Screate_simple(5, one, NULL))<0) goto error;
+ for (i=0; i<1000; i++) {
+ for (j=0, odd=0; j<5; j++) {
+ hs_offset[j] = rand() % cur_size[j];
+ odd += hs_offset[j]%2;
+ }
+ should_be = odd ? fillval : 9999;
+ if (H5Sselect_hyperslab(fspace, H5S_SELECT_SET, hs_offset, NULL,
+ one, NULL)<0) goto error;
+ if (H5Dread(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT,
+ &val_rd)<0) goto error;
+
+ if (val_rd!=should_be) {
+ puts("*FAILED*");
+ puts(" Value read was not correct.");
+ printf(" Elmt={%ld,%ld,%ld,%ld,%ld}, read: %u, "
+ "should be: %u\n",
+ (long)hs_offset[0], (long)hs_offset[1],
+ (long)hs_offset[2], (long)hs_offset[3],
+ (long)hs_offset[4], val_rd, should_be);
+ goto error;
+ }
+ }
+ if (H5Sclose(mspace)<0) goto error;
+
+
+
+ if (H5Dclose(dset)<0) goto error;
+ if (H5Sclose(fspace)<0) goto error;
+ if (H5Pclose(dcpl)<0) goto error;
+ if (H5Fclose(file)<0) goto error;
+ puts(" PASSED");
+ return 0;
+
+ error:
+ H5E_BEGIN_TRY {
+ H5Dclose(dset);
+ H5Sclose(fspace);
+ H5Sclose(mspace);
+ H5Pclose(dcpl);
+ H5Fclose(file);
+ } H5E_END_TRY;
+ return 1;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_extend
+ *
+ * Purpose: Test that filling works okay when a dataset is extended.
+ *
+ * Return: Success: 0
+ *
+ * Failure: number of errors
+ *
+ * Programmer: Robb Matzke
+ * Monday, October 5, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_extend(const char *filename, H5D_layout_t layout)
+{
+ hid_t file, fspace, mspace, dcpl, dset;
+ hsize_t cur_size[5] = {32, 16, 8, 4, 2};
+ hsize_t max_size[5] = {128, 64, 32, 16, 8};
+ hsize_t ch_size[5] = {1, 16, 8, 4, 2};
+ hsize_t one[5] = {1, 1, 1, 1, 1};
+ hssize_t hs_size[5], hs_offset[5], hs_stride[5], nelmts;
+#ifdef NO_FILLING
+ int fillval = 0;
+#else
+ int fillval = 0x4c70f1cd;
+#endif
+ int val_rd, should_be;
+ int i, j, *buf=NULL, odd, fd;
+ char test[256];
+
+ if (H5D_CHUNKED==layout) {
+ strcpy(test, "Testing chunked dataset extend");
+ } else {
+ strcpy(test, "Testing contiguous dataset extend");
+ }
+ printf("%-70s", test);
+ fflush(stdout);
+
+ if ((dcpl=H5Pcreate(H5P_DATASET_CREATE))<0) goto error;
+ if (H5D_CHUNKED==layout) {
+ if (H5Pset_chunk(dcpl, 5, ch_size)<0) goto error;
+ }
+#ifndef NO_FILLING
+ if (H5Pset_fill_value(dcpl, H5T_NATIVE_INT, &fillval)<0) goto error;
+#endif
+
+#if 1
+ /*
+ * Remove this once contiguous datasets can support extensions in other
+ * than the slowest varying dimension. The purpose of this block is to
+ * make only the slowest varying dimension extendible and yet have the
+ * same total number of elements as originally.
+ *
+ * If this is removed prematurely then you will get an error `only the
+ * first dimension can be extendible' as long as the test isn't skipped
+ * below.
+ */
+ if (H5D_CONTIGUOUS==layout) {
+ max_size[0] = (max_size[0]*max_size[1]*max_size[2]*
+ max_size[3]*max_size[4]) /
+ (cur_size[1]*cur_size[2]*cur_size[3]*cur_size[4]);
+ max_size[1] = cur_size[1];
+ max_size[2] = cur_size[2];
+ max_size[3] = cur_size[3];
+ max_size[4] = cur_size[4];
+ }
+#endif
+
+#if 1
+ /*
+ * Remove this once internal contiguous datasets can support
+ * extending. If it's removed prematurely you will get an error
+ * `extendible contiguous non-external dataset' as long as the test isn't
+ * skipped below.
+ */
+ if (H5D_CONTIGUOUS==layout) {
+ nelmts = max_size[0]*max_size[1]*max_size[2]*max_size[3]*max_size[4];
+ if ((fd=open(FILE_NAME_RAW, O_RDWR|O_CREAT|O_TRUNC, 0666))<0 ||
+ close(fd)<0) goto error;
+ if (H5Pset_external(dcpl, FILE_NAME_RAW, 0, nelmts*sizeof(int))<0)
+ goto error;
+ }
+#endif
+
+#if 1
+ /*
+ * Remove this when contiguous datasets can be exended to some
+ * predetermined fininte size, even if it's just in the slowest varying
+ * dimension. If it's removed prematurely then you'll get one of the
+ * errors described above or `unable to select fill value region'.
+ */
+ if (H5D_CONTIGUOUS==layout) {
+ puts(" SKIP");
+ puts(" Not implemented yet -- needs H5S_SELECT_DIFF operator");
+ return 0;
+ }
+#endif
+
+ /* Create a file and dataset */
+ if ((file=H5Fcreate(filename, H5F_ACC_TRUNC,
+ H5P_DEFAULT, H5P_DEFAULT))<0) goto error;
+ if ((fspace=H5Screate_simple(5, cur_size, max_size))<0) goto error;
+ if ((dset=H5Dcreate(file, "dset", H5T_NATIVE_INT, fspace, dcpl))<0)
+ goto error;
+
+ /* Read some data and make sure it's the fill value */
+ if ((mspace=H5Screate_simple(5, one, NULL))<0) goto error;
+ for (i=0; i<1000; i++) {
+ for (j=0; j<5; j++) {
+ hs_offset[j] = rand() % cur_size[j];
+ }
+ if (H5Sselect_hyperslab(fspace, H5S_SELECT_SET, hs_offset, NULL,
+ one, NULL)<0) goto error;
+ if (H5Dread(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT,
+ &val_rd)<0) goto error;
+ if (val_rd!=fillval) {
+ puts("*FAILED*");
+ puts(" Value read was not a fill value.");
+ printf(" Elmt={%ld,%ld,%ld,%ld,%ld}, read: %u, "
+ "Fill value: %u\n",
+ (long)hs_offset[0], (long)hs_offset[1],
+ (long)hs_offset[2], (long)hs_offset[3],
+ (long)hs_offset[4], val_rd, fillval);
+ goto error;
+ }
+ }
+ if (H5Sclose(mspace)<0) goto error;
+
+ /* Write to all odd data locations */
+ for (i=0, nelmts=1; i<5; i++) {
+ hs_size[i] = cur_size[i]/2;
+ hs_offset[i] = 0;
+ hs_stride[i] = 2;
+ nelmts *= hs_size[i];
+ }
+ if ((mspace=H5Screate_simple(5, hs_size, hs_size))<0) goto error;
+ buf = malloc(nelmts*sizeof(int));
+ for (i=0; i<nelmts; i++) buf[i] = 9999;
+ if (H5Sselect_hyperslab(fspace, H5S_SELECT_SET, hs_offset, hs_stride,
+ hs_size, NULL)<0) goto error;
+ if (H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT,
+ buf)<0) goto error;
+ free(buf);
+ buf = NULL;
+ H5Sclose(mspace);
+
+ /* Read some data and make sure it's the right value */
+ if ((mspace=H5Screate_simple(5, one, NULL))<0) goto error;
+ for (i=0; i<1000; i++) {
+ for (j=0, odd=0; j<5; j++) {
+ hs_offset[j] = rand() % cur_size[j];
+ odd += hs_offset[j]%2;
+ }
+ should_be = odd ? fillval : 9999;
+ if (H5Sselect_hyperslab(fspace, H5S_SELECT_SET, hs_offset, NULL,
+ one, NULL)<0) goto error;
+ if (H5Dread(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT,
+ &val_rd)<0) goto error;
+
+ if (val_rd!=should_be) {
+ puts("*FAILED*");
+ puts(" Value read was not correct.");
+ printf(" Elmt={%ld,%ld,%ld,%ld,%ld}, read: %u, "
+ "should be: %u\n",
+ (long)hs_offset[0], (long)hs_offset[1],
+ (long)hs_offset[2], (long)hs_offset[3],
+ (long)hs_offset[4], val_rd, should_be);
+ goto error;
+ }
+ }
+ if (H5Sclose(mspace)<0) goto error;
+
+ /* Extend the dataset */
+ if (H5Dextend(dset, max_size)<0) goto error;
+ if (H5Sclose(fspace)<0) goto error;
+ if ((fspace=H5Dget_space(dset))<0) goto error;
+
+ /* Read some data and make sure it's the right value */
+ if ((mspace=H5Screate_simple(5, one, NULL))<0) goto error;
+ for (i=0; i<1000; i++) {
+ for (j=0, odd=0; j<5; j++) {
+ hs_offset[j] = rand() % max_size[j];
+ if ((hsize_t)hs_offset[j]>=cur_size[j]) {
+ odd = 1;
+ } else {
+ odd += hs_offset[j]%2;
+ }
+ }
+
+ should_be = odd ? fillval : 9999;
+ if (H5Sselect_hyperslab(fspace, H5S_SELECT_SET, hs_offset, NULL,
+ one, NULL)<0) goto error;
+ if (H5Dread(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT,
+ &val_rd)<0) goto error;
+
+ if (val_rd!=should_be) {
+ puts("*FAILED*");
+ puts(" Value read was not correct.");
+ printf(" Elmt={%ld,%ld,%ld,%ld,%ld}, read: %u, "
+ "should be: %u\n",
+ (long)hs_offset[0], (long)hs_offset[1],
+ (long)hs_offset[2], (long)hs_offset[3],
+ (long)hs_offset[4], val_rd, should_be);
+ goto error;
+ }
+ }
+ if (H5Sclose(mspace)<0) goto error;
+
+ if (H5Dclose(dset)<0) goto error;
+ if (H5Sclose(fspace)<0) goto error;
+ if (H5Pclose(dcpl)<0) goto error;
+ if (H5Fclose(file)<0) goto error;
+ puts(" PASSED");
+ return 0;
+
+ error:
+ H5E_BEGIN_TRY {
+ H5Dclose(dset);
+ H5Sclose(fspace);
+ H5Sclose(mspace);
+ H5Pclose(dcpl);
+ H5Fclose(file);
+ } H5E_END_TRY;
+ return 1;
+}
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: Tests fill values
+ *
+ * Return: Success:
+ *
+ * Failure:
+ *
+ * Programmer: Robb Matzke
+ * Thursday, October 1, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(int argc, char *argv[])
+{
+ int nerrors=0, argno, test_contig=1, test_chunk=1;
+
+ if (argc>=2) {
+ test_contig = test_chunk = 0;
+ for (argno=1; argno<argc; argno++) {
+ if (!strcmp(argv[argno], "contiguous")) {
+ test_contig = 1;
+ } else if (!strcmp(argv[argno], "chunked")) {
+ test_chunk = 1;
+ } else {
+ fprintf(stderr, "usage: %s [contiguous] [chunked]\n", argv[0]);
+ exit(1);
+ }
+ }
+ }
+
+ H5Eset_auto(display_error_cb, NULL);
+
+
+ nerrors += test_getset();
+
+ /* Chunked storage layout tests */
+ if (test_chunk) {
+ nerrors += test_create(FILE_NAME_1, H5D_CHUNKED);
+ nerrors += test_rdwr(FILE_NAME_3, H5D_CHUNKED);
+ nerrors += test_extend(FILE_NAME_5, H5D_CHUNKED);
+ }
+
+ /* Contiguous storage layout tests */
+ if (test_contig) {
+ nerrors += test_create(FILE_NAME_2, H5D_CONTIGUOUS);
+ nerrors += test_rdwr(FILE_NAME_4, H5D_CONTIGUOUS);
+ nerrors += test_extend(FILE_NAME_6, H5D_CONTIGUOUS);
+ }
+
+ if (nerrors) goto error;
+ puts("All fill value tests passed.");
+ cleanup();
+ return 0;
+
+ error:
+ puts("***** FILL VALUE TESTS FAILED *****");
+ return 1;
+}
diff --git a/tools/Makefile.in b/tools/Makefile.in
index 96a757f..d6ea8c9 100644
--- a/tools/Makefile.in
+++ b/tools/Makefile.in
@@ -26,6 +26,9 @@ PROG_SRC=h5debug.c h5import.c h5ls.c h5repart.c h5dump.c h5dumputil.c
PROG_OBJ=$(PROG_SRC:.c=.o)
PRIVATE_HDR=h5tools.h
+# Programs have to be built before they can be tested!
+test: $(PROGS)
+
# How to build the programs...
h5debug: h5debug.o $(LIB) ../src/libhdf5.a
$(CC) $(CFLAGS) -o $@ h5debug.o $(LIBS)