author     Raymond Lu <songyulu@hdfgroup.org>  2012-08-08 16:08:27 (GMT)
committer  Raymond Lu <songyulu@hdfgroup.org>  2012-08-08 16:08:27 (GMT)
commit     faba610060cb168d97a7b9c01d95688542e2cf28 (patch)
tree       e37bba310e7129ae110f7f0e4ec5eb653601e8da
parent     8a0b4729cdc7b9edb18e84b2b9182228bd6eaa2e (diff)
[svn-r22641] Dectris project: I revised the code per Quincey's and Neil's comments. I added a performance benchmark program dectris_perf.c in the test/ directory.
Tested on koala and jam.
-rw-r--r--  src/H5Dchunk.c         28
-rw-r--r--  src/H5Dio.c            31
-rw-r--r--  src/H5Dprivate.h        4
-rw-r--r--  src/H5Dpublic.h         4
-rw-r--r--  src/H5F.c               6
-rw-r--r--  src/H5FDsec2.c          2
-rw-r--r--  src/H5Fpkg.h            3
-rw-r--r--  src/H5Fprivate.h        2
-rw-r--r--  src/H5Fpublic.h         1
-rw-r--r--  src/H5Fquery.c         29
-rw-r--r--  test/Makefile.am        4
-rw-r--r--  test/Makefile.in       47
-rw-r--r--  test/dectris_perf.c   535
-rw-r--r--  test/dectris_tst.c     79
14 files changed, 696 insertions, 79 deletions
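
For orientation, this patch changes the direct chunk write entry point to H5PSIdirect_write(dset_id, dxpl_id, filters, offset, data_size, buf): the caller now passes a filter mask and an hsize_t chunk offset along with the byte count of the already-filtered buffer. Below is a minimal sketch of the call pattern, assuming a chunked dataset and pre-compressed per-chunk buffers prepared as in test/dectris_perf.c later in this patch; dset, dxpl, NX, outbuf[] and data_size[] are placeholders borrowed from that test.

    /* Sketch: write pre-compressed chunks directly, one chunk per iteration.
     * The offset must lie on a chunk boundary of the dataset. */
    unsigned filter_mask = 0;              /* mask of filters already applied to outbuf */
    hsize_t  offset[3]   = {0, 0, 0};      /* chunk-aligned offset, in dataset elements */
    int      i;

    for(i = 0; i < NX; i++) {
        if(H5PSIdirect_write(dset, dxpl, filter_mask, offset, data_size[i], outbuf[i]) < 0)
            goto error;                    /* error handling as in the test program */
        offset[0]++;                       /* chunk extent is 1 along dim 0 in that test */
    }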
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index e9a50ef..7cde7b6 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -60,8 +60,6 @@
#include "H5Iprivate.h" /* IDs */
#include "H5MMprivate.h" /* Memory management */
#include "H5Vprivate.h" /* Vector and array functions */
-#include "H5Fprivate.h"
-#include "H5FDprivate.h"
/****************/
@@ -298,8 +296,8 @@ H5FL_BLK_DEFINE_STATIC(chunk);
*-------------------------------------------------------------------------
*/
herr_t
-H5D__direct_write(const H5D_t *dset, hid_t dxpl_id, size_t *offset, size_t buf_size,
- const void *buf)
+H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, unsigned filters, hsize_t *offset,
+ size_t data_size, const void *buf)
{
const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
H5D_chunk_ud_t udata; /* User data for querying chunk info */
@@ -337,8 +335,12 @@ H5D__direct_write(const H5D_t *dset, hid_t dxpl_id, size_t *offset, size_t buf_s
&udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
- /* If the chunk hasn't been allocated on disk, do so now. */
- if(!H5F_addr_defined(udata.addr)) {
+ udata.filter_mask = filters;
+
+ /* Check if the chunk needs to be 'inserted' (could exist already and
+ * the 'insert' operation could resize it)
+ */
+ {
H5D_chk_idx_info_t idx_info; /* Chunked index info */
/* Compose chunked index info struct */
@@ -349,9 +351,11 @@ H5D__direct_write(const H5D_t *dset, hid_t dxpl_id, size_t *offset, size_t buf_s
idx_info.storage = &(dset->shared->layout.storage.u.chunk);
/* Set up the size of chunk for user data */
- udata.nbytes = buf_size;
+ udata.nbytes = data_size;
- /* Create the chunk */
+ /* Create the chunk if it doesn't exist, or reallocate the chunk
+ * if its size changed.
+ */
if((dset->shared->layout.storage.u.chunk.ops->insert)(&idx_info, &udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk")
@@ -372,10 +376,10 @@ H5D__direct_write(const H5D_t *dset, hid_t dxpl_id, size_t *offset, size_t buf_s
HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "unable to evict chunk")
} /* end if */
- /* Write the data to the file driver, instead of H5F_block_write */
- lf = H5F_DRIVER(dset->oloc.file);
- if(H5FD_write(lf, dxpl_id, H5FD_MEM_DRAW, udata.addr, buf_size, buf) < 0)
- HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "file write failed")
+ /* Write the data to the file */
+ if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, data_size, dxpl_id, buf) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
+
done:
/*FUNC_LEAVE_NOAPI(ret_value)*/
diff --git a/src/H5Dio.c b/src/H5Dio.c
index accb948..9576177 100644
--- a/src/H5Dio.c
+++ b/src/H5Dio.c
@@ -28,6 +28,7 @@
#include "H5Eprivate.h" /* Error handling */
#include "H5FLprivate.h" /* Free Lists */
#include "H5Iprivate.h" /* IDs */
+#include "H5Sprivate.h" /* Dataspace */
#ifdef H5_HAVE_PARALLEL
/* Remove this if H5R_DATASET_REGION is no longer used in this file */
@@ -284,10 +285,13 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5PSIdirect_write(hid_t dset_id, hid_t dxpl_id, size_t *offset, size_t buf_size,
- const void *buf)
+H5PSIdirect_write(hid_t dset_id, hid_t dxpl_id, unsigned filters, hsize_t *offset,
+ size_t data_size, const void *buf)
{
H5D_t *dset = NULL;
+ int ndims;
+ hsize_t *dims;
+ int i;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_API(FAIL)
@@ -298,6 +302,9 @@ H5PSIdirect_write(hid_t dset_id, hid_t dxpl_id, size_t *offset, size_t buf_size,
if(NULL == dset->oloc.file)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset")
+ if(H5D_CHUNKED != dset->shared->layout.type)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a chunked dataset")
+
/* Get the default dataset transfer property list if the user didn't provide one */
if(H5P_DEFAULT == dxpl_id)
dxpl_id= H5P_DATASET_XFER_DEFAULT;
@@ -308,8 +315,26 @@ H5PSIdirect_write(hid_t dset_id, hid_t dxpl_id, size_t *offset, size_t buf_size,
if(!buf)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no output buffer")
+ ndims = (int)H5S_GET_EXTENT_NDIMS(dset->shared->space);
+ dims = (hsize_t *)HDmalloc(ndims*sizeof(hsize_t));
+
+ if(H5S_get_simple_extent_dims(dset->shared->space, dims, NULL) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve dataspace extent dims")
+
+ /* Make sure the offset doesn't exceed the dataset's dimensions */
+ for(i=0; i<ndims; i++)
+ if(offset[i] > dims[i])
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset exceeds dimensions of dataset")
+
+ /* Make sure the offset falls right on a chunk's boundary */
+ for(i=0; i<ndims; i++)
+ if(offset[i] % dset->shared->layout.u.chunk.dim[i])
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset doesn't fall on a chunk's boundary")
+
+ HDfree(dims);
+
/* write raw data */
- if(H5D__direct_write(dset, dxpl_id, offset, buf_size, buf) < 0)
+ if(H5D__chunk_direct_write(dset, dxpl_id, filters, offset, data_size, buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write data")
done:
diff --git a/src/H5Dprivate.h b/src/H5Dprivate.h
index b52800b..342c284 100644
--- a/src/H5Dprivate.h
+++ b/src/H5Dprivate.h
@@ -176,8 +176,8 @@ H5_DLL herr_t H5D_chunk_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_ad
H5_DLL herr_t H5D_btree_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream,
int indent, int fwidth, unsigned ndims);
-H5_DLL herr_t H5D__direct_write(const H5D_t *dset, hid_t dxpl_id, size_t *offset,
- size_t buf_size, const void *buf);
+H5_DLL herr_t H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, unsigned filters,
+ hsize_t *offset, size_t data_size, const void *buf);
#endif /* _H5Dprivate_H */
diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h
index b072e64..58c2a2b 100644
--- a/src/H5Dpublic.h
+++ b/src/H5Dpublic.h
@@ -118,8 +118,8 @@ H5_DLL herr_t H5Dread(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id,
hid_t file_space_id, hid_t plist_id, void *buf/*out*/);
H5_DLL herr_t H5Dwrite(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id,
hid_t file_space_id, hid_t plist_id, const void *buf);
-H5_DLL herr_t H5PSIdirect_write(hid_t dset_id, hid_t dxpl_id, size_t *offset, size_t buf_size,
- const void *buf);
+H5_DLL herr_t H5PSIdirect_write(hid_t dset_id, hid_t dxpl_id, unsigned filters, hsize_t *offset,
+ size_t data_size, const void *buf);
H5_DLL herr_t H5Diterate(void *buf, hid_t type_id, hid_t space_id,
H5D_operator_t op, void *operator_data);
H5_DLL herr_t H5Dvlen_reclaim(hid_t type_id, hid_t space_id, hid_t plist_id, void *buf);
diff --git a/src/H5F.c b/src/H5F.c
index 5d13ea6..b1f052d 100644
--- a/src/H5F.c
+++ b/src/H5F.c
@@ -1251,7 +1251,7 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
* way for us to detect it here anyway).
*/
if(drvr->cmp)
- tent_flags = flags & ~(H5F_ACC_CREAT|H5F_ACC_TRUNC|H5F_ACC_EXCL);
+ tent_flags = flags & ~(H5F_ACC_CREAT|H5F_ACC_TRUNC|H5F_ACC_EXCL|H5F_ACC_SYNC);
else
tent_flags = flags;
@@ -1454,9 +1454,9 @@ H5Fcreate(const char *filename, unsigned flags, hid_t fcpl_id, hid_t fapl_id)
if(!filename || !*filename)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid file name")
/* In this routine, we only accept the following flags:
- * H5F_ACC_EXCL, H5F_ACC_TRUNC and H5F_ACC_DEBUG
+ * H5F_ACC_SYNC, H5F_ACC_EXCL, H5F_ACC_TRUNC and H5F_ACC_DEBUG
*/
- if(flags & ~(H5F_ACC_EXCL | H5F_ACC_TRUNC | H5F_ACC_DEBUG))
+ if(flags & ~(H5F_ACC_SYNC | H5F_ACC_EXCL | H5F_ACC_TRUNC | H5F_ACC_DEBUG))
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid flags")
/* The H5F_ACC_EXCL and H5F_ACC_TRUNC flags are mutually exclusive */
if((flags & H5F_ACC_EXCL) && (flags & H5F_ACC_TRUNC))
diff --git a/src/H5FDsec2.c b/src/H5FDsec2.c
index 4201e07..085465c 100644
--- a/src/H5FDsec2.c
+++ b/src/H5FDsec2.c
@@ -357,6 +357,8 @@ H5FD_sec2_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr)
o_flags |= O_CREAT;
if(H5F_ACC_EXCL & flags)
o_flags |= O_EXCL;
+ if(H5F_ACC_SYNC & flags)
+ o_flags |= O_SYNC;
/* Open the file */
if((fd = HDopen(name, o_flags, 0666)) < 0) {
diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h
index 334879c..2d4678b 100644
--- a/src/H5Fpkg.h
+++ b/src/H5Fpkg.h
@@ -63,7 +63,8 @@
#define H5F_SUPER_ALL_FLAGS (H5F_SUPER_WRITE_ACCESS | H5F_SUPER_FILE_OK)
/* Mask for removing private file access flags */
-#define H5F_ACC_PUBLIC_FLAGS 0x001fu
+/* #define H5F_ACC_PUBLIC_FLAGS 0x001fu */
+#define H5F_ACC_PUBLIC_FLAGS 0x003fu
/* Free space section+aggregator merge flags */
#define H5F_FS_MERGE_METADATA 0x01 /* Section can merge with metadata aggregator */
diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h
index 3b3af49..1ef40b0 100644
--- a/src/H5Fprivate.h
+++ b/src/H5Fprivate.h
@@ -234,7 +234,6 @@ typedef struct H5F_blk_aggr_t H5F_blk_aggr_t;
#define H5F_FILE_ID(F) ((F)->file_id)
#define H5F_PARENT(F) ((F)->parent)
#define H5F_NMOUNTS(F) ((F)->nmounts)
-#define H5F_DRIVER(F) ((F)->shared->lf)
#define H5F_DRIVER_ID(F) ((F)->shared->lf->driver_id)
#define H5F_GET_FILENO(F,FILENUM) ((FILENUM) = (F)->shared->lf->fileno)
#define H5F_HAS_FEATURE(F,FL) ((F)->shared->lf->feature_flags & (FL))
@@ -277,7 +276,6 @@ typedef struct H5F_blk_aggr_t H5F_blk_aggr_t;
#define H5F_FILE_ID(F) (H5F_get_file_id(F))
#define H5F_PARENT(F) (H5F_get_parent(F))
#define H5F_NMOUNTS(F) (H5F_get_nmounts(F))
-#define H5F_DRIVER(F) (H5F_get_driver(F))
#define H5F_DRIVER_ID(F) (H5F_get_driver_id(F))
#define H5F_GET_FILENO(F,FILENUM) (H5F_get_fileno((F), &(FILENUM)))
#define H5F_HAS_FEATURE(F,FL) (H5F_has_feature(F,FL))
diff --git a/src/H5Fpublic.h b/src/H5Fpublic.h
index f32b3e0..7b7acb4 100644
--- a/src/H5Fpublic.h
+++ b/src/H5Fpublic.h
@@ -48,6 +48,7 @@
#define H5F_ACC_EXCL (H5CHECK 0x0004u) /*fail if file already exists*/
#define H5F_ACC_DEBUG (H5CHECK 0x0008u) /*print debug info */
#define H5F_ACC_CREAT (H5CHECK 0x0010u) /*create non-existing files */
+#define H5F_ACC_SYNC (H5CHECK 0x0020u) /*no filesystem caching */
/* Value passed to H5Pset_elink_acc_flags to cause flags to be taken from the
* parent file. */
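
As a usage note for the flag added just above: H5F_ACC_SYNC is accepted at file create/open time and, with the sec2 driver change in this patch, turns into O_SYNC on the underlying open(2). A minimal sketch following the call pattern used in test/dectris_perf.c (the file name and property lists are placeholders):

    /* Reopen an existing file read/write with synchronous writes enabled. */
    hid_t fid = H5Fopen("data.h5", H5F_ACC_RDWR | H5F_ACC_SYNC, H5P_DEFAULT);
    if(fid < 0)
        return -1;                          /* handle the open failure */
    /* ... dataset I/O ... */
    H5Fclose(fid);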
diff --git a/src/H5Fquery.c b/src/H5Fquery.c
index 1be5c50..c04ba24 100644
--- a/src/H5Fquery.c
+++ b/src/H5Fquery.c
@@ -919,35 +919,6 @@ H5F_get_driver_id(const H5F_t *f)
/*-------------------------------------------------------------------------
- * Function: H5F_get_driver
- *
- * Purpose: Quick and dirty routine to retrieve the file's 'driver' structure
- * (Mainly added to stop non-file routines from poking about in the
- * H5F_t data structure)
- *
- * Return: 'driver' structure on success/abort on failure (shouldn't fail)
- *
- * Programmer: Raymond Lu
- * 30 July 2012
- *
- *-------------------------------------------------------------------------
- */
-H5FD_t *
-H5F_get_driver(const H5F_t *f)
-{
- /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- HDassert(f);
- HDassert(f->shared);
- HDassert(f->shared->lf);
-
- FUNC_LEAVE_NOAPI(f->shared->lf)
-} /* end H5F_get_driver() */
-
-
-
-/*-------------------------------------------------------------------------
* Function: H5F_get_fileno
*
* Purpose: Quick and dirty routine to retrieve the file's 'fileno' value
diff --git a/test/Makefile.am b/test/Makefile.am
index 0197e99..006e24b 100644
--- a/test/Makefile.am
+++ b/test/Makefile.am
@@ -40,7 +40,7 @@ TEST_PROG= testhdf5 lheap ohdr stab gheap cache cache_api cache_tagging \
pool accum hyperslab istore bittests dt_arith \
dtypes dsets cmpd_dset filter_fail extend external efc objcopy links unlink \
big mtime fillval mount flush1 flush2 app_ref enum \
- set_extent ttsafe dectris_tst \
+ set_extent ttsafe dectris_tst dectris_perf \
getname vfd ntypes dangle dtransform reserved cross_read \
freespace mf farray earray btree2 fheap file_image
@@ -128,7 +128,7 @@ CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 compact_dataset.h5 dataset.h5 dset_offse
earray.h5 efc[0-5].h5 log_vfd_out.log \
new_multi_file_v16-r.h5 new_multi_file_v16-s.h5 \
split_get_file_image_test-m.h5 split_get_file_image_test-r.h5 \
- file_image_core_test.h5.copy dectris.h5
+ file_image_core_test.h5.copy dectris.h5 dectris_perf.h5
# Sources for testhdf5 executable
testhdf5_SOURCES=testhdf5.c tarray.c tattr.c tchecksum.c tconfig.c tfile.c \
diff --git a/test/Makefile.in b/test/Makefile.in
index ddc481e..21706d7 100644
--- a/test/Makefile.in
+++ b/test/Makefile.in
@@ -89,8 +89,8 @@ am__EXEEXT_1 = testhdf5$(EXEEXT) lheap$(EXEEXT) ohdr$(EXEEXT) \
unlink$(EXEEXT) big$(EXEEXT) mtime$(EXEEXT) fillval$(EXEEXT) \
mount$(EXEEXT) flush1$(EXEEXT) flush2$(EXEEXT) \
app_ref$(EXEEXT) enum$(EXEEXT) set_extent$(EXEEXT) \
- ttsafe$(EXEEXT) dectris_tst$(EXEEXT) getname$(EXEEXT) \
- vfd$(EXEEXT) ntypes$(EXEEXT) dangle$(EXEEXT) \
+ ttsafe$(EXEEXT) dectris_tst$(EXEEXT) dectris_perf$(EXEEXT) \
+ getname$(EXEEXT) vfd$(EXEEXT) ntypes$(EXEEXT) dangle$(EXEEXT) \
dtransform$(EXEEXT) reserved$(EXEEXT) cross_read$(EXEEXT) \
freespace$(EXEEXT) mf$(EXEEXT) farray$(EXEEXT) earray$(EXEEXT) \
btree2$(EXEEXT) fheap$(EXEEXT) file_image$(EXEEXT)
@@ -148,6 +148,10 @@ dangle_SOURCES = dangle.c
dangle_OBJECTS = dangle.$(OBJEXT)
dangle_LDADD = $(LDADD)
dangle_DEPENDENCIES = libh5test.la $(LIBHDF5)
+dectris_perf_SOURCES = dectris_perf.c
+dectris_perf_OBJECTS = dectris_perf.$(OBJEXT)
+dectris_perf_LDADD = $(LDADD)
+dectris_perf_DEPENDENCIES = libh5test.la $(LIBHDF5)
dectris_tst_SOURCES = dectris_tst.c
dectris_tst_OBJECTS = dectris_tst.$(OBJEXT)
dectris_tst_LDADD = $(LDADD)
@@ -430,23 +434,24 @@ am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY))
am__v_GEN_0 = @echo " GEN " $@;
SOURCES = $(libh5test_la_SOURCES) accum.c app_ref.c big.c bittests.c \
btree2.c cache.c cache_api.c cache_tagging.c cmpd_dset.c \
- cross_read.c dangle.c dectris_tst.c dsets.c dt_arith.c \
- dtransform.c dtypes.c earray.c efc.c enum.c err_compat.c \
- error_test.c extend.c external.c farray.c fheap.c file_image.c \
- fillval.c filter_fail.c flush1.c flush2.c freespace.c \
- gen_bad_ohdr.c gen_bogus.c gen_cross.c gen_deflate.c \
- gen_file_image.c gen_filespace.c gen_filters.c gen_new_array.c \
- gen_new_fill.c gen_new_group.c gen_new_mtime.c gen_new_super.c \
- gen_noencoder.c gen_nullspace.c gen_sizes_lheap.c \
- gen_specmetaread.c gen_udlinks.c getname.c gheap.c hyperslab.c \
- istore.c lheap.c links.c links_env.c mf.c mount.c mtime.c \
- ntypes.c objcopy.c ohdr.c pool.c reserved.c set_extent.c \
- space_overflow.c stab.c tcheck_version.c $(testhdf5_SOURCES) \
- testmeta.c $(ttsafe_SOURCES) unlink.c vfd.c
+ cross_read.c dangle.c dectris_perf.c dectris_tst.c dsets.c \
+ dt_arith.c dtransform.c dtypes.c earray.c efc.c enum.c \
+ err_compat.c error_test.c extend.c external.c farray.c fheap.c \
+ file_image.c fillval.c filter_fail.c flush1.c flush2.c \
+ freespace.c gen_bad_ohdr.c gen_bogus.c gen_cross.c \
+ gen_deflate.c gen_file_image.c gen_filespace.c gen_filters.c \
+ gen_new_array.c gen_new_fill.c gen_new_group.c gen_new_mtime.c \
+ gen_new_super.c gen_noencoder.c gen_nullspace.c \
+ gen_sizes_lheap.c gen_specmetaread.c gen_udlinks.c getname.c \
+ gheap.c hyperslab.c istore.c lheap.c links.c links_env.c mf.c \
+ mount.c mtime.c ntypes.c objcopy.c ohdr.c pool.c reserved.c \
+ set_extent.c space_overflow.c stab.c tcheck_version.c \
+ $(testhdf5_SOURCES) testmeta.c $(ttsafe_SOURCES) unlink.c \
+ vfd.c
DIST_SOURCES = $(libh5test_la_SOURCES) accum.c app_ref.c big.c \
bittests.c btree2.c cache.c cache_api.c cache_tagging.c \
- cmpd_dset.c cross_read.c dangle.c dectris_tst.c dsets.c \
- dt_arith.c dtransform.c dtypes.c earray.c efc.c enum.c \
+ cmpd_dset.c cross_read.c dangle.c dectris_perf.c dectris_tst.c \
+ dsets.c dt_arith.c dtransform.c dtypes.c earray.c efc.c enum.c \
err_compat.c error_test.c extend.c external.c farray.c fheap.c \
file_image.c fillval.c filter_fail.c flush1.c flush2.c \
freespace.c gen_bad_ohdr.c gen_bogus.c gen_cross.c \
@@ -776,7 +781,7 @@ CHECK_CLEANFILES = *.chkexe *.chklog *.clog accum.h5 cmpd_dset.h5 \
earray.h5 efc[0-5].h5 log_vfd_out.log new_multi_file_v16-r.h5 \
new_multi_file_v16-s.h5 split_get_file_image_test-m.h5 \
split_get_file_image_test-r.h5 file_image_core_test.h5.copy \
- dectris.h5
+ dectris.h5 dectris_perf.h5
INCLUDES = -I$(top_srcdir)/src -I$(top_builddir)/src
# Test script for error_test and err_compat
@@ -795,7 +800,7 @@ TEST_PROG = testhdf5 lheap ohdr stab gheap cache cache_api cache_tagging \
pool accum hyperslab istore bittests dt_arith \
dtypes dsets cmpd_dset filter_fail extend external efc objcopy links unlink \
big mtime fillval mount flush1 flush2 app_ref enum \
- set_extent ttsafe dectris_tst \
+ set_extent ttsafe dectris_tst dectris_perf \
getname vfd ntypes dangle dtransform reserved cross_read \
freespace mf farray earray btree2 fheap file_image
@@ -958,6 +963,9 @@ cross_read$(EXEEXT): $(cross_read_OBJECTS) $(cross_read_DEPENDENCIES)
dangle$(EXEEXT): $(dangle_OBJECTS) $(dangle_DEPENDENCIES)
@rm -f dangle$(EXEEXT)
$(AM_V_CCLD)$(LINK) $(dangle_OBJECTS) $(dangle_LDADD) $(LIBS)
+dectris_perf$(EXEEXT): $(dectris_perf_OBJECTS) $(dectris_perf_DEPENDENCIES)
+ @rm -f dectris_perf$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(dectris_perf_OBJECTS) $(dectris_perf_LDADD) $(LIBS)
dectris_tst$(EXEEXT): $(dectris_tst_OBJECTS) $(dectris_tst_DEPENDENCIES)
@rm -f dectris_tst$(EXEEXT)
$(AM_V_CCLD)$(LINK) $(dectris_tst_OBJECTS) $(dectris_tst_LDADD) $(LIBS)
@@ -1160,6 +1168,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cmpd_dset.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cross_read.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dangle.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dectris_perf.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dectris_tst.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dsets.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dt_arith.Po@am__quote@
diff --git a/test/dectris_perf.c b/test/dectris_perf.c
new file mode 100644
index 0000000..e743a19
--- /dev/null
+++ b/test/dectris_perf.c
@@ -0,0 +1,535 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * This test is for the DECTRIS project, to benchmark the H5PSIdirect_write function
+ *
+ */
+
+#include "h5test.h"
+#include <zlib.h>
+#include <math.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+const char *FILENAME[] = {
+ "dectris_perf",
+ "unix.raw",
+ NULL
+};
+
+#define DIRECT_DSET "direct_dset"
+#define COMPRESSED_DSET "compressed_dset"
+#define NO_COMPRESS_DSET "no_compress_dset"
+#define RANK 3
+#define NX 100
+#define NY 100
+#define NZ 25
+#define CHUNK_NX 1
+#define CHUNK_NY 100
+#define CHUNK_NZ 25
+
+#define DEFLATE_SIZE_ADJUST(s) (ceil(((double)(s))*1.001)+12)
+char filename[1024];
+unsigned int *outbuf[NX];
+size_t data_size[NX];
+double total_size = 0.0;
+unsigned int *direct_buf[NX];
+double MB = 1048576.0;
+
+/*--------------------------------------------------
+ * Function to report IO rate
+ *--------------------------------------------------
+ */
+void reportTime(struct timeval start, double mbytes)
+{
+ struct timeval timeval_stop,timeval_diff;
+
+ /*end timing*/
+ gettimeofday(&timeval_stop,NULL);
+
+ /* Calculate the elapsed gettimeofday time */
+ timeval_diff.tv_usec=timeval_stop.tv_usec-start.tv_usec;
+ timeval_diff.tv_sec=timeval_stop.tv_sec-start.tv_sec;
+
+ if(timeval_diff.tv_usec<0) {
+ timeval_diff.tv_usec+=1000000;
+ timeval_diff.tv_sec--;
+ } /* end if */
+
+/*printf("mbytes=%lf, sec=%lf, usec=%lf\n", mbytes, (double)timeval_diff.tv_sec, (double)timeval_diff.tv_usec);*/
+ printf("MBytes/second: %lf\n", (double)mbytes/((double)timeval_diff.tv_sec+((double)timeval_diff.tv_usec/(double)1000000.0)));
+}
+
+/*--------------------------------------------------
+ * Create file, datasets, and initialize data
+ *--------------------------------------------------
+ */
+int create_file(hid_t fapl_id)
+{
+ hid_t file; /* handles */
+ hid_t fapl;
+ hid_t cparms;
+ hid_t dataspace, dataset;
+ hsize_t dims[RANK] = {NX, NY, NZ};
+ hsize_t chunk_dims[RANK] ={CHUNK_NX, CHUNK_NY, CHUNK_NZ};
+ unsigned int aggression = 9; /* Compression aggression setting */
+ int ret;
+ int i, j, n;
+
+ unsigned int *p;
+ size_t buf_size = CHUNK_NY*CHUNK_NZ*sizeof(unsigned int);
+
+ const Bytef *z_src;
+ Bytef *z_dst; /*destination buffer */
+ uLongf z_dst_nbytes = (uLongf)DEFLATE_SIZE_ADJUST(buf_size);
+ uLong z_src_nbytes = (uLong)buf_size;
+
+ TESTING("Create a file and dataset");
+
+ /*
+ * Create the data space with unlimited dimensions.
+ */
+ if((dataspace = H5Screate_simple(RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /*
+ * Create a new file. If file exists its contents will be overwritten.
+ */
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0)
+ TEST_ERROR;
+
+ /*
+ * Modify dataset creation properties, i.e. enable chunking and compression
+ */
+ if((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+
+ if(H5Pset_chunk( cparms, RANK, chunk_dims) < 0)
+ TEST_ERROR;
+
+ /*
+ * Create a new dataset within the file using cparms
+ * creation properties.
+ */
+ if((dataset = H5Dcreate2(file, NO_COMPRESS_DSET, H5T_NATIVE_INT, dataspace, H5P_DEFAULT,
+ cparms, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR;
+
+ /* Set compression */
+ if(H5Pset_deflate( cparms, aggression) < 0)
+ TEST_ERROR;
+
+ if((dataset = H5Dcreate2(file, DIRECT_DSET, H5T_NATIVE_INT, dataspace, H5P_DEFAULT,
+ cparms, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR;
+
+
+ if((dataset = H5Dcreate2(file, COMPRESSED_DSET, H5T_NATIVE_INT, dataspace, H5P_DEFAULT,
+ cparms, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR;
+
+ if(H5Fclose(file) < 0)
+ TEST_ERROR;
+
+ if(H5Sclose(dataspace) < 0)
+ TEST_ERROR;
+
+ if(H5Pclose(cparms) < 0)
+ TEST_ERROR;
+
+ /* Initialize data for chunks */
+ for(i = 0; i < NX; i++) {
+ p = direct_buf[i] = (unsigned int*)malloc(CHUNK_NY*CHUNK_NZ*sizeof(unsigned int));
+
+ for(j=0; j < CHUNK_NY*CHUNK_NZ; j++, p++)
+ *p = rand() % 65000;
+
+ z_src = (const Bytef*)direct_buf[i];
+
+ z_dst_nbytes = (uLongf)DEFLATE_SIZE_ADJUST(buf_size);
+ /* Allocate output (compressed) buffer */
+ outbuf[i] = (unsigned int*)malloc((size_t)z_dst_nbytes);
+ z_dst = (Bytef *)outbuf[i];
+
+ /* Perform compression from the source to the destination buffer */
+ ret = compress2(z_dst, &z_dst_nbytes, z_src, z_src_nbytes, aggression);
+
+ data_size[i] = (size_t)z_dst_nbytes;
+ total_size += data_size[i];
+
+ /* Check for various zlib errors */
+ if(Z_BUF_ERROR == ret) {
+ fprintf(stderr, "overflow");
+ TEST_ERROR;
+ } else if(Z_MEM_ERROR == ret) {
+ fprintf(stderr, "deflate memory error");
+ TEST_ERROR;
+ } else if(Z_OK != ret) {
+ fprintf(stderr, "other deflate error");
+ TEST_ERROR;
+ }
+ }
+
+
+ PASSED();
+
+error:
+ H5E_BEGIN_TRY {
+ H5Dclose(dataset);
+ H5Sclose(dataspace);
+ H5Pclose(cparms);
+ H5Fclose(file);
+ } H5E_END_TRY;
+ return 1;
+}
+
+/*--------------------------------------------------
+ * Benchmark the performance of the new function
+ *--------------------------------------------------
+ */
+int
+test_direct_write(hid_t fapl_id)
+{
+ hid_t file; /* handles */
+ hid_t dataspace, dataset;
+ hid_t dxpl;
+ herr_t status;
+ int i;
+
+ unsigned filter_mask = 0;
+ hsize_t offset[RANK+1] = {0, 0, 0, 0};
+
+ struct timeval timeval_start;
+
+ TESTING("H5PSIdirect_write for DECTRIS project");
+
+ if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
+ TEST_ERROR;
+
+ /* Start the timer */
+ gettimeofday(&timeval_start,NULL);
+
+ /* Reopen the file and dataset */
+ if((file = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SYNC, fapl_id)) < 0)
+ TEST_ERROR;
+
+ if((dataset = H5Dopen(file, DIRECT_DSET, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+
+ /* Write the compressed chunk data repeatedly to cover all the chunks in the
+ * dataset, using the direct writing function. */
+ for(i=0; i<NX; i++) {
+ status = H5PSIdirect_write(dataset, dxpl, filter_mask, offset, data_size[i], outbuf[i]);
+ (offset[0])++;
+ }
+
+ /*
+ * Close/release resources.
+ */
+ H5Dclose(dataset);
+ H5Pclose(dxpl);
+ H5Fclose(file);
+
+ /* Report the performance */
+ reportTime(timeval_start, (double)(total_size/MB));
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Dclose(dataset);
+ H5Pclose(dxpl);
+ H5Fclose(file);
+ } H5E_END_TRY;
+ return 1;
+}
+
+/*--------------------------------------------------
+ * Benchmark the performance of the regular H5Dwrite
+ * with compression filter enabled.
+ *--------------------------------------------------
+ */
+int
+test_compressed_write(hid_t fapl_id)
+{
+ hid_t file; /* handles */
+ hid_t dataspace, dataset;
+ hid_t mem_space;
+ hsize_t chunk_dims[RANK] ={CHUNK_NX, CHUNK_NY, CHUNK_NZ};
+ hid_t dxpl;
+ herr_t status;
+ int i;
+
+ hsize_t start[RANK]; /* Start of hyperslab */
+ hsize_t stride[RANK]; /* Stride of hyperslab */
+ hsize_t count[RANK]; /* Block count */
+ hsize_t block[RANK]; /* Block sizes */
+
+ struct timeval timeval_start;
+
+ TESTING("H5Dwrite with compression enabled");
+
+ if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
+ TEST_ERROR;
+
+ if((mem_space = H5Screate_simple(RANK, chunk_dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Start the timer */
+ gettimeofday(&timeval_start,NULL);
+
+ /* Reopen the file and dataset */
+ if((file = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SYNC, fapl_id)) < 0)
+ TEST_ERROR;
+
+ if((dataset = H5Dopen(file, COMPRESSED_DSET, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ if((dataspace = H5Dget_space(dataset)) < 0)
+ TEST_ERROR;
+
+ start[0] = start[1] = start[2] = 0;
+ stride[0] = stride[1] = stride[2] = 1;
+ count[0] = count[1] = count[2] = 1;
+ block[0] = CHUNK_NX; block[1] = CHUNK_NY; block[2] = CHUNK_NZ;
+
+ for(i=0; i<NX; i++) {
+ /*
+ * Select hyperslab for one chunk in the file
+ */
+ if((status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, block)) < 0)
+ TEST_ERROR;
+ (start[0])++;
+
+ if((status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_space, dataspace,
+ H5P_DEFAULT, direct_buf[i])) < 0)
+ TEST_ERROR;
+ }
+
+ /*
+ * Close/release resources.
+ */
+ H5Dclose(dataset);
+ H5Sclose(dataspace);
+ H5Sclose(mem_space);
+ H5Pclose(dxpl);
+ H5Fclose(file);
+
+ /* Report the performance */
+ reportTime(timeval_start, (double)(NX*NY*NZ*sizeof(unsigned int)/MB));
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Dclose(dataset);
+ H5Sclose(dataspace);
+ H5Sclose(mem_space);
+ H5Pclose(dxpl);
+ H5Fclose(file);
+ } H5E_END_TRY;
+ return 1;
+}
+
+/*--------------------------------------------------
+ * Benchmark the performance of the regular H5Dwrite
+ * without compression
+ *--------------------------------------------------
+ */
+int
+test_no_compress_write(hid_t fapl_id)
+{
+ hid_t file; /* handles */
+ hid_t dataspace, dataset;
+ hid_t mem_space;
+ hsize_t chunk_dims[RANK] ={CHUNK_NX, CHUNK_NY, CHUNK_NZ};
+ hid_t dxpl;
+ herr_t status;
+ int i;
+
+ hsize_t start[RANK]; /* Start of hyperslab */
+ hsize_t stride[RANK]; /* Stride of hyperslab */
+ hsize_t count[RANK]; /* Block count */
+ hsize_t block[RANK]; /* Block sizes */
+
+ struct timeval timeval_start;
+
+ TESTING("H5Dwrite without compression");
+
+ if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
+ TEST_ERROR;
+
+ if((mem_space = H5Screate_simple(RANK, chunk_dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Start the timer */
+ gettimeofday(&timeval_start,NULL);
+
+ /* Reopen the file and dataset */
+ if((file = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SYNC, fapl_id)) < 0)
+ TEST_ERROR;
+
+ if((dataset = H5Dopen(file, NO_COMPRESS_DSET, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ if((dataspace = H5Dget_space(dataset)) < 0)
+ TEST_ERROR;
+
+ start[0] = start[1] = start[2] = 0;
+ stride[0] = stride[1] = stride[2] = 1;
+ count[0] = count[1] = count[2] = 1;
+ block[0] = CHUNK_NX; block[1] = CHUNK_NY; block[2] = CHUNK_NZ;
+
+ for(i=0; i<NX; i++) {
+ /*
+ * Select hyperslab for one chunk in the file
+ */
+ if((status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, block)) < 0)
+ TEST_ERROR;
+ (start[0])++;
+
+ if((status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_space, dataspace,
+ H5P_DEFAULT, direct_buf[i])) < 0)
+ TEST_ERROR;
+ }
+
+ /*
+ * Close/release resources.
+ */
+ H5Dclose(dataset);
+ H5Sclose(dataspace);
+ H5Sclose(mem_space);
+ H5Pclose(dxpl);
+ H5Fclose(file);
+
+ /* Report the performance */
+ reportTime(timeval_start, (double)(NX*NY*NZ*sizeof(unsigned int)/MB));
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Dclose(dataset);
+ H5Sclose(dataspace);
+ H5Sclose(mem_space);
+ H5Pclose(dxpl);
+ H5Fclose(file);
+ } H5E_END_TRY;
+ return 1;
+}
+
+/*--------------------------------------------------
+ * Benchmark the performance for writing compressed
+ * data to a Unix file
+ *--------------------------------------------------
+ */
+int
+test_unix_write(void)
+{
+ int file, flag;
+ ssize_t op_size;
+ int i;
+ struct timeval timeval_start;
+
+ TESTING("Write compressed data to a Unix file");
+
+ /* create file*/
+ flag = O_CREAT|O_TRUNC|O_WRONLY|O_SYNC;
+
+ /* Start the timer */
+ gettimeofday(&timeval_start,NULL);
+
+ if ((file=open(FILENAME[1],flag,S_IRWXU))== -1)
+ TEST_ERROR;
+
+ /* Write the compressed chunk data repeatedly, the same amount as the direct
+ * writing test above, but using the Unix write() call. */
+ for(i=0; i<NX; i++) {
+ op_size = write(file, outbuf[i],data_size[i]);
+ if (op_size < 0)
+ {
+ printf(" Error in writing data to file because %s \n", strerror(errno));
+ TEST_ERROR;
+ }
+ else if (op_size == 0)
+ {
+ printf(" unable to write sufficent data to file because %s \n", strerror(errno));
+ TEST_ERROR;
+ }
+ }
+
+ if (close(file) < 0)
+ {
+ printf(" unable to close the file\n");
+ TEST_ERROR;
+ }
+
+ /* Report the performance */
+ reportTime(timeval_start, (double)(total_size/MB));
+
+ PASSED();
+ return 0;
+
+error:
+ return 1;
+}
+
+/*--------------------------------------------------
+ * Main function
+ *--------------------------------------------------
+ */
+int
+main (void)
+{
+ hid_t fapl;
+ int i;
+
+ /* Testing setup */
+ h5_reset();
+ fapl = h5_fileaccess();
+
+ h5_fixname(FILENAME[0], fapl, filename, sizeof filename);
+
+ create_file(fapl);
+ test_direct_write(fapl);
+ test_no_compress_write(fapl);
+ test_compressed_write(fapl);
+ test_unix_write();
+
+ for(i=0; i<NX; i++) {
+ free(outbuf[i]);
+ free(direct_buf[i]);
+ }
+
+ h5_cleanup(FILENAME, fapl);
+ return 0;
+}
diff --git a/test/dectris_tst.c b/test/dectris_tst.c
index f2cc356..a6bbaab 100644
--- a/test/dectris_tst.c
+++ b/test/dectris_tst.c
@@ -55,6 +55,7 @@ main (void)
int check[NX][NY];
int i, j, n;
+ unsigned filter_mask = 0;
int direct_buf[CHUNK_NX][CHUNK_NY];
int check_chunk[CHUNK_NX][CHUNK_NY];
hsize_t offset[3] = {0, 0, 0};
@@ -159,13 +160,15 @@ main (void)
* dataset, using the direct writing function. */
for(i=0; i<NX/CHUNK_NX; i++) {
for(j=0; j<NY/CHUNK_NY; j++) {
- status = H5PSIdirect_write(dataset, dxpl, offset, z_dst_nbytes, outbuf);
+ status = H5PSIdirect_write(dataset, dxpl, filter_mask, offset, z_dst_nbytes, outbuf);
offset[1] += CHUNK_NY;
}
offset[0] += CHUNK_NX;
offset[1] = 0;
}
+ free(outbuf);
+
if(H5Dclose(dataset) < 0)
TEST_ERROR;
@@ -197,13 +200,83 @@ main (void)
for(i = 0; i < CHUNK_NX; i++) {
for(j = 0; j < CHUNK_NY; j++) {
if(direct_buf[i][j] != check_chunk[i][j]) {
- printf(" Read different values than written.");
+ printf(" 1. Read different values than written.");
printf(" At index %d,%d\n", i, j);
printf(" direct_buf=%d, check_chunk=%d\n", direct_buf[i][j], check_chunk[i][j]);
+ goto error;
}
}
}
+ /* Reinitialize different data for one chunk */
+ for(i = 0; i < CHUNK_NX; i++)
+ for(j = 0; j < CHUNK_NY; j++)
+ direct_buf[i][j] = i + j;
+
+ /* Allocate output (compressed) buffer */
+ outbuf = malloc(z_dst_nbytes);
+ z_dst = (Bytef *)outbuf;
+
+ /* Perform compression from the source to the destination buffer */
+ ret = compress2(z_dst, &z_dst_nbytes, z_src, z_src_nbytes, aggression);
+
+ /* Check for various zlib errors */
+ if(Z_BUF_ERROR == ret) {
+ fprintf(stderr, "overflow");
+ TEST_ERROR;
+ } else if(Z_MEM_ERROR == ret) {
+ fprintf(stderr, "deflate memory error");
+ TEST_ERROR;
+ } else if(Z_OK != ret) {
+ fprintf(stderr, "other deflate error");
+ TEST_ERROR;
+ }
+
+ /* Rewrite the compressed chunk data repeatedly to cover all the chunks in the
+ * dataset, using the direct writing function. */
+ offset[0] = offset[1] = 0;
+ for(i=0; i<NX/CHUNK_NX; i++) {
+ for(j=0; j<NY/CHUNK_NY; j++) {
+ status = H5PSIdirect_write(dataset, dxpl, filter_mask, offset, z_dst_nbytes, outbuf);
+ offset[1] += CHUNK_NY;
+ }
+ offset[0] += CHUNK_NX;
+ offset[1] = 0;
+ }
+
+ free(outbuf);
+
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR;
+
+ if(H5Fclose(file) < 0)
+ TEST_ERROR;
+
+ /* Reopen the file and dataset */
+ if((file = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ if((dataset = H5Dopen(file, DATASETNAME, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /* Read the chunk back */
+ if((status = H5Dread(dataset, H5T_NATIVE_INT, mem_space, dataspace, H5P_DEFAULT, check_chunk)) < 0)
+ TEST_ERROR;
+
+ /* Check that the values read are the same as the values written */
+ for(i = 0; i < CHUNK_NX; i++) {
+ for(j = 0; j < CHUNK_NY; j++) {
+ if(direct_buf[i][j] != check_chunk[i][j]) {
+ printf(" 2. Read different values than written.");
+ printf(" At index %d,%d\n", i, j);
+ printf(" direct_buf=%d, check_chunk=%d\n", direct_buf[i][j], check_chunk[i][j]);
+ goto error;
+ }
+ }
+ }
+
+
+
/*
* Close/release resources.
*/
@@ -214,8 +287,6 @@ main (void)
H5Pclose(dxpl);
H5Fclose(file);
- free(outbuf);
-
h5_cleanup(FILENAME, fapl);
PASSED();
return 0;