-rw-r--r--  .gitattributes  6
-rw-r--r--  CMakeLists.txt  6
-rw-r--r--  MANIFEST  14
-rwxr-xr-x  autogen.sh  2
-rw-r--r--  c++/examples/testh5c++.sh.in  2
-rw-r--r--  c++/src/H5DataType.cpp  4
-rw-r--r--  c++/src/H5DcreatProp.cpp  2
-rw-r--r--  c++/src/H5IdComponent.cpp  2
-rw-r--r--  c++/src/H5Library.h  2
-rw-r--r--  c++/src/H5PropList.cpp  2
-rw-r--r--  c++/src/H5PropList.h  2
-rw-r--r--  c++/src/cpp_doc_config  6
-rw-r--r--  c++/src/h5c++.in  6
-rw-r--r--  c++/test/tfile.cpp  4
-rw-r--r--  c++/test/th5s.cpp  12
-rw-r--r--  config/cmake/hdf5-config.cmake.in  4
-rw-r--r--  configure.ac  2
-rw-r--r--  java/examples/datasets/JavaDatasetExample.sh.in  8
-rw-r--r--  java/examples/datasets/Makefile.am  2
-rw-r--r--  java/examples/datatypes/JavaDatatypeExample.sh.in  8
-rw-r--r--  java/examples/datatypes/Makefile.am  2
-rw-r--r--  java/examples/groups/JavaGroupExample.sh.in  8
-rw-r--r--  java/examples/groups/Makefile.am  2
-rw-r--r--  java/examples/intro/JavaIntroExample.sh.in  8
-rw-r--r--  java/examples/intro/Makefile.am  2
-rw-r--r--  java/lib/ext/slf4j-nop-1.7.25.jar  bin 0 -> 4007 bytes
-rw-r--r--  java/lib/ext/slf4j-nop-1.7.5.jar  bin 4091 -> 0 bytes
-rw-r--r--  java/lib/ext/slf4j-simple-1.7.25.jar  bin 0 -> 15257 bytes
-rw-r--r--  java/lib/ext/slf4j-simple-1.7.5.jar  bin 10680 -> 0 bytes
-rw-r--r--  java/lib/slf4j-api-1.7.25.jar  bin 0 -> 41203 bytes
-rw-r--r--  java/lib/slf4j-api-1.7.5.jar  bin 26084 -> 0 bytes
-rw-r--r--  java/src/Makefile.am  2
-rw-r--r--  java/src/hdf/hdf5lib/H5.java  21
-rw-r--r--  java/src/hdf/hdf5lib/HDF5Constants.java  3
-rw-r--r--  java/src/hdf/hdf5lib/HDFArray.java  5
-rw-r--r--  java/src/jni/h5Constants.c  2
-rw-r--r--  java/src/jni/h5aImp.c  322
-rw-r--r--  java/src/jni/h5aImp.h  18
-rw-r--r--  java/src/jni/h5dImp.c  307
-rw-r--r--  java/src/jni/h5dImp.h  9
-rw-r--r--  java/src/jni/h5util.c  710
-rw-r--r--  java/src/jni/h5util.h  23
-rw-r--r--  java/test/Makefile.am  2
-rw-r--r--  java/test/junit.sh.in  78
-rw-r--r--  release_docs/INSTALL_CMake.txt  346
-rw-r--r--  release_docs/RELEASE.txt  19
-rw-r--r--  release_docs/USING_CMake_Examples.txt  2
-rw-r--r--  release_docs/USING_HDF5_CMake.txt  226
-rw-r--r--  src/H5.c  4
-rw-r--r--  src/H5AC.c  6
-rw-r--r--  src/H5ACmpio.c  8
-rw-r--r--  src/H5ACpkg.h  2
-rw-r--r--  src/H5ACpublic.h  6
-rw-r--r--  src/H5Aint.c  4
-rw-r--r--  src/H5Apkg.h  2
-rw-r--r--  src/H5B.c  2
-rw-r--r--  src/H5B2internal.c  2
-rw-r--r--  src/H5Bprivate.h  2
-rw-r--r--  src/H5C.c  24
-rw-r--r--  src/H5Cimage.c  4
-rw-r--r--  src/H5Cmpio.c  2
-rw-r--r--  src/H5Cpkg.h  18
-rw-r--r--  src/H5Cprivate.h  24
-rw-r--r--  src/H5Dbtree2.c  2
-rw-r--r--  src/H5Dchunk.c  45
-rw-r--r--  src/H5Dint.c  4
-rw-r--r--  src/H5Dmpio.c  153
-rw-r--r--  src/H5Dvirtual.c  4
-rw-r--r--  src/H5Edeprec.c  10
-rw-r--r--  src/H5FDdirect.c  6
-rw-r--r--  src/H5FDfamily.c  2
-rw-r--r--  src/H5FDmpio.c  4
-rw-r--r--  src/H5FDmulti.c  2
-rw-r--r--  src/H5FL.c  6
-rw-r--r--  src/H5FS.c  8
-rw-r--r--  src/H5FScache.c  6
-rw-r--r--  src/H5FSsection.c  14
-rw-r--r--  src/H5Fpkg.h  2
-rw-r--r--  src/H5Fprivate.h  2
-rw-r--r--  src/H5Fsuper.c  6
-rw-r--r--  src/H5Gname.c  2
-rw-r--r--  src/H5Gobj.c  2
-rw-r--r--  src/H5HFcache.c  34
-rw-r--r--  src/H5HFdtable.c  4
-rw-r--r--  src/H5HFhdr.c  4
-rw-r--r--  src/H5HFspace.c  2
-rw-r--r--  src/H5HFtiny.c  2
-rw-r--r--  src/H5HGcache.c  4
-rw-r--r--  src/H5MF.c  22
-rw-r--r--  src/H5MFaggr.c  2
-rw-r--r--  src/H5O.c  2
-rw-r--r--  src/H5Oattr.c  2
-rw-r--r--  src/H5Oattribute.c  2
-rw-r--r--  src/H5Ocache.c  28
-rw-r--r--  src/H5Ocache_image.c  2
-rw-r--r--  src/H5Ocopy.c  2
-rw-r--r--  src/H5Ofill.c  20
-rw-r--r--  src/H5Olayout.c  4
-rw-r--r--  src/H5Omessage.c  6
-rw-r--r--  src/H5Oshared.c  2
-rw-r--r--  src/H5P.c  4
-rw-r--r--  src/H5Pdcpl.c  4
-rw-r--r--  src/H5Pdeprec.c  4
-rw-r--r--  src/H5Pdxpl.c  2
-rw-r--r--  src/H5Pfapl.c  20
-rw-r--r--  src/H5Pfcpl.c  8
-rw-r--r--  src/H5Pint.c  34
-rw-r--r--  src/H5Ppkg.h  10
-rw-r--r--  src/H5Ppublic.h  2
-rw-r--r--  src/H5SMpkg.h  2
-rw-r--r--  src/H5Sall.c  2
-rw-r--r--  src/H5Shyper.c  20
-rw-r--r--  src/H5Smpio.c  7
-rw-r--r--  src/H5Snone.c  2
-rw-r--r--  src/H5Spoint.c  2
-rw-r--r--  src/H5Sselect.c  6
-rw-r--r--  src/H5T.c  2
-rw-r--r--  src/H5Tcommit.c  2
-rw-r--r--  src/H5Tconv.c  10
-rw-r--r--  src/H5Toffset.c  16
-rw-r--r--  src/H5Tpkg.h  2
-rw-r--r--  src/H5Tprivate.h  2
-rw-r--r--  src/H5Znbit.c  4
-rw-r--r--  src/H5Zpublic.h  2
-rw-r--r--  src/H5Zscaleoffset.c  4
-rw-r--r--  src/H5Ztrans.c  8
-rw-r--r--  src/H5detect.c  12
-rw-r--r--  src/H5private.h  4
-rw-r--r--  src/H5public.h  4
-rw-r--r--  src/H5system.c  10
-rw-r--r--  test/cache_common.c  2
-rw-r--r--  test/cache_common.h  2
-rw-r--r--  test/cache_image.c  24
-rw-r--r--  test/cache_tagging.c  22
-rw-r--r--  test/dtransform.c  2
-rw-r--r--  test/enum.c  2
-rw-r--r--  test/mf.c  16
-rw-r--r--  test/mtime.c  2
-rw-r--r--  test/page_buffer.c  10
-rw-r--r--  test/tmisc.c  2
-rw-r--r--  test/vds.c  2
-rw-r--r--  testpar/t_filters_parallel.c  5036
-rw-r--r--  testpar/t_filters_parallel.h  333
-rw-r--r--  tools/src/h5stat/h5stat.c  290
-rw-r--r--  tools/test/h5dump/CMakeTests.cmake  5
-rw-r--r--  tools/test/h5dump/h5dumpgentest.c  86
-rw-r--r--  tools/test/h5dump/testh5dump.sh.in  5
-rw-r--r--  tools/test/h5stat/CMakeTests.cmake  18
-rw-r--r--  tools/test/h5stat/h5stat_gentest.c  141
-rw-r--r--  tools/test/h5stat/testfiles/h5stat_err_old_fill.ddl  2
-rw-r--r--  tools/test/h5stat/testfiles/h5stat_err_old_fill.h5  bin 0 -> 2068 bytes
-rw-r--r--  tools/test/h5stat/testfiles/h5stat_err_old_layout.ddl  2
-rw-r--r--  tools/test/h5stat/testfiles/h5stat_err_old_layout.h5  bin 0 -> 2088 bytes
-rw-r--r--  tools/test/h5stat/testfiles/h5stat_err_refcount.ddl  2
-rw-r--r--  tools/test/h5stat/testfiles/h5stat_err_refcount.h5  bin 0 -> 4640 bytes
-rw-r--r--  tools/test/h5stat/testfiles/h5stat_help1.ddl  1
-rw-r--r--  tools/test/h5stat/testfiles/h5stat_help2.ddl  1
-rw-r--r--  tools/test/h5stat/testfiles/h5stat_nofile.ddl  1
-rw-r--r--  tools/test/h5stat/testh5stat.sh.in  17
-rw-r--r--  tools/testfiles/err_attr_dspace.ddl  5
-rw-r--r--  tools/testfiles/err_attr_dspace.h5  bin 0 -> 3047 bytes
161 files changed, 6711 insertions, 2278 deletions
diff --git a/.gitattributes b/.gitattributes
index de68733..1ca4739 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -192,12 +192,12 @@ java/examples/testfiles/examples.intro.H5_CreateGroup.txt -text
java/examples/testfiles/examples.intro.H5_CreateGroupAbsoluteRelative.txt -text
java/examples/testfiles/examples.intro.H5_CreateGroupDataset.txt -text
java/examples/testfiles/examples.intro.H5_ReadWrite.txt -text
-java/lib/ext/slf4j-nop-1.7.5.jar -text svneol=unset#application/zip
-java/lib/ext/slf4j-simple-1.7.5.jar -text svneol=unset#application/zip
+java/lib/ext/slf4j-nop-1.7.25.jar -text svneol=unset#application/zip
+java/lib/ext/slf4j-simple-1.7.25.jar -text svneol=unset#application/zip
java/lib/hamcrest-core.jar -text svneol=unset#application/java-archive
java/lib/junit.jar -text svneol=unset#application/java-archive
java/lib/simplelogger.properties -text
-java/lib/slf4j-api-1.7.5.jar -text svneol=unset#application/zip
+java/lib/slf4j-api-1.7.25.jar -text svneol=unset#application/zip
java/src/CMakeLists.txt -text
java/src/Makefile.am -text
java/src/hdf/CMakeLists.txt -text
diff --git a/CMakeLists.txt b/CMakeLists.txt
index f24a957..b0640b9 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -195,9 +195,9 @@ set (HDF5_JAVA_JNI_SRC_DIR ${HDF5_SOURCE_DIR}/java/src/jni)
set (HDF5_JAVA_HDF5_SRC_DIR ${HDF5_SOURCE_DIR}/java/src/hdf)
set (HDF5_JAVA_TEST_SRC_DIR ${HDF5_SOURCE_DIR}/java/test)
set (HDF5_JAVA_LIB_DIR ${HDF5_SOURCE_DIR}/java/lib)
-set (HDF5_JAVA_LOGGING_JAR ${HDF5_SOURCE_DIR}/java/lib/slf4j-api-1.7.5.jar)
-set (HDF5_JAVA_LOGGING_NOP_JAR ${HDF5_SOURCE_DIR}/java/lib/ext/slf4j-nop-1.7.5.jar)
-set (HDF5_JAVA_LOGGING_SIMPLE_JAR ${HDF5_SOURCE_DIR}/java/lib/ext/slf4j-simple-1.7.5.jar)
+set (HDF5_JAVA_LOGGING_JAR ${HDF5_SOURCE_DIR}/java/lib/slf4j-api-1.7.25.jar)
+set (HDF5_JAVA_LOGGING_NOP_JAR ${HDF5_SOURCE_DIR}/java/lib/ext/slf4j-nop-1.7.25.jar)
+set (HDF5_JAVA_LOGGING_SIMPLE_JAR ${HDF5_SOURCE_DIR}/java/lib/ext/slf4j-simple-1.7.25.jar)
#-----------------------------------------------------------------------------
# parse the full version number from H5public.h and include in H5_VERS_INFO
diff --git a/MANIFEST b/MANIFEST
index 9ab179f..79c2ef6 100644
--- a/MANIFEST
+++ b/MANIFEST
@@ -1562,6 +1562,12 @@
./tools/test/h5stat/testfiles/h5stat_err1_links.ddl
./tools/test/h5stat/testfiles/h5stat_err1_numattrs.ddl
./tools/test/h5stat/testfiles/h5stat_err2_numattrs.ddl
+./tools/test/h5stat/testfiles/h5stat_err_old_fill.h5
+./tools/test/h5stat/testfiles/h5stat_err_old_fill.ddl
+./tools/test/h5stat/testfiles/h5stat_err_old_layout.h5
+./tools/test/h5stat/testfiles/h5stat_err_old_layout.ddl
+./tools/test/h5stat/testfiles/h5stat_err_refcount.h5
+./tools/test/h5stat/testfiles/h5stat_err_refcount.ddl
./tools/test/h5stat/testfiles/h5stat_filters.ddl
./tools/test/h5stat/testfiles/h5stat_filters-d.ddl
./tools/test/h5stat/testfiles/h5stat_filters-dT.ddl
@@ -1597,6 +1603,8 @@
# h5dump test files
./tools/testfiles/charsets.h5
./tools/testfiles/charsets.ddl
+./tools/testfiles/err_attr_dspace.h5
+./tools/testfiles/err_attr_dspace.ddl
./tools/testfiles/family_file00000.h5
./tools/testfiles/family_file00001.h5
./tools/testfiles/family_file00002.h5
@@ -3128,9 +3136,9 @@
./java/lib/hamcrest-core.jar
./java/lib/junit.jar
./java/lib/simplelogger.properties
-./java/lib/slf4j-api-1.7.5.jar
-./java/lib/ext/slf4j-nop-1.7.5.jar
-./java/lib/ext/slf4j-simple-1.7.5.jar
+./java/lib/slf4j-api-1.7.25.jar
+./java/lib/ext/slf4j-nop-1.7.25.jar
+./java/lib/ext/slf4j-simple-1.7.25.jar
# CMake-specific Files
./config/cmake/cacheinit.cmake
diff --git a/autogen.sh b/autogen.sh
index 3d33c06..a8ffd32 100755
--- a/autogen.sh
+++ b/autogen.sh
@@ -12,7 +12,7 @@
#
# A script to reconfigure autotools for HDF5, and to recreate other
-# generated files specifc to HDF5.
+# generated files specific to HDF5.
#
# IMPORTANT OS X NOTE
#
diff --git a/c++/examples/testh5c++.sh.in b/c++/examples/testh5c++.sh.in
index 42cb0df..907a980 100644
--- a/c++/examples/testh5c++.sh.in
+++ b/c++/examples/testh5c++.sh.in
@@ -56,7 +56,7 @@ applib=libapp${H5TOOL}.a
# short hands
# Caution: if some *.h5 files must be cleaned here, list them by names.
# Don't use the wildcard form of *.h5 as it will wipe out even *.h5 generated
-# by otehr test programs. This will cause a racing condition error when
+# by other test programs. This will cause a racing condition error when
# parallel make (e.g., gmake -j 4) is used.
temp_SRC="$hdf5main $appmain $prog1 $prog2"
temp_OBJ=`echo $temp_SRC | sed -e "s/\.${suffix}/.o/g"`
diff --git a/c++/src/H5DataType.cpp b/c++/src/H5DataType.cpp
index a6b8c24..e460871 100644
--- a/c++/src/H5DataType.cpp
+++ b/c++/src/H5DataType.cpp
@@ -532,7 +532,7 @@ H5T_conv_t DataType::find(const DataType& dest, H5T_cdata_t **pcdata) const
///\param nelmts - IN: Size of array \a buf
///\param buf - IN/OUT: Array containing pre- and post-conversion
/// values
-///\param background - IN: Optional backgroud buffer
+///\param background - IN: Optional background buffer
///\param plist - IN: Property list - default to PropList::DEFAULT
///\return Pointer to a suitable conversion function
///\exception H5::DataTypeIException
@@ -558,7 +558,7 @@ void DataType::convert(const DataType& dest, size_t nelmts, void *buf, void *bac
///\brief Locks a datatype, making it read-only and non-destructible.
///
///\exception H5::DataTypeIException
-///\par Descrition
+///\par Description
/// This is normally done by the library for predefined data
/// types so the application doesn't inadvertently change or
/// delete a predefined type.
diff --git a/c++/src/H5DcreatProp.cpp b/c++/src/H5DcreatProp.cpp
index 0c2a8c0..c704f85 100644
--- a/c++/src/H5DcreatProp.cpp
+++ b/c++/src/H5DcreatProp.cpp
@@ -666,7 +666,7 @@ void DSetCreatPropList::setFletcher32() const
///\param size - IN: Number of bytes reserved in the file for the data
///\exception H5::PropListIException
///\par Description
-/// If a dataset is splitted across multiple files then the files
+/// If a dataset is split across multiple files then the files
/// should be defined in order. The total size of the dataset is
/// the sum of the \a size arguments for all the external files. If
/// the total size is larger than the size of a dataset then the
diff --git a/c++/src/H5IdComponent.cpp b/c++/src/H5IdComponent.cpp
index fe2d27e..a041273 100644
--- a/c++/src/H5IdComponent.cpp
+++ b/c++/src/H5IdComponent.cpp
@@ -322,7 +322,7 @@ IdComponent::~IdComponent() {}
//
// Implementation of protected functions for HDF5 Reference Interface
-// and miscelaneous helpers.
+// and miscellaneous helpers.
//
#ifndef DOXYGEN_SHOULD_SKIP_THIS
diff --git a/c++/src/H5Library.h b/c++/src/H5Library.h
index 9b4150e..b9be3d0 100644
--- a/c++/src/H5Library.h
+++ b/c++/src/H5Library.h
@@ -20,7 +20,7 @@ namespace H5 {
/*! \class H5Library
\brief Class H5Library operates the HDF5 library globably.
- It is not neccessary to construct an instance of H5Library to use the
+ It is not necessary to construct an instance of H5Library to use the
methods.
*/
class H5_DLLCPP H5Library {
diff --git a/c++/src/H5PropList.cpp b/c++/src/H5PropList.cpp
index ef9e16d..1918d27 100644
--- a/c++/src/H5PropList.cpp
+++ b/c++/src/H5PropList.cpp
@@ -468,7 +468,7 @@ H5std_string PropList::getProperty(const char* name) const
throw PropListIException(inMemFunc("getProperty"), "H5Pget failed");
}
- // Return propety value as a string after deleting temp C-string
+ // Return property value as a string after deleting temp C-string
H5std_string prop_strg(prop_strg_C);
delete []prop_strg_C;
return (prop_strg);
diff --git a/c++/src/H5PropList.h b/c++/src/H5PropList.h
index e0244c1..d704775 100644
--- a/c++/src/H5PropList.h
+++ b/c++/src/H5PropList.h
@@ -92,7 +92,7 @@ class H5_DLLCPP PropList : public IdComponent {
// Determines whether a property list is a certain class.
bool isAClass(const PropList& prop_class) const;
- /// Query the existance of a property in a property object.
+ /// Query the existence of a property in a property object.
bool propExist(const char* name) const;
bool propExist(const H5std_string& name) const;
diff --git a/c++/src/cpp_doc_config b/c++/src/cpp_doc_config
index 3943f7b..4bba5d5 100644
--- a/c++/src/cpp_doc_config
+++ b/c++/src/cpp_doc_config
@@ -1010,7 +1010,7 @@ USE_HTAGS = NO
VERBATIM_HEADERS = YES
# If the CLANG_ASSISTED_PARSING tag is set to YES, then doxygen will use the
-# clang parser (see: http://clang.llvm.org/) for more acurate parsing at the
+# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
# cost of reduced performance. This can be particularly helpful with template
# rich C++ code for which doxygen's built-in parser lacks the necessary type
# information.
@@ -1124,7 +1124,7 @@ HTML_STYLESHEET =
# defined cascading style sheet that is included after the standard style sheets
# created by doxygen. Using this option one can overrule certain style aspects.
# This is preferred over using HTML_STYLESHEET since it does not replace the
-# standard style sheet and is therefor more robust against future updates.
+# standard style sheet and is therefore more robust against future updates.
# Doxygen will copy the style sheet file to the output directory. For an example
# see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
@@ -2009,7 +2009,7 @@ PREDEFINED = DOXYGEN_SHOULD_SKIP_THIS
EXPAND_AS_DEFINED =
# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
-# remove all refrences to function-like macros that are alone on a line, have an
+# remove all references to function-like macros that are alone on a line, have an
# all uppercase name, and do not end with a semicolon. Such function macros are
# typically used for boiler-plate code, and will confuse the parser if not
# removed.
diff --git a/c++/src/h5c++.in b/c++/src/h5c++.in
index 00502d9..f068f51 100644
--- a/c++/src/h5c++.in
+++ b/c++/src/h5c++.in
@@ -38,7 +38,7 @@ HL="@HL@"
## $CLINKER $H5BLD_CPPFLAGS $CPPFLAGS $H5BLD_CXXFLAGS $CXXFLAGS ##
## $LDFLAGS $LIBS $clibpath $link_objs $link_args $shared_link ##
## ##
-## These settings can be overriden by setting HDF5_CXXFLAGS, ##
+## These settings can be overridden by setting HDF5_CXXFLAGS, ##
## HDF5_CPPFLAGS, HDF5_LDFLAGS, or HDF5_LIBS in the environment. ##
## ##
############################################################################
@@ -140,7 +140,7 @@ usage() {
echo " [default: no except when built with only"
echo " shared libraries]"
echo " You can also add or change paths and flags to the compile line using"
- echo " the following environment varibles or by assigning them to their counterparts"
+ echo " the following environment variables or by assigning them to their counterparts"
echo " in the 'Things You Can Modify to Override...'" section of $prog_name
echo " "
echo " Variable Current value to be replaced"
@@ -311,7 +311,7 @@ fi
if test "x$do_link" = "xyes"; then
shared_link=""
- # conditionnaly link with the hl library
+ # conditionally link with the hl library
if test "X$HL" = "Xhl"; then
libraries=" $libraries -lhdf5_hl_cpp -lhdf5_cpp -lhdf5_hl -lhdf5 "
else
diff --git a/c++/test/tfile.cpp b/c++/test/tfile.cpp
index ba5b486..c82ab42 100644
--- a/c++/test/tfile.cpp
+++ b/c++/test/tfile.cpp
@@ -76,7 +76,7 @@ const H5std_string FILE4("tfile4.h5");
* cases. Since there are no operator<< for 'long long'
* or int64 in VS C++ ostream, I casted the hsize_t values
* passed to verify_val to 'long' as well. If problems
- * arises later, this will have to be specificly handled
+ * arises later, this will have to be specifically handled
* with a special routine.
*-------------------------------------------------------------------------
*/
@@ -282,7 +282,7 @@ static void test_file_create()
* cases. Since there are no operator<< for 'long long'
* or int64 in VS C++ ostream, I casted the hsize_t values
* passed to verify_val to 'long' as well. If problems
- * arises later, this will have to be specificly handled
+ * arises later, this will have to be specifically handled
* with a special routine.
*-------------------------------------------------------------------------
*/
diff --git a/c++/test/th5s.cpp b/c++/test/th5s.cpp
index 9c92b64..d4853da 100644
--- a/c++/test/th5s.cpp
+++ b/c++/test/th5s.cpp
@@ -98,7 +98,7 @@ int space5_data = 7;
* cases. Since there are no operator<< for 'long long'
* or int64 in VS C++ ostream, I casted the hssize_t values
* passed to verify_val to 'long' as well. If problems
- * arises later, this will have to be specificly handled
+ * arises later, this will have to be specifically handled
* with a special routine.
* April 12, 2011: Raymond Lu
* Starting from the 1.8.7 release, we allow dimension
@@ -230,7 +230,7 @@ static void test_h5s_basic()
* cases. Since there are no operator<< for 'long long'
* or int64 in VS C++ ostream, I casted the hssize_t values
* passed to verify_val to 'long' as well. If problems
- * arises later, this will have to be specificly handled
+ * arises later, this will have to be specifically handled
* with a special routine.
*-------------------------------------------------------------------------
*/
@@ -294,7 +294,7 @@ static void test_h5s_scalar_write()
* cases. Since there are no operator<< for 'long long'
* or int64 in VS C++ ostream, I casted the hssize_t values
* passed to verify_val to 'long' as well. If problems
- * arises later, this will have to be specificly handled
+ * arises later, this will have to be specifically handled
* with a special routine.
*-------------------------------------------------------------------------
*/
@@ -356,7 +356,7 @@ static void test_h5s_scalar_read()
* cases. Since there are no operator<< for 'long long'
* or int64 in VS C++ ostream, I casted the hssize_t values
* passed to verify_val to 'long' as well. If problems
- * arises later, this will have to be specificly handled
+ * arises later, this will have to be specifically handled
* with a special routine.
*-------------------------------------------------------------------------
*/
@@ -411,7 +411,7 @@ static void test_h5s_null()
* cases. Since there are no operator<< for 'long long'
* or int64 in VS C++ ostream, I casted the hssize_t values
* passed to verify_val to 'long' as well. If problems
- * arises later, this will have to be specificly handled
+ * arises later, this will have to be specifically handled
* with a special routine.
*-------------------------------------------------------------------------
*/
@@ -484,7 +484,7 @@ static void test_h5s_compound_scalar_write()
* cases. Since there are no operator<< for 'long long'
* or int64 in VS C++ ostream, I casted the hssize_t values
* passed to verify_val to 'long' as well. If problems
- * arises later, this will have to be specificly handled
+ * arises later, this will have to be specifically handled
* with a special routine.
*-------------------------------------------------------------------------
*/
diff --git a/config/cmake/hdf5-config.cmake.in b/config/cmake/hdf5-config.cmake.in
index 574f541..e687a58 100644
--- a/config/cmake/hdf5-config.cmake.in
+++ b/config/cmake/hdf5-config.cmake.in
@@ -61,8 +61,8 @@ endif ()
if (${HDF5_PACKAGE_NAME}_BUILD_JAVA)
set (${HDF5_PACKAGE_NAME}_JAVA_INCLUDE_DIRS
@PACKAGE_CURRENT_BUILD_DIR@/lib/jarhdf5-@HDF5_VERSION_STRING@.jar
- @PACKAGE_CURRENT_BUILD_DIR@/lib/slf4j-api-1.7.5.jar
- @PACKAGE_CURRENT_BUILD_DIR@/lib/slf4j-nop-1.7.5.jar
+ @PACKAGE_CURRENT_BUILD_DIR@/lib/slf4j-api-1.7.25.jar
+ @PACKAGE_CURRENT_BUILD_DIR@/lib/slf4j-nop-1.7.25.jar
)
set (${HDF5_PACKAGE_NAME}_JAVA_LIBRARY "@PACKAGE_CURRENT_BUILD_DIR@/lib")
set (${HDF5_PACKAGE_NAME}_JAVA_LIBRARIES "${${HDF5_PACKAGE_NAME}_JAVA_LIBRARY}")
diff --git a/configure.ac b/configure.ac
index 1de69c8..dfb0a4d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -411,7 +411,7 @@ AC_CHECK_SIZEOF([double])
AC_CHECK_SIZEOF([long double])
## ----------------------------------------------------------------------
-## Check for non-standard extenstion __FLOAT128
+## Check for non-standard extension __FLOAT128
##
HAVE_FLOAT128=0
HAVE_QUADMATH=0
diff --git a/java/examples/datasets/JavaDatasetExample.sh.in b/java/examples/datasets/JavaDatasetExample.sh.in
index 740b07d..cb16d45 100644
--- a/java/examples/datasets/JavaDatasetExample.sh.in
+++ b/java/examples/datasets/JavaDatasetExample.sh.in
@@ -52,8 +52,8 @@ test -d $BLDLIBDIR || mkdir -p $BLDLIBDIR
# Comment '#' without space can be used.
# --------------------------------------------------------------------
LIST_LIBRARY_FILES="
-$HDFLIB_HOME/slf4j-api-1.7.5.jar
-$HDFLIB_HOME/ext/slf4j-simple-1.7.5.jar
+$HDFLIB_HOME/slf4j-api-1.7.25.jar
+$HDFLIB_HOME/ext/slf4j-simple-1.7.25.jar
$top_builddir/src/.libs/libhdf5.*
$top_builddir/java/src/jni/.libs/libhdf5_java.*
$top_builddir/java/src/$JARFILE
@@ -117,7 +117,7 @@ CLEAN_LIBFILES_AND_BLDLIBDIR()
# skip rm if srcdir is same as destdir
# this occurs when build/test performed in source dir and
# make cp fail
- SDIR=`$DIRNAME $HDFLIB_HOME/slf4j-api-1.7.5.jar`
+ SDIR=`$DIRNAME $HDFLIB_HOME/slf4j-api-1.7.25.jar`
INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'`
INODE_DDIR=`$LS -i -d $BLDLIBDIR | $AWK -F' ' '{print $1}'`
if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then
@@ -183,7 +183,7 @@ JAVAEXEFLAGS=@H5_JAVAFLAGS@
COPY_LIBFILES_TO_BLDLIBDIR
COPY_DATAFILES_TO_BLDDIR
-CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-1.7.5.jar:"$BLDLIBDIR"/slf4j-simple-1.7.5.jar:"$TESTJARFILE""
+CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-1.7.25.jar:"$BLDLIBDIR"/slf4j-simple-1.7.25.jar:"$TESTJARFILE""
TEST=/usr/bin/test
if [ ! -x /usr/bin/test ]
diff --git a/java/examples/datasets/Makefile.am b/java/examples/datasets/Makefile.am
index ddd6d39..b442603 100644
--- a/java/examples/datasets/Makefile.am
+++ b/java/examples/datasets/Makefile.am
@@ -28,7 +28,7 @@ classes:
pkgpath = examples/datasets
hdfjarfile = jar$(PACKAGE_TARNAME)-$(PACKAGE_VERSION).jar
-CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-1.7.5.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.5.jar:$$CLASSPATH
+CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-1.7.25.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.25.jar:$$CLASSPATH
jarfile = jar$(PACKAGE_TARNAME)datasets.jar
diff --git a/java/examples/datatypes/JavaDatatypeExample.sh.in b/java/examples/datatypes/JavaDatatypeExample.sh.in
index 590a547..033442a 100644
--- a/java/examples/datatypes/JavaDatatypeExample.sh.in
+++ b/java/examples/datatypes/JavaDatatypeExample.sh.in
@@ -49,8 +49,8 @@ test -d $BLDLIBDIR || mkdir -p $BLDLIBDIR
# Comment '#' without space can be used.
# --------------------------------------------------------------------
LIST_LIBRARY_FILES="
-$HDFLIB_HOME/slf4j-api-1.7.5.jar
-$HDFLIB_HOME/ext/slf4j-simple-1.7.5.jar
+$HDFLIB_HOME/slf4j-api-1.7.25.jar
+$HDFLIB_HOME/ext/slf4j-simple-1.7.25.jar
$top_builddir/src/.libs/libhdf5.*
$top_builddir/java/src/jni/.libs/libhdf5_java.*
$top_builddir/java/src/$JARFILE
@@ -114,7 +114,7 @@ CLEAN_LIBFILES_AND_BLDLIBDIR()
# skip rm if srcdir is same as destdir
# this occurs when build/test performed in source dir and
# make cp fail
- SDIR=`$DIRNAME $HDFLIB_HOME/slf4j-api-1.7.5.jar`
+ SDIR=`$DIRNAME $HDFLIB_HOME/slf4j-api-1.7.25.jar`
INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'`
INODE_DDIR=`$LS -i -d $BLDLIBDIR | $AWK -F' ' '{print $1}'`
if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then
@@ -179,7 +179,7 @@ JAVAEXEFLAGS=@H5_JAVAFLAGS@
COPY_LIBFILES_TO_BLDLIBDIR
COPY_DATAFILES_TO_BLDDIR
-CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-1.7.5.jar:"$BLDLIBDIR"/slf4j-simple-1.7.5.jar:"$TESTJARFILE""
+CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-1.7.25.jar:"$BLDLIBDIR"/slf4j-simple-1.7.25.jar:"$TESTJARFILE""
TEST=/usr/bin/test
if [ ! -x /usr/bin/test ]
diff --git a/java/examples/datatypes/Makefile.am b/java/examples/datatypes/Makefile.am
index f6e58f1..5e0e971 100644
--- a/java/examples/datatypes/Makefile.am
+++ b/java/examples/datatypes/Makefile.am
@@ -28,7 +28,7 @@ classes:
pkgpath = examples/datatypes
hdfjarfile = jar$(PACKAGE_TARNAME)-$(PACKAGE_VERSION).jar
-CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-1.7.5.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.5.jar:$$CLASSPATH
+CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-1.7.25.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.25.jar:$$CLASSPATH
jarfile = jar$(PACKAGE_TARNAME)datatypes.jar
diff --git a/java/examples/groups/JavaGroupExample.sh.in b/java/examples/groups/JavaGroupExample.sh.in
index 53c0045..36df02e 100644
--- a/java/examples/groups/JavaGroupExample.sh.in
+++ b/java/examples/groups/JavaGroupExample.sh.in
@@ -51,8 +51,8 @@ test -d $BLDREFDIR || mkdir -p $BLDREFDIR
# Comment '#' without space can be used.
# --------------------------------------------------------------------
LIST_LIBRARY_FILES="
-$HDFLIB_HOME/slf4j-api-1.7.5.jar
-$HDFLIB_HOME/ext/slf4j-simple-1.7.5.jar
+$HDFLIB_HOME/slf4j-api-1.7.25.jar
+$HDFLIB_HOME/ext/slf4j-simple-1.7.25.jar
$top_builddir/src/.libs/libhdf5.*
$top_builddir/java/src/jni/.libs/libhdf5_java.*
$top_builddir/java/src/$JARFILE
@@ -109,7 +109,7 @@ CLEAN_LIBFILES_AND_BLDLIBDIR()
# skip rm if srcdir is same as destdir
# this occurs when build/test performed in source dir and
# make cp fail
- SDIR=`$DIRNAME $HDFLIB_HOME/slf4j-api-1.7.5.jar`
+ SDIR=`$DIRNAME $HDFLIB_HOME/slf4j-api-1.7.25.jar`
INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'`
INODE_DDIR=`$LS -i -d $BLDLIBDIR | $AWK -F' ' '{print $1}'`
if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then
@@ -225,7 +225,7 @@ COPY_LIBFILES_TO_BLDLIBDIR
COPY_DATAFILES_TO_BLDDIR
COPY_REFFILES_TO_BLDREFDIR
-CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-1.7.5.jar:"$BLDLIBDIR"/slf4j-simple-1.7.5.jar:"$TESTJARFILE""
+CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-1.7.25.jar:"$BLDLIBDIR"/slf4j-simple-1.7.25.jar:"$TESTJARFILE""
TEST=/usr/bin/test
if [ ! -x /usr/bin/test ]
diff --git a/java/examples/groups/Makefile.am b/java/examples/groups/Makefile.am
index 4da17f9..09df743 100644
--- a/java/examples/groups/Makefile.am
+++ b/java/examples/groups/Makefile.am
@@ -28,7 +28,7 @@ classes:
pkgpath = examples/groups
hdfjarfile = jar$(PACKAGE_TARNAME)-$(PACKAGE_VERSION).jar
-CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-1.7.5.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.5.jar:$$CLASSPATH
+CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-1.7.25.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.25.jar:$$CLASSPATH
jarfile = jar$(PACKAGE_TARNAME)groups.jar
diff --git a/java/examples/intro/JavaIntroExample.sh.in b/java/examples/intro/JavaIntroExample.sh.in
index 3c3036a..077146d 100644
--- a/java/examples/intro/JavaIntroExample.sh.in
+++ b/java/examples/intro/JavaIntroExample.sh.in
@@ -49,8 +49,8 @@ test -d $BLDLIBDIR || mkdir -p $BLDLIBDIR
# Comment '#' without space can be used.
# --------------------------------------------------------------------
LIST_LIBRARY_FILES="
-$HDFLIB_HOME/slf4j-api-1.7.5.jar
-$HDFLIB_HOME/ext/slf4j-simple-1.7.5.jar
+$HDFLIB_HOME/slf4j-api-1.7.25.jar
+$HDFLIB_HOME/ext/slf4j-simple-1.7.25.jar
$top_builddir/src/.libs/libhdf5.*
$top_builddir/java/src/jni/.libs/libhdf5_java.*
$top_builddir/java/src/$JARFILE
@@ -103,7 +103,7 @@ CLEAN_LIBFILES_AND_BLDLIBDIR()
# skip rm if srcdir is same as destdir
# this occurs when build/test performed in source dir and
# make cp fail
- SDIR=`$DIRNAME $HDFLIB_HOME/slf4j-api-1.7.5.jar`
+ SDIR=`$DIRNAME $HDFLIB_HOME/slf4j-api-1.7.25.jar`
INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'`
INODE_DDIR=`$LS -i -d $BLDLIBDIR | $AWK -F' ' '{print $1}'`
if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then
@@ -168,7 +168,7 @@ JAVAEXEFLAGS=@H5_JAVAFLAGS@
COPY_LIBFILES_TO_BLDLIBDIR
COPY_DATAFILES_TO_BLDDIR
-CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-1.7.5.jar:"$BLDLIBDIR"/slf4j-simple-1.7.5.jar:"$TESTJARFILE""
+CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-1.7.25.jar:"$BLDLIBDIR"/slf4j-simple-1.7.25.jar:"$TESTJARFILE""
TEST=/usr/bin/test
if [ ! -x /usr/bin/test ]
diff --git a/java/examples/intro/Makefile.am b/java/examples/intro/Makefile.am
index 0df08cb..33f44c2 100644
--- a/java/examples/intro/Makefile.am
+++ b/java/examples/intro/Makefile.am
@@ -28,7 +28,7 @@ classes:
pkgpath = examples/intro
hdfjarfile = jar$(PACKAGE_TARNAME)-$(PACKAGE_VERSION).jar
-CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-1.7.5.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.5.jar:$$CLASSPATH
+CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-1.7.25.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.25.jar:$$CLASSPATH
jarfile = jar$(PACKAGE_TARNAME)intro.jar
diff --git a/java/lib/ext/slf4j-nop-1.7.25.jar b/java/lib/ext/slf4j-nop-1.7.25.jar
new file mode 100644
index 0000000..78c7295
--- /dev/null
+++ b/java/lib/ext/slf4j-nop-1.7.25.jar
Binary files differ
diff --git a/java/lib/ext/slf4j-nop-1.7.5.jar b/java/lib/ext/slf4j-nop-1.7.5.jar
deleted file mode 100644
index e55bdd8..0000000
--- a/java/lib/ext/slf4j-nop-1.7.5.jar
+++ /dev/null
Binary files differ
diff --git a/java/lib/ext/slf4j-simple-1.7.25.jar b/java/lib/ext/slf4j-simple-1.7.25.jar
new file mode 100644
index 0000000..b29ca12
--- /dev/null
+++ b/java/lib/ext/slf4j-simple-1.7.25.jar
Binary files differ
diff --git a/java/lib/ext/slf4j-simple-1.7.5.jar b/java/lib/ext/slf4j-simple-1.7.5.jar
deleted file mode 100644
index 9dece31..0000000
--- a/java/lib/ext/slf4j-simple-1.7.5.jar
+++ /dev/null
Binary files differ
diff --git a/java/lib/slf4j-api-1.7.25.jar b/java/lib/slf4j-api-1.7.25.jar
new file mode 100644
index 0000000..7e62f13
--- /dev/null
+++ b/java/lib/slf4j-api-1.7.25.jar
Binary files differ
diff --git a/java/lib/slf4j-api-1.7.5.jar b/java/lib/slf4j-api-1.7.5.jar
deleted file mode 100644
index 8766455..0000000
--- a/java/lib/slf4j-api-1.7.5.jar
+++ /dev/null
Binary files differ
diff --git a/java/src/Makefile.am b/java/src/Makefile.am
index 64eef0c..bd55c39 100644
--- a/java/src/Makefile.am
+++ b/java/src/Makefile.am
@@ -37,7 +37,7 @@ jarfile = jar$(PACKAGE_TARNAME)-$(PACKAGE_VERSION).jar
hdf5_javadir = $(libdir)
pkgpath = hdf/hdf5lib
-CLASSPATH_ENV=CLASSPATH=.:$(top_srcdir)/java/lib/slf4j-api-1.7.5.jar:$$CLASSPATH
+CLASSPATH_ENV=CLASSPATH=.:$(top_srcdir)/java/lib/slf4j-api-1.7.25.jar:$$CLASSPATH
AM_JAVACFLAGS = $(H5_JAVACFLAGS) -deprecation
diff --git a/java/src/hdf/hdf5lib/H5.java b/java/src/hdf/hdf5lib/H5.java
index f826fbc..f58623e 100644
--- a/java/src/hdf/hdf5lib/H5.java
+++ b/java/src/hdf/hdf5lib/H5.java
@@ -1746,20 +1746,15 @@ public class H5 implements java.io.Serializable {
status = H5Dread_double(dataset_id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id,
(double[]) obj, isCriticalPinning);
}
- else if (H5.H5Tequal(mem_type_id, HDF5Constants.H5T_STD_REF_DSETREG)) {
+ else if ((H5.H5Tdetect_class(mem_type_id, HDF5Constants.H5T_REFERENCE) && (is1D && (dataClass.getComponentType() == String.class))) || H5.H5Tequal(mem_type_id, HDF5Constants.H5T_STD_REF_DSETREG)) {
log.trace("H5Dread_reg_ref");
status = H5Dread_reg_ref(dataset_id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id,
(String[]) obj);
}
else if (is1D && (dataClass.getComponentType() == String.class)) {
log.trace("H5Dread_string type");
- if (H5.H5Tis_variable_str(mem_type_id)) {
- status = H5Dread_VLStrings(dataset_id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id, (Object[]) obj);
- }
- else {
- status = H5Dread_string(dataset_id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id,
+ status = H5Dread_string(dataset_id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id,
(String[]) obj);
- }
}
else {
// Create a data buffer to hold the data into a Java Array
@@ -1979,7 +1974,7 @@ public class H5 implements java.io.Serializable {
Class dataClass = obj.getClass();
if (!dataClass.isArray()) {
- throw (new HDF5JavaException("H5Dread: data is not an array"));
+ throw (new HDF5JavaException("H5Dwrite: data is not an array"));
}
String cname = dataClass.getName();
@@ -2012,13 +2007,8 @@ public class H5 implements java.io.Serializable {
}
else if (is1D && (dataClass.getComponentType() == String.class)) {
log.trace("H5Dwrite_string type");
- if (H5.H5Tis_variable_str(mem_type_id)) {
- status = H5Dwrite_VLStrings(dataset_id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id, (Object[]) obj);
- }
- else {
- status = H5Dwrite_string(dataset_id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id,
+ status = H5Dwrite_string(dataset_id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id,
(String[]) obj);
- }
}
else {
HDFArray theArray = new HDFArray(obj);
@@ -2084,6 +2074,9 @@ public class H5 implements java.io.Serializable {
public synchronized static native int H5Dwrite_string(long dataset_id, long mem_type_id, long mem_space_id,
long file_space_id, long xfer_plist_id, String[] buf) throws HDF5LibraryException, NullPointerException;
+ public synchronized static native int H5DwriteVL(long dataset_id, long mem_type_id, long mem_space_id,
+ long file_space_id, long xfer_plist_id, Object[] buf) throws HDF5LibraryException, NullPointerException;
+
/**
* H5Dwrite_VLStrings writes a (partial) variable length String dataset, specified by its identifier dataset_id, from
* the application memory buffer buf into the file.
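
With this change, 1-D String[] reads and writes in H5.java funnel through H5Dread_string/H5Dwrite_string instead of branching on H5Tis_variable_str, and the fixed- vs. variable-length decision moves down into the JNI layer; the new H5DwriteVL native is declared above. A minimal Java sketch of a caller after this patch — the file name, dataset name, and payload are hypothetical:

    import hdf.hdf5lib.H5;
    import hdf.hdf5lib.HDF5Constants;

    public class VlenStringWriteSketch {
        public static void main(String[] args) throws Exception {
            String[] data = { "alpha", "beta", "gamma" };  // hypothetical payload
            long fid = H5.H5Fcreate("vlen_str.h5", HDF5Constants.H5F_ACC_TRUNC,
                    HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
            long tid = H5.H5Tcopy(HDF5Constants.H5T_C_S1);
            H5.H5Tset_size(tid, HDF5Constants.H5T_VARIABLE);  // variable-length string type
            long sid = H5.H5Screate_simple(1, new long[] { data.length }, null);
            long did = H5.H5Dcreate(fid, "/vstr", tid, sid, HDF5Constants.H5P_DEFAULT,
                    HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
            // A 1-D String[] now always routes through H5Dwrite_string; the JNI
            // side, not H5.java, distinguishes fixed- from variable-length types.
            H5.H5Dwrite(did, tid, HDF5Constants.H5S_ALL, HDF5Constants.H5S_ALL,
                    HDF5Constants.H5P_DEFAULT, data);
            H5.H5Dclose(did); H5.H5Sclose(sid); H5.H5Tclose(tid); H5.H5Fclose(fid);
        }
    }
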
diff --git a/java/src/hdf/hdf5lib/HDF5Constants.java b/java/src/hdf/hdf5lib/HDF5Constants.java
index 43109f2..7eddac0 100644
--- a/java/src/hdf/hdf5lib/HDF5Constants.java
+++ b/java/src/hdf/hdf5lib/HDF5Constants.java
@@ -623,6 +623,7 @@ public class HDF5Constants {
public static final long H5T_UNIX_D64LE = H5T_UNIX_D64LE();
public static final long H5T_VARIABLE = H5T_VARIABLE();
public static final int H5T_VLEN = H5T_VLEN();
+ public static final int H5T_VL_T = H5T_VL_T();
public static final int H5Z_CB_CONT = H5Z_CB_CONT();
public static final int H5Z_CB_ERROR = H5Z_CB_ERROR();
public static final int H5Z_CB_FAIL = H5Z_CB_FAIL();
@@ -1825,6 +1826,8 @@ public class HDF5Constants {
private static native final int H5T_VLEN();
+ private static native final int H5T_VL_T();
+
private static native final int H5Z_CB_CONT();
private static native final int H5Z_CB_ERROR();
diff --git a/java/src/hdf/hdf5lib/HDFArray.java b/java/src/hdf/hdf5lib/HDFArray.java
index 529aecb..30f0fc8 100644
--- a/java/src/hdf/hdf5lib/HDFArray.java
+++ b/java/src/hdf/hdf5lib/HDFArray.java
@@ -1025,8 +1025,11 @@ class ArrayDescriptor {
NTsize = 8;
}
else if (css.startsWith("Ljava.lang.String")) {
+ NT = 'L';
+ className = "java.lang.String";
+ NTsize = 1;
throw new HDF5JavaException(new String(
- "ArrayDesciptor: Error: String array not supported yet"));
+ "ArrayDesciptor: Warning: String array not fully supported yet"));
}
else {
/*
diff --git a/java/src/jni/h5Constants.c b/java/src/jni/h5Constants.c
index 0f4361f..c99745d 100644
--- a/java/src/jni/h5Constants.c
+++ b/java/src/jni/h5Constants.c
@@ -1231,6 +1231,8 @@ JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1VARIABLE(JNIEnv *env, jclass cls) { return (int)H5T_VARIABLE; }
JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1VLEN(JNIEnv *env, jclass cls) { return H5T_VLEN; }
+JNIEXPORT jint JNICALL
+Java_hdf_hdf5lib_HDF5Constants_H5T_1VL_1T(JNIEnv *env, jclass cls) { return sizeof(hvl_t); }
JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1CB_1CONT(JNIEnv *env, jclass cls) { return H5Z_CB_CONT; }
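
The new H5T_VL_T constant exposes the native sizeof(hvl_t) to Java, so callers can size raw buffers that will hold variable-length descriptors. A trivial sketch of that buffer math — the element count is hypothetical:

    import hdf.hdf5lib.HDF5Constants;

    public class VlSizeSketch {
        public static void main(String[] args) {
            int vlSize = HDF5Constants.H5T_VL_T;     // native sizeof(hvl_t), per the JNI shim above
            int nElements = 1024;                    // hypothetical element count
            long bytes = (long) nElements * vlSize;  // raw buffer size for n hvl_t structs
            System.out.println(nElements + " hvl_t elements need " + bytes + " bytes");
        }
    }
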
diff --git a/java/src/jni/h5aImp.c b/java/src/jni/h5aImp.c
index 26ec4fc..13f5207 100644
--- a/java/src/jni/h5aImp.c
+++ b/java/src/jni/h5aImp.c
@@ -49,6 +49,10 @@ extern jobject visit_callback;
/* Local Prototypes */
/********************/
+static herr_t H5AwriteVL_asstr (JNIEnv *env, hid_t attr_id, hid_t mem_id, jobjectArray buf);
+static herr_t H5AwriteVL_str (JNIEnv *env, hid_t attr_id, hid_t mem_id, jobjectArray buf);
+static herr_t H5AreadVL_asstr (JNIEnv *env, hid_t attr_id, hid_t mem_id, jobjectArray buf);
+static herr_t H5AreadVL_str (JNIEnv *env, hid_t attr_id, hid_t mem_id, jobjectArray buf);
static herr_t H5A_iterate_cb(hid_t g_id, const char *name, const H5A_info_t *info, void *op_data);
@@ -178,7 +182,6 @@ Java_hdf_hdf5lib_H5_H5Aread
} /* end if */
else {
status = H5Aread((hid_t)attr_id, (hid_t)mem_type_id, byteP);
-
if (status < 0) {
ENVPTR->ReleaseByteArrayElements(ENVPAR buf, byteP, JNI_ABORT);
h5libraryError(env);
@@ -194,6 +197,323 @@ Java_hdf_hdf5lib_H5_H5Aread
/*
* Class: hdf_hdf5lib_H5
+ * Method: H5AwriteVL
+ * Signature: (JJ[Ljava/lang/String;)I
+ */
+JNIEXPORT jint JNICALL
+Java_hdf_hdf5lib_H5_H5AwriteVL
+ (JNIEnv *env, jclass clss, jlong attr_id, jlong mem_type_id, jobjectArray buf)
+{
+ herr_t status = -1;
+ htri_t isStr = 0;
+ htri_t isVlenStr = 0;
+ htri_t isComplex = 0;
+
+ if (buf == NULL) {
+ h5nullArgument(env, "H5AwriteVL: buf is NULL");
+ } /* end if */
+ else {
+ isStr = H5Tdetect_class((hid_t)mem_type_id, H5T_STRING);
+ if (H5Tget_class((hid_t)mem_type_id) == H5T_COMPOUND) {
+ unsigned i;
+ int nm = H5Tget_nmembers(mem_type_id);
+ for(i = 0; i <nm; i++) {
+ hid_t nested_tid = H5Tget_member_type((hid_t)mem_type_id, i);
+ isComplex = H5Tdetect_class((hid_t)nested_tid, H5T_COMPOUND) ||
+ H5Tdetect_class((hid_t)nested_tid, H5T_VLEN);
+ H5Tclose(nested_tid);
+ }
+ }
+ else if (H5Tget_class((hid_t)mem_type_id) == H5T_VLEN) {
+ isVlenStr = 1; /* strings created by H5Tvlen_create(H5T_C_S1) */
+ }
+ if (isStr == 0 || isComplex>0 || isVlenStr) {
+ status = H5AwriteVL_asstr(env, (hid_t)attr_id, (hid_t)mem_type_id, buf);
+ }
+ else if (isStr > 0) {
+ status = H5AwriteVL_str(env, (hid_t)attr_id, (hid_t)mem_type_id, buf);
+ }
+ } /* end else */
+
+ return (jint)status;
+} /* end Java_hdf_hdf5lib_H5_H5Awrite_1VL */
+
+herr_t
+H5AwriteVL_str
+ (JNIEnv *env, hid_t aid, hid_t tid, jobjectArray buf)
+{
+ herr_t status = -1;
+ char **wdata;
+ jsize size;
+ jint i;
+
+ size = ENVPTR->GetArrayLength(ENVPAR (jarray) buf);
+
+ wdata = (char**)HDcalloc((size_t)size + 1, sizeof(char*));
+ if (!wdata) {
+ h5JNIFatalError(env, "H5AwriteVL_str: cannot allocate buffer");
+ } /* end if */
+ else {
+ HDmemset(wdata, 0, (size_t)size * sizeof(char*));
+ for (i = 0; i < size; ++i) {
+ jstring obj = (jstring) ENVPTR->GetObjectArrayElement(ENVPAR (jobjectArray) buf, i);
+ if (obj != 0) {
+ jsize length = ENVPTR->GetStringUTFLength(ENVPAR obj);
+ const char *utf8 = ENVPTR->GetStringUTFChars(ENVPAR obj, 0);
+
+ if (utf8) {
+ wdata[i] = (char*)HDmalloc((size_t)length + 1);
+ if (wdata[i]) {
+ HDmemset(wdata[i], 0, ((size_t)length + 1));
+ HDstrncpy(wdata[i], utf8, (size_t)length);
+ } /* end if */
+ } /* end if */
+
+ ENVPTR->ReleaseStringUTFChars(ENVPAR obj, utf8);
+ ENVPTR->DeleteLocalRef(ENVPAR obj);
+ } /* end if */
+ } /* end for (i = 0; i < size; ++i) */
+
+ status = H5Awrite((hid_t)aid, (hid_t)tid, wdata);
+
+ for (i = 0; i < size; i++) {
+ if(wdata[i]) {
+ HDfree(wdata[i]);
+ } /* end if */
+ } /* end for */
+ HDfree(wdata);
+
+ if (status < 0)
+ h5libraryError(env);
+ } /* end else */
+
+ return (jint)status;
+}
+
+herr_t
+H5AwriteVL_asstr
+ (JNIEnv *env, hid_t aid, hid_t tid, jobjectArray buf)
+{
+ char **strs;
+ jstring jstr;
+ jint i;
+ jint n;
+ hid_t sid;
+ hsize_t dims[H5S_MAX_RANK];
+ herr_t status = -1;
+
+ n = ENVPTR->GetArrayLength(ENVPAR buf);
+ strs =(hvl_t*)HDcalloc((size_t)n, sizeof(hvl_t));
+
+ if (strs == NULL) {
+ h5JNIFatalError(env, "H5AwriteVL_asstr: failed to allocate buff for read variable length strings");
+ } /* end if */
+ else {
+ status = H5Awrite(aid, tid, strs);
+
+ if (status < 0) {
+ dims[0] = (hsize_t)n;
+ sid = H5Screate_simple(1, dims, NULL);
+ H5Dvlen_reclaim(tid, sid, H5P_DEFAULT, strs);
+ H5Sclose(sid);
+ HDfree(strs);
+ h5JNIFatalError(env, "H5AwriteVL_str: failed to read variable length strings");
+ } /* end if */
+ else {
+ for (i=0; i < n; i++) {
+ jstr = ENVPTR->NewStringUTF(ENVPAR strs[i]);
+ ENVPTR->SetObjectArrayElement(ENVPAR buf, i, jstr);
+ H5free_memory (strs[i]);
+ } /* end for */
+
+ /*
+ for repeatedly reading a dataset with a large number of strs (e.g., 1,000,000 strings,
+ H5Dvlen_reclaim() may crash on Windows because the Java GC will not be able to collect
+ free space in time. Instead, use "H5free_memory(strs[i])" above to free individual strings
+ after it is done.
+ H5Dvlen_reclaim(tid, mem_sid, xfer_plist_id, strs);
+ */
+
+ HDfree(strs);
+ } /* end else */
+ } /* end else */
+
+ return status;
+} /* end H5AwriteVL_str */
+
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5AreadVL
+ * Signature: (JJJJJ[Ljava/lang/String;)I
+ */
+JNIEXPORT jint JNICALL
+Java_hdf_hdf5lib_H5_H5AreadVL
+ (JNIEnv *env, jclass clss, jlong attr_id, jlong mem_type_id, jobjectArray buf)
+{
+ herr_t status = -1;
+ htri_t isStr = 0;
+ htri_t isVlenStr = 0;
+ htri_t isComplex = 0;
+
+ if (buf == NULL) {
+ h5nullArgument(env, "H5AreadVL: buf is NULL");
+ } /* end if */
+ else {
+ isStr = H5Tdetect_class((hid_t)mem_type_id, H5T_STRING);
+ if (H5Tget_class((hid_t)mem_type_id) == H5T_COMPOUND) {
+ unsigned i;
+ int nm = H5Tget_nmembers(mem_type_id);
+ for(i = 0; i <nm; i++) {
+ hid_t nested_tid = H5Tget_member_type((hid_t)mem_type_id, i);
+ isComplex = H5Tdetect_class((hid_t)nested_tid, H5T_COMPOUND) ||
+ H5Tdetect_class((hid_t)nested_tid, H5T_VLEN);
+ H5Tclose(nested_tid);
+ }
+ }
+ else if (H5Tget_class((hid_t)mem_type_id) == H5T_VLEN) {
+ isVlenStr = 1; /* strings created by H5Tvlen_create(H5T_C_S1) */
+ }
+ if (isStr == 0 || isComplex>0 || isVlenStr) {
+ status = H5AreadVL_asstr(env, (hid_t)attr_id, (hid_t)mem_type_id, buf);
+ }
+ else if (isStr > 0) {
+ status = H5AreadVL_str(env, (hid_t)attr_id, (hid_t)mem_type_id, buf);
+ }
+ } /* end else */
+
+ return (jint)status;
+} /* end Java_hdf_hdf5lib_H5_H5Aread_1VL */
+
+herr_t
+H5AreadVL_asstr
+ (JNIEnv *env, hid_t aid, hid_t tid, jobjectArray buf)
+{
+ jint i;
+ jint n;
+ hid_t sid;
+ jstring jstr;
+ h5str_t h5str;
+ hvl_t *rdata;
+ hsize_t dims[H5S_MAX_RANK];
+ size_t size;
+ size_t max_len = 0;
+ herr_t status = -1;
+
+ /* Get size of string array */
+ n = ENVPTR->GetArrayLength(ENVPAR buf);
+ /* we will need to read n number of hvl_t structures */
+ rdata = (hvl_t*)HDcalloc((size_t)n, sizeof(hvl_t));
+ if (rdata == NULL) {
+ h5JNIFatalError(env, "H5AreadVL_asstr: failed to allocate buff for read");
+ } /* end if */
+ else {
+ status = H5Aread(aid, tid, rdata);
+
+ if (status < 0) {
+ dims[0] = (hsize_t)n;
+ sid = H5Screate_simple(1, dims, NULL);
+ H5Dvlen_reclaim(tid, sid, H5P_DEFAULT, rdata);
+ H5Sclose(sid);
+ HDfree(rdata);
+ h5JNIFatalError(env, "H5AreadVL_asstr: failed to read data");
+ } /* end if */
+ else {
+ /* calculate the largest size of all the hvl_t structures read */
+ max_len = 1;
+ for (i=0; i < n; i++) {
+ if ((rdata + i)->len > max_len)
+ max_len = (rdata + i)->len;
+ }
+
+ /* create one malloc to hold largest element */
+ size = H5Tget_size(tid) * max_len;
+ HDmemset(&h5str, 0, sizeof(h5str_t));
+ h5str_new(&h5str, 4 * size);
+
+ if (h5str.s == NULL) {
+ dims[0] = (hsize_t)n;
+ sid = H5Screate_simple(1, dims, NULL);
+ H5Dvlen_reclaim(tid, sid, H5P_DEFAULT, rdata);
+ H5Sclose(sid);
+ HDfree(rdata);
+ h5JNIFatalError(env, "H5AreadVL_asstr: failed to allocate buf");
+ } /* end if */
+ else {
+ H5T_class_t tclass = H5Tget_class(tid);
+ /* convert each element to char string */
+ for (i=0; i < n; i++) {
+ h5str.s[0] = '\0';
+ h5str_vlsprintf(&h5str, aid, tid, rdata+i, 0);
+ jstr = ENVPTR->NewStringUTF(ENVPAR h5str.s);
+ ENVPTR->SetObjectArrayElement(ENVPAR buf, i, jstr);
+ } /* end for */
+ h5str_free(&h5str);
+
+ dims[0] = (hsize_t)n;
+ sid = H5Screate_simple(1, dims, NULL);
+ H5Dvlen_reclaim(tid, sid, H5P_DEFAULT, rdata);
+ H5Sclose(sid);
+ HDfree(rdata);
+ } /* end else */
+ } /* end else */
+ } /* end else */
+
+ return status;
+}
+
+herr_t
+H5AreadVL_str
+ (JNIEnv *env, hid_t aid, hid_t tid, jobjectArray buf)
+{
+ char **strs;
+ jstring jstr;
+ jint i;
+ jint n;
+ hid_t sid;
+ hsize_t dims[H5S_MAX_RANK];
+ herr_t status = -1;
+
+ n = ENVPTR->GetArrayLength(ENVPAR buf);
+ strs =(char**)HDcalloc((size_t)n, sizeof(char*));
+
+ if (strs == NULL) {
+ h5JNIFatalError(env, "H5AreadVL_str: failed to allocate buff for read variable length strings");
+ } /* end if */
+ else {
+ status = H5Aread(aid, tid, strs);
+
+ if (status < 0) {
+ dims[0] = (hsize_t)n;
+ sid = H5Screate_simple(1, dims, NULL);
+ H5Dvlen_reclaim(tid, sid, H5P_DEFAULT, strs);
+ H5Sclose(sid);
+ HDfree(strs);
+ h5JNIFatalError(env, "H5AreadVL_str: failed to read variable length strings");
+ } /* end if */
+ else {
+ for (i=0; i < n; i++) {
+ jstr = ENVPTR->NewStringUTF(ENVPAR strs[i]);
+ ENVPTR->SetObjectArrayElement(ENVPAR buf, i, jstr);
+ H5free_memory (strs[i]);
+ } /* end for */
+
+ /*
+ for repeatedly reading a dataset with a large number of strs (e.g., 1,000,000 strings,
+ H5Dvlen_reclaim() may crash on Windows because the Java GC will not be able to collect
+ free space in time. Instead, use "H5free_memory(strs[i])" above to free individual strings
+ after it is done.
+ H5Dvlen_reclaim(tid, mem_sid, xfer_plist_id, strs);
+ */
+
+ HDfree(strs);
+ } /* end else */
+ } /* end else */
+
+ return status;
+} /* end H5AreadVL_str */
+
+/*
+ * Class: hdf_hdf5lib_H5
* Method: H5Aget_space
* Signature: (J)J
*/
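
The new attribute entry points mirror the dataset ones: H5AreadVL/H5AwriteVL inspect the memory type and dispatch to the _str variant for plain variable-length strings, or to the _asstr variant, which stringifies compound and true H5T_VLEN element types. A minimal read sketch, assuming the matching H5AreadVL native declaration in H5.java implied by the (JJ[Ljava/lang/String;)I signature above; the file and attribute names are hypothetical:

    import hdf.hdf5lib.H5;
    import hdf.hdf5lib.HDF5Constants;

    public class VlenAttrReadSketch {
        public static void main(String[] args) throws Exception {
            long fid = H5.H5Fopen("vlen_attr.h5", HDF5Constants.H5F_ACC_RDONLY,
                    HDF5Constants.H5P_DEFAULT);
            long aid = H5.H5Aopen(fid, "notes", HDF5Constants.H5P_DEFAULT);
            long tid = H5.H5Aget_type(aid);
            long sid = H5.H5Aget_space(aid);
            int n = (int) H5.H5Sget_simple_extent_npoints(sid);

            String[] values = new String[n];
            H5.H5AreadVL(aid, tid, values);  // dispatches per the type-class logic above
            for (String v : values)
                System.out.println(v);

            H5.H5Sclose(sid); H5.H5Tclose(tid); H5.H5Aclose(aid); H5.H5Fclose(fid);
        }
    }
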
diff --git a/java/src/jni/h5aImp.h b/java/src/jni/h5aImp.h
index 62769fd..f3758eb 100644
--- a/java/src/jni/h5aImp.h
+++ b/java/src/jni/h5aImp.h
@@ -69,6 +69,24 @@ Java_hdf_hdf5lib_H5_H5Aread
/*
* Class: hdf_hdf5lib_H5
+ * Method: H5AwriteVL
+ * Signature: (JJ[Ljava/lang/String;)I
+ */
+JNIEXPORT jint JNICALL
+Java_hdf_hdf5lib_H5_H5AwriteVL
+ (JNIEnv *, jclass, jlong, jlong, jobjectArray);
+
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5AreadVL
+ * Signature: (JJ[Ljava/lang/String;)I
+ */
+JNIEXPORT jint JNICALL
+Java_hdf_hdf5lib_H5_H5AreadVL
+ (JNIEnv *, jclass, jlong, jlong, jobjectArray);
+
+/*
+ * Class: hdf_hdf5lib_H5
* Method: H5Aget_space
* Signature: (J)J
*/
diff --git a/java/src/jni/h5dImp.c b/java/src/jni/h5dImp.c
index 9784055..cea6bb4 100644
--- a/java/src/jni/h5dImp.c
+++ b/java/src/jni/h5dImp.c
@@ -56,6 +56,7 @@ extern jobject visit_callback;
static herr_t H5DreadVL_asstr (JNIEnv *env, hid_t did, hid_t tid, hid_t mem_sid, hid_t file_sid, hid_t xfer_plist_id, jobjectArray buf);
static herr_t H5DreadVL_str (JNIEnv *env, hid_t did, hid_t tid, hid_t mem_sid, hid_t file_sid, hid_t xfer_plist_id, jobjectArray buf);
static herr_t H5DreadVL_array (JNIEnv *env, hid_t did, hid_t tid, hid_t mem_sid, hid_t file_sid, hid_t xfer_plist_id, jobjectArray buf);
+static herr_t H5DwriteVL_asstr (JNIEnv *env, hid_t did, hid_t tid, hid_t mem_sid, hid_t file_sid, hid_t xfer_plist_id, jobjectArray buf);
static herr_t H5DwriteVL_str (JNIEnv *env, hid_t did, hid_t tid, hid_t mem_sid, hid_t file_sid, hid_t xfer_plist_id, jobjectArray buf);
static herr_t H5DwriteVL_array (JNIEnv *env, hid_t did, hid_t tid, hid_t mem_sid, hid_t file_sid, hid_t xfer_plist_id, jobjectArray buf);
@@ -993,96 +994,6 @@ Java_hdf_hdf5lib_H5_H5Dwrite_1double
/*
* Class: hdf_hdf5lib_H5
- * Method: H5DreadVL
- * Signature: (JJJJJ[Ljava/lang/String;)I
- */
-JNIEXPORT jint JNICALL
-Java_hdf_hdf5lib_H5_H5DreadVL
- (JNIEnv *env, jclass clss, jlong dataset_id, jlong mem_type_id, jlong mem_space_id,
- jlong file_space_id, jlong xfer_plist_id, jobjectArray buf)
-{
- herr_t status = -1;
- htri_t isVlenStr=0;
-
- if (buf == NULL) {
- h5nullArgument(env, "H5DreadVL: buf is NULL");
- } /* end if */
- else {
- isVlenStr = H5Tdetect_class((hid_t)mem_type_id, H5T_STRING);
-
- if (isVlenStr)
- h5badArgument(env, "H5DreadVL: type is not variable length non-string");
- else
- status = H5DreadVL_asstr(env, (hid_t)dataset_id, (hid_t)mem_type_id,
- (hid_t)mem_space_id, (hid_t)file_space_id,
- (hid_t)xfer_plist_id, buf);
- } /* end else */
-
- return (jint)status;
-} /* end Java_hdf_hdf5lib_H5_H5Dread_1VL */
-
-herr_t
-H5DreadVL_asstr
- (JNIEnv *env, hid_t did, hid_t tid, hid_t mem_sid, hid_t file_sid, hid_t xfer_plist_id, jobjectArray buf)
-{
- jint i;
- jint n;
- jstring jstr;
- h5str_t h5str;
- hvl_t *rdata;
- size_t size;
- size_t max_len = 0;
- herr_t status = -1;
-
- n = ENVPTR->GetArrayLength(ENVPAR buf);
- rdata = (hvl_t*)HDcalloc((size_t)n, sizeof(hvl_t));
- if (rdata == NULL) {
- h5JNIFatalError(env, "H5DreadVL_notstr: failed to allocate buff for read");
- } /* end if */
- else {
- status = H5Dread(did, tid, mem_sid, file_sid, xfer_plist_id, rdata);
-
- if (status < 0) {
- H5Dvlen_reclaim(tid, mem_sid, xfer_plist_id, rdata);
- HDfree(rdata);
- h5JNIFatalError(env, "H5DreadVL_notstr: failed to read data");
- } /* end if */
- else {
- max_len = 1;
- for (i=0; i < n; i++) {
- if ((rdata + i)->len > max_len)
- max_len = (rdata + i)->len;
- }
-
- size = H5Tget_size(tid) * max_len;
- HDmemset(&h5str, 0, sizeof(h5str_t));
- h5str_new(&h5str, 4 * size);
-
- if (h5str.s == NULL) {
- H5Dvlen_reclaim(tid, mem_sid, xfer_plist_id, rdata);
- HDfree(rdata);
- h5JNIFatalError(env, "H5DreadVL_notstr: failed to allocate buf");
- } /* end if */
- else {
- for (i=0; i < n; i++) {
- h5str.s[0] = '\0';
- h5str_sprintf(&h5str, did, tid, rdata+i, 0);
- jstr = ENVPTR->NewStringUTF(ENVPAR h5str.s);
- ENVPTR->SetObjectArrayElement(ENVPAR buf, i, jstr);
- } /* end for */
- h5str_free(&h5str);
-
- H5Dvlen_reclaim(tid, mem_sid, xfer_plist_id, rdata);
- HDfree(rdata);
- } /* end else */
- } /* end else */
- } /* end else */
-
- return status;
-}
-
-/*
- * Class: hdf_hdf5lib_H5
* Method: H5Dread_string
* Signature: (JJJJJ[Ljava/lang/String;)I
*/
@@ -1218,6 +1129,120 @@ Java_hdf_hdf5lib_H5_H5Dwrite_1string
return (jint)status;
} /* end Java_hdf_hdf5lib_H5_H5Dwrite_1string */
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5DreadVL
+ * Signature: (JJJJJ[Ljava/lang/String;)I
+ */
+JNIEXPORT jint JNICALL
+Java_hdf_hdf5lib_H5_H5DreadVL
+ (JNIEnv *env, jclass clss, jlong dataset_id, jlong mem_type_id, jlong mem_space_id,
+ jlong file_space_id, jlong xfer_plist_id, jobjectArray buf)
+{
+ herr_t status = -1;
+ htri_t isStr = 0;
+ htri_t isVlenStr = 0;
+ htri_t isComplex = 0;
+
+ if (buf == NULL) {
+ h5nullArgument(env, "H5DreadVL: buf is NULL");
+ } /* end if */
+ else {
+ isStr = H5Tdetect_class((hid_t)mem_type_id, H5T_STRING);
+ if (H5Tget_class((hid_t)mem_type_id) == H5T_COMPOUND) {
+ unsigned i;
+ int nm = H5Tget_nmembers(mem_type_id);
+ for(i = 0; i <nm; i++) {
+ hid_t nested_tid = H5Tget_member_type((hid_t)mem_type_id, i);
+ isComplex = H5Tdetect_class((hid_t)nested_tid, H5T_COMPOUND) ||
+ H5Tdetect_class((hid_t)nested_tid, H5T_VLEN);
+ H5Tclose(nested_tid);
+ }
+ }
+ else if (H5Tget_class((hid_t)mem_type_id) == H5T_VLEN) {
+ isVlenStr = 1; /* strings created by H5Tvlen_create(H5T_C_S1) */
+ }
+ if (isStr == 0 || isComplex>0 || isVlenStr) {
+ status = H5DreadVL_asstr(env, (hid_t)dataset_id, (hid_t)mem_type_id,
+ (hid_t)mem_space_id, (hid_t)file_space_id,
+ (hid_t)xfer_plist_id, buf);
+ }
+ else if (isStr > 0) {
+ status = H5DreadVL_str(env, (hid_t)dataset_id, (hid_t)mem_type_id,
+ (hid_t)mem_space_id, (hid_t)file_space_id,
+ (hid_t)xfer_plist_id, buf);
+ }
+ } /* end else */
+
+ return (jint)status;
+} /* end Java_hdf_hdf5lib_H5_H5Dread_1VL */
+
+herr_t
+H5DreadVL_asstr
+ (JNIEnv *env, hid_t did, hid_t tid, hid_t mem_sid, hid_t file_sid, hid_t xfer_plist_id, jobjectArray buf)
+{
+ jint i;
+ jint n;
+ jstring jstr;
+ h5str_t h5str;
+ hvl_t *rdata;
+ size_t size;
+ size_t max_len = 0;
+ herr_t status = -1;
+
+ /* Get size of string array */
+ n = ENVPTR->GetArrayLength(ENVPAR buf);
+ /* we will need to read n number of hvl_t structures */
+ rdata = (hvl_t*)HDcalloc((size_t)n, sizeof(hvl_t));
+ if (rdata == NULL) {
+ h5JNIFatalError(env, "H5DreadVL_asstr: failed to allocate buff for read");
+ } /* end if */
+ else {
+ status = H5Dread(did, tid, mem_sid, file_sid, xfer_plist_id, rdata);
+
+ if (status < 0) {
+ H5Dvlen_reclaim(tid, mem_sid, xfer_plist_id, rdata);
+ HDfree(rdata);
+ h5JNIFatalError(env, "H5DreadVL_asstr: failed to read data");
+ } /* end if */
+ else {
+ /* calculate the largest size of all the hvl_t structures read */
+ max_len = 1;
+ for (i=0; i < n; i++) {
+ if ((rdata + i)->len > max_len)
+ max_len = (rdata + i)->len;
+ }
+
+ /* create one malloc to hold largest element */
+ size = H5Tget_size(tid) * max_len;
+ HDmemset(&h5str, 0, sizeof(h5str_t));
+ h5str_new(&h5str, 4 * size);
+
+ if (h5str.s == NULL) {
+ H5Dvlen_reclaim(tid, mem_sid, xfer_plist_id, rdata);
+ HDfree(rdata);
+ h5JNIFatalError(env, "H5DreadVL_asstr: failed to allocate buf");
+ } /* end if */
+ else {
+ H5T_class_t tclass = H5Tget_class(tid);
+ /* convert each element to char string */
+ for (i=0; i < n; i++) {
+ h5str.s[0] = '\0';
+ h5str_vlsprintf(&h5str, did, tid, rdata+i, 0);
+ jstr = ENVPTR->NewStringUTF(ENVPAR h5str.s);
+ ENVPTR->SetObjectArrayElement(ENVPAR buf, i, jstr);
+ } /* end for */
+ h5str_free(&h5str);
+
+ H5Dvlen_reclaim(tid, mem_sid, xfer_plist_id, rdata);
+ HDfree(rdata);
+ } /* end else */
+ } /* end else */
+ } /* end else */
+
+ return status;
+}
+
/**
* Read VLEN data into array of arrays.
* Object[] buf contains VL arrays of data points
@@ -1305,6 +1330,104 @@ H5DreadVL_str
/*
* Class: hdf_hdf5lib_H5
+ * Method: H5DwriteVL
+ * Signature: (JJJJJ[Ljava/lang/String;)I
+ */
+JNIEXPORT jint JNICALL
+Java_hdf_hdf5lib_H5_H5DwriteVL
+ (JNIEnv *env, jclass clss, jlong dataset_id, jlong mem_type_id, jlong mem_space_id,
+ jlong file_space_id, jlong xfer_plist_id, jobjectArray buf)
+{
+ herr_t status = -1;
+ htri_t isStr = 0;
+ htri_t isVlenStr = 0;
+ htri_t isComplex = 0;
+
+ if (buf == NULL) {
+ h5nullArgument(env, "H5DwriteVL: buf is NULL");
+ } /* end if */
+ else {
+ isStr = H5Tdetect_class((hid_t)mem_type_id, H5T_STRING);
+ if (H5Tget_class((hid_t)mem_type_id) == H5T_COMPOUND) {
+ unsigned i;
+ int nm = H5Tget_nmembers(mem_type_id);
+ for(i = 0; i <nm; i++) {
+ hid_t nested_tid = H5Tget_member_type((hid_t)mem_type_id, i);
+ isComplex = H5Tdetect_class((hid_t)nested_tid, H5T_COMPOUND) ||
+ H5Tdetect_class((hid_t)nested_tid, H5T_VLEN);
+ H5Tclose(nested_tid);
+ }
+ }
+ else if (H5Tget_class((hid_t)mem_type_id) == H5T_VLEN) {
+ isVlenStr = 1; /* strings created by H5Tvlen_create(H5T_C_S1) */
+ }
+ if (isStr == 0 || isComplex>0 || isVlenStr) {
+ status = H5DwriteVL_asstr(env, (hid_t)dataset_id, (hid_t)mem_type_id,
+ (hid_t)mem_space_id, (hid_t)file_space_id,
+ (hid_t)xfer_plist_id, buf);
+ }
+ else if (isStr > 0) {
+ status = H5DwriteVL_str(env, (hid_t)dataset_id, (hid_t)mem_type_id,
+ (hid_t)mem_space_id, (hid_t)file_space_id,
+ (hid_t)xfer_plist_id, buf);
+ }
+ } /* end else */
+
+ return (jint)status;
+} /* end Java_hdf_hdf5lib_H5_H5Dwrite_1VL */
+
+herr_t
+H5DwriteVL_asstr
+ (JNIEnv *env, hid_t did, hid_t tid, hid_t mem_sid, hid_t file_sid, hid_t xfer_plist_id, jobjectArray buf)
+{
+ herr_t status = -1;
+ hvl_t *wdata;
+ jsize size;
+ jint i;
+ jint n;
+
+ /* Get size of string array */
+ n = ENVPTR->GetArrayLength(ENVPAR buf);
+ wdata = (hvl_t*)HDcalloc((size_t)n, sizeof(hvl_t));
+
+ if (wdata == NULL) {
+ h5JNIFatalError(env, "H5DwriteVL_asstr: failed to allocate buff for write");
+ } /* end if */
+ else {
+ for (i = 0; i < n; ++i) {
+ jstring obj = (jstring) ENVPTR->GetObjectArrayElement(ENVPAR (jobjectArray)buf, i);
+ if (obj != 0) {
+ jsize length = ENVPTR->GetStringUTFLength(ENVPAR obj);
+ const char *utf8 = ENVPTR->GetStringUTFChars(ENVPAR obj, 0);
+
+                if (utf8) {
+                    /* h5str_vlconvert tokenizes in place; work on a copy, not the JVM's buffer */
+                    char *tmp = (char *)HDmalloc((size_t)length + 1);
+                    if (tmp) {
+                        HDmemset(tmp, 0, (size_t)length + 1);
+                        HDstrncpy(tmp, utf8, (size_t)length);
+                        h5str_vlconvert(tmp, did, tid, wdata + i, 0);
+                        HDfree(tmp);
+                    } /* end if */
+                    ENVPTR->ReleaseStringUTFChars(ENVPAR obj, utf8);
+                } /* end if */
+
+                ENVPTR->DeleteLocalRef(ENVPAR obj);
+ } /* end if */
+        } /* end for (i = 0; i < n; ++i) */
+
+ status = H5Dwrite(did, tid, mem_sid, file_sid, xfer_plist_id, wdata);
+
+        /* free the vlen buffer allocated for each element */
+        for (i = 0; i < n; i++) {
+            if (wdata[i].p) {
+                HDfree(wdata[i].p);
+            } /* end if */
+        } /* end for */
+ HDfree(wdata);
+
+ if (status < 0)
+ h5libraryError(env);
+ } /* end else */
+
+ return status;
+} /* end H5DwriteVL_asstr */
+
+/*
+ * Class: hdf_hdf5lib_H5
* Method: H5Dwrite_VLStrings
* Signature: (JJJJJ[Ljava/lang/String;)I
*/
@@ -1347,7 +1470,7 @@ H5DwriteVL_str
wdata = (char**)HDmalloc((size_t)size * sizeof (char*));
if (!wdata) {
- h5JNIFatalError(env, "H5DwriteVL_string: cannot allocate buffer");
+ h5JNIFatalError(env, "H5DwriteVL_str: cannot allocate buffer");
} /* end if */
else {
HDmemset(wdata, 0, (size_t)size * sizeof(char*));
@@ -1433,7 +1556,7 @@ Java_hdf_hdf5lib_H5_H5Dread_1reg_1ref
h5str_new(&h5str, 1024);
for (i=0; i<n; i++) {
h5str.s[0] = '\0';
- h5str_sprintf(&h5str, did, tid, ref_data[i], 0);
+ h5str_sprintf(&h5str, did, tid, ref_data[i], 0, 0);
jstr = ENVPTR->NewStringUTF(ENVPAR h5str.s);
ENVPTR->SetObjectArrayElement(ENVPAR buf, i, jstr);
diff --git a/java/src/jni/h5dImp.h b/java/src/jni/h5dImp.h
index 3cf24fe..1fe71a8 100644
--- a/java/src/jni/h5dImp.h
+++ b/java/src/jni/h5dImp.h
@@ -213,6 +213,15 @@ Java_hdf_hdf5lib_H5_H5DreadVL
/*
* Class: hdf_hdf5lib_H5
+ * Method: H5DwriteVL
+ * Signature: (JJJJJ[Ljava/lang/String;)I
+ */
+JNIEXPORT jint JNICALL
+Java_hdf_hdf5lib_H5_H5DwriteVL
+(JNIEnv*, jclass, jlong, jlong, jlong, jlong, jlong, jobjectArray);
+
+/*
+ * Class: hdf_hdf5lib_H5
* Method: H5Dread_string
* Signature: (JJJJJ[Ljava/lang/String;)I
*/
diff --git a/java/src/jni/h5util.c b/java/src/jni/h5util.c
index 9a98202..33dca7f 100644
--- a/java/src/jni/h5util.c
+++ b/java/src/jni/h5util.c
@@ -21,6 +21,7 @@
extern "C" {
#endif /* __cplusplus */
+#include <jni.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
@@ -152,6 +153,513 @@ h5str_append
return HDstrcat(str->s, cstr);
} /* end h5str_append */
+/** convert a string representation into a vlen data point.
+    Return Value:
+        On success, the number of characters processed is returned.
+        On error, a negative number is returned.
+ */
+size_t
+h5str_vlconvert
+ (char *str, hid_t container, hid_t tid, hvl_t *ptr, int expand_data)
+{
+    hid_t mtid = -1;
+    size_t offset;
+    size_t this_strlen = 0;
+    int n;
+    H5T_class_t tclass = H5Tget_class(tid);
+    size_t size = H5Tget_size(tid);
+
+    if (!str || !ptr)
+        return 0;
+
+ switch (tclass) {
+ case H5T_COMPOUND:
+ {
+ unsigned i;
+ n = H5Tget_nmembers(tid);
+
+ /* remove compound indicators */
+ if (str[0] == ' ')
+ str++;
+ if (str[0] == '{')
+ str++;
+
+ ptr->p = HDcalloc((size_t)1, size);
+ ptr->len = size;
+ for (i = 0; i < n; i++) {
+ offset = H5Tget_member_offset(tid, i);
+ mtid = H5Tget_member_type(tid, i);
+                /* write each member at its offset within the allocated element */
+                h5str_convert(&str, container, mtid, ptr, offset, expand_data);
+ /* remove compound indicators */
+ if (str[0] == ',')
+ str++;
+ if (str[0] == ' ')
+ str++;
+ H5Tclose(mtid);
+ }
+ /* remove compound indicators */
+ if (str[0] == '}')
+ str++;
+ if (str[0] == ' ')
+ str++;
+ }
+ break;
+ case H5T_ARRAY:
+ {
+ int rank = 0;
+ hsize_t i, dims[H5S_MAX_RANK], total_elmts;
+
+ /* remove array indicators */
+ if (str[0] == '[')
+ str++;
+ if (str[0] == ' ')
+ str++;
+
+ mtid = H5Tget_super(tid);
+ size = H5Tget_size(mtid);
+ rank = H5Tget_array_ndims(tid);
+
+ H5Tget_array_dims2(tid, dims);
+
+ total_elmts = 1;
+ for (i = 0; i < rank; i++)
+ total_elmts *= dims[i];
+
+ ptr->p = HDcalloc((size_t)total_elmts, size);
+ ptr->len = total_elmts;
+ h5str_convert(&str, container, mtid, ptr, 0, expand_data);
+ H5Tclose(mtid);
+ /* remove array indicators */
+ if (str[0] == ' ')
+ str++;
+ if (str[0] == ']')
+ str++;
+ if (str[0] == ' ')
+ str++;
+ }
+ break;
+ default:
+ ptr->len = size;
+ ptr->p = HDcalloc(1, size);
+ this_strlen = h5str_convert(&str, container, tid, ptr, 0, expand_data);
+ break;
+ } /* end switch */
+
+ return this_strlen;
+} /* end h5str_vlconvert */
+
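+/* Editor's illustrative sketch (not part of the patch): for the simple
+ * (default) case the routine above produces an hvl_t whose buffer holds
+ * the parsed elements -- the payload H5Dwrite expects for a vlen type.
+ * Filled by hand here for a vlen of int; the count is hypothetical.
+ */
+#if 0 /* example only */
+static void
+example_fill_vl_int(hvl_t *v)
+{
+    int *elems = (int *)HDcalloc(3, sizeof(int));
+
+    elems[0] = 1; elems[1] = 2; elems[2] = 3;
+    v->p   = elems; /* element buffer */
+    v->len = 3;     /* element count  */
+}
+#endif
+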
+/** convert a string representation into a data point.
+    Return Value:
+        On success, the number of characters processed is returned.
+        On error, a negative number is returned.
+ */
+size_t
+h5str_convert
+ (char **str, hid_t container, hid_t tid, hvl_t *ptr, int ptroffset, int expand_data)
+{
+ unsigned char tmp_uchar = 0;
+ char tmp_char = 0;
+ unsigned short tmp_ushort = 0;
+ short tmp_short = 0;
+ unsigned int tmp_uint = 0;
+ int tmp_int = 0;
+ unsigned long tmp_ulong = 0;
+ long tmp_long = 0;
+ unsigned long long tmp_ullong = 0;
+ long long tmp_llong = 0;
+ float tmp_float = 0.0;
+ double tmp_double = 0.0;
+ long double tmp_ldouble = 0.0;
+ static char fmt_llong[8], fmt_ullong[8];
+ const char delimiter[] = " ,}]";
+
+ char *token;
+ hid_t mtid = -1;
+ size_t offset;
+ size_t nll;
+ char *this_str = *str;
+ size_t this_strlen;
+ int n;
+ char *cptr = ((char*) ((hvl_t *) ptr)->p) + ptroffset;
+ unsigned char *ucptr = ((unsigned char*) ((hvl_t *) ptr)->p) + ptroffset;
+ H5T_class_t tclass = H5Tget_class(tid);
+ size_t size = H5Tget_size(tid);
+ H5T_sign_t nsign = H5Tget_sign(tid);
+ int bdata_print = 0;
+
+ if (!str || !ptr)
+ return 0;
+
+ /* Build default formats for long long types */
+ if (!fmt_llong[0]) {
+ sprintf(fmt_llong, "%%%sd", H5_PRINTF_LL_WIDTH);
+ sprintf(fmt_ullong, "%%%su", H5_PRINTF_LL_WIDTH);
+ } /* end if */
+
+ this_strlen = HDstrlen(this_str);
+
+ switch (tclass) {
+ case H5T_FLOAT:
+ token = HDstrtok (this_str, delimiter);
+ if (sizeof(float) == size) {
+ /* if (H5Tequal(tid, H5T_NATIVE_FLOAT)) */
+ tmp_float = 0;
+ sscanf(token, "%f", &tmp_float);
+ HDmemcpy(cptr, &tmp_float, sizeof(float));
+ }
+ else if (sizeof(double) == size) {
+ /* if (H5Tequal(tid, H5T_NATIVE_DOUBLE)) */
+ tmp_double = 0;
+            sscanf(token, "%lf", &tmp_double);
+ HDmemcpy(cptr, &tmp_double, sizeof(double));
+ }
+#if H5_SIZEOF_LONG_DOUBLE !=0
+ else if (sizeof(long double) == size) {
+ /* if (H5Tequal(tid, H5T_NATIVE_LDOUBLE)) */
+ tmp_ldouble = 0;
+ sscanf(token, "%Lf", &tmp_ldouble);
+ HDmemcpy(cptr, &tmp_ldouble, sizeof(long double));
+ }
+#endif
+ break;
+ case H5T_STRING:
+ {
+ if (this_strlen > 0) {
+ HDstrncpy(cptr, this_str, size);
+ }
+ else {
+ cptr = NULL;
+ }
+ }
+ break;
+ case H5T_INTEGER:
+ token = HDstrtok (this_str, delimiter);
+ if (sizeof(char) == size) {
+ if(H5T_SGN_NONE == nsign) {
+ /* if (H5Tequal(tid, H5T_NATIVE_UCHAR)) */
+ tmp_uchar = 0;
+                    sscanf(token, "%hhu", &tmp_uchar);
+ HDmemcpy(cptr, &tmp_uchar, sizeof(unsigned char));
+ }
+ else {
+ /* if (H5Tequal(tid, H5T_NATIVE_SCHAR)) */
+ tmp_char = 0;
+                    sscanf(token, "%hhd", &tmp_char);
+ HDmemcpy(cptr, &tmp_char, sizeof(char));
+ }
+ }
+ else if (sizeof(int) == size) {
+ if(H5T_SGN_NONE == nsign) {
+ /* if (H5Tequal(tid, H5T_NATIVE_UINT)) */
+ tmp_uint = 0;
+ sscanf(token, "%u", &tmp_uint);
+ HDmemcpy(cptr, &tmp_uint, sizeof(unsigned int));
+ }
+ else {
+ /* if (H5Tequal(tid, H5T_NATIVE_INT)) */
+ tmp_int = 0;
+ sscanf(token, "%d", &tmp_int);
+ HDmemcpy(cptr, &tmp_int, sizeof(int));
+ }
+ }
+ else if (sizeof(short) == size) {
+ if(H5T_SGN_NONE == nsign) {
+ /* if (H5Tequal(tid, H5T_NATIVE_USHORT)) */
+ tmp_ushort = 0;
+                    sscanf(token, "%hu", &tmp_ushort);
+                    HDmemcpy(cptr, &tmp_ushort, sizeof(unsigned short));
+ }
+ else {
+ /* if (H5Tequal(tid, H5T_NATIVE_SHORT)) */
+ tmp_short = 0;
+                    sscanf(token, "%hd", &tmp_short);
+                    HDmemcpy(cptr, &tmp_short, sizeof(short));
+ }
+ }
+ else if (sizeof(long) == size) {
+ if(H5T_SGN_NONE == nsign) {
+ /* if (H5Tequal(tid, H5T_NATIVE_ULONG)) */
+ tmp_ulong = 0;
+ sscanf(token, "%lu", &tmp_ulong);
+ HDmemcpy(cptr, &tmp_ulong, sizeof(unsigned long));
+ }
+ else {
+ /* if (H5Tequal(tid, H5T_NATIVE_LONG)) */
+ tmp_long = 0;
+ sscanf(token, "%ld", &tmp_long);
+ HDmemcpy(cptr, &tmp_long, sizeof(long));
+ }
+ }
+ else if (sizeof(long long) == size) {
+ if(H5T_SGN_NONE == nsign) {
+ /* if (H5Tequal(tid, H5T_NATIVE_ULLONG)) */
+ tmp_ullong = 0;
+ sscanf(token, fmt_ullong, &tmp_ullong);
+ HDmemcpy(cptr, &tmp_ullong, sizeof(unsigned long long));
+ }
+ else {
+ /* if (H5Tequal(tid, H5T_NATIVE_LLONG)) */
+ tmp_llong = 0;
+ sscanf(token, fmt_llong, &tmp_llong);
+ HDmemcpy(cptr, &tmp_llong, sizeof(long long));
+ }
+ }
+ break;
+ case H5T_COMPOUND:
+ {
+ unsigned i;
+ n = H5Tget_nmembers(tid);
+ /* remove compound indicators */
+ if ((*str)[0] == ' ')
+ (*str)++;
+ if ((*str)[0] == '{')
+ (*str)++;
+
+ for (i = 0; i < n; i++) {
+ offset = H5Tget_member_offset(tid, i);
+ mtid = H5Tget_member_type(tid, i);
+ h5str_convert(str, container, mtid, ptr, offset, expand_data);
+ /* remove compound indicators */
+ if ((*str)[0] == ',')
+ (*str)++;
+ if ((*str)[0] == ' ')
+ (*str)++;
+ H5Tclose(mtid);
+ }
+ /* remove compound indicators */
+ if ((*str)[0] == '}')
+ (*str)++;
+ if ((*str)[0] == ' ')
+ (*str)++;
+ }
+ break;
+ case H5T_ENUM:
+ {
+ char enum_name[1024];
+ void *value;
+ if (sizeof(char) == size) {
+ tmp_uchar = 0;
+ value = &tmp_uchar;
+ }
+ else if (sizeof(short) == size) {
+ tmp_ushort = 0;
+ value = &tmp_ushort;
+ }
+ else if (sizeof(long) == size) {
+ tmp_ulong = 0;
+ value = &tmp_ulong;
+ }
+ else if (sizeof(long long) == size) {
+ tmp_ullong = 0;
+ value = &tmp_ullong;
+ }
+ else {
+ tmp_uint = 0;
+ value = &tmp_uint;
+ }
+ token = HDstrtok (this_str, delimiter);
+ H5Tenum_valueof(tid, token, value);
+ HDmemcpy(ucptr, value, size);
+ }
+ break;
+ case H5T_REFERENCE:
+ /* TODO handle reference writing */
+ cptr = NULL;
+ break;
+ case H5T_ARRAY:
+ {
+ int rank = 0;
+ hsize_t i, dims[H5S_MAX_RANK], total_elmts;
+ /* remove array indicators */
+ if ((*str)[0] == '[')
+ (*str)++;
+ if ((*str)[0] == ' ')
+ (*str)++;
+
+ mtid = H5Tget_super(tid);
+ offset = H5Tget_size(mtid);
+ rank = H5Tget_array_ndims(tid);
+
+ H5Tget_array_dims2(tid, dims);
+
+ total_elmts = 1;
+ for (i = 0; i < rank; i++)
+ total_elmts *= dims[i];
+
+ cptr = HDcalloc((size_t)total_elmts, offset);
+ for (i = 0; i < total_elmts; i++) {
+ h5str_convert(str, container, mtid, cptr + (i*offset), offset, expand_data);
+ /* remove array indicators */
+ if ((*str)[0] == ',')
+ (*str)++;
+ if ((*str)[0] == ' ')
+ (*str)++;
+ }
+ H5Tclose(mtid);
+ /* remove array indicators */
+ if ((*str)[0] == ' ')
+ (*str)++;
+ if ((*str)[0] == ']')
+ (*str)++;
+ if ((*str)[0] == ' ')
+ (*str)++;
+ }
+ break;
+ case H5T_VLEN:
+ {
+ unsigned int i;
+ mtid = H5Tget_super(tid);
+ offset = H5Tget_size(mtid);
+
+ /* remove vlen indicators */
+ if ((*str)[0] == '{')
+ (*str)++;
+ cptr = HDcalloc(offset, sizeof(hvl_t));
+            for (i = 0; (i * offset) < size; i++) {
+ h5str_convert(str, container, mtid, cptr + (i*offset), offset, expand_data);
+ /* remove vlen indicators */
+ if ((*str)[0] == ',')
+ (*str)++;
+ if ((*str)[0] == ' ')
+ (*str)++;
+ if ((*str)[0] == '}')
+ break;
+ }
+ H5Tclose(mtid);
+ /* remove vlen indicators */
+ if ((*str)[0] == '}')
+ (*str)++;
+ }
+ break;
+
+ default:
+ {
+ /* All other types get copied raw */
+ HDmemcpy(ucptr, this_str, size);
+ }
+ break;
+ } /* end switch */
+
+ return this_strlen;
+} /* end h5str_convert */
+
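+/* Editor's illustrative sketch (not part of the patch): the token-parsing
+ * idiom used above.  The sscanf length modifier must match the destination
+ * exactly ("%hhd" for char, "%hd" for short), and the parsed value is then
+ * copied destination-first into the output buffer.
+ */
+#if 0 /* example only */
+static void
+example_parse_short(const char *token, void *out)
+{
+    short tmp = 0;
+
+    sscanf(token, "%hd", &tmp);         /* "%d" would overrun tmp */
+    HDmemcpy(out, &tmp, sizeof(short)); /* out is the data buffer */
+}
+#endif
+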
+/** print value of a vlen data point into string.
+ Return Value:
+ On success, the total number of characters printed is returned.
+ On error, a negative number is returned.
+ */
+size_t
+h5str_vlsprintf
+ (h5str_t *str, hid_t container, hid_t tid, hvl_t *ptr, int expand_data)
+{
+    hid_t mtid = -1;
+    size_t offset;
+    size_t this_strlen = 0;
+    int n;
+    H5T_class_t tclass = H5Tget_class(tid);
+    size_t size = H5Tget_size(tid);
+
+    if (!str || !ptr)
+        return 0;
+
+ switch (tclass) {
+ case H5T_COMPOUND:
+ {
+ unsigned i;
+ n = H5Tget_nmembers(tid);
+ h5str_append(str, " {");
+
+ for (i = 0; i < n; i++) {
+ offset = H5Tget_member_offset(tid, i);
+ mtid = H5Tget_member_type(tid, i);
+ h5str_sprintf(str, container, mtid, ((char *) (ptr->p)) + offset, ptr->len, expand_data);
+ if ((i + 1) < n)
+ h5str_append(str, ", ");
+ H5Tclose(mtid);
+ }
+ h5str_append(str, "} ");
+ }
+ break;
+ case H5T_ARRAY:
+ {
+ int rank = 0;
+ hsize_t i, dims[H5S_MAX_RANK], total_elmts;
+ h5str_append(str, "[ ");
+
+ mtid = H5Tget_super(tid);
+ size = H5Tget_size(mtid);
+ rank = H5Tget_array_ndims(tid);
+
+ H5Tget_array_dims2(tid, dims);
+
+ total_elmts = 1;
+ for (i = 0; i < rank; i++)
+ total_elmts *= dims[i];
+
+ h5str_sprintf(str, container, mtid, ((char *) (ptr->p)), ptr->len, expand_data);
+ H5Tclose(mtid);
+ h5str_append(str, " ] ");
+ }
+ break;
+ default:
+ this_strlen = h5str_sprintf(str, container, tid, ((char *) (ptr->p)), ptr->len, expand_data);
+ break;
+ } /* end switch */
+
+ return this_strlen;
+} /* end h5str_vlsprintf */
+
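+/* Editor's illustrative sketch (not part of the patch): the h5str lifecycle
+ * around h5str_vlsprintf, as used by the JNI read paths.
+ */
+#if 0 /* example only */
+static void
+example_print_vl(hid_t did, hid_t tid, hvl_t *row)
+{
+    h5str_t s;
+
+    HDmemset(&s, 0, sizeof(h5str_t));
+    h5str_new(&s, 1024); /* growable string buffer */
+    if (s.s) {
+        s.s[0] = '\0';
+        h5str_vlsprintf(&s, did, tid, row, 0);
+        /* ... hand s.s to NewStringUTF ... */
+        h5str_free(&s);
+    }
+}
+#endif
+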
/** print value of a data point into string.
Return Value:
On success, the total number of characters printed is returned.
@@ -159,7 +667,7 @@ h5str_append
*/
size_t
h5str_sprintf
- (h5str_t *str, hid_t container, hid_t tid, void *ptr, int expand_data)
+ (h5str_t *str, hid_t container, hid_t tid, void *ptr, int ptr_len, int expand_data)
{
unsigned char tmp_uchar = 0;
char tmp_char = 0;
@@ -182,9 +690,8 @@ h5str_sprintf
char *this_str;
size_t this_strlen;
int n;
- hvl_t *vlptr;
- char *cptr = (char*) ptr;
- unsigned char *ucptr = (unsigned char*) ptr;
+ char *cptr = (char*) (ptr);
+ unsigned char *ucptr = (unsigned char*) (ptr);
H5T_class_t tclass = H5Tget_class(tid);
size_t size = H5Tget_size(tid);
H5T_sign_t nsign = H5Tget_sign(tid);
@@ -206,20 +713,20 @@ h5str_sprintf
case H5T_FLOAT:
if (sizeof(float) == size) {
/* if (H5Tequal(tid, H5T_NATIVE_FLOAT)) */
- HDmemcpy(&tmp_float, ptr, sizeof(float));
+ HDmemcpy(&tmp_float, cptr, sizeof(float));
this_str = (char*)HDmalloc(25);
sprintf(this_str, "%g", tmp_float);
}
else if (sizeof(double) == size) {
/* if (H5Tequal(tid, H5T_NATIVE_DOUBLE)) */
- HDmemcpy(&tmp_double, ptr, sizeof(double));
+ HDmemcpy(&tmp_double, cptr, sizeof(double));
this_str = (char*)HDmalloc(25);
sprintf(this_str, "%g", tmp_double);
}
#if H5_SIZEOF_LONG_DOUBLE !=0
else if (sizeof(long double) == size) {
/* if (H5Tequal(tid, H5T_NATIVE_LDOUBLE)) */
- HDmemcpy(&tmp_ldouble, ptr, sizeof(long double));
+ HDmemcpy(&tmp_ldouble, cptr, sizeof(long double));
this_str = (char*)HDmalloc(27);
sprintf(this_str, "%Lf", tmp_ldouble);
}
@@ -256,13 +763,13 @@ h5str_sprintf
if (sizeof(char) == size) {
if(H5T_SGN_NONE == nsign) {
/* if (H5Tequal(tid, H5T_NATIVE_UCHAR)) */
- HDmemcpy(&tmp_uchar, ptr, sizeof(unsigned char));
+ HDmemcpy(&tmp_uchar, cptr, sizeof(unsigned char));
this_str = (char*)HDmalloc(7);
sprintf(this_str, "%u", tmp_uchar);
}
else {
/* if (H5Tequal(tid, H5T_NATIVE_SCHAR)) */
- HDmemcpy(&tmp_char, ptr, sizeof(char));
+ HDmemcpy(&tmp_char, cptr, sizeof(char));
this_str = (char*)HDmalloc(7);
sprintf(this_str, "%hhd", tmp_char);
}
@@ -270,13 +777,13 @@ h5str_sprintf
else if (sizeof(int) == size) {
if(H5T_SGN_NONE == nsign) {
/* if (H5Tequal(tid, H5T_NATIVE_UINT)) */
- HDmemcpy(&tmp_uint, ptr, sizeof(unsigned int));
+ HDmemcpy(&tmp_uint, cptr, sizeof(unsigned int));
this_str = (char*)HDmalloc(14);
sprintf(this_str, "%u", tmp_uint);
}
else {
/* if (H5Tequal(tid, H5T_NATIVE_INT)) */
- HDmemcpy(&tmp_int, ptr, sizeof(int));
+ HDmemcpy(&tmp_int, cptr, sizeof(int));
this_str = (char*)HDmalloc(14);
sprintf(this_str, "%d", tmp_int);
}
@@ -284,13 +791,13 @@ h5str_sprintf
else if (sizeof(short) == size) {
if(H5T_SGN_NONE == nsign) {
/* if (H5Tequal(tid, H5T_NATIVE_USHORT)) */
- HDmemcpy(&tmp_ushort, ptr, sizeof(unsigned short));
+ HDmemcpy(&tmp_ushort, cptr, sizeof(unsigned short));
this_str = (char*)HDmalloc(9);
sprintf(this_str, "%u", tmp_ushort);
}
else {
/* if (H5Tequal(tid, H5T_NATIVE_SHORT)) */
- HDmemcpy(&tmp_short, ptr, sizeof(short));
+ HDmemcpy(&tmp_short, cptr, sizeof(short));
this_str = (char*)HDmalloc(9);
sprintf(this_str, "%d", tmp_short);
}
@@ -298,13 +805,13 @@ h5str_sprintf
else if (sizeof(long) == size) {
if(H5T_SGN_NONE == nsign) {
/* if (H5Tequal(tid, H5T_NATIVE_ULONG)) */
- HDmemcpy(&tmp_ulong, ptr, sizeof(unsigned long));
+ HDmemcpy(&tmp_ulong, cptr, sizeof(unsigned long));
this_str = (char*)HDmalloc(23);
sprintf(this_str, "%lu", tmp_ulong);
}
else {
/* if (H5Tequal(tid, H5T_NATIVE_LONG)) */
- HDmemcpy(&tmp_long, ptr, sizeof(long));
+ HDmemcpy(&tmp_long, cptr, sizeof(long));
this_str = (char*)HDmalloc(23);
sprintf(this_str, "%ld", tmp_long);
}
@@ -312,13 +819,13 @@ h5str_sprintf
else if (sizeof(long long) == size) {
if(H5T_SGN_NONE == nsign) {
/* if (H5Tequal(tid, H5T_NATIVE_ULLONG)) */
- HDmemcpy(&tmp_ullong, ptr, sizeof(unsigned long long));
+ HDmemcpy(&tmp_ullong, cptr, sizeof(unsigned long long));
this_str = (char*)HDmalloc(25);
sprintf(this_str, fmt_ullong, tmp_ullong);
}
else {
/* if (H5Tequal(tid, H5T_NATIVE_LLONG)) */
- HDmemcpy(&tmp_llong, ptr, sizeof(long long));
+ HDmemcpy(&tmp_llong, cptr, sizeof(long long));
this_str = (char*)HDmalloc(25);
sprintf(this_str, fmt_llong, tmp_llong);
}
@@ -333,8 +840,8 @@ h5str_sprintf
for (i = 0; i < n; i++) {
offset = H5Tget_member_offset(tid, i);
mtid = H5Tget_member_type(tid, i);
- h5str_sprintf(str, container, mtid, cptr + offset, expand_data);
- if (i < n - 1)
+ h5str_sprintf(str, container, mtid, cptr + offset, ptr_len, expand_data);
+ if ((i + 1) < n)
h5str_append(str, ", ");
H5Tclose(mtid);
}
@@ -344,7 +851,7 @@ h5str_sprintf
case H5T_ENUM:
{
char enum_name[1024];
- if (H5Tenum_nameof(tid, ptr, enum_name, sizeof enum_name) >= 0) {
+ if (H5Tenum_nameof(tid, cptr, enum_name, sizeof enum_name) >= 0) {
h5str_append(str, enum_name);
}
else {
@@ -363,7 +870,7 @@ h5str_sprintf
}
break;
case H5T_REFERENCE:
- if (h5str_is_zero(ptr, size)) {
+ if (h5str_is_zero(cptr, size)) {
h5str_append(str, "NULL");
}
else {
@@ -379,9 +886,9 @@ h5str_sprintf
H5S_sel_type region_type;
/* get name of the dataset the region reference points to using H5Rget_name */
- region_obj = H5Rdereference2(container, H5P_DEFAULT, H5R_DATASET_REGION, ptr);
+ region_obj = H5Rdereference2(container, H5P_DEFAULT, H5R_DATASET_REGION, cptr);
if (region_obj >= 0) {
- region = H5Rget_region(container, H5R_DATASET_REGION, ptr);
+ region = H5Rget_region(container, H5R_DATASET_REGION, cptr);
if (region >= 0) {
if(expand_data) {
region_type = H5Sget_select_type(region);
@@ -393,7 +900,7 @@ h5str_sprintf
}
}
else {
- if(H5Rget_name(region_obj, H5R_DATASET_REGION, ptr, (char*)ref_name, 1024) >= 0) {
+ if(H5Rget_name(region_obj, H5R_DATASET_REGION, cptr, (char*)ref_name, 1024) >= 0) {
h5str_append(str, ref_name);
}
@@ -424,7 +931,7 @@ h5str_sprintf
hid_t obj;
this_str = (char*)HDmalloc(64);
- obj = H5Rdereference2(container, H5P_DEFAULT, H5R_OBJECT, ptr);
+ obj = H5Rdereference2(container, H5P_DEFAULT, H5R_OBJECT, cptr);
H5Oget_info2(obj, &oi, H5O_INFO_ALL);
/* Print object data and close object */
@@ -450,31 +957,30 @@ h5str_sprintf
total_elmts *= dims[i];
for (i = 0; i < total_elmts; i++) {
- h5str_sprintf(str, container, mtid, cptr + i * size, expand_data);
- if (i < total_elmts - 1)
+ h5str_sprintf(str, container, mtid, cptr + i * size, ptr_len, expand_data);
+ if ((i + 1) < total_elmts)
h5str_append(str, ", ");
}
H5Tclose(mtid);
- h5str_append(str, "] ");
+ h5str_append(str, " ] ");
}
break;
case H5T_VLEN:
- {
- unsigned int i;
- mtid = H5Tget_super(tid);
- size = H5Tget_size(mtid);
-
- vlptr = (hvl_t *) cptr;
-
- nll = vlptr->len;
- for (i = 0; i < (int)nll; i++) {
- h5str_sprintf(str, container, mtid, ((char *) (vlptr->p)) + i * size, expand_data);
- if (i < (int)nll - 1)
- h5str_append(str, ", ");
+ {
+ unsigned int i;
+ mtid = H5Tget_super(tid);
+ size = H5Tget_size(mtid);
+
+ h5str_append(str, "{");
+ for (i = 0; i < (int)ptr_len; i++) {
+ h5str_sprintf(str, container, mtid, cptr + i * size, ptr_len, expand_data);
+ if ((i + 1) < (int)ptr_len)
+ h5str_append(str, ", ");
+ }
+ H5Tclose(mtid);
+ h5str_append(str, "}");
}
- H5Tclose(mtid);
- }
- break;
+ break;
default:
{
@@ -563,7 +1069,7 @@ h5str_print_region_data_blocks
if(H5Dread(region_id, type_id, mem_space, sid1, H5P_DEFAULT, region_buf) >= 0) {
if(H5Sget_simple_extent_dims(mem_space, total_size, NULL) >= 0) {
for (numindex = 0; numindex < numelem; numindex++) {
- h5str_sprintf(str, region_id, type_id, ((char*)region_buf + numindex * type_size), 1);
+ h5str_sprintf(str, region_id, type_id, ((char*)region_buf + numindex * type_size), 0, 1);
if (numindex + 1 < numelem)
h5str_append(str, ", ");
@@ -763,7 +1269,7 @@ h5str_print_region_data_points
for (jndx = 0; jndx < npoints; jndx++) {
if(H5Sget_simple_extent_dims(mem_space, total_size, NULL) >= 0) {
- h5str_sprintf(str, region_id, type_id, ((char*)region_buf + jndx * type_size), 1);
+ h5str_sprintf(str, region_id, type_id, ((char*)region_buf + jndx * type_size), 0, 1);
if (jndx + 1 < npoints)
h5str_append(str, ", ");
@@ -1883,7 +2389,7 @@ h5tools_dump_simple_data
/* Render the data element*/
h5str_new(&buffer, 32 * size);
- bytes_in = h5str_sprintf(&buffer, container, type, memref, 1);
+ bytes_in = h5str_sprintf(&buffer, container, type, memref, 0, 1);
if(i > 0) {
HDfprintf(stream, ", ");
if (line_count >= H5TOOLS_TEXT_BLOCK) {
@@ -1909,116 +2415,6 @@ h5tools_dump_simple_data
/*
* Class: hdf_hdf5lib_H5
- * Method: H5AwriteVL
- * Signature: (JJ[Ljava/lang/String;)I
- */
-JNIEXPORT jint JNICALL
-Java_hdf_hdf5lib_H5_H5AwriteVL
- (JNIEnv *env, jclass clss, jlong attr_id, jlong mem_type_id, jobjectArray buf)
-{
- herr_t status = -1;
- char **wdata;
- jsize size;
- jint i;
-
- size = ENVPTR->GetArrayLength(ENVPAR (jarray) buf);
-
- wdata = (char**)HDcalloc((size_t)size + 1, sizeof(char*));
- if (!wdata) {
- h5JNIFatalError(env, "H5AwriteVL: cannot allocate buffer");
- } /* end if */
- else {
- HDmemset(wdata, 0, (size_t)size * sizeof(char*));
- for (i = 0; i < size; ++i) {
- jstring obj = (jstring) ENVPTR->GetObjectArrayElement(ENVPAR (jobjectArray) buf, i);
- if (obj != 0) {
- jsize length = ENVPTR->GetStringUTFLength(ENVPAR obj);
- const char *utf8 = ENVPTR->GetStringUTFChars(ENVPAR obj, 0);
-
- if (utf8) {
- wdata[i] = (char*)HDmalloc((size_t)length + 1);
- if (wdata[i]) {
- HDmemset(wdata[i], 0, ((size_t)length + 1));
- HDstrncpy(wdata[i], utf8, (size_t)length);
- } /* end if */
- } /* end if */
-
- ENVPTR->ReleaseStringUTFChars(ENVPAR obj, utf8);
- ENVPTR->DeleteLocalRef(ENVPAR obj);
- } /* end if */
- } /* end for (i = 0; i < size; ++i) */
-
- status = H5Awrite((hid_t)attr_id, (hid_t)mem_type_id, wdata);
-
- for (i = 0; i < size; i++) {
- if(wdata[i]) {
- HDfree(wdata[i]);
- } /* end if */
- } /* end for */
- HDfree(wdata);
-
- if (status < 0)
- h5libraryError(env);
- } /* end else */
-
- return (jint)status;
-} /* end Java_hdf_hdf5lib_H5_H5AwriteVL */
-
-/*
- * Class: hdf_hdf5lib_H5
- * Method: H5AreadVL
- * Signature: (JJ[Ljava/lang/String;)I
- */
-JNIEXPORT jint JNICALL
-Java_hdf_hdf5lib_H5_H5AreadVL
- (JNIEnv *env, jclass clss, jlong attr_id, jlong mem_type_id, jobjectArray buf)
-{
- herr_t status = -1;
- jstring jstr;
- char **strs;
- int i, n;
- hid_t sid;
- hsize_t dims[H5S_MAX_RANK];
-
- n = ENVPTR->GetArrayLength(ENVPAR buf);
-
- strs =(char **)HDmalloc((size_t)n * sizeof(char *));
- if (strs == NULL) {
- h5JNIFatalError( env, "H5AreadVL: failed to allocate buff for read variable length strings");
- } /* end if */
- else {
- status = H5Aread(attr_id, mem_type_id, strs);
- if (status < 0) {
- dims[0] = (hsize_t)n;
- sid = H5Screate_simple(1, dims, NULL);
- H5Dvlen_reclaim(mem_type_id, sid, H5P_DEFAULT, strs);
- H5Sclose(sid);
- HDfree(strs);
- h5JNIFatalError(env, "H5AreadVL: failed to read variable length strings");
- } /* end if */
- else {
- for (i=0; i<n; i++) {
- jstr = ENVPTR->NewStringUTF(ENVPAR strs[i]);
- ENVPTR->SetObjectArrayElement(ENVPAR buf, i, jstr);
- HDfree (strs[i]);
- } /* end for */
-
- /*
- for repeatedly reading an attribute with a large number of strs (e.g., 1,000,000 strings,
- H5Dvlen_reclaim() may crash on Windows because the Java GC will not be able to collect
- free space in time. Instead, use "free(strs[i])" to free individual strings
- after it is done.
- H5Dvlen_reclaim(tid, sid, H5P_DEFAULT, strs);
- */
-
- HDfree(strs);
- } /* end else */
- } /* end else */
- return status;
-} /* end Java_hdf_hdf5lib_H5_H5AreadVL */
-
-/*
- * Class: hdf_hdf5lib_H5
* Method: H5AreadComplex
* Signature: (JJ[Ljava/lang/String;)I
*/
@@ -2059,7 +2455,7 @@ Java_hdf_hdf5lib_H5_H5AreadComplex
else {
for (i = 0; i < n; i++) {
h5str.s[0] = '\0';
- h5str_sprintf(&h5str, attr_id, mem_type_id, rdata + ((size_t)i * size), 0);
+ h5str_sprintf(&h5str, attr_id, mem_type_id, rdata + ((size_t)i * size), 0, 0);
jstr = ENVPTR->NewStringUTF(ENVPAR h5str.s);
ENVPTR->SetObjectArrayElement(ENVPAR buf, i, jstr);
} /* end for */
diff --git a/java/src/jni/h5util.h b/java/src/jni/h5util.h
index 434a107..23bc424 100644
--- a/java/src/jni/h5util.h
+++ b/java/src/jni/h5util.h
@@ -39,7 +39,10 @@ extern void h5str_new (h5str_t *str, size_t len);
extern void h5str_free (h5str_t *str);
extern void h5str_resize (h5str_t *str, size_t new_len);
extern char* h5str_append (h5str_t *str, const char* cstr);
-extern size_t h5str_sprintf(h5str_t *str, hid_t container, hid_t tid, void *buf, int expand_data);
+extern size_t h5str_vlsprintf(h5str_t *str, hid_t container, hid_t tid, hvl_t *buf, int expand_data);
+extern size_t h5str_sprintf(h5str_t *str, hid_t container, hid_t tid, void *buf, int ptrlen, int expand_data);
+extern size_t h5str_vlconvert(char *str, hid_t container, hid_t tid, hvl_t *buf, int expand_data);
+extern size_t h5str_convert(char **str, hid_t container, hid_t tid, hvl_t *buf, int ptroffset, int expand_data);
extern void h5str_array_free(char **strs, size_t len);
extern int h5str_dump_simple_dset(FILE *stream, hid_t dset, int binary_order);
extern int h5str_dump_region_blocks_data(h5str_t *str, hid_t region, hid_t region_obj);
@@ -47,24 +50,6 @@ extern int h5str_dump_region_points_data(h5str_t *str, hid_t region, hid_t r
/*
* Class: hdf_hdf5lib_H5
- * Method: H5AwriteVL
- * Signature: (JJ[Ljava/lang/String;)I
- */
-JNIEXPORT jint JNICALL
-Java_hdf_hdf5lib_H5_H5AwriteVL
- (JNIEnv *, jclass, jlong, jlong, jobjectArray);
-
-/*
- * Class: hdf_hdf5lib_H5
- * Method: H5AreadVL
- * Signature: (JJ[Ljava/lang/String;)I
- */
-JNIEXPORT jint JNICALL
-Java_hdf_hdf5lib_H5_H5AreadVL
- (JNIEnv *, jclass, jlong, jlong, jobjectArray);
-
-/*
- * Class: hdf_hdf5lib_H5
* Method: H5AreadComplex
* Signature: (JJ[Ljava/lang/String;)I
*/
diff --git a/java/test/Makefile.am b/java/test/Makefile.am
index 65ff43f..6635ef7 100644
--- a/java/test/Makefile.am
+++ b/java/test/Makefile.am
@@ -28,7 +28,7 @@ classes:
pkgpath = test
hdfjarfile = jar$(PACKAGE_TARNAME)-$(PACKAGE_VERSION).jar
-CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/junit.jar:$(top_srcdir)/java/lib/hamcrest-core.jar:$(top_srcdir)/java/lib/slf4j-api-1.7.5.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.5.jar:$$CLASSPATH
+CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/junit.jar:$(top_srcdir)/java/lib/hamcrest-core.jar:$(top_srcdir)/java/lib/slf4j-api-1.7.25.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.25.jar:$$CLASSPATH
jarfile = jar$(PACKAGE_TARNAME)test.jar
diff --git a/java/test/junit.sh.in b/java/test/junit.sh.in
index 07c3242..5d69bee 100644
--- a/java/test/junit.sh.in
+++ b/java/test/junit.sh.in
@@ -62,8 +62,8 @@ $top_builddir/java/src/jni/.libs/libhdf5_java.*
LIST_JAR_FILES="
$HDFLIB_HOME/hamcrest-core.jar
$HDFLIB_HOME/junit.jar
-$HDFLIB_HOME/slf4j-api-1.7.5.jar
-$HDFLIB_HOME/ext/slf4j-simple-1.7.5.jar
+$HDFLIB_HOME/slf4j-api-1.7.25.jar
+$HDFLIB_HOME/ext/slf4j-simple-1.7.25.jar
$top_builddir/java/src/$JARFILE
"
LIST_DATA_FILES="
@@ -249,7 +249,7 @@ JAVAEXEFLAGS=@H5_JAVAFLAGS@
COPY_LIBFILES_TO_BLDLIBDIR
COPY_DATAFILES_TO_BLDDIR
-CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/junit.jar:"$BLDLIBDIR"/hamcrest-core.jar:"$BLDLIBDIR"/slf4j-api-1.7.5.jar:"$BLDLIBDIR"/slf4j-simple-1.7.5.jar:"$TESTJARFILE""
+CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/junit.jar:"$BLDLIBDIR"/hamcrest-core.jar:"$BLDLIBDIR"/slf4j-api-1.7.25.jar:"$BLDLIBDIR"/slf4j-simple-1.7.25.jar:"$TESTJARFILE""
TEST=/usr/bin/test
if [ ! -x /usr/bin/test ]
@@ -309,7 +309,9 @@ if diff JUnit-TestH5.out JUnit-TestH5.txt > /dev/null; then
echo " PASSED JUnit-TestH5"
else
echo "**FAILED** JUnit-TestH5"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5.txt JUnit-TestH5.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Eparams"
@@ -328,7 +330,9 @@ if diff JUnit-TestH5Eparams.out JUnit-TestH5Eparams.txt > /dev/null; then
echo " PASSED JUnit-TestH5Eparams"
else
echo "**FAILED** JUnit-TestH5Eparams"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Eparams.txt JUnit-TestH5Eparams.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Eregister"
@@ -347,7 +351,9 @@ if diff JUnit-TestH5Eregister.out JUnit-TestH5Eregister.txt > /dev/null; then
echo " PASSED JUnit-TestH5Eregister"
else
echo "**FAILED** JUnit-TestH5Eregister"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Eregister.txt JUnit-TestH5Eregister.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Fparams"
@@ -366,7 +372,9 @@ if diff JUnit-TestH5Fparams.out JUnit-TestH5Fparams.txt > /dev/null; then
echo " PASSED JUnit-TestH5Fparams"
else
echo "**FAILED** JUnit-TestH5Fparams"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Fparams.txt JUnit-TestH5Fparams.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Fbasic"
@@ -385,7 +393,9 @@ if diff JUnit-TestH5Fbasic.out JUnit-TestH5Fbasic.txt > /dev/null; then
echo " PASSED JUnit-TestH5Fbasic"
else
echo "**FAILED** JUnit-TestH5Fbasic"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Fbasic.txt JUnit-TestH5Fbasic.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5F"
@@ -404,7 +414,9 @@ if diff JUnit-TestH5F.out JUnit-TestH5F.txt > /dev/null; then
echo " PASSED JUnit-TestH5F"
else
echo "**FAILED** JUnit-TestH5F"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5F.txt JUnit-TestH5F.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Fswmr"
@@ -423,7 +435,9 @@ if diff JUnit-TestH5Fswmr.out JUnit-TestH5Fswmr.txt > /dev/null; then
echo " PASSED JUnit-TestH5Fswmr"
else
echo "**FAILED** JUnit-TestH5Fswmr"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Fswmr.txt JUnit-TestH5Fswmr.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Gbasic"
@@ -442,7 +456,9 @@ if diff JUnit-TestH5Gbasic.out JUnit-TestH5Gbasic.txt > /dev/null; then
echo " PASSED JUnit-TestH5Gbasic"
else
echo "**FAILED** JUnit-TestH5Gbasic"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Gbasic.txt JUnit-TestH5Gbasic.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5G"
@@ -461,7 +477,9 @@ if diff JUnit-TestH5G.out JUnit-TestH5G.txt > /dev/null; then
echo " PASSED JUnit-TestH5G"
else
echo "**FAILED** JUnit-TestH5G"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5G.txt JUnit-TestH5G.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Sbasic"
@@ -480,7 +498,9 @@ if diff JUnit-TestH5Sbasic.out JUnit-TestH5Sbasic.txt > /dev/null; then
echo " PASSED JUnit-TestH5Sbasic"
else
echo "**FAILED** JUnit-TestH5Sbasic"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Sbasic.txt JUnit-TestH5Sbasic.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5S"
@@ -499,7 +519,9 @@ if diff JUnit-TestH5S.out JUnit-TestH5S.txt > /dev/null; then
echo " PASSED JUnit-TestH5S"
else
echo "**FAILED** JUnit-TestH5S"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5S.txt JUnit-TestH5S.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Tparams"
@@ -518,7 +540,9 @@ if diff JUnit-TestH5Tparams.out JUnit-TestH5Tparams.txt > /dev/null; then
echo " PASSED JUnit-TestH5Tparams"
else
echo "**FAILED** JUnit-TestH5Tparams"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Tparams.txt JUnit-TestH5Tparams.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Tbasic"
@@ -537,7 +561,9 @@ if diff JUnit-TestH5Tbasic.out JUnit-TestH5Tbasic.txt > /dev/null; then
echo " PASSED JUnit-TestH5Tbasic"
else
echo "**FAILED** JUnit-TestH5Tbasic"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Tbasic.txt JUnit-TestH5Tbasic.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5T"
@@ -556,7 +582,9 @@ if diff JUnit-TestH5T.out JUnit-TestH5T.txt > /dev/null; then
echo " PASSED JUnit-TestH5T"
else
echo "**FAILED** JUnit-TestH5T"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5T.txt JUnit-TestH5T.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Dparams"
@@ -575,7 +603,9 @@ if diff JUnit-TestH5Dparams.out JUnit-TestH5Dparams.txt > /dev/null; then
echo " PASSED JUnit-TestH5Dparams"
else
echo "**FAILED** JUnit-TestH5Dparams"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Dparams.txt JUnit-TestH5Dparams.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5D"
@@ -594,7 +624,9 @@ if diff JUnit-TestH5D.out JUnit-TestH5D.txt > /dev/null; then
echo " PASSED JUnit-TestH5D"
else
echo "**FAILED** JUnit-TestH5D"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5D.txt JUnit-TestH5D.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Dplist"
@@ -613,7 +645,9 @@ if diff JUnit-TestH5Dplist.out JUnit-TestH5Dplist.txt > /dev/null; then
echo " PASSED JUnit-TestH5Dplist"
else
echo "**FAILED** JUnit-TestH5Dplist"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Dplist.txt JUnit-TestH5Dplist.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Lparams"
@@ -632,7 +666,9 @@ if diff JUnit-TestH5Lparams.out JUnit-TestH5Lparams.txt > /dev/null; then
echo " PASSED JUnit-TestH5Lparams"
else
echo "**FAILED** JUnit-TestH5Lparams"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Lparams.txt JUnit-TestH5Lparams.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Lbasic"
@@ -651,7 +687,9 @@ if diff JUnit-TestH5Lbasic.out JUnit-TestH5Lbasic.txt > /dev/null; then
echo " PASSED JUnit-TestH5Lbasic"
else
echo "**FAILED** JUnit-TestH5Lbasic"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Lbasic.txt JUnit-TestH5Lbasic.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Lcreate"
@@ -670,7 +708,9 @@ if diff JUnit-TestH5Lcreate.out JUnit-TestH5Lcreate.txt > /dev/null; then
echo " PASSED JUnit-TestH5Lcreate"
else
echo "**FAILED** JUnit-TestH5Lcreate"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Lcreate.txt JUnit-TestH5Lcreate.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5R"
@@ -689,7 +729,9 @@ if diff JUnit-TestH5R.out JUnit-TestH5R.txt > /dev/null; then
echo " PASSED JUnit-TestH5R"
else
echo "**FAILED** JUnit-TestH5R"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5R.txt JUnit-TestH5R.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5P"
@@ -708,7 +750,9 @@ if diff JUnit-TestH5P.out JUnit-TestH5P.txt > /dev/null; then
echo " PASSED JUnit-TestH5P"
else
echo "**FAILED** JUnit-TestH5P"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5P.txt JUnit-TestH5P.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5PData"
@@ -727,7 +771,9 @@ if diff JUnit-TestH5PData.out JUnit-TestH5PData.txt > /dev/null; then
echo " PASSED JUnit-TestH5PData"
else
echo "**FAILED** JUnit-TestH5PData"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5PData.txt JUnit-TestH5PData.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Pfapl"
@@ -746,7 +792,9 @@ if diff JUnit-TestH5Pfapl.out JUnit-TestH5Pfapl.txt > /dev/null; then
echo " PASSED JUnit-TestH5Pfapl"
else
echo "**FAILED** JUnit-TestH5Pfapl"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Pfapl.txt JUnit-TestH5Pfapl.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Pvirtual"
@@ -765,7 +813,9 @@ if diff JUnit-TestH5Pvirtual.out JUnit-TestH5Pvirtual.txt > /dev/null; then
echo " PASSED JUnit-TestH5Pvirtual"
else
echo "**FAILED** JUnit-TestH5Pvirtual"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Pvirtual.txt JUnit-TestH5Pvirtual.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Plist"
@@ -784,7 +834,9 @@ if diff JUnit-TestH5Plist.out JUnit-TestH5Plist.txt > /dev/null; then
echo " PASSED JUnit-TestH5Plist"
else
echo "**FAILED** JUnit-TestH5Plist"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Plist.txt JUnit-TestH5Plist.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5A"
@@ -803,7 +855,9 @@ if diff JUnit-TestH5A.out JUnit-TestH5A.txt > /dev/null; then
echo " PASSED JUnit-TestH5A"
else
echo "**FAILED** JUnit-TestH5A"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5A.txt JUnit-TestH5A.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Oparams"
@@ -822,7 +876,9 @@ if diff JUnit-TestH5Oparams.out JUnit-TestH5Oparams.txt > /dev/null; then
echo " PASSED JUnit-TestH5Oparams"
else
echo "**FAILED** JUnit-TestH5Oparams"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Oparams.txt JUnit-TestH5Oparams.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Obasic"
@@ -841,7 +897,9 @@ if diff JUnit-TestH5Obasic.out JUnit-TestH5Obasic.txt > /dev/null; then
echo " PASSED JUnit-TestH5Obasic"
else
echo "**FAILED** JUnit-TestH5Obasic"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Obasic.txt JUnit-TestH5Obasic.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Ocreate"
@@ -860,7 +918,9 @@ if diff JUnit-TestH5Ocreate.out JUnit-TestH5Ocreate.txt > /dev/null; then
echo " PASSED JUnit-TestH5Ocreate"
else
echo "**FAILED** JUnit-TestH5Ocreate"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Ocreate.txt JUnit-TestH5Ocreate.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Ocopy"
@@ -879,7 +939,9 @@ if diff JUnit-TestH5Ocopy.out JUnit-TestH5Ocopy.txt > /dev/null; then
echo " PASSED JUnit-TestH5Ocopy"
else
echo "**FAILED** JUnit-TestH5Ocopy"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Ocopy.txt JUnit-TestH5Ocopy.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5PL"
@@ -898,7 +960,9 @@ if diff JUnit-TestH5PL.out JUnit-TestH5PL.txt > /dev/null; then
echo " PASSED JUnit-TestH5PL"
else
echo "**FAILED** JUnit-TestH5PL"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5PL.txt JUnit-TestH5PL.out |sed 's/^/ /'
fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Z"
@@ -917,7 +981,9 @@ if diff JUnit-TestH5Z.out JUnit-TestH5Z.txt > /dev/null; then
echo " PASSED JUnit-TestH5Z"
else
echo "**FAILED** JUnit-TestH5Z"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Z.txt JUnit-TestH5Z.out |sed 's/^/ /'
fi
if test "X-$BUILD_MODE" = "X-production" ; then
@@ -938,7 +1004,9 @@ if test "X-$BUILD_MODE" = "X-production" ; then
echo " PASSED JUnit-TestH5E"
else
echo "**FAILED** JUnit-TestH5E"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5E.txt JUnit-TestH5E.out |sed 's/^/ /'
fi
fi
@@ -959,7 +1027,9 @@ if test "X-$BUILD_MODE" = "X-production" ; then
echo " PASSED JUnit-TestH5Edefault"
else
echo "**FAILED** JUnit-TestH5Edefault"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Edefault.txt JUnit-TestH5Edefault.out |sed 's/^/ /'
fi
fi
fi
@@ -980,7 +1050,9 @@ if test $USE_FILTER_SZIP = "yes"; then
echo " PASSED JUnit-TestH5Giterate"
else
echo "**FAILED** JUnit-TestH5Giterate"
+ echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5Giterate.txt JUnit-TestH5Giterate.out |sed 's/^/ /'
fi
fi
diff --git a/release_docs/INSTALL_CMake.txt b/release_docs/INSTALL_CMake.txt
index a2d209a..a5864be 100644
--- a/release_docs/INSTALL_CMake.txt
+++ b/release_docs/INSTALL_CMake.txt
@@ -12,7 +12,6 @@ Section IV: Further considerations
Section V: Options for building HDF5 Libraries with CMake command line
Section VI: CMake option defaults for HDF5
Section VII: User Defined Options for HDF5 Libraries with CMake
-Section VIII: Options for platform configuration files
************************************************************************
@@ -35,7 +34,8 @@ CMake version
Note:
To change the install prefix from the platform defaults initialize
- the CMake variable, CMAKE_INSTALL_PREFIX.
+      the CMake variable CMAKE_INSTALL_PREFIX. Users of the supplied build
+      scripts should use the INSTALLDIR option instead.
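+      For example (the paths here are illustrative), a direct CMake
+      configure could use:
+          cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX=/usr/local/hdf5 ../hdf5-source
+      whereas a build-script run passes ",INSTALLDIR=<dir>" on the ctest
+      command line, as shown in the examples below.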
========================================================================
@@ -89,14 +89,14 @@ To build HDF5 with the SZIP and ZLIB external libraries you will need to:
5. From the "myhdfstuff" directory execute the CTest Script with the
following options:
+ On 32-bit Windows with Visual Studio 2017, execute:
+ ctest -S HDF5config.cmake,BUILD_GENERATOR=VS2017 -C Release -VV -O hdf5.log
+ On 64-bit Windows with Visual Studio 2017, execute:
+ ctest -S HDF5config.cmake,BUILD_GENERATOR=VS201764 -C Release -VV -O hdf5.log
On 32-bit Windows with Visual Studio 2015, execute:
ctest -S HDF5config.cmake,BUILD_GENERATOR=VS2015 -C Release -VV -O hdf5.log
On 64-bit Windows with Visual Studio 2015, execute:
ctest -S HDF5config.cmake,BUILD_GENERATOR=VS201564 -C Release -VV -O hdf5.log
- On 32-bit Windows with Visual Studio 2013, execute:
- ctest -S HDF5config.cmake,BUILD_GENERATOR=VS2013 -C Release -VV -O hdf5.log
- On 64-bit Windows with Visual Studio 2013, execute:
- ctest -S HDF5config.cmake,BUILD_GENERATOR=VS201364 -C Release -VV -O hdf5.log
On Linux and Mac, execute:
ctest -S HDF5config.cmake,BUILD_GENERATOR=Unix -C Release -VV -O hdf5.log
The supplied build scripts are versions of the above.
@@ -167,6 +167,14 @@ To build HDF5 with the SZIP and ZLIB external libraries you will need to:
By default the installation will create the bin, include, lib and cmake
folders in the <install destination directory>/HDF_Group/HDF5/1.10."X"
+ The <install destination directory> depends on the build platform;
+ Windows will set the default to:
+ C:/Program Files/HDF_Group/HDF5/1.10."X"
+ Linux will set the default to:
+ "myhdfstuff/HDF_Group/HDF5/1.10."X"
+ The default can be changed by adding ",INSTALLDIR=<my new dir>" to the
+   "ctest -S HDF5config.cmake..." command. For example, on Linux:
+ ctest -S HDF5config.cmake,INSTALLDIR=/usr/local/myhdf5,BUILD_GENERATOR=Unix -C Release -VV -O hdf5.log
========================================================================
@@ -651,332 +659,6 @@ UserMacros.cmake file. Then enable the option to the CMake configuration,
build and test process.
========================================================================
-VIII. Options for Platform Configuration Files
-========================================================================
-
-Below is the HDF5config.cmake and HDF5options.cmake ctest scripts.
-Execute:
- ctest -S HDF5config.cmake,BUILD_GENERATOR=xxx -C Release -VV -O hdf5.log
-The same scripts can be used on Linux, Mac OSX or a Windows machine by
-adding an option (${CTEST_SCRIPT_ARG}) to the platform configuration script.
-
-
-#############################################################################################
-### ${CTEST_SCRIPT_ARG} is of the form OPTION=VALUE ###
-### BUILD_GENERATOR required [Unix, VS2017, VS201764, VS2015, VS201564, VS2013, VS201364] ###
-### ctest -S HDF5config.cmake,BUILD_GENERATOR=VS201764 -C Release -VV -O hdf5.log ###
-#############################################################################################
-
-cmake_minimum_required (VERSION 3.10)
-############################################################################
-# Usage:
-# ctest -S HDF5config.cmake,OPTION=VALUE -C Release -VV -O test.log
-# where valid options for OPTION are:
-# BUILD_GENERATOR - The cmake build generator:
-# Unix * Unix Makefiles
-# VS2017 * Visual Studio 15 2017
-# VS201764 * Visual Studio 15 2017 Win64
-# VS2015 * Visual Studio 14 2015
-# VS201564 * Visual Studio 14 2015 Win64
-# VS2013 * Visual Studio 12 2013
-# VS201364 * Visual Studio 12 2013 Win64
-#
-# INSTALLDIR - root folder where hdf5 is installed
-# CTEST_CONFIGURATION_TYPE - Release, Debug, etc
-# CTEST_SOURCE_NAME - source folder
-##############################################################################
-
-set (CTEST_SOURCE_VERSION "1.11.0")
-set (CTEST_SOURCE_VERSEXT "")
-
-##############################################################################
-# handle input parameters to script.
-#BUILD_GENERATOR - which CMake generator to use, required
-#INSTALLDIR - HDF5-1.10.0 root folder
-#CTEST_CONFIGURATION_TYPE - Release, Debug, RelWithDebInfo
-#CTEST_SOURCE_NAME - name of source folder; HDF5-1.10.0
-if (DEFINED CTEST_SCRIPT_ARG)
- # transform ctest script arguments of the form
- # script.ctest,var1=value1,var2=value2
- # to variables with the respective names set to the respective values
- string (REPLACE "," ";" script_args "${CTEST_SCRIPT_ARG}")
- foreach (current_var ${script_args})
- if ("${current_var}" MATCHES "^([^=]+)=(.+)$")
- set ("${CMAKE_MATCH_1}" "${CMAKE_MATCH_2}")
- endif ()
- endforeach ()
-endif ()
-
-# build generator must be defined
-if (NOT DEFINED BUILD_GENERATOR)
- message (FATAL_ERROR "BUILD_GENERATOR must be defined - Unix, VS2017, or VS201764, VS2015, VS201564, VS2013, VS201364")
-endif ()
-
-###################################################################
-### Following Line is one of [Release, RelWithDebInfo, Debug] #####
-set (CTEST_CONFIGURATION_TYPE "$ENV{CMAKE_CONFIG_TYPE}")
-###################################################################
-
-if (NOT DEFINED INSTALLDIR)
- if (WIN32)
- set (INSTALLDIR "C:/Program Files/HDF_Group/HDF5/${CTEST_SOURCE_VERSION}")
- else ()
- set (INSTALLDIR "${CTEST_SCRIPT_DIRECTORY}/HDF_Group/HDF5/${CTEST_SOURCE_VERSION}")
- endif ()
-endif ()
-if (NOT DEFINED CTEST_CONFIGURATION_TYPE)
- set (CTEST_CONFIGURATION_TYPE "Release")
-endif ()
-if (NOT DEFINED CTEST_SOURCE_NAME)
- set (CTEST_SOURCE_NAME "hdf5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}")
-endif ()
-if (NOT DEFINED STATIC_ONLY)
- set (STATICONLYLIBRARIES "YES")
-else ()
- set (STATICONLYLIBRARIES "NO")
-endif ()
-if (NOT DEFINED FORTRAN_LIBRARIES)
- set (FORTRANLIBRARIES "NO")
-else ()
- set(FORTRANLIBRARIES "YES")
-endif ()
-if (NOT DEFINED JAVA_LIBRARIES)
- set (JAVALIBRARIES "NO")
-else ()
- set (JAVALIBRARIES "YES")
-endif ()
-
-set (CTEST_BINARY_NAME "build")
-set (CTEST_DASHBOARD_ROOT "${CTEST_SCRIPT_DIRECTORY}")
-if (WIN32)
- set (CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_SOURCE_NAME}")
- set (CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_BINARY_NAME}")
-else ()
- set (CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}/${CTEST_SOURCE_NAME}")
- set (CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}/${CTEST_BINARY_NAME}")
-endif ()
-
-###################################################################
-######### Following describes compiler ############
-if (WIN32)
- set (SITE_OS_NAME "Windows")
- set (SITE_OS_VERSION "WIN7")
- if (${BUILD_GENERATOR} STREQUAL "VS201764")
- set (CTEST_CMAKE_GENERATOR "Visual Studio 15 2017 Win64")
- set (SITE_OS_BITS "64")
- set (SITE_COMPILER_NAME "vs2017")
- set (SITE_COMPILER_VERSION "15")
- elseif (${BUILD_GENERATOR} STREQUAL "VS2017")
- set (CTEST_CMAKE_GENERATOR "Visual Studio 15 2017")
- set (SITE_OS_BITS "32")
- set (SITE_COMPILER_NAME "vs2017")
- set (SITE_COMPILER_VERSION "15")
- elseif (${BUILD_GENERATOR} STREQUAL "VS201564")
- set (CTEST_CMAKE_GENERATOR "Visual Studio 14 2015 Win64")
- set (SITE_OS_BITS "64")
- set (SITE_COMPILER_NAME "vs2015")
- set (SITE_COMPILER_VERSION "14")
- elseif (${BUILD_GENERATOR} STREQUAL "VS2015")
- set (CTEST_CMAKE_GENERATOR "Visual Studio 14 2015")
- set (SITE_OS_BITS "32")
- set (SITE_COMPILER_NAME "vs2015")
- set (SITE_COMPILER_VERSION "14")
- elseif (${BUILD_GENERATOR} STREQUAL "VS201364")
- set (CTEST_CMAKE_GENERATOR "Visual Studio 12 2013 Win64")
- set (SITE_OS_BITS "64")
- set (SITE_COMPILER_NAME "vs2013")
- set (SITE_COMPILER_VERSION "12")
- elseif (${BUILD_GENERATOR} STREQUAL "VS2013")
- set (CTEST_CMAKE_GENERATOR "Visual Studio 12 2013")
- set (SITE_OS_BITS "32")
- set (SITE_COMPILER_NAME "vs2013")
- set (SITE_COMPILER_VERSION "12")
- elseif (${BUILD_GENERATOR} STREQUAL "VS201264")
- set (CTEST_CMAKE_GENERATOR "Visual Studio 11 2012 Win64")
- set (SITE_OS_BITS "64")
- set (SITE_COMPILER_NAME "vs2012")
- set (SITE_COMPILER_VERSION "11")
- elseif (${BUILD_GENERATOR} STREQUAL "VS2012")
- set (CTEST_CMAKE_GENERATOR "Visual Studio 11 2012")
- set (SITE_OS_BITS "32")
- set (SITE_COMPILER_NAME "vs2012")
- set (SITE_COMPILER_VERSION "11")
- else ()
- message (FATAL_ERROR "Invalid BUILD_GENERATOR must be - Unix, VS2017, or VS201764, VS2015, VS201564, VS2013, VS201364")
- endif ()
-## Set the following to unique id your computer ##
- set (CTEST_SITE "WIN7${BUILD_GENERATOR}.XXXX")
-else ()
- set (CTEST_CMAKE_GENERATOR "Unix Makefiles")
-## Set the following to unique id your computer ##
- if (APPLE)
- set (CTEST_SITE "MAC.XXXX")
- else ()
- set (CTEST_SITE "LINUX.XXXX")
- endif ()
- if (APPLE)
- execute_process (COMMAND xcrun --find cc OUTPUT_VARIABLE XCODE_CC OUTPUT_STRIP_TRAILING_WHITESPACE)
- execute_process (COMMAND xcrun --find c++ OUTPUT_VARIABLE XCODE_CXX OUTPUT_STRIP_TRAILING_WHITESPACE)
- set (ENV{CC} "${XCODE_CC}")
- set (ENV{CXX} "${XCODE_CXX}")
- set (CTEST_USE_LAUNCHERS 1)
- set (RR_WARNINGS_COMMON "-Wno-format-nonliteral -Wno-cast-align -Wno-unused -Wno-unused-variable -Wno-unused-function -Wno-self-assign -Wno-unused-parameter -Wno-sign-compare")
- set (RR_WARNINGS_C "${RR_WARNINGS_COMMON} -Wno-deprecated-declarations -Wno-uninitialized")
- set (RR_WARNINGS_CXX "${RR_WARNINGS_COMMON} -Woverloaded-virtual -Wshadow -Wwrite-strings -Wc++11-compat")
- set (RR_FLAGS_COMMON "-g -O0 -fstack-protector-all -D_FORTIFY_SOURCE=2")
- set (RR_FLAGS_C "${RR_FLAGS_COMMON}")
- set (RR_FLAGS_CXX "${RR_FLAGS_COMMON}")
- set (ENV{CFLAGS} "${RR_WARNINGS_C} ${RR_FLAGS_C}")
- set (ENV{CXXFLAGS} "${RR_WARNINGS_CXX} ${RR_FLAGS_CXX}")
- endif ()
-endif ()
-###################################################################
-
-###################################################################
-######### Following is for submission to CDash ############
-###################################################################
-set (MODEL "Experimental")
-###################################################################
-
-###################################################################
-##### Following controls CDash submission #####
-#set (LOCAL_SUBMIT "TRUE")
-##### Following controls test process #####
-#set (LOCAL_SKIP_TEST "TRUE")
-#set (LOCAL_MEMCHECK_TEST "TRUE")
-#set (LOCAL_COVERAGE_TEST "TRUE")
-##### Following controls cpack command #####
-#set (LOCAL_NO_PACKAGE "TRUE")
-##### Following controls source update #####
-#set (LOCAL_UPDATE "TRUE")
-set (REPOSITORY_URL "https://git@bitbucket.hdfgroup.org/scm/hdffv/hdf5.git")
-set (REPOSITORY_BRANCH "develop")
-
-#uncomment to use a compressed source file: *.tar on linux or mac *.zip on windows
-#set(CTEST_USE_TAR_SOURCE "${CTEST_SOURCE_VERSION}")
-###################################################################
-
-###################################################################
-if (${STATICONLYLIBRARIES})
- set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=OFF")
- ######### Following describes computer ############
- ## following is optional to describe build ##
- set (SITE_BUILDNAME_SUFFIX "STATIC")
-endif ()
-###################################################################
-#### fortran ####
-if (${FORTRANLIBRARIES})
- set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_FORTRAN:BOOL=ON")
- ### enable Fortran 2003 depends on HDF5_BUILD_FORTRAN
- set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_F2003:BOOL=ON")
-else ()
- set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_FORTRAN:BOOL=OFF")
- ### enable Fortran 2003 depends on HDF5_BUILD_FORTRAN
- set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_F2003:BOOL=OFF")
-endif ()
-#### java ####
-if (${JAVALIBRARIES})
- set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_JAVA:BOOL=ON")
-else ()
- set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_JAVA:BOOL=OFF")
-endif ()
-
-### change install prefix
-set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCMAKE_INSTALL_PREFIX:PATH=${INSTALLDIR}")
-set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCTEST_CONFIGURATION_TYPE:STRING=$ENV{CMAKE_CONFIG_TYPE}")
-
-###################################################################
-
-if (WIN32)
- set (BINFILEBASE "HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-win${SITE_OS_BITS}")
- include (${CTEST_SCRIPT_DIRECTORY}\\HDF5options.cmake)
- include (${CTEST_SCRIPT_DIRECTORY}\\CTestScript.cmake)
- if (EXISTS "${CTEST_BINARY_DIRECTORY}\\${BINFILEBASE}.exe")
- file (COPY "${CTEST_BINARY_DIRECTORY}\\${BINFILEBASE}.exe" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif ()
- if (EXISTS "${CTEST_BINARY_DIRECTORY}\\${BINFILEBASE}.msi")
- file (COPY "${CTEST_BINARY_DIRECTORY}\\${BINFILEBASE}.msi" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif ()
- if (EXISTS "${CTEST_BINARY_DIRECTORY}\\${BINFILEBASE}.zip")
- file (COPY "${CTEST_BINARY_DIRECTORY}\\${BINFILEBASE}.zip" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif ()
-else ()
- set (BINFILEBASE "HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}")
- include (${CTEST_SCRIPT_DIRECTORY}/HDF5options.cmake)
- include (${CTEST_SCRIPT_DIRECTORY}/CTestScript.cmake)
- if (APPLE)
- if (EXISTS "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Darwin.dmg")
- file (COPY "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Darwin.dmg" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif ()
- if (EXISTS "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Darwin.tar.gz")
- file (COPY "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Darwin.tar.gz" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif ()
- if (EXISTS "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Darwin.sh")
- file (COPY "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Darwin.sh" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif ()
- else ()
- if (CYGWIN)
- if (EXISTS "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-CYGWIN.sh")
- file (COPY "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-CYGWIN.sh" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif ()
- if (EXISTS "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-CYGWIN.tar.gz")
- file (COPY "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-CYGWIN.tar.gz" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif ()
- else ()
- if (EXISTS "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Linux.sh")
- file (COPY "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Linux.sh" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif ()
- if (EXISTS "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Linux.tar.gz")
- file (COPY "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Linux.tar.gz" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif ()
- endif ()
- endif ()
-endif ()
-
-HDF5options.cmake:
-#############################################################################################
-#### Change default configuration of options in config/cmake/cacheinit.cmake file ###
-#### format: set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DXXX:YY=ZZZZ") ###
-#############################################################################################
-
-### uncomment/comment and change the following lines for other configuration options
-
-#############################################################################################
-#### alternate toolsets ####
-#set(CMAKE_GENERATOR_TOOLSET "Intel C++ Compiler 17.0")
-
-#############################################################################################
-#### ext libraries ####
-
-### ext libs from tgz
-set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ALLOW_EXTERNAL_SUPPORT:STRING=TGZ -DTGZPATH:PATH=${CTEST_SCRIPT_DIRECTORY}")
-### ext libs from git
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ALLOW_EXTERNAL_SUPPORT:STRING=GIT")
-### ext libs on system
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DZLIB_LIBRARY:FILEPATH=some_location/lib/zlib.lib -DZLIB_INCLUDE_DIR:PATH=some_location/include")
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DSZIP_LIBRARY:FILEPATH=some_location/lib/szlib.lib -DSZIP_INCLUDE_DIR:PATH=some_location/include")
-
-### disable ext zlib building
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_Z_LIB_SUPPORT:BOOL=OFF")
-### disable ext szip building
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=OFF")
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_ENCODING:BOOL=OFF")
-
-#############################################################################################
-### disable test program builds
-
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DBUILD_TESTING:BOOL=OFF")
-
-#############################################################################################
-### disable packaging
-
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_NO_PACKAGES:BOOL=ON")
-### Create install package with external libraries (szip, zlib)
-set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_PACKAGE_EXTLIBS:BOOL=ON")
-
-#############################################################################################
-
-========================================================================
For further assistance, send email to help@hdfgroup.org
========================================================================
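
The removed scripts are driven entirely by ctest script options of the form
OPTION=VALUE appended to the -S argument. Note the option handling at the top
of the listing: merely defining FORTRAN_LIBRARIES or JAVA_LIBRARIES enables
that language binding, while leaving STATIC_ONLY undefined selects a
static-only build. A representative invocation, assuming the configuration
script is saved as HDF5config.cmake (the filename and install path are
illustrative), would be:

    ctest -S HDF5config.cmake,BUILD_GENERATOR=VS201764,FORTRAN_LIBRARIES=YES,INSTALLDIR=C:/hdf5 -C Release -VV -O hdf5.log
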
diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt
index 48fbef0..a322164 100644
--- a/release_docs/RELEASE.txt
+++ b/release_docs/RELEASE.txt
@@ -67,7 +67,13 @@ New Features
Java Library:
----------------
- -
+ - JNI Read and Write
+
+ Refactored variable-length functions, H5DreadVL and H5AreadVL,
+ to correct dataset and attribute reads. New write functions,
+ H5DwriteVL and H5AwriteVL, are under construction.
+
+ (ADB - 2018/06/02, HDFFV-10519)
Tools:
------
@@ -98,6 +104,17 @@ Bug Fixes since HDF5-1.10.2 release
Library
-------
+ - Error checks in h5stat and when decoding messages
+
+ h5stat exited with a segmentation fault (core dump) when
+ errors were encountered in the internal library.
+
+ Added error checks and an --enable-error-stack option to h5stat.
+ Added range checks when decoding messages: old fill value, old
+ layout, and refcount.
+
+ (VC - 2018/07/11, HDFFV-10333)
+
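
As a usage sketch for the h5stat change: the --enable-error-stack option
works like the same flag in the other HDF5 command-line tools, printing
messages from the library error stack as errors are encountered rather than
exiting with a segmentation fault. A typical invocation (filename
illustrative):

    h5stat --enable-error-stack damaged.h5
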
- If an HDF5 file contains a malformed compound datatype with a
suitably large offset, the type conversion code can run off
the end of the type conversion buffer, causing a segmentation
diff --git a/release_docs/USING_CMake_Examples.txt b/release_docs/USING_CMake_Examples.txt
index f188ab3..6f744d9 100644
--- a/release_docs/USING_CMake_Examples.txt
+++ b/release_docs/USING_CMake_Examples.txt
@@ -48,6 +48,8 @@ Default installation process:
with the CTEST_SOURCE_NAME script option.
The default installation folder is defined as "@CMAKE_INSTALL_PREFIX@".
It can be changed with the INSTALLDIR script option.
+ (Note: Windows has issues with spaces in paths; the path will need to
+ be set correctly.)
The default ctest configuration is defined as "Release". It can be changed
with the CTEST_CONFIGURATION_TYPE script option. Note that this must
be the same as the value used with the -C command line option.
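
For example, to build and test the examples with a Debug configuration, the
script option and ctest's -C flag must agree:

    ctest -S HDF5_Examples.cmake,CTEST_CONFIGURATION_TYPE=Debug -C Debug -VV -O test.log
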
diff --git a/release_docs/USING_HDF5_CMake.txt b/release_docs/USING_HDF5_CMake.txt
index 7d9a585..73a24d9 100644
--- a/release_docs/USING_HDF5_CMake.txt
+++ b/release_docs/USING_HDF5_CMake.txt
@@ -225,232 +225,6 @@ Also available at the HDF web site is a CMake application framework template.
You can quickly add files to the framework and execute the script to compile
your application with an installed HDF5 binary.
-========================================================================
-ctest use of HDF5_Examples.cmake and HDF5_Examples_options.cmake
-========================================================================
-
-cmake_minimum_required (VERSION 3.10)
-###############################################################################################################
-# This script will build and run the examples from a folder
-# Execute from a command line:
-# ctest -S HDF5_Examples.cmake,OPTION=VALUE -C Release -VV -O test.log
-###############################################################################################################
-
-set(CTEST_CMAKE_GENERATOR "@CMAKE_GENERATOR@")
-if("@CMAKE_GENERATOR_TOOLSET@")
- set(CMAKE_GENERATOR_TOOLSET "@CMAKE_GENERATOR_TOOLSET@")
-endif()
-set(CTEST_DASHBOARD_ROOT ${CTEST_SCRIPT_DIRECTORY})
-
-# handle input parameters to script.
-#INSTALLDIR - HDF5 root folder
-#CTEST_CONFIGURATION_TYPE - Release, Debug, RelWithDebInfo
-#CTEST_SOURCE_NAME - name of source folder; HDF5Examples
-if(DEFINED CTEST_SCRIPT_ARG)
- # transform ctest script arguments of the form
- # script.ctest,var1=value1,var2=value2
- # to variables with the respective names set to the respective values
- string(REPLACE "," ";" script_args "${CTEST_SCRIPT_ARG}")
- foreach(current_var ${script_args})
- if("${current_var}" MATCHES "^([^=]+)=(.+)$")
- set("${CMAKE_MATCH_1}" "${CMAKE_MATCH_2}")
- endif()
- endforeach()
-endif()
-
-###################################################################
-### Following Line is one of [Release, RelWithDebInfo, Debug] #####
-set(CTEST_CONFIGURATION_TYPE "$ENV{CMAKE_CONFIG_TYPE}")
-if(NOT DEFINED CTEST_CONFIGURATION_TYPE)
- set(CTEST_CONFIGURATION_TYPE "Release")
-endif()
-set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCTEST_CONFIGURATION_TYPE:STRING=${CTEST_CONFIGURATION_TYPE}")
-##################################################################
-
-if(NOT DEFINED INSTALLDIR)
- set(INSTALLDIR "@CMAKE_INSTALL_PREFIX@")
-endif()
-
-if(NOT DEFINED CTEST_SOURCE_NAME)
- set(CTEST_SOURCE_NAME "HDF5Examples")
-endif()
-
-if(NOT DEFINED HDF_LOCAL)
- set(CDASH_LOCAL "NO")
-else()
- set(CDASH_LOCAL "YES")
-endif()
-if(NOT DEFINED CTEST_SITE)
- set(CTEST_SITE "local")
-endif()
-if(NOT DEFINED CTEST_BUILD_NAME)
- set(CTEST_BUILD_NAME "examples")
-endif()
-set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DSITE:STRING=${CTEST_SITE} -DBUILDNAME:STRING=${CTEST_BUILD_NAME}")
-
-#TAR_SOURCE - name of tarfile
-#if(NOT DEFINED TAR_SOURCE)
-# set(CTEST_USE_TAR_SOURCE "HDF5Examples-1.10.7-Source")
-#endif()
-
-###############################################################################################################
-if(WIN32)
- set(SITE_OS_NAME "Windows")
- set(ENV{HDF5_DIR} "${INSTALLDIR}/cmake")
- set(CTEST_BINARY_NAME ${CTEST_SOURCE_NAME}\\build)
- set(CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_SOURCE_NAME}")
- set(CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_BINARY_NAME}")
-else()
- set(ENV{HDF5_DIR} "${INSTALLDIR}/share/cmake")
- set(ENV{LD_LIBRARY_PATH} "${INSTALLDIR}/lib")
- set(CTEST_BINARY_NAME ${CTEST_SOURCE_NAME}/build)
- set(CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}/${CTEST_SOURCE_NAME}")
- set(CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}/${CTEST_BINARY_NAME}")
-endif()
-if(${CDASH_LOCAL})
- set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCDASH_LOCAL:BOOL=ON")
-endif()
-set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_PACKAGE_NAME:STRING=@HDF5_PACKAGE@@HDF_PACKAGE_EXT@")
-
-###############################################################################################################
-# For any comments please contact help@hdfgroup.org
-#
-###############################################################################################################
-
-#############################################################################################
-#### Change default configuration of options in config/cmake/cacheinit.cmake file ###
-#### format for file: set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DXXX:YY=ZZZZ") ###
-#############################################################################################
-if(WIN32)
- include(${CTEST_SCRIPT_DIRECTORY}\\HDF5_Examples_options.cmake)
-else()
- include(${CTEST_SCRIPT_DIRECTORY}/HDF5_Examples_options.cmake)
-endif()
-
-#-----------------------------------------------------------------------------
-set (CTEST_CMAKE_COMMAND "\"${CMAKE_COMMAND}\"")
-## --------------------------
-if (CTEST_USE_TAR_SOURCE)
- ## Uncompress source if tar or zip file provided
- ## --------------------------
- if (WIN32)
- message (STATUS "extracting... [${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_USE_TAR_SOURCE}.zip]")
- execute_process (COMMAND ${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_DASHBOARD_ROOT}\\${CTEST_USE_TAR_SOURCE}.zip RESULT_VARIABLE rv)
- else ()
- message (STATUS "extracting... [${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_USE_TAR_SOURCE}.tar]")
- execute_process (COMMAND ${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_DASHBOARD_ROOT}/${CTEST_USE_TAR_SOURCE}.tar RESULT_VARIABLE rv)
- endif ()
-
- if (NOT rv EQUAL 0)
- message (STATUS "extracting... [error-(${rv}) clean up]")
- file (REMOVE_RECURSE "${CTEST_SOURCE_DIRECTORY}")
- message (FATAL_ERROR "error: extract of ${CTEST_SOURCE_NAME} failed")
- endif ()
-endif ()
-
-#-----------------------------------------------------------------------------
-## Clear the build directory
-## --------------------------
-set (CTEST_START_WITH_EMPTY_BINARY_DIRECTORY TRUE)
-if (EXISTS "${CTEST_BINARY_DIRECTORY}" AND IS_DIRECTORY "${CTEST_BINARY_DIRECTORY}")
- ctest_empty_binary_directory (${CTEST_BINARY_DIRECTORY})
-else ()
- file (MAKE_DIRECTORY "${CTEST_BINARY_DIRECTORY}")
-endif ()
-
-# Use multiple CPU cores to build
-include (ProcessorCount)
-ProcessorCount (N)
-if (NOT N EQUAL 0)
- if (NOT WIN32)
- set (CTEST_BUILD_FLAGS -j${N})
- endif ()
- set (ctest_test_args ${ctest_test_args} PARALLEL_LEVEL ${N})
-endif ()
-set (CTEST_CONFIGURE_COMMAND
- "${CTEST_CMAKE_COMMAND} -C \"${CTEST_SOURCE_DIRECTORY}/config/cmake/cacheinit.cmake\" -DCMAKE_BUILD_TYPE:STRING=${CTEST_CONFIGURATION_TYPE} ${BUILD_OPTIONS} \"-G${CTEST_CMAKE_GENERATOR}\" \"${CTEST_SOURCE_DIRECTORY}\""
-)
-
-#-----------------------------------------------------------------------------
-## -- set output to english
-set ($ENV{LC_MESSAGES} "en_EN")
-
-#-----------------------------------------------------------------------------
-configure_file (${CTEST_SOURCE_DIRECTORY}/config/cmake/CTestCustom.cmake ${CTEST_BINARY_DIRECTORY}/CTestCustom.cmake)
-ctest_read_custom_files ("${CTEST_BINARY_DIRECTORY}")
-## NORMAL process
-## --------------------------
-ctest_start (Experimental)
-ctest_configure (BUILD "${CTEST_BINARY_DIRECTORY}")
-if (LOCAL_SUBMIT)
- ctest_submit (PARTS Configure Notes)
-endif ()
-ctest_build (BUILD "${CTEST_BINARY_DIRECTORY}" APPEND)
-if (LOCAL_SUBMIT)
- ctest_submit (PARTS Build)
-endif ()
-ctest_test (BUILD "${CTEST_BINARY_DIRECTORY}" APPEND ${ctest_test_args} RETURN_VALUE res)
-if (LOCAL_SUBMIT)
- ctest_submit (PARTS Test)
-endif ()
-if (res GREATER 0)
- message (FATAL_ERROR "tests FAILED")
-endif ()
-#-----------------------------------------------------------------------------
-##############################################################################################################
-
-##############################################################################################################
-#### HDF5_Examples_options.cmake ###
-#### Change default configuration of options in config/cmake/cacheinit.cmake file ###
-##############################################################################################################
-#############################################################################################
-#### Change default configuration of options in config/cmake/cacheinit.cmake file ###
-#### format: set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DXXX:YY=ZZZZ") ###
-#### DEFAULT: ###
-#### BUILD_SHARED_LIBS:BOOL=OFF ###
-#### HDF_BUILD_C:BOOL=ON ###
-#### HDF_BUILD_CXX:BOOL=OFF ###
-#### HDF_BUILD_FORTRAN:BOOL=OFF ###
-#### HDF_BUILD_JAVA:BOOL=OFF ###
-#### BUILD_TESTING:BOOL=OFF ###
-#### HDF_ENABLE_PARALLEL:BOOL=OFF ###
-#### HDF_ENABLE_THREADSAFE:BOOL=OFF ###
-#############################################################################################
-
-### uncomment/comment and change the following lines for other configuration options
-### build with shared libraries
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=ON")
-
-#############################################################################################
-#### languages ####
-### disable C builds
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF_BUILD_C:BOOL=OFF")
-
-### enable C++ builds
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF_BUILD_CXX:BOOL=ON")
-
-### enable Fortran builds
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF_BUILD_FORTRAN:BOOL=ON")
-
-### enable JAVA builds
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF_BUILD_JAVA:BOOL=ON")
-
-#############################################################################################
-### enable parallel program builds
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF_ENABLE_PARALLEL:BOOL=ON")
-
-#############################################################################################
-### enable threadsafe program builds
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF_ENABLE_THREADSAFE:BOOL=ON")
-
-#############################################################################################
-### enable test program builds, requires reference files in testfiles subdirectory
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DBUILD_TESTING:BOOL=ON")
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCOMPARE_TESTING:BOOL=ON")
-
-#############################################################################################
-
-
========================================================================
For further assistance, send email to help@hdfgroup.org
diff --git a/src/H5.c b/src/H5.c
index 552330d..1b13fea 100644
--- a/src/H5.c
+++ b/src/H5.c
@@ -470,7 +470,7 @@ H5dont_atexit(void)
* library, which are supposed to free any unused memory they have
* allocated.
*
- * These should probably be registered dynamicly in a linked list of
+ * These should probably be registered dynamically in a linked list of
* functions to call, but there aren't that many right now, so we
* hard-wire them...
*
@@ -785,7 +785,7 @@ H5check_version(unsigned majnum, unsigned minnum, unsigned relnum)
HDfprintf (stderr, "%s", H5libhdf5_settings);
break;
default:
- /* 2 or higer: continue silently */
+ /* 2 or higher: continue silently */
break;
} /* end switch */
diff --git a/src/H5AC.c b/src/H5AC.c
index 5eccf9e..989ee10 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -318,7 +318,7 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co
HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get mpi size")
if(NULL == (aux_ptr = H5FL_CALLOC(H5AC_aux_t)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate H5AC auxilary structure")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate H5AC auxiliary structure")
aux_ptr->magic = H5AC__H5AC_AUX_T_MAGIC;
aux_ptr->mpi_comm = mpi_comm;
@@ -432,7 +432,7 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co
done:
#ifdef H5_HAVE_PARALLEL
- /* if there is a failure, try to tidy up the auxilary structure */
+ /* if there is a failure, try to tidy up the auxiliary structure */
if(ret_value < 0) {
if(aux_ptr != NULL) {
if(aux_ptr->d_slist_ptr != NULL)
@@ -1878,7 +1878,7 @@ done:
* from the cache, clear it, and free it without writing it to
* disk.
*
- * This verion of the function is a complete re-write to
+ * This version of the function is a complete re-write to
* use the new metadata cache. While there isn't all that
* much difference between the old and new Purpose sections,
* the original version is given below.
diff --git a/src/H5ACmpio.c b/src/H5ACmpio.c
index 7671abc..b60b933 100644
--- a/src/H5ACmpio.c
+++ b/src/H5ACmpio.c
@@ -933,7 +933,7 @@ done:
*
* Purpose: Update the dirty_bytes count for a newly inserted entry.
*
- * If mpi_rank isnt 0, this simply means adding the size
+ * If mpi_rank isn't 0, this simply means adding the size
* of the entry to the dirty_bytes count.
*
* If mpi_rank is 0, we must also add the entry to the
@@ -1332,7 +1332,7 @@ done:
* clean.
*
* This function is the main routine for handling this
- * notification proceedure. It must be called
+ * notification procedure. It must be called
* simultaneously on all processes that have the relevant
* file open. To this end, it is called only during a
* sync point, with a barrier prior to the call.
@@ -1340,7 +1340,7 @@ done:
* Note that any metadata entry writes by process 0 will
* occur after the barrier and just before this call.
*
- * Typicaly, calls to this function will be triggered in
+ * Typically, calls to this function will be triggered in
* one of two ways:
*
* 1) Dirty byte creation exceeds some user specified value.
@@ -2128,7 +2128,7 @@ HDfprintf(stdout, "%d:H5AC_propagate...:%u: (u/uu/i/iu/r/ru) = %zu/%u/%zu/%u/%zu
/* clear collective access flag on half of the entries in the
cache and mark them as independent in case they need to be
- evicted later. All ranks are guranteed to mark the same entries
+ evicted later. All ranks are guaranteed to mark the same entries
since we don't modify the order of the collectively accessed
entries except through collective access. */
if(H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
diff --git a/src/H5ACpkg.h b/src/H5ACpkg.h
index 409d914..9bf84bf 100644
--- a/src/H5ACpkg.h
+++ b/src/H5ACpkg.h
@@ -189,7 +189,7 @@ H5FL_EXTERN(H5AC_aux_t);
* is permitted to write to file.
*
* dirty_bytes_threshold: Integer field containing the dirty bytes
- * generation threashold. Whenever dirty byte creation
+ * generation threshold. Whenever dirty byte creation
* exceeds this value, the metadata cache on process 0
* broadcasts a list of the entries it has flushed since
* the last broadcast (or since the beginning of execution)
diff --git a/src/H5ACpublic.h b/src/H5ACpublic.h
index 654a877..a48aa69 100644
--- a/src/H5ACpublic.h
+++ b/src/H5ACpublic.h
@@ -39,7 +39,7 @@ extern "C" {
* structure H5AC_cache_config_t
*
* H5AC_cache_config_t is a public structure intended for use in public APIs.
- * At least in its initial incarnation, it is basicaly a copy of struct
+ * At least in its initial incarnation, it is basically a copy of struct
* H5C_auto_size_ctl_t, minus the report_fcn field, and plus the
* dirty_bytes_threshold field.
*
@@ -212,7 +212,7 @@ extern "C" {
*
* flash_incr_mode: Instance of the H5C_cache_flash_incr_mode enumerated
* type whose value indicates whether and by which algorithm we should
- * make flash increases in the size of the cache to accomodate insertion
+ * make flash increases in the size of the cache to accommodate insertion
* of large entries and large increases in the size of a single entry.
*
* The addition of the flash increment mode was occasioned by performance
@@ -379,7 +379,7 @@ extern "C" {
* synchronize updates between caches. (See above for outline and
* motivation.)
*
- * This value MUST be consistant across all processes accessing the
+ * This value MUST be consistent across all processes accessing the
* file. This field is ignored unless HDF5 has been compiled for
* parallel.
*
diff --git a/src/H5Aint.c b/src/H5Aint.c
index e666e8c..55560c7 100644
--- a/src/H5Aint.c
+++ b/src/H5Aint.c
@@ -1152,7 +1152,7 @@ H5A__free(H5A_t *attr)
HDassert(attr);
- /* Free dynamicly allocated items */
+ /* Free dynamically allocated items */
if(attr->shared->name) {
H5MM_xfree(attr->shared->name);
attr->shared->name = NULL;
@@ -1235,7 +1235,7 @@ H5A__close(H5A_t *attr)
/* Reference count can be 0. It only happens when H5A__create fails. */
if(attr->shared->nrefs <= 1) {
- /* Free dynamicly allocated items */
+ /* Free dynamically allocated items */
if(H5A__free(attr) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTRELEASE, FAIL, "can't release attribute info")
diff --git a/src/H5Apkg.h b/src/H5Apkg.h
index 3315cc1..af16b55 100644
--- a/src/H5Apkg.h
+++ b/src/H5Apkg.h
@@ -86,7 +86,7 @@ typedef struct H5A_shared_t {
void *data; /* Attribute data (on a temporary basis) */
size_t data_size; /* Size of data on disk */
H5O_msg_crt_idx_t crt_idx; /* Attribute's creation index in the object header */
- unsigned nrefs; /* Ref count for times this object is refered */
+ unsigned nrefs; /* Ref count for times this object is referred */
} H5A_shared_t;
/* Define the main attribute structure */
diff --git a/src/H5B.c b/src/H5B.c
index 1f19d7b..53d5529 100644
--- a/src/H5B.c
+++ b/src/H5B.c
@@ -769,7 +769,7 @@ H5B__insert_child(H5B_t *bt, unsigned *bt_flags, unsigned idx,
* the specified type.
*
* On return, if LT_KEY_CHANGED is non-zero, then LT_KEY is
- * the new native left key. Similarily for RT_KEY_CHANGED
+ * the new native left key. Similarly for RT_KEY_CHANGED
* and RT_KEY.
*
* If the node splits, then MD_KEY contains the key that
diff --git a/src/H5B2internal.c b/src/H5B2internal.c
index 95ed769..7f6b80a 100644
--- a/src/H5B2internal.c
+++ b/src/H5B2internal.c
@@ -1192,7 +1192,7 @@ H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, hbool_t *depth_decreased,
if(swap_loc)
idx = 0;
else {
- /* Count from the orginal index value again */
+ /* Count from the original index value again */
n = orig_n;
/* Reset "found" flag - the record may have shifted during the
diff --git a/src/H5Bprivate.h b/src/H5Bprivate.h
index 270b4e6..e203b87 100644
--- a/src/H5Bprivate.h
+++ b/src/H5Bprivate.h
@@ -122,7 +122,7 @@ typedef struct H5B_class_t {
H5B_ins_t (*insert)(H5F_t*, haddr_t, void*, hbool_t*, void*, void*,
void*, hbool_t*, haddr_t*);
- /* min insert uses min leaf, not new(), similarily for max insert */
+ /* min insert uses min leaf, not new(), similarly for max insert */
hbool_t follow_min;
hbool_t follow_max;
diff --git a/src/H5C.c b/src/H5C.c
index 729b267..c4d4eed 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -793,8 +793,8 @@ H5C_prep_for_file_close(H5F_t *f)
* close, and since the close warning is issued after all
* non FSM related space allocations and just before the
* first sync point on close, this call will leave the caches
- * in a consistant state across the processes if they were
- * consistant before.
+ * in a consistent state across the processes if they were
+ * consistent before.
*
* 2) Since the FSM settle routines are only invoked once during
* file close, invoking them now will prevent their invocation
@@ -2255,7 +2255,7 @@ H5C_protect(H5F_t * f,
if(entry_ptr != NULL) {
if(entry_ptr->ring != ring)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "ring type mismatch occured for cache entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "ring type mismatch occurred for cache entry")
HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
@@ -3095,7 +3095,7 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
*
* All this is a bit awkward, but until the metadata cache entries
* are contiguous, with only one dirty flag, we have to let the supplied
- * functions deal with the reseting the is_dirty flag.
+ * functions deal with resetting the is_dirty flag.
*/
if(entry_ptr->clear_on_unprotect) {
/* Sanity check */
@@ -5029,7 +5029,7 @@ H5C__flash_increase_cache_size(H5C_t * cache_ptr,
if ( (cache_ptr->resize_ctl).rpt_fcn != NULL ) {
/* get the hit rate for the reporting function. Should still
- * be good as we havent reset the hit rate statistics.
+ * be good as we haven't reset the hit rate statistics.
*/
if(H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
@@ -5272,7 +5272,7 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
cooked_flags = flags & H5C__FLUSH_CLEAR_ONLY_FLAG;
evict_flags = flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG;
- /* The flush proceedure here is a bit strange.
+ /* The flush procedure here is a bit strange.
*
* In the outer while loop we make at least one pass through the
* cache, and then repeat until either all the pinned entries in
@@ -5421,7 +5421,7 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
*
* While this optimization used to be easy, with the possibility
* of new entries being added to the slist in the midst of the
- * flush, we must keep the slist in cannonical form at all
+ * flush, we must keep the slist in canonical form at all
* times.
*/
if(((!entry_ptr->flush_me_last) ||
@@ -6280,7 +6280,7 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
entry_addr = entry_ptr->addr;
/* Internal cache data structures should now be up to date, and
- * consistant with the status of the entry.
+ * consistent with the status of the entry.
*
* Now discard the entry if appropriate.
*/
@@ -6301,10 +6301,10 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
/* If the entry is not a prefetched entry, verify that the flush
- * dependency parents addresses array has been transfered.
+ * dependency parents addresses array has been transferred.
*
* If the entry is prefetched, the free_isr routine will dispose of
- * the flush dependency parents adresses array if necessary.
+ * the flush dependency parents addresses array if necessary.
*/
if(!entry_ptr->prefetched) {
HDassert(0 == entry_ptr->fd_parent_count);
@@ -8587,7 +8587,7 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
* tests will be necessary.
*/
if(cache_ptr->aux_ptr != NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occured in parallel case")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occurred in parallel case")
#endif
/* If required, resize the buffer and update the entry and the cache
@@ -8787,7 +8787,7 @@ H5C_remove_entry(void *_entry)
cache->entry_watched_for_removal = NULL;
/* Internal cache data structures should now be up to date, and
- * consistant with the status of the entry.
+ * consistent with the status of the entry.
*
* Now clean up internal cache fields if appropriate.
*/
diff --git a/src/H5Cimage.c b/src/H5Cimage.c
index 51de5c4..26b6506 100644
--- a/src/H5Cimage.c
+++ b/src/H5Cimage.c
@@ -1494,7 +1494,7 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated)
* if the underlying file alignment is greater than 1.
*
* Clean this up eventually by extending the size of the cache
- * image block to the next alignement boundary, and then setting
+ * image block to the next alignment boundary, and then setting
* the image_data_len to the actual size of the cache_image.
*
* On the off chance that there is some other way to get a
@@ -1554,7 +1554,7 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated)
* metadata cache image superblock extension message, set
* cache_ptr->image_ctl.generate_image to FALSE. This will
* allow the file close to continue normally without the
- * unecessary generation of the metadata cache image.
+ * unnecessary generation of the metadata cache image.
*/
if(cache_ptr->num_entries_in_image > 0) {
if(H5C__prep_for_file_close__setup_image_entries_array(cache_ptr) < 0)
diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c
index e342d69..ecaed62 100644
--- a/src/H5Cmpio.c
+++ b/src/H5Cmpio.c
@@ -380,7 +380,7 @@ H5C_apply_candidate_list(H5F_t * f,
* Note that we are doing things in this round about manner so as
* to preserve the order of the LRU list to the best of our ability.
* If we don't do this, my experiments indicate that we will have a
- * noticably poorer hit ratio as a result.
+ * noticeably poorer hit ratio as a result.
*/
if(H5C__flush_candidate_entries(f, entries_to_flush, entries_to_clear) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush candidates failed")
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index c5dc8f3..98d7a01 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -58,7 +58,7 @@
* We maintain doubly linked lists of instances of H5C_cache_entry_t for a
* variety of reasons -- protected list, LRU list, and the clean and dirty
* LRU lists at present. The following macros support linking and unlinking
- * of instances of H5C_cache_entry_t by both their regular and auxilary next
+ * of instances of H5C_cache_entry_t by both their regular and auxiliary next
* and previous pointers.
*
* The size and length fields are also maintained.
@@ -2006,7 +2006,7 @@ if ( ( (cache_ptr)->index_size != \
* a bit more performance out of the cache.
*
* At least for the first cut, I am leaving the comments and
- * white space in the macro. If they cause dificulties with
+ * white space in the macro. If they cause difficulties with
* the pre-processor, I'll have to remove them.
*
* JRM - 7/28/04
@@ -2117,7 +2117,7 @@ if ( ( (cache_ptr)->index_size != \
* a bit more performance out of the cache.
*
* At least for the first cut, I am leaving the comments and
- * white space in the macro. If they cause dificulties with
+ * white space in the macro. If they cause difficulties with
* pre-processor, I'll have to remove them.
*
* JRM - 7/28/04
@@ -2375,7 +2375,7 @@ if ( ( (cache_ptr)->index_size != \
* a bit more performance out of the cache.
*
* At least for the first cut, I am leaving the comments and
- * white space in the macro. If they cause dificulties with
+ * white space in the macro. If they cause difficulties with
* pre-processor, I'll have to remove them.
*
* JRM - 7/28/04
@@ -2513,7 +2513,7 @@ if ( ( (cache_ptr)->index_size != \
* a bit more performance out of the cache.
*
* At least for the first cut, I am leaving the comments and
- * white space in the macro. If they cause dificulties with
+ * white space in the macro. If they cause difficulties with
* pre-processor, I'll have to remove them.
*
* JRM - 7/28/04
@@ -3057,7 +3057,7 @@ if ( ( (cache_ptr)->index_size != \
* squeeze a bit more performance out of the cache.
*
* At least for the first cut, I am leaving the comments and
- * white space in the macro. If they cause dificulties with
+ * white space in the macro. If they cause difficulties with
* pre-processor, I'll have to remove them.
*
* JRM - 7/28/04
@@ -3892,7 +3892,7 @@ typedef struct H5C_tag_info_t {
* 2) A pinned entry can be accessed or modified at any time.
* This places an additional burden on the associated pre-serialize
* and serialize callbacks, which must ensure that the entry is in
- * a consistant state before creating an image of it.
+ * a consistent state before creating an image of it.
*
* 3) A pinned entry can be marked as dirty (and possibly
* change size) while it is unprotected.
@@ -4604,7 +4604,7 @@ typedef struct H5C_tag_info_t {
* improper behavior if the next entry in the list is the target of one on
* these operations.
*
- * The following fields are use to count such occurances. They are used
+ * The following fields are used to count such occurrences. They are used
* both in tests (to verify that the scan has been restarted), and to
* obtain estimates of how frequently these restarts occur.
*
@@ -4624,7 +4624,7 @@ typedef struct H5C_tag_info_t {
* than the target entry as the result of call(s) to the
* pre_serialize or serialize callbacks.
*
- * Note that at present, this condition can only be triggerd
+ * Note that at present, this condition can only be triggered
* by a call to H5C_serialize_single_entry().
*
* The remaining stats are collected only when both H5C_COLLECT_CACHE_STATS
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index 522b3cf..38a86ee 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -87,7 +87,7 @@
#define H5C__MIN_MAX_CACHE_SIZE ((size_t)(1024))
/* Default max cache size and min clean size are give here to make
- * them generally accessable.
+ * them generally accessible.
*/
#define H5C__DEFAULT_MAX_CACHE_SIZE ((size_t)(4 * 1024 * 1024))
#define H5C__DEFAULT_MIN_CLEAN_SIZE ((size_t)(2 * 1024 * 1024))
@@ -901,7 +901,7 @@ typedef struct H5C_class_t {
H5C_get_fsf_size_t fsf_size;
} H5C_class_t;
-/* Type defintions of callback functions used by the cache as a whole */
+/* Type definitions of callback functions used by the cache as a whole */
typedef herr_t (*H5C_write_permitted_func_t)(const H5F_t *f,
hbool_t *write_permitted_ptr);
typedef herr_t (*H5C_log_flush_func_t)(H5C_t *cache_ptr, haddr_t addr,
@@ -925,7 +925,7 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t *cache_ptr, haddr_t addr,
* flush dependency -- with the implied setup and takedown overhead and
* added complexity. Further, the flush ordering between rings need only
* be enforced on flush operations, and thus the use of flush dependencies
- * instead would apply unecessary constraints on flushes under normal
+ * instead would apply unnecessary constraints on flushes under normal
* operating circumstances.
*
* As of this writing, all metadata entries pertaining to data sets and
@@ -1092,7 +1092,7 @@ typedef int H5C_ring_t;
* 2) A pinned entry can be accessed or modified at any time.
* This places an extra burden on the pre-serialize and
* serialize callbacks, which must ensure that a pinned
- * entry is consistant and ready to write to disk before
+ * entry is consistent and ready to write to disk before
* generating an image.
*
* 3) A pinned entry can be marked as dirty (and possibly
@@ -1152,7 +1152,7 @@ typedef int H5C_ring_t;
* flush_immediately: Boolean flag used only in Phdf5 -- and then only
* for H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED.
*
- * When a destributed metadata write is triggered at a
+ * When a distributed metadata write is triggered at a
* sync point, this field is used to mark entries that
* must be flushed before leaving the sync point. At all
* other times, this field should be set to FALSE.
@@ -1430,7 +1430,7 @@ typedef int H5C_ring_t;
* The flush dependency height of any entry involved in a
* flush dependency relationship is defined to be the
* longest flush dependency path from that entry to an entry
- * with no flush depenency children.
+ * with no flush dependency children.
*
* Since the image_fd_height is used to order entries in the
* cache image so that fd parents precede fd children, for
@@ -1468,8 +1468,8 @@ typedef int H5C_ring_t;
*
* Further, if the prefetched entry is a flush dependency parent,
* all its flush dependency children (which must also be
- * prefetched entries), must be tranfered to the new cache
- * entry returned by the deserailization callback.
+ * prefetched entries), must be transferred to the new cache
+ * entry returned by the deserialization callback.
*
* Finally, if the prefetched entry is a flush dependency child,
* this flush dependency must be destroyed prior to the
@@ -1737,7 +1737,7 @@ typedef struct H5C_cache_entry_t {
* The flush dependency height of any entry involved in a
* flush dependency relationship is defined to be the
* longest flush dependency path from that entry to an entry
- * with no flush depenency children.
+ * with no flush dependency children.
*
* Since the image_fd_height is used to order entries in the
* cache image so that fd parents precede fd children, for
@@ -1946,7 +1946,7 @@ typedef struct H5C_image_entry_t {
*
* flash_incr_mode: Instance of the H5C_cache_flash_incr_mode enumerated
* type whose value indicates whether and by what algorithm we should
- * make flash increases in the size of the cache to accomodate insertion
+ * make flash increases in the size of the cache to accommodate insertion
* of large entries and large increases in the size of a single entry.
*
* The addition of the flash increment mode was occasioned by performance
@@ -2182,9 +2182,9 @@ typedef struct H5C_auto_size_ctl_t {
* current value, any value in excess of 255 will be the functional
* equivalent of H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE.
*
- * flags: Unsigned integer containing flags controling which aspects of the
+ * flags: Unsigned integer containing flags controlling which aspects of the
* cache image functionality is actually executed. The primary impetus
- * behind this field is to allow developement of tests for partial
+ * behind this field is to allow development of tests for partial
* implementations that will require little if any modification to run
* with the full implementation. In normal operation, all flags should
* be set.
diff --git a/src/H5Dbtree2.c b/src/H5Dbtree2.c
index f9074be..d60e79e 100644
--- a/src/H5Dbtree2.c
+++ b/src/H5Dbtree2.c
@@ -378,7 +378,7 @@ H5D__bt2_unfilt_encode(uint8_t *raw, const void *_record, void *_ctx)
{
H5D_bt2_ctx_t *ctx = (H5D_bt2_ctx_t *)_ctx; /* Callback context structure */
const H5D_chunk_rec_t *record = (const H5D_chunk_rec_t *)_record; /* The native record */
- unsigned u; /* Local index varible */
+ unsigned u; /* Local index variable */
FUNC_ENTER_STATIC_NOERR
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index 7216c49..e64a60f 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -695,7 +695,7 @@ H5D__chunk_set_info_real(H5O_layout_chunk_t *layout, unsigned ndims,
/* Compute the # of chunks in dataset dimensions */
for(u = 0, layout->nchunks = 1, layout->max_nchunks = 1; u < ndims; u++) {
- /* Round up to the next integer # of chunks, to accomodate partial chunks */
+ /* Round up to the next integer # of chunks, to accommodate partial chunks */
layout->chunks[u] = ((curr_dims[u] + layout->dim[u]) - 1) / layout->dim[u];
if(H5S_UNLIMITED == max_dims[u])
layout->max_chunks[u] = H5S_UNLIMITED;
@@ -2933,8 +2933,41 @@ H5D__chunk_lookup(const H5D_t *dset, const hsize_t *scaled,
H5F_set_coll_md_read(idx_info.f, temp_cmr);
#endif /* H5_HAVE_PARALLEL */
- /* Cache the information retrieved */
- H5D__chunk_cinfo_cache_update(&dset->shared->cache.chunk.last, udata);
+ /*
+ * Cache the information retrieved.
+ *
+ * Note that if we are writing to the dataset in parallel and filters
+ * are involved, we skip caching this information as it is highly likely
+ * that the chunk information will be invalidated as a result of the
+ * filter operation (e.g. the chunk gets re-allocated to a different
+ * address in the file and/or gets re-allocated with a different size).
+ * If we were to cache this information, subsequent reads/writes would
+ * retrieve the invalid information and cause a variety of issues.
+ *
+ * It has been verified that in the serial library, when writing to chunks
+ * with the real chunk cache disabled and with filters involved, the
+ * functions within this file are correctly called in such a manner that
+ * this single chunk cache is always updated correctly. Therefore, this
+ * check is not needed for the serial library.
+ *
+ * This is an ugly and potentially fragile check, but the
+ * H5D__chunk_cinfo_cache_reset() function is not currently available
+ * to functions outside of this file, so outside functions cannot
+ * invalidate this single chunk cache. Even if the function were available,
+ * this check prevents us from doing the work of going through and caching
+ * each chunk in the write operation, when we're only going to invalidate
+ * the cache at the end of a parallel write anyway.
+ *
+ * - JTH (7/13/2018)
+ */
+#ifdef H5_HAVE_PARALLEL
+ if ( !( (H5F_HAS_FEATURE(idx_info.f, H5FD_FEAT_HAS_MPI))
+ && (H5F_INTENT(dset->oloc.file) & H5F_ACC_RDWR)
+ && dset->shared->dcpl_cache.pline.nused
+ )
+ )
+#endif
+ H5D__chunk_cinfo_cache_update(&dset->shared->cache.chunk.last, udata);
} /* end if */
} /* end else */
@@ -3016,7 +3049,7 @@ H5D__chunk_flush_entry(const H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t reset)
} /* end if */
else {
/*
- * If we are reseting and something goes wrong after this
+ * If we are resetting and something goes wrong after this
* point then it's too late to recover because we may have
* destroyed the original data by calling H5Z_pipeline().
* The only safe option is to continue with the reset
@@ -4449,7 +4482,7 @@ H5D__chunk_update_old_edge_chunks(H5D_t *dset, hsize_t old_dim[])
/* Start off with this dimension marked as not needing to be modified */
new_full_dim[op_dim] = FALSE;
- /* Calulate offset of first previously incomplete chunk in this
+ /* Calculate offset of first previously incomplete chunk in this
* dimension */
old_edge_chunk_sc[op_dim] = (old_dim[op_dim] / chunk_dim[op_dim]);
@@ -4970,7 +5003,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, const hsize_t *old_dim)
HGOTO_DONE(SUCCEED)
} /* end if */
- /* Round up to the next integer # of chunks, to accomodate partial chunks */
+ /* Round up to the next integer # of chunks, to accommodate partial chunks */
/* Use current dims because the indices have already been updated! -NAF */
/* (also compute the number of elements per chunk) */
/* (also copy the chunk dimensions into 'hsize_t' array for creating dataspace) */
diff --git a/src/H5Dint.c b/src/H5Dint.c
index b9d9cce..2f67226 100644
--- a/src/H5Dint.c
+++ b/src/H5Dint.c
@@ -604,7 +604,7 @@ H5D__cache_dataspace_info(const H5D_t *dset)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't cache dataspace dimensions")
dset->shared->ndims = (unsigned)sndims;
- /* Compute the inital 'power2up' values */
+ /* Compute the initial 'power2up' values */
for(u = 0; u < dset->shared->ndims; u++) {
hsize_t scaled_power2up; /* Scaled value, rounded to next power of 2 */
@@ -782,7 +782,7 @@ H5D__update_oh_info(H5F_t *file, H5D_t *dset, hid_t dapl_id)
H5O_fill_t old_fill_prop; /* Copy of fill value property, for writing as "old" fill value */
/* Shallow copy the fill value property */
- /* (we only want to make certain that the shared component isnt' modified) */
+ /* (we only want to make certain that the shared component isn't modified) */
HDmemcpy(&old_fill_prop, fill_prop, sizeof(old_fill_prop));
/* Reset shared component info */
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index 107b751..d721ae6 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -234,7 +234,7 @@ static herr_t H5D__chunk_redistribute_shared_chunks(const H5D_io_info_t *io_info
H5D_filtered_collective_io_info_t *local_chunk_array, size_t *local_chunk_array_num_entries);
static herr_t H5D__mpio_array_gatherv(void *local_array, size_t local_array_num_entries,
size_t array_entry_size, void **gathered_array, size_t *gathered_array_num_entries,
- int nprocs, hbool_t allgather, int root, MPI_Comm comm, int (*sort_func)(const void *, const void *));
+ hbool_t allgather, int root, MPI_Comm comm, int (*sort_func)(const void *, const void *));
static herr_t H5D__mpio_filtered_collective_write_type(
H5D_filtered_collective_io_info_t *chunk_list, size_t num_entries,
MPI_Datatype *new_mem_type, hbool_t *mem_type_derived,
@@ -418,19 +418,16 @@ done:
* Function: H5D__mpio_array_gatherv
*
* Purpose: Given an array, specified in local_array, by each processor
- * calling this function, gathers each array into a single
+ * calling this function, collects each array into a single
* array which is then either gathered to the processor
* specified by root, when allgather is false, or is
* distributed back to all processors when allgather is true.
*
- * The size of each entry and number of entries in the array
- * contributed by an individual processor should be specified
- * in array_entry_size and local_array_num_entries,
+ * The number of entries in the array contributed by an
+ * individual processor and the size of each entry should be
+ * specified in local_array_num_entries and array_entry_size,
* respectively.
*
- * The number of processors participating in the gather
- * operation should be specified for nprocs.
- *
* The MPI communicator to use should be specified for comm.
*
* If the sort_func argument is supplied, the array is sorted
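
The bookkeeping this routine performs (entry counts gathered first, converted
to byte counts, then turned into running displacements) is the standard MPI
pattern for gathering variable-sized contributions as raw bytes. A minimal
standalone sketch of the gather-to-root case, with illustrative names and
without the library's error-handling macros; this is not the library routine
itself:

#include <mpi.h>
#include <stdlib.h>

/* Gather 'nents' entries of 'esize' bytes from every rank onto 'root'.
 * Returns the gathered buffer on root (caller frees); NULL elsewhere. */
static void *gather_var_sized(void *local, int nents, int esize,
                              int root, MPI_Comm comm, int *total_ents)
{
    int   rank, nprocs;
    int  *counts = NULL;   /* per-rank contribution, converted to bytes */
    int  *displs = NULL;   /* byte offset of each rank in the result   */
    void *gathered = NULL;

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &nprocs);

    /* Only the root needs the bookkeeping arrays for MPI_Gatherv. */
    if (rank == root) {
        counts = malloc((size_t)nprocs * sizeof(int));
        displs = malloc((size_t)nprocs * sizeof(int));
    }

    /* Tell the root how many entries each rank is contributing. */
    MPI_Gather(&nents, 1, MPI_INT, counts, 1, MPI_INT, root, comm);

    if (rank == root) {
        int total_bytes = 0;
        for (int i = 0; i < nprocs; i++) {
            counts[i] *= esize;          /* data is sent as bytes  */
            displs[i]  = total_bytes;    /* running byte offset    */
            total_bytes += counts[i];
        }
        gathered    = malloc((size_t)total_bytes);
        *total_ents = total_bytes / esize;
    }

    /* MPI_BYTE with byte counts and byte displacements. */
    MPI_Gatherv(local, nents * esize, MPI_BYTE,
                gathered, counts, displs, MPI_BYTE, root, comm);

    free(counts);
    free(displs);
    return gathered;                /* non-NULL only on the root rank */
}

int main(int argc, char **argv)
{
    int rank, total = 0;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int mine = rank;                /* each rank contributes one int */
    int *all = gather_var_sized(&mine, 1, (int)sizeof(int), 0,
                                MPI_COMM_WORLD, &total);
    if (all != NULL)                /* on rank 0, total == nranks */
        free(all);

    MPI_Finalize();
    return 0;
}

The allgather branch of the library routine follows the same pattern with
MPI_Allgather/MPI_Allgatherv in place of the rooted calls, in which case every
rank allocates the bookkeeping arrays and receives the result.
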
@@ -448,14 +445,13 @@ done:
static herr_t
H5D__mpio_array_gatherv(void *local_array, size_t local_array_num_entries,
size_t array_entry_size, void **_gathered_array, size_t *_gathered_array_num_entries,
- int nprocs, hbool_t allgather, int root, MPI_Comm comm, int (*sort_func)(const void *, const void *))
+ hbool_t allgather, int root, MPI_Comm comm, int (*sort_func)(const void *, const void *))
{
size_t gathered_array_num_entries = 0; /* The size of the newly-constructed array */
- size_t i;
void *gathered_array = NULL; /* The newly-constructed array returned to the caller */
- int *receive_counts_array = NULL; /* Array containing number of entries each process is contributing */
- int *displacements_array = NULL; /* Array of displacements where each process places its data in the final array */
- int mpi_code;
+ int *receive_counts_array = NULL; /* Array containing number of entries each processor is contributing */
+ int *displacements_array = NULL; /* Array of displacements where each processor places its data in the final array */
+ int mpi_code, mpi_rank, mpi_size;
int sendcount;
herr_t ret_value = SUCCEED;
@@ -464,34 +460,62 @@ H5D__mpio_array_gatherv(void *local_array, size_t local_array_num_entries,
HDassert(_gathered_array);
HDassert(_gathered_array_num_entries);
- /* Determine the size of the end result array */
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /*
+ * Determine the size of the end result array by collecting the number
+ * of entries contributed by each processor into a single total.
+ */
if (MPI_SUCCESS != (mpi_code = MPI_Allreduce(&local_array_num_entries, &gathered_array_num_entries, 1, MPI_INT, MPI_SUM, comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_Allreduce failed", mpi_code)
- /* If 0 entries resulted from the collective operation, no one is writing anything */
+ /* If 0 entries resulted from the collective operation, no processor is contributing anything and there is nothing to do */
if (gathered_array_num_entries > 0) {
- if (NULL == (gathered_array = H5MM_malloc(gathered_array_num_entries * array_entry_size)))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate gathered array")
+ /*
+ * If gathering to all processors, all processors need to allocate space for the resulting array, as well as
+ * the receive counts and displacements arrays for the collective MPI_Allgatherv call. Otherwise, only the
+ * root processor needs to allocate the space for an MPI_Gatherv call.
+ */
+ if (allgather || (mpi_rank == root)) {
+ if (NULL == (gathered_array = H5MM_malloc(gathered_array_num_entries * array_entry_size)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate gathered array")
- if (NULL == (receive_counts_array = (int *) H5MM_malloc((size_t) nprocs * sizeof(int))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate receive counts array")
+ if (NULL == (receive_counts_array = (int *) H5MM_malloc((size_t) mpi_size * sizeof(int))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate receive counts array")
- if (NULL == (displacements_array = (int *) H5MM_malloc((size_t) nprocs * sizeof(int))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate receive displacements array")
+ if (NULL == (displacements_array = (int *) H5MM_malloc((size_t) mpi_size * sizeof(int))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate receive displacements array")
+ } /* end if */
- /* Inform each process of how many entries each other process is contributing to the resulting array */
- if (MPI_SUCCESS != (mpi_code = MPI_Allgather(&local_array_num_entries, 1, MPI_INT, receive_counts_array, 1, MPI_INT, comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Allgather failed", mpi_code)
+ /*
+ * If gathering to all processors, inform each processor of how many entries each other processor is
+ * contributing to the resulting array by collecting the counts into each processor's "receive counts"
+ * array. Otherwise, inform only the root processor of how many entries each other processor is contributing.
+ */
+ if (allgather) {
+ if (MPI_SUCCESS != (mpi_code = MPI_Allgather(&local_array_num_entries, 1, MPI_INT, receive_counts_array, 1, MPI_INT, comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Allgather failed", mpi_code)
+ } /* end if */
+ else {
+ if (MPI_SUCCESS != (mpi_code = MPI_Gather(&local_array_num_entries, 1, MPI_INT, receive_counts_array, 1, MPI_INT, root, comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Gather failed", mpi_code)
+ } /* end else */
- /* Multiply each receive count by the size of the array entry, since the data is sent as bytes */
- for (i = 0; i < (size_t) nprocs; i++)
- H5_CHECKED_ASSIGN(receive_counts_array[i], int, (size_t) receive_counts_array[i] * array_entry_size, size_t);
+ if (allgather || (mpi_rank == root)) {
+ size_t i;
- /* Set receive buffer offsets for MPI_Allgatherv */
- displacements_array[0] = 0;
- for (i = 1; i < (size_t) nprocs; i++)
- displacements_array[i] = displacements_array[i - 1] + receive_counts_array[i - 1];
+ /* Multiply each receive count by the size of the array entry, since the data is sent as bytes. */
+ for (i = 0; i < (size_t) mpi_size; i++)
+ H5_CHECKED_ASSIGN(receive_counts_array[i], int, (size_t) receive_counts_array[i] * array_entry_size, size_t);
+ /* Set receive buffer offsets for the collective MPI_Allgatherv/MPI_Gatherv call. */
+ displacements_array[0] = 0;
+ for (i = 1; i < (size_t) mpi_size; i++)
+ displacements_array[i] = displacements_array[i - 1] + receive_counts_array[i - 1];
+ } /* end if */
+
+ /* As the data is sent as bytes, calculate the true sendcount for the data. */
H5_CHECKED_ASSIGN(sendcount, int, local_array_num_entries * array_entry_size, size_t);
if (allgather) {
@@ -502,10 +526,11 @@ H5D__mpio_array_gatherv(void *local_array, size_t local_array_num_entries,
else {
if (MPI_SUCCESS != (mpi_code = MPI_Gatherv(local_array, sendcount, MPI_BYTE,
gathered_array, receive_counts_array, displacements_array, MPI_BYTE, root, comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Allgatherv failed", mpi_code)
+ HMPI_GOTO_ERROR(FAIL, "MPI_Gatherv failed", mpi_code)
} /* end else */
- if (sort_func) HDqsort(gathered_array, gathered_array_num_entries, array_entry_size, sort_func);
+ if (sort_func && (allgather || (mpi_rank == root)))
+ HDqsort(gathered_array, gathered_array_num_entries, array_entry_size, sort_func);
} /* end if */
*_gathered_array = gathered_array;
@@ -706,7 +731,7 @@ H5D__chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
io_option = H5D_MULTI_CHUNK_IO;
/* via default path. branch by num threshold */
else {
- unsigned one_link_chunk_io_threshold; /* Threshhold to use single collective I/O for all chunks */
+ unsigned one_link_chunk_io_threshold; /* Threshold to use single collective I/O for all chunks */
int mpi_size; /* Number of processes in MPI job */
if(H5D__mpio_get_sum_chunk(io_info, fm, &sum_chunk) < 0)
@@ -1295,8 +1320,7 @@ H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_in
* of the chunks in the file.
*/
if (H5D__mpio_array_gatherv(chunk_list, chunk_list_num_entries, sizeof(H5D_filtered_collective_io_info_t),
- (void **) &collective_chunk_list, &collective_chunk_list_num_entries, mpi_size,
- true, 0, io_info->comm, NULL) < 0)
+ (void **) &collective_chunk_list, &collective_chunk_list_num_entries, true, 0, io_info->comm, NULL) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL, "couldn't gather new chunk sizes")
/* Collectively re-allocate the modified chunks (from each process) in the file */
@@ -1768,8 +1792,7 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_i
* of the chunks in the file
*/
if (H5D__mpio_array_gatherv(&chunk_list[i], have_chunk_to_process ? 1 : 0, sizeof(H5D_filtered_collective_io_info_t),
- (void **) &collective_chunk_list, &collective_chunk_list_num_entries, mpi_size,
- true, 0, io_info->comm, NULL) < 0)
+ (void **) &collective_chunk_list, &collective_chunk_list_num_entries, true, 0, io_info->comm, NULL) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL, "couldn't gather new chunk sizes")
/* Participate in the collective re-allocation of all chunks modified
@@ -2655,8 +2678,8 @@ H5D__chunk_redistribute_shared_chunks(const H5D_io_info_t *io_info, const H5D_ty
* call, the gathered list will initially be sorted in increasing order of chunk offset in the file.
*/
if (H5D__mpio_array_gatherv(local_chunk_array, *local_chunk_array_num_entries, sizeof(H5D_filtered_collective_io_info_t),
- (void **) &shared_chunks_info_array, &shared_chunks_info_array_num_entries, mpi_size,
- false, 0, io_info->comm, H5D__cmp_filtered_collective_io_info_entry) < 0)
+ (void **) &shared_chunks_info_array, &shared_chunks_info_array_num_entries, false, 0,
+ io_info->comm, H5D__cmp_filtered_collective_io_info_entry) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL, "couldn't gather array")
/* Rank 0 redistributes any shared chunks to new owners as necessary */
@@ -2765,7 +2788,7 @@ H5D__chunk_redistribute_shared_chunks(const H5D_io_info_t *io_info, const H5D_ty
if (H5S_encode(chunk_info->fspace, &mod_data_p, &mod_data_size) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTENCODE, FAIL, "unable to encode dataspace")
- /* Intialize iterator for memory selection */
+ /* Initialize iterator for memory selection */
if (H5S_select_iter_init(mem_iter, chunk_info->mspace, type_info->src_type_size) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize memory selection information")
mem_iter_init = TRUE;
@@ -2981,7 +3004,7 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk
{
H5D_chunk_info_t *chunk_info = NULL;
H5S_sel_iter_t *mem_iter = NULL; /* Memory iterator for H5D__scatter_mem/H5D__gather_mem */
- unsigned char *mod_data = NULL; /* Chunk modification data sent by a process to a chunk's owner */
+ H5S_sel_iter_t *file_iter = NULL; /* File selection iterator for gathering from the chunk buffer */
H5Z_EDC_t err_detect; /* Error detection info */
H5Z_cb_t filter_cb; /* I/O filter callback function */
unsigned filter_mask = 0;
@@ -2989,11 +3012,13 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk
hssize_t extent_npoints;
hsize_t true_chunk_size;
hbool_t mem_iter_init = FALSE;
+ hbool_t file_iter_init = FALSE;
size_t buf_size;
size_t i;
H5S_t *dataspace = NULL; /* Other process' dataspace for the chunk */
- void *tmp_gath_buf = NULL; /* Temporary gather buffer for owner of the chunk to gather into from
- application write buffer before scattering out to the chunk data buffer */
+ void *tmp_gath_buf = NULL; /* Temporary gather buffer to gather into from application buffer
+ before scattering out to the chunk data buffer (when writing data),
+ or vice versa (when reading data) */
int mpi_code;
herr_t ret_value = SUCCEED;
@@ -3073,9 +3098,6 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize memory selection information")
mem_iter_init = TRUE;
- if ((iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->mspace)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTCOUNT, FAIL, "dataspace is invalid")
-
/* If this is a read operation, scatter the read chunk data to the user's buffer.
*
* If this is a write operation, update the chunk data buffer with the modifications
@@ -3084,16 +3106,39 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk
*/
switch (io_info->op_type) {
case H5D_IO_OP_READ:
- if (H5D__scatter_mem(chunk_entry->buf, chunk_info->mspace, mem_iter, (size_t)iter_nelmts, io_info->u.rbuf) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't scatter to read buffer")
+ if (NULL == (file_iter = (H5S_sel_iter_t *) H5MM_malloc(sizeof(H5S_sel_iter_t))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate file iterator")
+
+ if (H5S_select_iter_init(file_iter, chunk_info->fspace, type_info->src_type_size) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize file selection information")
+ file_iter_init = TRUE;
+
+ if ((iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->fspace)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTCOUNT, FAIL, "dataspace is invalid")
+
+ if (NULL == (tmp_gath_buf = H5MM_malloc((hsize_t) iter_nelmts * type_info->src_type_size)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate temporary gather buffer")
+
+ if (!H5D__gather_mem(chunk_entry->buf, chunk_info->fspace, file_iter, (size_t) iter_nelmts, tmp_gath_buf))
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "couldn't gather from chunk buffer")
+
+ if ((iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->mspace)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTCOUNT, FAIL, "dataspace is invalid")
+
+ if (H5D__scatter_mem(tmp_gath_buf, chunk_info->mspace, mem_iter, (size_t) iter_nelmts, io_info->u.rbuf) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't scatter to read buffer")
+
break;
case H5D_IO_OP_WRITE:
+ if ((iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->mspace)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTCOUNT, FAIL, "dataspace is invalid")
+
if (NULL == (tmp_gath_buf = H5MM_malloc((hsize_t) iter_nelmts * type_info->src_type_size)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate temporary gather buffer")
/* Gather modification data from the application write buffer into a temporary buffer */
- if(!H5D__gather_mem(io_info->u.wbuf, chunk_info->mspace, mem_iter, (size_t)iter_nelmts, tmp_gath_buf))
+ if(!H5D__gather_mem(io_info->u.wbuf, chunk_info->mspace, mem_iter, (size_t) iter_nelmts, tmp_gath_buf))
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "couldn't gather from write buffer")
if (H5S_SELECT_ITER_RELEASE(mem_iter) < 0)
@@ -3111,7 +3156,7 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk
/* Scatter the owner's modification data into the chunk data buffer according to
* the file space.
*/
- if(H5D__scatter_mem(tmp_gath_buf, chunk_info->fspace, mem_iter, (size_t)iter_nelmts, chunk_entry->buf) < 0)
+ if(H5D__scatter_mem(tmp_gath_buf, chunk_info->fspace, mem_iter, (size_t) iter_nelmts, chunk_entry->buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't scatter to chunk data buffer")
if (H5S_SELECT_ITER_RELEASE(mem_iter) < 0)
@@ -3177,10 +3222,12 @@ done:
H5MM_free(chunk_entry->async_info.receive_buffer_array);
if (chunk_entry->async_info.receive_requests_array)
H5MM_free(chunk_entry->async_info.receive_requests_array);
- if (mod_data)
- H5MM_free(mod_data);
if (tmp_gath_buf)
H5MM_free(tmp_gath_buf);
+ if (file_iter_init && H5S_SELECT_ITER_RELEASE(file_iter) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release selection iterator")
+ if (file_iter)
+ H5MM_free(file_iter);
if (mem_iter_init && H5S_SELECT_ITER_RELEASE(mem_iter) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release selection iterator")
if (mem_iter)
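
The read path above bridges two different selections with one packed buffer: elements are gathered out of the decoded chunk in file-space order, then scattered into the caller's buffer in memory-space order. A toy model of that shape, with a "selection" reduced to an index array (the real code walks dataspace selection iterators):

    #include <stddef.h>

    /* Pack the selected elements of SRC (file-space order). */
    static void gather_by_sel(const double *src, const size_t *sel,
                              size_t n, double *packed)
    {
        size_t i;
        for (i = 0; i < n; i++)
            packed[i] = src[sel[i]];
    }

    /* Unpack into DST at the memory-space positions. */
    static void scatter_by_sel(const double *packed, const size_t *sel,
                               size_t n, double *dst)
    {
        size_t i;
        for (i = 0; i < n; i++)
            dst[sel[i]] = packed[i];
    }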
diff --git a/src/H5Dvirtual.c b/src/H5Dvirtual.c
index c7c0775..7f1ac86 100644
--- a/src/H5Dvirtual.c
+++ b/src/H5Dvirtual.c
@@ -796,7 +796,7 @@ H5D__virtual_copy(H5F_t *f_dst, H5O_layout_t *layout_dst)
if(f_dst == f_src) {
/* Increase reference count on global heap object */
if((heap_rc = H5HG_link(f_dst, (H5HG_t *)&(layout_dst->u.virt.serial_list_hobjid), 1)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTMODIFY, FAIL, "unable to adjust global heap refence count")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTMODIFY, FAIL, "unable to adjust global heap reference count")
} /* end if */
else
#endif /* NOT_YET */
@@ -847,7 +847,7 @@ H5D__virtual_delete(H5F_t *f, H5O_storage_t *storage)
#ifdef NOT_YET
/* Unlink the global heap block */
if((heap_rc = H5HG_link(f, (H5HG_t *)&(storage->u.virt.serial_list_hobjid), -1)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTMODIFY, FAIL, "unable to adjust global heap refence count")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTMODIFY, FAIL, "unable to adjust global heap reference count")
if(heap_rc == 0)
#endif /* NOT_YET */
/* Delete the global heap block */
diff --git a/src/H5Edeprec.c b/src/H5Edeprec.c
index 9ff9f1f..fc3eb9c 100644
--- a/src/H5Edeprec.c
+++ b/src/H5Edeprec.c
@@ -227,7 +227,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5Eclear1
*
- * Purpose: This function is for backward compatbility.
+ * Purpose: This function is for backward compatibility.
* Clears the error stack for the specified error stack.
*
* Return: Non-negative on success/Negative on failure
@@ -258,7 +258,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5Eprint1
*
- * Purpose: This function is for backward compatbility.
+ * Purpose: This function is for backward compatibility.
* Prints the error stack in some default way. This is just a
* convenience function for H5Ewalk() with a function that
 * prints error messages. Users are encouraged to write their
@@ -296,7 +296,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5Ewalk1
*
- * Purpose: This function is for backward compatbility.
+ * Purpose: This function is for backward compatibility.
* Walks the error stack for the current thread and calls some
* function for each error along the way.
*
@@ -335,7 +335,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5Eget_auto1
*
- * Purpose: This function is for backward compatbility.
+ * Purpose: This function is for backward compatibility.
* Returns the current settings for the automatic error stack
* traversal function and its data for specific error stack.
* Either (or both) arguments may be null in which case the
@@ -386,7 +386,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5Eset_auto1
*
- * Purpose: This function is for backward compatbility.
+ * Purpose: This function is for backward compatibility.
* Turns on or off automatic printing of errors for certain
* error stack. When turned on (non-null FUNC pointer) any
* API function which returns an error indication will first
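
These H5E*1 wrappers remain the entry points that 1.6-era applications call. The classic idiom, assuming a library built with deprecated symbols enabled, is to silence automatic printing and report on demand:

    #include <stdio.h>
    #include "hdf5.h"

    int main(void)
    {
        hid_t fid;

        /* Disable automatic error printing (1.6-style call) */
        H5Eset_auto1(NULL, NULL);

        /* A failing call is now silent... */
        fid = H5Fopen("no_such_file.h5", H5F_ACC_RDONLY, H5P_DEFAULT);

        /* ...but the stack can still be dumped explicitly */
        if (fid < 0)
            H5Eprint1(stderr);

        return 0;
    }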
diff --git a/src/H5FDdirect.c b/src/H5FDdirect.c
index 811ea8e..906ec28 100644
--- a/src/H5FDdirect.c
+++ b/src/H5FDdirect.c
@@ -951,7 +951,7 @@ H5FD_direct_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UN
do {
/* Read the aligned data in file first. Not able to handle interrupted
* system calls and partial results like sec2 driver does because the
- * data may no longer be aligned. It's expecially true when the data in
+ * data may no longer be aligned. It's especially true when the data in
* file is smaller than ALLOC_SIZE. */
HDmemset(copy_buf, 0, alloc_size);
@@ -1138,9 +1138,9 @@ H5FD_direct_write(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_U
/*
* Read the aligned data first if the aligned region doesn't fall
- * entirely in the range to be writen. Not able to handle interrupted
+ * entirely in the range to be written. Not able to handle interrupted
* system calls and partial results like sec2 driver does because the
- * data may no longer be aligned. It's expecially true when the data in
+ * data may no longer be aligned. It's especially true when the data in
* file is smaller than ALLOC_SIZE. Only read the entire section if
* both ends are misaligned, otherwise only read the block on the
* misaligned end.
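
The copy-buffer dance exists because direct I/O must start and end on file-block boundaries. The rounding involved is the usual power-of-two arithmetic; a sketch, with FBSIZE as an illustrative stand-in for the probed block size:

    #include <stdint.h>

    #define FBSIZE 4096u  /* illustrative; must be a power of two */

    /* Aligned window enclosing a request for [off, off + len) */
    static uint64_t align_down(uint64_t off)
    {
        return off & ~(uint64_t)(FBSIZE - 1);
    }

    static uint64_t align_up(uint64_t off)
    {
        return (off + FBSIZE - 1) & ~(uint64_t)(FBSIZE - 1);
    }

    /* The driver reads [align_down(off), align_up(off + len)) into an
     * aligned buffer and copies the interior piece to the caller. */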
diff --git a/src/H5FDfamily.c b/src/H5FDfamily.c
index 3c1c7bb..e52a71a 100644
--- a/src/H5FDfamily.c
+++ b/src/H5FDfamily.c
@@ -548,7 +548,7 @@ H5FD_family_sb_encode(H5FD_t *_file, char *name/*out*/, unsigned char *buf/*out*
/*-------------------------------------------------------------------------
* Function: H5FD_family_sb_decode
*
- * Purpose: This function has 2 seperate purpose. One is to decodes the
+ * Purpose: This function has 2 separate purposes. One is to decode the
* superblock information for this driver. The NAME argument is
 * the eight-character (plus null termination) name stored in
* the file. The FILE argument is updated according to the
diff --git a/src/H5FDmpio.c b/src/H5FDmpio.c
index ee3277d..87f8b6a 100644
--- a/src/H5FDmpio.c
+++ b/src/H5FDmpio.c
@@ -1462,7 +1462,7 @@ H5FD_mpio_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type,
/* Only look for MPI views for raw data transfers */
if(type == H5FD_MEM_DRAW) {
- H5FD_mpio_xfer_t xfer_mode; /* I/O tranfer mode */
+ H5FD_mpio_xfer_t xfer_mode; /* I/O transfer mode */
/* Get the transfer mode from the API context */
if(H5CX_get_io_xfer_mode(&xfer_mode) < 0)
@@ -1717,7 +1717,7 @@ H5FD_mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_id,
#endif
int size_i;
hbool_t use_view_this_time = FALSE;
- H5FD_mpio_xfer_t xfer_mode; /* I/O tranfer mode */
+ H5FD_mpio_xfer_t xfer_mode; /* I/O transfer mode */
herr_t ret_value = SUCCEED;
FUNC_ENTER_NOAPI_NOINIT
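
The transfer mode read from the API context here originates in the application's dataset-transfer property list. A typical way to request it through the public API (parallel build assumed):

    #include "hdf5.h"

    /* Build a transfer property list that asks for collective MPI-IO.
     * Pass the result to H5Dread()/H5Dwrite(), then H5Pclose() it. */
    static hid_t make_collective_dxpl(void)
    {
        hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

        if (H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE) < 0) {
            H5Pclose(dxpl);
            return -1;
        }
        return dxpl;
    }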
diff --git a/src/H5FDmulti.c b/src/H5FDmulti.c
index 8ae23d2..aa1b118 100644
--- a/src/H5FDmulti.c
+++ b/src/H5FDmulti.c
@@ -263,7 +263,7 @@ H5FD_multi_term(void)
/*-------------------------------------------------------------------------
* Function: H5Pset_fapl_split
*
- * Purpose: Compatability function. Makes the multi driver act like the
+ * Purpose: Compatibility function. Makes the multi driver act like the
* old split driver which stored meta data in one file and raw
* data in another file.
*
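
In application code the compatibility shim is a single call on a file-access property list; e.g., for a pair of files name.meta/name.raw:

    #include "hdf5.h"

    /* Open an old split-driver file pair: "<name>.meta" + "<name>.raw" */
    static hid_t open_split(const char *name)
    {
        hid_t fid;
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

        H5Pset_fapl_split(fapl, ".meta", H5P_DEFAULT, ".raw", H5P_DEFAULT);
        fid = H5Fopen(name, H5F_ACC_RDONLY, fapl);
        H5Pclose(fapl);
        return fid;
    }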
diff --git a/src/H5FL.c b/src/H5FL.c
index 0e67414..89a580a 100644
--- a/src/H5FL.c
+++ b/src/H5FL.c
@@ -18,7 +18,7 @@
* Purpose: Manage priority queues of free-lists (of blocks of bytes).
* These are used in various places in the library which allocate and
 * free different-sized blocks of bytes repeatedly. Usually the same size
- * of block is allocated and freed repeatly in a loop, while writing out
+ * of block is allocated and freed repeatedly in a loop, while writing out
* chunked data for example, but the blocks may also be of different sizes
* from different datasets and an attempt is made to optimize access to
* the proper free list of blocks by using these priority queues to
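
Stripped of the size classes, tracking, and garbage-collection limits, the core trick is a push/pop list of recycled blocks in front of malloc. A toy single-size version for illustration:

    #include <stdlib.h>

    #define BLK_SIZE 256            /* one illustrative size class */

    typedef struct fl_node { struct fl_node *next; } fl_node_t;
    static fl_node_t *fl_head = NULL;

    static void *fl_alloc(void)
    {
        if (fl_head) {              /* reuse a recycled block */
            fl_node_t *n = fl_head;
            fl_head = n->next;
            return n;
        }
        return malloc(BLK_SIZE);    /* otherwise fall back to malloc */
    }

    static void fl_free(void *p)
    {
        fl_node_t *n = (fl_node_t *)p;
        n->next = fl_head;          /* push back for later reuse */
        fl_head = n;
    }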
@@ -499,7 +499,7 @@ H5FL_reg_calloc(H5FL_reg_head_t *head H5FL_TRACK_PARAMS)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
/* Clear to zeros */
- /* (Accomodate tracking information, if present) */
+ /* (Accommodate tracking information, if present) */
HDmemset(ret_value,0,head->size - H5FL_TRACK_SIZE);
done:
@@ -2220,7 +2220,7 @@ H5FL_fac_calloc(H5FL_fac_head_t *head H5FL_TRACK_PARAMS)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
/* Clear to zeros */
- /* (Accomodate tracking information, if present) */
+ /* (Accommodate tracking information, if present) */
HDmemset(ret_value,0,head->size - H5FL_TRACK_SIZE);
done:
diff --git a/src/H5FS.c b/src/H5FS.c
index e1a760f..113c8ae 100644
--- a/src/H5FS.c
+++ b/src/H5FS.c
@@ -135,7 +135,7 @@ HDfprintf(stderr, "%s: Creating free space manager, nclasses = %Zu\n", FUNC, ncl
fspace->alignment = alignment;
fspace->align_thres = threshold;
- /* Check if the free space tracker is supposed to be persistant */
+ /* Check if the free space tracker is supposed to be persistent */
if(fs_addr) {
/* Allocate space for the free space header */
if(HADDR_UNDEF == (fspace->addr = H5MF_alloc(f, H5FD_MEM_FSPACE_HDR, (hsize_t)fspace->hdr_size)))
@@ -423,7 +423,7 @@ HDfprintf(stderr, "%s: fspace->tot_sect_count = %Hu, fspace->serial_sect_count =
HDfprintf(stderr, "%s: fspace->alloc_sect_size = %Hu, fspace->sect_size = %Hu\n", FUNC, fspace->alloc_sect_size, fspace->sect_size);
#endif /* H5FS_DEBUG */
/* If there are sections to serialize, update them */
- /* (if the free space manager is persistant) */
+ /* (if the free space manager is persistent) */
if(fspace->serial_sect_count > 0 && H5F_addr_defined(fspace->addr)) {
#ifdef H5FS_DEBUG
HDfprintf(stderr, "%s: Real sections to store in file\n", FUNC);
@@ -794,7 +794,7 @@ HDfprintf(stderr, "%s: Marking free space header as dirty\n", FUNC);
/* Sanity check */
HDassert(fspace);
- /* Check if the free space manager is persistant */
+ /* Check if the free space manager is persistent */
if(H5F_addr_defined(fspace->addr))
/* Mark header as dirty in cache */
if(H5AC_mark_entry_dirty(fspace) < 0)
@@ -883,7 +883,7 @@ H5FS_alloc_sect(H5F_t *f, H5FS_t *fspace)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTINIT, FAIL, "can't add free space sections to cache")
/* Since space has been allocated for the section info and the sinfo
- * has been inserted into the cache, relinquish owership (i.e. float)
+ * has been inserted into the cache, relinquish ownership (i.e. float)
* the section info.
*/
fspace->sinfo = NULL;
diff --git a/src/H5FScache.c b/src/H5FScache.c
index 875a383..fa04ba1 100644
--- a/src/H5FScache.c
+++ b/src/H5FScache.c
@@ -457,7 +457,7 @@ H5FS__cache_hdr_pre_serialize(H5F_t *f, void *_thing,
* H5F_addr_defined(fspace->addr)
*
 * will both be TRUE. If this condition does not hold, then
- * either the free space info is not persistant
+ * either the free space info is not persistent
* (!H5F_addr_defined(fspace->addr)???) or the section info
* contains no free space data that must be written to file
* ( fspace->serial_sect_count == 0 ).
@@ -487,7 +487,7 @@ H5FS__cache_hdr_pre_serialize(H5F_t *f, void *_thing,
*
* Case 1) If either fspace->serial_sect_count == 0 or
* ! H5F_addr_defined(fspace->addr) do nothing as either
- * the free space manager data is not persistant, or the
+ * the free space manager data is not persistent, or the
* section info is empty.
*
* Otherwise, allocate space for the section info in real
@@ -1454,7 +1454,7 @@ H5FS__sinfo_serialize_sect_cb(void *_item, void H5_ATTR_UNUSED *key, void *_udat
/* Call 'serialize' callback for this section */
if(sect_cls->serialize) {
if((*sect_cls->serialize)(sect_cls, sect, *udata->image) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTSERIALIZE, FAIL, "can't syncronize section")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTSERIALIZE, FAIL, "can't synchronize section")
/* Update offset in serialization buffer */
(*udata->image) += sect_cls->serial_size;
diff --git a/src/H5FSsection.c b/src/H5FSsection.c
index 11cd722..a58347f 100644
--- a/src/H5FSsection.c
+++ b/src/H5FSsection.c
@@ -2432,7 +2432,7 @@ done:
* returns to 1) above.
*
* Similarly, if it allocates space for its own header, it
- * can go into an infinte loop as it:
+ * can go into an infinite loop as it:
*
* 1) allocates space for the header
*
@@ -2479,7 +2479,7 @@ done:
* enabled when the free space managers are read. To allow
* for this, we must ensure that space allocated for the
* free space manager header and section info is either larger
- * than a page, or resides completely withing a page.
+ * than a page, or resides completely within a page.
*
* Do this by allocating space for the free space header and
* section info starting at page boundaries, and extending
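
Placing the header and section info on page boundaries and extending them to whole pages is plain rounding; for instance:

    #include <stddef.h>

    /* Round a byte count up to a whole number of pages
     * (works for any pgsz, power of two or not). */
    static size_t round_to_pages(size_t size, size_t pgsz)
    {
        return ((size + pgsz - 1) / pgsz) * pgsz;
    }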
@@ -2532,7 +2532,7 @@ H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace,
HDassert(fspace->sect_addr == HADDR_UNDEF);
HDassert(fspace->alloc_sect_size == 0);
- /* persistant free space managers must be enabled */
+ /* persistent free space managers must be enabled */
HDassert(f->shared->fs_persist);
/* At present, all free space strategies enable the free space managers.
@@ -2576,7 +2576,7 @@ H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace,
f, hdr_alloc_size, &eoa_frag_addr, &eoa_frag_size)))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTALLOC, FAIL, "can't allocate file space for hdr")
- /* if the file alignement is 1, there should be no
+ /* if the file alignment is 1, there should be no
* eoa fragment. Otherwise, drop any fragment on the floor.
*/
HDassert((eoa_frag_size == 0) || (f->shared->alignment != 1));
@@ -2616,7 +2616,7 @@ H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace,
f, sinfo_alloc_size, &eoa_frag_addr, &eoa_frag_size)))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTALLOC, FAIL, "can't allocate file space")
- /* if the file alignement is 1, there should be no
+ /* if the file alignment is 1, there should be no
* eoa fragment. Otherwise, drop the fragment on the floor.
*/
HDassert((eoa_frag_size == 0) || (f->shared->alignment != 1));
@@ -2637,7 +2637,7 @@ H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace,
* On reflection, no.
*
* On a regular file close, any eviction will not change the
- * the contents of the free space manger(s), as all entries
+ * contents of the free space manager(s), as all entries
* should have correct file space allocated by the time this
* function is called.
*
@@ -2658,7 +2658,7 @@ H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace,
HGOTO_ERROR(H5E_FSPACE, H5E_CANTMARKDIRTY, FAIL, "unable to mark free space header as dirty")
/* since space has been allocated for the section info and the sinfo
- * has been inserted into the cache, relinquish owership (i.e. float)
+ * has been inserted into the cache, relinquish ownership (i.e. float)
* the section info.
*/
fspace->sinfo = NULL;
diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h
index f815005..2ab41de 100644
--- a/src/H5Fpkg.h
+++ b/src/H5Fpkg.h
@@ -321,7 +321,7 @@ struct H5F_file_t {
haddr_t fs_addr[H5F_MEM_PAGE_NTYPES]; /* Address of free space manager info for each type */
H5FS_t *fs_man[H5F_MEM_PAGE_NTYPES]; /* Free space manager for each file space type */
hbool_t first_alloc_dealloc; /* TRUE iff free space managers */
- /* are persistant and have not */
+ /* are persistent and have not */
 /* been accessed for either */
/* allocation or deallocation */
/* since file open. */
diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h
index f06d4d0..979ba7d 100644
--- a/src/H5Fprivate.h
+++ b/src/H5Fprivate.h
@@ -557,7 +557,7 @@ typedef struct H5F_t H5F_t;
#define H5F_FILE_SPACE_PAGE_SIZE_DEF 4096
/* For paged aggregation: minimum value for file space page size */
#define H5F_FILE_SPACE_PAGE_SIZE_MIN 512
-/* For paged aggregation: maxiumum value for file space page size: 1 gigabyte */
+/* For paged aggregation: maximum value for file space page size: 1 gigabyte */
#define H5F_FILE_SPACE_PAGE_SIZE_MAX 1024*1024*1024
/* For paged aggregation: drop free-space with size <= this threshold for small meta section */
diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c
index 97a6d6f..9339b3d 100644
--- a/src/H5Fsuper.c
+++ b/src/H5Fsuper.c
@@ -823,13 +823,13 @@ H5F__super_read(H5F_t *f, H5P_genplist_t *fa_plist, hbool_t initial_read)
f->shared->eoa_pre_fsm_fsalloc = fsinfo.eoa_pre_fsm_fsalloc;
/* f->shared->eoa_pre_fsm_fsalloc must always be HADDR_UNDEF
- * in the absence of persistant free space managers.
+ * in the absence of persistent free space managers.
*/
/* If the following two conditions are true:
* (1) skipping EOF check (skip_eof_check)
* (2) dropping free-space to the floor (null_fsm_addr)
* skip the asserts as "eoa_pre_fsm_fsalloc" may be undefined
- * for a crashed file with persistant free space managers.
+ * for a crashed file with persistent free space managers.
* #1 and #2 are enabled when the tool h5clear --increment
* option is used.
*/
@@ -839,7 +839,7 @@ H5F__super_read(H5F_t *f, H5P_genplist_t *fa_plist, hbool_t initial_read)
} /* end if */
/* As "eoa_pre_fsm_fsalloc" may be undefined for a crashed file
- * with persistant free space managers, therefore, set
+ * with persistent free space managers, set
* "first_alloc_dealloc" when the condition
* "dropping free-space to the floor is true.
* This will ensure that no action is done to settle things on file
diff --git a/src/H5Gname.c b/src/H5Gname.c
index 1ca06b9..c1156ca 100644
--- a/src/H5Gname.c
+++ b/src/H5Gname.c
@@ -1058,7 +1058,7 @@ done:
* Function: H5G_name_replace
*
* Purpose: Search the list of open IDs and replace names according to a
- * particular operation. The operation occured on the
+ * particular operation. The operation occurred on the
* SRC_FILE/SRC_FULL_PATH_R object. The new name (if there is
* one) is NEW_NAME_R. Additional entry location information
* (currently only needed for the 'move' operation) is passed in
diff --git a/src/H5Gobj.c b/src/H5Gobj.c
index af0e72c..e93896c 100644
--- a/src/H5Gobj.c
+++ b/src/H5Gobj.c
@@ -222,7 +222,7 @@ H5G__obj_create_real(H5F_t *f, const H5O_ginfo_t *ginfo,
size_t pline_size = 0; /* Size of the pipeline message */
size_t link_size; /* Size of a link message */
- /* Calculate message size infomation, for creating group's object header */
+ /* Calculate message size information, for creating group's object header */
linfo_size = H5O_msg_size_f(f, gcpl_id, H5O_LINFO_ID, linfo, (size_t)0);
HDassert(linfo_size);
diff --git a/src/H5HFcache.c b/src/H5HFcache.c
index 84d5c5f..0c5d3aa 100644
--- a/src/H5HFcache.c
+++ b/src/H5HFcache.c
@@ -702,12 +702,12 @@ H5HF__cache_hdr_pre_serialize(H5F_t *f, void *_thing, haddr_t addr, size_t len,
*
* Do this with a call to H5HF__cache_verify_hdr_descendants_clean().
*
- * Note that decendants need not be clean if the pre_serialize call
+ * Note that descendants need not be clean if the pre_serialize call
* is made during a cache serialization instead of an entry or cache
* flush.
*
* Note also that with the recent change in the definition of flush
- * dependency, not all decendants need be clean -- only direct flush
+ * dependency, not all descendants need be clean -- only direct flush
* dependency children.
*
* Finally, observe that the H5HF__cache_verify_hdr_descendants_clean()
@@ -2643,11 +2643,11 @@ H5HF__cache_dblock_fsf_size(const void *_thing, hsize_t *fsf_size)
*
* The implementation of flush dependencies has been changed.
* Prior to this change, a flush dependency parent could be
- * flushed if and only if all its flush dependency decendants
+ * flushed if and only if all its flush dependency descendants
* were clean. In the new definition, a flush dependency
* parent can be flushed if all its immediate flush dependency
* children are clean, regardless of any other dirty
- * decendants.
+ * descendants.
*
* Further, metadata cache entries are now allowed to have
* multiple flush dependency parents.
@@ -2815,7 +2815,7 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
H5_BEGIN_TAG(hdr->heap_addr)
if(NULL == (root_iblock = (H5HF_indirect_t *)H5AC_protect(f, H5AC_FHEAP_IBLOCK, root_iblock_addr, NULL, H5AC__READ_ONLY_FLAG)))
- HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() faild.")
+ HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() failed.")
H5_END_TAG
@@ -2889,7 +2889,7 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
H5_BEGIN_TAG(hdr->heap_addr)
if(NULL == (iblock = (H5HF_indirect_t *)H5AC_protect(f, H5AC_FHEAP_IBLOCK, root_iblock_addr, NULL, H5AC__READ_ONLY_FLAG)))
- HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() faild.")
+ HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() failed.")
H5_END_TAG
@@ -2912,7 +2912,7 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
if(unprotect_root_iblock) {
HDassert(root_iblock);
if(H5AC_unprotect(f, H5AC_FHEAP_IBLOCK, root_iblock_addr, root_iblock, H5AC__NO_FLAGS_SET) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "H5AC_unprotect() faild.")
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "H5AC_unprotect() failed.")
} /* end if */
} /* end else */
} /* end if */
@@ -2982,7 +2982,7 @@ done:
* Function: H5HF__cache_verify_iblock_descendants_clean
*
* Purpose: Sanity checking routine that verifies that all indirect
- * and direct blocks that are decendents of the supplied
+ * and direct blocks that are descendants of the supplied
* instance of H5HF_indirect_t are clean. Set *clean
* to TRUE if this is the case, and to FALSE otherwise.
*
@@ -3009,11 +3009,11 @@ done:
*
* The implementation of flush dependencies has been changed.
* Prior to this change, a flush dependency parent could be
- * flushed if and only if all its flush dependency decendants
+ * flushed if and only if all its flush dependency descendants
* were clean. In the new definition, a flush dependency
* parent can be flushed if all its immediate flush dependency
* children are clean, regardless of any other dirty
- * decendants.
+ * descendants.
*
* Further, metadata cache entries are now allowed to have
* multiple flush dependency parents.
@@ -3126,11 +3126,11 @@ done:
*
* The implementation of flush dependencies has been changed.
* Prior to this change, a flush dependency parent could be
- * flushed if and only if all its flush dependency decendants
+ * flushed if and only if all its flush dependency descendants
* were clean. In the new definition, a flush dependency
* parent can be flushed if all its immediate flush dependency
* children are clean, regardless of any other dirty
- * decendants.
+ * descendants.
*
* Further, metadata cache entries are now allowed to have
* multiple flush dependency parents.
@@ -3290,11 +3290,11 @@ done:
*
* The implementation of flush dependencies has been changed.
* Prior to this change, a flush dependency parent could be
- * flushed if and only if all its flush dependency decendants
+ * flushed if and only if all its flush dependency descendants
* were clean. In the new definition, a flush dependency
* parent can be flushed if all its immediate flush dependency
* children are clean, regardless of any other dirty
- * decendants.
+ * descendants.
*
* Further, metadata cache entries are now allowed to have
* multiple flush dependency parents.
@@ -3468,7 +3468,7 @@ H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, haddr_t fd_parent_addr,
H5_BEGIN_TAG(iblock->hdr->heap_addr)
if(NULL == (child_iblock = (H5HF_indirect_t *) H5AC_protect(f, H5AC_FHEAP_IBLOCK, child_iblock_addr, NULL, H5AC__READ_ONLY_FLAG)))
- HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() faild.")
+ HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() failed.")
H5_END_TAG
@@ -3480,7 +3480,7 @@ H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, haddr_t fd_parent_addr,
/* pointer to the entry. This is very slimy -- */
/* come up with a better solution. */
if(H5AC_get_entry_ptr_from_addr(f, child_iblock_addr, (void **)(&child_iblock)) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "H5AC_get_entry_ptr_from_addr() faild.")
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "H5AC_get_entry_ptr_from_addr() failed.")
HDassert(child_iblock);
} /* end else */
} /* end if */
@@ -3519,7 +3519,7 @@ H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, haddr_t fd_parent_addr,
/* if we protected the child iblock, unprotect it now */
if(unprotect_child_iblock) {
if(H5AC_unprotect(f, H5AC_FHEAP_IBLOCK, child_iblock_addr, child_iblock, H5AC__NO_FLAGS_SET) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "H5AC_unprotect() faild.")
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "H5AC_unprotect() failed.")
} /* end if */
} /* end if */
} /* end if */
diff --git a/src/H5HFdtable.c b/src/H5HFdtable.c
index 563e8c5..6e9429a 100644
--- a/src/H5HFdtable.c
+++ b/src/H5HFdtable.c
@@ -338,7 +338,7 @@ HDfprintf(stderr, "%s: end_row = %u, end_col = %u, end_entry = %u\n", "H5HF_sect
/* Check for multi-row span */
if(start_row != end_row) {
- /* Accomodate partial starting row */
+ /* Accommodate partial starting row */
if(start_col > 0) {
acc_span_size = dtable->row_block_size[start_row] *
(dtable->cparam.width - start_col);
@@ -352,7 +352,7 @@ HDfprintf(stderr, "%s: end_row = %u, end_col = %u, end_entry = %u\n", "H5HF_sect
start_row++;
} /* end while */
- /* Accomodate partial ending row */
+ /* Accommodate partial ending row */
acc_span_size += dtable->row_block_size[start_row] *
(end_col + 1);
} /* end if */
diff --git a/src/H5HFhdr.c b/src/H5HFhdr.c
index b1b8574..8166d88 100644
--- a/src/H5HFhdr.c
+++ b/src/H5HFhdr.c
@@ -46,13 +46,13 @@
/* Limit on the size of the max. direct block size */
/* (This is limited to 32-bits currently, because I think it's unlikely to
* need to be larger, the 32-bit limit for H5VM_log2_of2(n), and
- * some offsets/sizes are encoded with a maxiumum of 32-bits - QAK)
+ * some offsets/sizes are encoded with a maximum of 32-bits - QAK)
*/
#define H5HF_MAX_DIRECT_SIZE_LIMIT ((hsize_t)2 * 1024 * 1024 * 1024)
/* Limit on the width of the doubling table */
/* (This is limited to 16-bits currently, because I think it's unlikely to
- * need to be larger, and its encoded with a maxiumum of 16-bits - QAK)
+ * need to be larger, and it's encoded with a maximum of 16-bits - QAK)
*/
#define H5HF_WIDTH_LIMIT (64 * 1024)
#endif /* NDEBUG */
diff --git a/src/H5HFspace.c b/src/H5HFspace.c
index 5d659a6..5260ae2 100644
--- a/src/H5HFspace.c
+++ b/src/H5HFspace.c
@@ -311,7 +311,7 @@ H5HF__space_revert_root(const H5HF_hdr_t *hdr)
/* Only need to scan the sections if the free space has been initialized */
if(hdr->fspace)
- /* Iterate over all sections, reseting the parent pointers in 'single' sections */
+ /* Iterate over all sections, resetting the parent pointers in 'single' sections */
if(H5FS_sect_iterate(hdr->f, hdr->fspace, H5HF_space_revert_root_cb, NULL) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_BADITER, FAIL, "can't iterate over sections to reset parent pointers")
diff --git a/src/H5HFtiny.c b/src/H5HFtiny.c
index 79462e9..5cf1c08 100644
--- a/src/H5HFtiny.c
+++ b/src/H5HFtiny.c
@@ -109,7 +109,7 @@ H5HF_tiny_init(H5HF_hdr_t *hdr)
/* Check if tiny objects need an extra byte for their length */
/* (account for boundary condition when length of an object would need an
* extra byte, but using that byte means that the extra length byte is
- * unneccessary)
+ * unnecessary)
*/
if((hdr->id_len - 1) <= H5HF_TINY_LEN_SHORT) {
hdr->tiny_max_len = hdr->id_len - 1;
diff --git a/src/H5HGcache.c b/src/H5HGcache.c
index 50b2669..3a770ae 100644
--- a/src/H5HGcache.c
+++ b/src/H5HGcache.c
@@ -300,7 +300,7 @@ H5HG__cache_heap_deserialize(const void *_image, size_t len, void *_udata,
image += heap->obj[0].size;
} /* end if */
else {
- size_t need;
+ size_t need = 0;
unsigned idx;
uint8_t *begin = image;
@@ -420,7 +420,7 @@ H5HG__cache_heap_image_len(const void *_thing, size_t *image_len)
*
* Purpose: Given an appropriately sized buffer and an instance of
* H5HG_heap_t, serialize the global heap for writing to file,
- * and copy the serialized verion into the buffer.
+ * and copy the serialized version into the buffer.
*
*
* Return: Success: SUCCEED
diff --git a/src/H5MF.c b/src/H5MF.c
index 64e91c8..2f60080 100644
--- a/src/H5MF.c
+++ b/src/H5MF.c
@@ -1692,7 +1692,7 @@ HDfprintf(stderr, "%s: Entering\n", FUNC);
} /* end if */
/* Set the ring type in the API context. In most cases, we will
- * need H5AC_RING_RDFSM, so initialy set the ring in
+ * need H5AC_RING_RDFSM, so initially set the ring in
* the context to that value. We will alter this later if needed.
*/
H5AC_set_ring(H5AC_RING_RDFSM, &orig_ring);
@@ -1788,7 +1788,7 @@ HDfprintf(stderr, "%s: Entering\n", FUNC);
HDassert(f->shared->sblock);
/* Set the ring type in the API context. In most cases, we will
- * need H5AC_RING_RDFSM, so initialy set the ring in
+ * need H5AC_RING_RDFSM, so initially set the ring in
* the context to that value. We will alter this later if needed.
*/
H5AC_set_ring(H5AC_RING_RDFSM, &orig_ring);
@@ -1956,7 +1956,7 @@ HDfprintf(stderr, "%s: Entering\n", FUNC);
HDassert(f->shared->sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_2);
/* Set the ring type in the API context. In most cases, we will
- * need H5AC_RING_RDFSM, so initialy set the ring in
+ * need H5AC_RING_RDFSM, so initially set the ring in
* the context to that value. We will alter this later if needed.
*/
H5AC_set_ring(H5AC_RING_RDFSM, &orig_ring);
@@ -2244,7 +2244,7 @@ H5MF_get_freespace(H5F_t *f, hsize_t *tot_space, hsize_t *meta_size)
HDassert(f->shared->lf);
/* Set the ring type in the API context. In most cases, we will
- * need H5AC_RING_RDFSM, so initialy set the ring in
+ * need H5AC_RING_RDFSM, so initially set the ring in
* the context to that value. We will alter this later if needed.
*/
H5AC_set_ring(H5AC_RING_RDFSM, &orig_ring);
@@ -2419,7 +2419,7 @@ H5MF_get_free_sections(H5F_t *f, H5FD_mem_t type, size_t nsects, H5F_sect_info_t
sect_udata.sect_idx = 0;
/* Set the ring type in the API context. In most cases, we will
- * need H5AC_RING_RDFSM, so initialy set the ring in
+ * need H5AC_RING_RDFSM, so initially set the ring in
* the context to that value. We will alter this later if needed.
*/
H5AC_set_ring(H5AC_RING_RDFSM, &orig_ring);
@@ -2712,7 +2712,7 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hbool_t *fsm_settled)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "can't free aggregators")
/* Set the ring type in the DXPL. In most cases, we will
- * need H5AC_RING_MDFSM first, so initialy set the ring in
+ * need H5AC_RING_MDFSM first, so initially set the ring in
* the DXPL to that value. We will alter this later if
* needed.
*/
@@ -2829,7 +2829,7 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hbool_t *fsm_settled)
* extension messages will choke if the target message is
* unexpectedly either absent or present.
*
- * Update: This is probably unecessary, as I gather that the
+ * Update: This is probably unnecessary, as I gather that the
* file space manager info message is guaranteed to exist.
* Leave it in for now, but consider removing it.
*/
@@ -3083,7 +3083,7 @@ done:
* of these free space manager(s) only to have the allocation
* result in the free space manager being empty and thus
* obliging us to free the space again. Thus there is the
- * potential for an infinte loop if we want to avoid saving
+ * potential for an infinite loop if we want to avoid saving
* empty free space managers.
*
* Similarly, it is possible that we could allocate space
@@ -3280,7 +3280,7 @@ H5MF_settle_meta_data_fsm(H5F_t *f, hbool_t *fsm_settled)
* extension message.
*
* As of this writing, we are solving this problem by
- * simply not supporting persistant FSMs with the split
+ * simply not supporting persistent FSMs with the split
* and multi file drivers.
*
* Current plans are to do away with the multi file
@@ -3296,7 +3296,7 @@ H5MF_settle_meta_data_fsm(H5F_t *f, hbool_t *fsm_settled)
/* ******************* PROBLEM: ********************
*
- * If the file has an alignement other than 1, and if
+ * If the file has an alignment other than 1, and if
 * the EOA is not a multiple of this alignment, allocating space
* for the section via the VFD info has the potential of generating
* a fragment that will be added to the free space manager. This
@@ -3433,7 +3433,7 @@ H5MF__fsm_type_is_self_referential(H5F_t *f, H5F_mem_page_t fsm_type)
/* In principle, fsm_type should always be less than
* H5F_MEM_PAGE_LARGE_SUPER whenever paged aggregation
* is not enabled. However, since there is code that does
- * not observe this prinicple, force the result to FALSE if
+ * not observe this principle, force the result to FALSE if
* fsm_type is greater than or equal to H5F_MEM_PAGE_LARGE_SUPER.
*/
if(fsm_type >= H5F_MEM_PAGE_LARGE_SUPER)
diff --git a/src/H5MFaggr.c b/src/H5MFaggr.c
index 5ab1834..3db7f73 100644
--- a/src/H5MFaggr.c
+++ b/src/H5MFaggr.c
@@ -407,7 +407,7 @@ H5MF__aggr_try_extend(H5F_t *f, H5F_blk_aggr_t *aggr,
if(f->shared->feature_flags & aggr->feature_flag) {
/*
* If the block being tested adjoins the beginning of the aggregator
- * block, check if the aggregator can accomodate the extension.
+ * block, check if the aggregator can accommodate the extension.
*/
if(H5F_addr_eq(blk_end, aggr->addr)) {
haddr_t eoa; /* EOA for the file */
diff --git a/src/H5O.c b/src/H5O.c
index c5db814..9b832f1 100644
--- a/src/H5O.c
+++ b/src/H5O.c
@@ -946,7 +946,7 @@ done:
* Function: H5Odisable_mdc_flushes
*
* Purpose: To "cork" an object:
- * --keep dirty entries assoicated with the object in the metadata cache
+ * --keep dirty entries associated with the object in the metadata cache
*
* Return: Success: Non-negative
* Failure: Negative
diff --git a/src/H5Oattr.c b/src/H5Oattr.c
index f2c31d7..a62a3a3 100644
--- a/src/H5Oattr.c
+++ b/src/H5Oattr.c
@@ -249,7 +249,7 @@ done:
if(NULL == ret_value)
if(attr) {
if(attr->shared) {
- /* Free any dynamicly allocated items */
+ /* Free any dynamically allocated items */
if(H5A__free(attr) < 0)
HDONE_ERROR(H5E_ATTR, H5E_CANTRELEASE, NULL, "can't release attribute info")
diff --git a/src/H5Oattribute.c b/src/H5Oattribute.c
index 9d125e8..e8a8433 100644
--- a/src/H5Oattribute.c
+++ b/src/H5Oattribute.c
@@ -1093,7 +1093,7 @@ H5O_attr_rename_mod_cb(H5O_t *oh, H5O_mesg_t *mesg/*in,out*/,
mesg->native = NULL;
/* Delete old attribute */
- /* (doesn't decrement the link count on shared components becuase
+ /* (doesn't decrement the link count on shared components because
* the "native" pointer has been reset)
*/
if(H5O_release_mesg(udata->f, oh, mesg, FALSE) < 0)
diff --git a/src/H5Ocache.c b/src/H5Ocache.c
index 3607839..d65942b 100644
--- a/src/H5Ocache.c
+++ b/src/H5Ocache.c
@@ -1430,9 +1430,10 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image
/* Check for combining two adjacent 'null' messages */
if((udata->file_intent & H5F_ACC_RDWR) &&
- H5O_NULL_ID == id && oh->nmesgs > 0 &&
- H5O_NULL_ID == oh->mesg[oh->nmesgs - 1].type->id &&
- oh->mesg[oh->nmesgs - 1].chunkno == chunkno) {
+ H5O_NULL_ID == id && oh->nmesgs > 0 &&
+ H5O_NULL_ID == oh->mesg[oh->nmesgs - 1].type->id &&
+ oh->mesg[oh->nmesgs - 1].chunkno == chunkno) {
+
size_t mesgno; /* Current message to operate on */
/* Combine adjacent null messages */
@@ -1467,13 +1468,13 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image
/* Point unknown messages at 'unknown' message class */
/* (Usually from future versions of the library) */
- if(id >= H5O_UNKNOWN_ID ||
+ if(id >= H5O_UNKNOWN_ID ||
#ifdef H5O_ENABLE_BOGUS
- id == H5O_BOGUS_VALID_ID ||
+ id == H5O_BOGUS_VALID_ID ||
#endif
- NULL == H5O_msg_class_g[id]) {
+ NULL == H5O_msg_class_g[id]) {
- H5O_unknown_t *unknown; /* Pointer to "unknown" message info */
+ H5O_unknown_t *unknown; /* Pointer to "unknown" message info */
/* Allocate "unknown" message info */
if(NULL == (unknown = H5FL_MALLOC(H5O_unknown_t)))
@@ -1490,9 +1491,9 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image
/* Check for "fail if unknown" message flags */
if(((udata->file_intent & H5F_ACC_RDWR) &&
- (flags & H5O_MSG_FLAG_FAIL_IF_UNKNOWN_AND_OPEN_FOR_WRITE))
- || (flags & H5O_MSG_FLAG_FAIL_IF_UNKNOWN_ALWAYS))
- HGOTO_ERROR(H5E_OHDR, H5E_BADMESG, FAIL, "unknown message with 'fail if unknown' flag found")
+ (flags & H5O_MSG_FLAG_FAIL_IF_UNKNOWN_AND_OPEN_FOR_WRITE))
+ || (flags & H5O_MSG_FLAG_FAIL_IF_UNKNOWN_ALWAYS))
+ HGOTO_ERROR(H5E_OHDR, H5E_BADMESG, FAIL, "unknown message with 'fail if unknown' flag found")
/* Check for "mark if unknown" message flag, etc. */
else if((flags & H5O_MSG_FLAG_MARK_IF_UNKNOWN) &&
!(flags & H5O_MSG_FLAG_WAS_UNKNOWN) &&
@@ -1543,7 +1544,8 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image
H5O_refcount_t *refcount;
/* Decode ref. count message */
- HDassert(oh->version > H5O_VERSION_1);
+ if(oh->version <= H5O_VERSION_1)
+ HGOTO_ERROR(H5E_OHDR, H5E_VERSION, FAIL, "object header version does not support reference count message")
refcount = (H5O_refcount_t *)(H5O_MSG_REFCOUNT->decode)(udata->f, NULL, 0, &ioflags, mesg->raw_size, mesg->raw);
/* Save 'native' form of ref. count message */
@@ -1614,6 +1616,10 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image
} /* end if */
done:
+ if(ret_value < 0 && udata->cont_msg_info->msgs) {
+ udata->cont_msg_info->msgs = (H5O_cont_t *)H5FL_SEQ_FREE(H5O_cont_t, udata->cont_msg_info->msgs);
+ udata->cont_msg_info->alloc_nmsgs = 0;
+ } /* end if */
FUNC_LEAVE_NOAPI(ret_value)
} /* H5O__chunk_deserialize() */
diff --git a/src/H5Ocache_image.c b/src/H5Ocache_image.c
index ad64297..591ac4a 100644
--- a/src/H5Ocache_image.c
+++ b/src/H5Ocache_image.c
@@ -313,7 +313,7 @@ H5O__mdci_delete(H5F_t *f, H5O_t *open_oh, void *_mesg)
* space allocations / deallocations prior to the free of the
* cache image. Verify this to the extent possible.
*
- * If the hack to work around the persistant self referential
+ * If the hack to work around the persistent self referential
* free space manager issue is NOT in use, just call H5MF_xfree()
 * to release the cache image. In principle, we should be able
* to just reduce the EOA to the base address of the cache
diff --git a/src/H5Ocopy.c b/src/H5Ocopy.c
index e6865f1..2e628f4 100644
--- a/src/H5Ocopy.c
+++ b/src/H5Ocopy.c
@@ -1106,7 +1106,7 @@ H5O__copy_header(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out */,
HDassert(H5F_addr_defined(oloc_src->addr));
HDassert(oloc_dst->file);
- /* Intialize copy info before errors can be thrown */
+ /* Initialize copy info before errors can be thrown */
HDmemset(&cpy_info, 0, sizeof(H5O_copy_t));
/* Get the copy property list */
diff --git a/src/H5Ofill.c b/src/H5Ofill.c
index 932241f..da9829b 100644
--- a/src/H5Ofill.c
+++ b/src/H5Ofill.c
@@ -307,11 +307,13 @@ done:
*-------------------------------------------------------------------------
*/
static void *
-H5O_fill_old_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh,
+H5O_fill_old_decode(H5F_t *f, H5O_t *open_oh,
unsigned H5_ATTR_UNUSED mesg_flags, unsigned H5_ATTR_UNUSED *ioflags,
size_t H5_ATTR_UNUSED p_size, const uint8_t *p)
{
H5O_fill_t *fill = NULL; /* Decoded fill value message */
+ htri_t exists = FALSE;
+ H5T_t *dt = NULL;
void *ret_value = NULL; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
@@ -332,6 +334,19 @@ H5O_fill_old_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh,
/* Only decode the fill value itself if there is one */
if(fill->size > 0) {
+ H5_CHECK_OVERFLOW(fill->size, ssize_t, size_t);
+
+ /* Get the datatype message */
+ if((exists = H5O_msg_exists_oh(open_oh, H5O_DTYPE_ID)) < 0)
+ HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, NULL, "unable to read object header")
+ if(exists) {
+ if(NULL == (dt = (H5T_t *)H5O_msg_read_oh(f, open_oh, H5O_DTYPE_ID, NULL)))
+ HGOTO_ERROR(H5E_SYM, H5E_CANTGET, NULL, "can't read DTYPE message")
+ /* Verify size */
+ if(fill->size != H5T_GET_SIZE(dt))
+ HGOTO_ERROR(H5E_SYM, H5E_CANTGET, NULL, "inconsistent fill value size")
+ }
+
if(NULL == (fill->buf = H5MM_malloc((size_t)fill->size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for fill value")
HDmemcpy(fill->buf, p, (size_t)fill->size);
@@ -344,6 +359,9 @@ H5O_fill_old_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh,
ret_value = (void*)fill;
done:
+ if(dt)
+ H5O_msg_free(H5O_DTYPE_ID, dt);
+
if(!ret_value && fill) {
if(fill->buf)
H5MM_xfree(fill->buf);
diff --git a/src/H5Olayout.c b/src/H5Olayout.c
index d8f05f0..5f16837 100644
--- a/src/H5Olayout.c
+++ b/src/H5Olayout.c
@@ -125,8 +125,8 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh,
/* Dimensionality */
ndims = *p++;
- if(ndims > H5O_LAYOUT_NDIMS)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "dimensionality is too large")
+ if(!ndims || ndims > H5O_LAYOUT_NDIMS)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "dimensionality is out of range")
/* Layout class */
mesg->type = (H5D_layout_t)*p++;
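
Rejecting zero alongside over-large values matters because the decoded count sizes the loops and reads that follow. The general pattern for any file-sourced count, sketched with illustrative names:

    #include <stddef.h>
    #include <stdint.h>

    #define MAX_NDIMS 32  /* illustrative stand-in for H5O_LAYOUT_NDIMS */

    /* Decode one byte as a dimensionality; -1 rejects truncated or
     * out-of-range input instead of trusting it. */
    static int decode_ndims(const uint8_t **pp, size_t *remaining)
    {
        uint8_t ndims;

        if (*remaining < 1)
            return -1;                    /* message too short */
        ndims = *(*pp)++;
        (*remaining)--;

        if (ndims == 0 || ndims > MAX_NDIMS)
            return -1;                    /* out of range */
        return (int)ndims;
    }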
diff --git a/src/H5Omessage.c b/src/H5Omessage.c
index 930be9b..e156dcd 100644
--- a/src/H5Omessage.c
+++ b/src/H5Omessage.c
@@ -239,7 +239,7 @@ done:
* Purpose: Modifies an existing message or creates a new message.
*
 * The UPDATE_FLAGS argument is a set of flags that allow the caller
- * to skip updating the modification time or reseting the message
+ * to skip updating the modification time or resetting the message
* data. This is useful when several calls to H5O_msg_write will be
* made in a sequence.
*
@@ -296,7 +296,7 @@ done:
* Purpose: Modifies an existing message or creates a new message.
*
 * The UPDATE_FLAGS argument is a set of flags that allow the caller
- * to skip updating the modification time or reseting the message
+ * to skip updating the modification time or resetting the message
* data. This is useful when several calls to H5O_msg_write will be
* made in a sequence.
*
@@ -343,7 +343,7 @@ done:
* Purpose: Modifies an existing message or creates a new message.
*
 * The UPDATE_FLAGS argument is a set of flags that allow the caller
- * to skip updating the modification time or reseting the message
+ * to skip updating the modification time or resetting the message
* data. This is useful when several calls to H5O_msg_write will be
* made in a sequence.
*
diff --git a/src/H5Oshared.c b/src/H5Oshared.c
index 047f90e..328ba4b 100644
--- a/src/H5Oshared.c
+++ b/src/H5Oshared.c
@@ -640,7 +640,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5O__shared_post_copy_file
*
- * Purpose: Delate a shared message and replace with a new one.
+ * Purpose: Delete a shared message and replace with a new one.
 * The function is needed in cases such as copying a shared reg_ref attribute.
* When a shared reg_ref attribute is copied from one file to
* another, the values in file need to be replaced. The only way
diff --git a/src/H5P.c b/src/H5P.c
index f163e9e..733af42 100644
--- a/src/H5P.c
+++ b/src/H5P.c
@@ -399,7 +399,7 @@ done:
GLOBAL VARIABLES
COMMENTS, BUGS, ASSUMPTIONS
The 'set' callback function may be useful to range check the value being
- set for the property or may perform some tranformation/translation of the
+ set for the property or may perform some transformation/translation of the
value set. The 'get' callback would then [probably] reverse the
transformation, etc. A single 'get' or 'set' callback could handle
multiple properties by performing different actions based on the property
@@ -581,7 +581,7 @@ done:
GLOBAL VARIABLES
COMMENTS, BUGS, ASSUMPTIONS
The 'set' callback function may be useful to range check the value being
- set for the property or may perform some tranformation/translation of the
+ set for the property or may perform some transformation/translation of the
value set. The 'get' callback would then [probably] reverse the
transformation, etc. A single 'get' or 'set' callback could handle
multiple properties by performing different actions based on the property
diff --git a/src/H5Pdcpl.c b/src/H5Pdcpl.c
index d69b063..cb13ff3 100644
--- a/src/H5Pdcpl.c
+++ b/src/H5Pdcpl.c
@@ -1332,7 +1332,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5P__dcrt_ext_file_list_get
*
- * Purpose: Copies an external file lsit property when it's retrieved from a property list
+ * Purpose: Copies an external file list property when it's retrieved from a property list
*
* Return: Success: Non-negative
* Failure: Negative
@@ -2245,7 +2245,7 @@ H5Pset_virtual(hid_t dcpl_id, hid_t vspace_id, const char *src_file_name,
done:
/* Set VDS layout information in property list */
- /* (Even on faliure, so there's not a mangled layout struct in the list) */
+ /* (Even on failure, so there's not a mangled layout struct in the list) */
if(retrieved_layout) {
if(H5P_poke(plist, H5D_CRT_LAYOUT_NAME, &virtual_layout) < 0) {
HDONE_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set layout")
diff --git a/src/H5Pdeprec.c b/src/H5Pdeprec.c
index 7f96333..226d206 100644
--- a/src/H5Pdeprec.c
+++ b/src/H5Pdeprec.c
@@ -202,7 +202,7 @@
GLOBAL VARIABLES
COMMENTS, BUGS, ASSUMPTIONS
The 'set' callback function may be useful to range check the value being
- set for the property or may perform some tranformation/translation of the
+ set for the property or may perform some transformation/translation of the
value set. The 'get' callback would then [probably] reverse the
transformation, etc. A single 'get' or 'set' callback could handle
multiple properties by performing different actions based on the property
@@ -383,7 +383,7 @@ done:
GLOBAL VARIABLES
COMMENTS, BUGS, ASSUMPTIONS
The 'set' callback function may be useful to range check the value being
- set for the property or may perform some tranformation/translation of the
+ set for the property or may perform some transformation/translation of the
value set. The 'get' callback would then [probably] reverse the
transformation, etc. A single 'get' or 'set' callback could handle
multiple properties by performing different actions based on the property
diff --git a/src/H5Pdxpl.c b/src/H5Pdxpl.c
index b56c137..e6eed32 100644
--- a/src/H5Pdxpl.c
+++ b/src/H5Pdxpl.c
@@ -1035,7 +1035,7 @@ done:
* for the type conversion buffer and background buffer and
* optionally supply pointers to application-allocated buffers.
* If the buffer size is smaller than the entire amount of data
- * being transfered between application and file, and a type
+ * being transferred between application and file, and a type
* conversion buffer or background buffer is required then
* strip mining will be used.
*
diff --git a/src/H5Pfapl.c b/src/H5Pfapl.c
index eded286..7ba9c11 100644
--- a/src/H5Pfapl.c
+++ b/src/H5Pfapl.c
@@ -396,7 +396,7 @@ static const hbool_t H5F_def_coll_md_write_flag_g = H5F_ACS_COLL_MD_WRITE_FLAG_D
static const H5AC_cache_image_config_t H5F_def_mdc_initCacheImageCfg_g = H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_DEF; /* Default metadata cache image settings */
static const size_t H5F_def_page_buf_size_g = H5F_ACS_PAGE_BUFFER_SIZE_DEF; /* Default page buffer size */
static const unsigned H5F_def_page_buf_min_meta_perc_g = H5F_ACS_PAGE_BUFFER_MIN_META_PERC_DEF; /* Default page buffer minimum metadata size */
-static const unsigned H5F_def_page_buf_min_raw_perc_g = H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_DEF; /* Default page buffer minumum raw data size */
+static const unsigned H5F_def_page_buf_min_raw_perc_g = H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_DEF; /* Default page buffer minimum raw data size */
/*-------------------------------------------------------------------------
@@ -1713,7 +1713,7 @@ H5Pget_mdc_image_config(hid_t plist_id, H5AC_cache_image_config_t *config_ptr)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Unknown image config version.")
/* If we ever support multiple versions of H5AC_cache_config_t, we
- * will have to get the cannonical version here, and then translate
+ * will have to get the canonical version here, and then translate
* to the version of the structure supplied.
*/
@@ -1808,7 +1808,7 @@ H5Pget_mdc_config(hid_t plist_id, H5AC_cache_config_t *config_ptr)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Unknown config version.")
/* If we ever support multiple versions of H5AC_cache_config_t, we
- * will have to get the cannonical version here, and then translate
+ * will have to get the canonical version here, and then translate
* to the version of the structure supplied.
*/
@@ -2322,7 +2322,7 @@ done:
* necessary to represent features used.
* (This is the "make certain to take advantage of <new feature>
* in the file format" use case (maybe <new feature> is smaller
- * or scales better than an ealier version, which would otherwise
+ * or scales better than an earlier version, which would otherwise
* be used))
*
* LOW = H5F_FORMAT_1_2_0, HIGH = H5F_FORMAT_1_6_0 => creates objects
@@ -2549,7 +2549,7 @@ H5Pset_file_image(hid_t fapl_id, void *buf_ptr, size_t buf_len)
/* validate parameters */
if(!(((buf_ptr == NULL) && (buf_len == 0)) || ((buf_ptr != NULL) && (buf_len > 0))))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "inconsistant buf_ptr and buf_len")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "inconsistent buf_ptr and buf_len")
/* Get the plist structure */
if(NULL == (fapl = H5P_object_verify(fapl_id, H5P_FILE_ACCESS)))
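
The rule being enforced: pointer and length are set together or not at all. A sketch of the usual pairing with the core VFD (the buffer is assumed to hold a complete, valid file image):

    #include "hdf5.h"

    /* Open an HDF5 file directly from a memory buffer. */
    static hid_t open_from_image(void *image, size_t len)
    {
        hid_t fid;
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

        H5Pset_fapl_core(fapl, 4096, 0);     /* in-memory, no backing store */
        H5Pset_file_image(fapl, image, len); /* both fields set together */

        fid = H5Fopen("in_core.h5", H5F_ACC_RDONLY, fapl);
        H5Pclose(fapl);
        return fid;
    }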
@@ -2652,7 +2652,7 @@ H5Pget_file_image(hid_t fapl_id, void **buf_ptr_ptr, size_t *buf_len_ptr)
if(H5P_peek(fapl, H5F_ACS_FILE_IMAGE_INFO_NAME, &image_info) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get file image info")
- /* verify file image field consistancy */
+ /* verify file image field consistency */
HDassert(((image_info.buffer != NULL) && (image_info.size > 0)) ||
((image_info.buffer == NULL) && (image_info.size == 0)));
@@ -2728,7 +2728,7 @@ H5Pset_file_image_callbacks(hid_t fapl_id, H5FD_file_image_callbacks_t *callback
if(H5P_peek(fapl, H5F_ACS_FILE_IMAGE_INFO_NAME, &info) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get old file image info")
- /* verify file image field consistancy */
+ /* verify file image field consistency */
HDassert(((info.buffer != NULL) && (info.size > 0)) ||
((info.buffer == NULL) && (info.size == 0)));
@@ -2804,7 +2804,7 @@ H5Pget_file_image_callbacks(hid_t fapl_id, H5FD_file_image_callbacks_t *callback
if(H5P_peek(fapl, H5F_ACS_FILE_IMAGE_INFO_NAME, &info) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get file image info")
- /* verify file image field consistancy */
+ /* verify file image field consistency */
HDassert(((info.buffer != NULL) && (info.size > 0)) ||
((info.buffer == NULL) && (info.size == 0)));
@@ -2858,7 +2858,7 @@ H5P__file_image_info_copy(void *value)
info = (H5FD_file_image_info_t *)value;
- /* verify file image field consistancy */
+ /* verify file image field consistency */
HDassert(((info->buffer != NULL) && (info->size > 0)) ||
((info->buffer == NULL) && (info->size == 0)));
@@ -2932,7 +2932,7 @@ H5P__file_image_info_free(void *value)
info = (H5FD_file_image_info_t *)value;
- /* Verify file image field consistancy */
+ /* Verify file image field consistency */
HDassert(((info->buffer != NULL) && (info->size > 0)) ||
((info->buffer == NULL) && (info->size == 0)));
diff --git a/src/H5Pfcpl.c b/src/H5Pfcpl.c
index 6b0d2c0..f90dae5 100644
--- a/src/H5Pfcpl.c
+++ b/src/H5Pfcpl.c
@@ -545,7 +545,7 @@ H5Pset_sym_k(hid_t plist_id, unsigned ik, unsigned lk)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "istore IK value exceeds maximum B-tree entries");
if(H5P_get(plist, H5F_CRT_BTREE_RANK_NAME, btree_k) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get rank for btree interanl nodes");
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get rank for btree internal nodes");
btree_k[H5B_SNODE_ID] = ik;
if(H5P_set(plist, H5F_CRT_BTREE_RANK_NAME, btree_k) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set rank for btree nodes");
@@ -650,10 +650,10 @@ H5Pset_istore_k(hid_t plist_id, unsigned ik)
/* Set value */
if(H5P_get(plist, H5F_CRT_BTREE_RANK_NAME, btree_k) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get rank for btree interanl nodes");
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get rank for btree internal nodes");
btree_k[H5B_CHUNK_ID] = ik;
if(H5P_set(plist, H5F_CRT_BTREE_RANK_NAME, btree_k) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set rank for btree interanl nodes");
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set rank for btree internal nodes");
done:
FUNC_LEAVE_API(ret_value)
@@ -698,7 +698,7 @@ H5Pget_istore_k(hid_t plist_id, unsigned *ik /*out */ )
/* Get value */
if(ik) {
if(H5P_get(plist, H5F_CRT_BTREE_RANK_NAME, btree_k) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get rank for btree interanl nodes");
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get rank for btree internal nodes");
*ik = btree_k[H5B_CHUNK_ID];
} /* end if */
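For context, the B-tree rank properties touched by these messages are set and
read on a file creation property list. A minimal sketch with arbitrary example
ranks (the library defaults are smaller):

    hid_t    fcpl = H5Pcreate(H5P_FILE_CREATE);
    unsigned ik_out = 0;

    H5Pset_sym_k(fcpl, 32, 8);       /* symbol-table B-tree: internal rank 32,
                                      * leaf rank 8 */
    H5Pset_istore_k(fcpl, 64);       /* chunked-storage B-tree: internal rank 64 */
    H5Pget_istore_k(fcpl, &ik_out);  /* ik_out is now 64 */
    H5Pclose(fcpl);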
diff --git a/src/H5Pint.c b/src/H5Pint.c
index 0e78463..6a0cc14 100644
--- a/src/H5Pint.c
+++ b/src/H5Pint.c
@@ -1398,7 +1398,7 @@ H5P_free_del_name_cb(void *item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED *
NAME
H5P_access_class
PURPOSE
- Internal routine to increment or decrement list & class dependancies on a
+ Internal routine to increment or decrement list & class dependencies on a
property list class
USAGE
herr_t H5P_access_class(pclass,mod)
@@ -1407,7 +1407,7 @@ H5P_free_del_name_cb(void *item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED *
RETURNS
Returns non-negative on success, negative on failure.
DESCRIPTION
- Increment/Decrement the class or list dependancies for a given class.
+ Increment/Decrement the class or list dependencies for a given class.
This routine is the final arbiter on decisions about actually releasing a
class in memory, such action is only taken when the reference counts for
both dependent classes & lists reach zero.
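Restated as code (an illustrative sketch only, not a quote from the routine),
the arbitration rule is that a class is freed only once nothing depends on it:

    /* Sketch of the release decision described above */
    if (pclass->classes == 0 &&      /* no dependent classes        */
        pclass->plists == 0 &&       /* no dependent property lists */
        pclass->ref_count == 0 &&    /* no outstanding IDs          */
        pclass->deleted) {           /* marked for deletion         */
        /* safe to release the class, then repeat for its parent */
    }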
@@ -1425,19 +1425,19 @@ H5P_access_class(H5P_genclass_t *pclass, H5P_class_mod_t mod)
HDassert(mod > H5P_MOD_ERR && mod < H5P_MOD_MAX);
switch(mod) {
- case H5P_MOD_INC_CLS: /* Increment the dependant class count*/
+ case H5P_MOD_INC_CLS: /* Increment the dependent class count*/
pclass->classes++;
break;
- case H5P_MOD_DEC_CLS: /* Decrement the dependant class count*/
+ case H5P_MOD_DEC_CLS: /* Decrement the dependent class count*/
pclass->classes--;
break;
- case H5P_MOD_INC_LST: /* Increment the dependant list count*/
+ case H5P_MOD_INC_LST: /* Increment the dependent list count*/
pclass->plists++;
break;
- case H5P_MOD_DEC_LST: /* Decrement the dependant list count*/
+ case H5P_MOD_DEC_LST: /* Decrement the dependent list count*/
pclass->plists--;
break;
@@ -1586,12 +1586,12 @@ H5P_create_class(H5P_genclass_t *par_class, const char *name, H5P_plist_type_t t
/* Allocate room for the class */
if(NULL == (pclass = H5FL_CALLOC(H5P_genclass_t)))
- HGOTO_ERROR(H5E_PLIST, H5E_CANTALLOC, NULL, "propery list class allocation failed")
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTALLOC, NULL, "property list class allocation failed")
/* Set class state */
pclass->parent = par_class;
if(NULL == (pclass->name = H5MM_xstrdup(name)))
- HGOTO_ERROR(H5E_PLIST, H5E_CANTALLOC, NULL, "propery list class name allocation failed")
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTALLOC, NULL, "property list class name allocation failed")
pclass->type = type;
pclass->nprops = 0; /* Classes are created without properties initially */
pclass->plists = 0; /* No properties lists of this class yet */
@@ -2026,7 +2026,7 @@ done:
GLOBAL VARIABLES
COMMENTS, BUGS, ASSUMPTIONS
The 'set' callback function may be useful to range check the value being
- set for the property or may perform some tranformation/translation of the
+ set for the property or may perform some transformation/translation of the
value set. The 'get' callback would then [probably] reverse the
transformation, etc. A single 'get' or 'set' callback could handle
multiple properties by performing different actions based on the property
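A minimal sketch of such a 'set' callback, using the H5Pregister2-style
callback signature (the int-valued property and the 0..100 range are
assumptions for illustration):

    static herr_t
    my_prop_set_cb(hid_t prop_id, const char *name, size_t size, void *value)
    {
        int v = *(int *)value;       /* property holds an int in this sketch */

        (void)prop_id; (void)name; (void)size;
        if (v < 0 || v > 100)
            return -1;               /* reject out-of-range values */
        return 0;                    /* accept (or clamp/transform here) */
    }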
@@ -2258,7 +2258,7 @@ done:
GLOBAL VARIABLES
COMMENTS, BUGS, ASSUMPTIONS
The 'set' callback function may be useful to range check the value being
- set for the property or may perform some tranformation/translation of the
+ set for the property or may perform some transformation/translation of the
value set. The 'get' callback would then [probably] reverse the
transformation, etc. A single 'get' or 'set' callback could handle
multiple properties by performing different actions based on the property
@@ -2499,7 +2499,7 @@ done:
GLOBAL VARIABLES
COMMENTS, BUGS, ASSUMPTIONS
The 'set' callback function may be useful to range check the value being
- set for the property or may perform some tranformation/translation of the
+ set for the property or may perform some transformation/translation of the
value set. The 'get' callback would then [probably] reverse the
transformation, etc. A single 'get' or 'set' callback could handle
multiple properties by performing different actions based on the property
@@ -2636,7 +2636,7 @@ H5P__do_prop(H5P_genplist_t *plist, const char *name, H5P_do_plist_op_t plist_op
/* Find property in changed list */
if(NULL != (prop = (H5P_genprop_t *)H5SL_search(plist->props, name))) {
- /* Call the 'found in propery list' callback */
+ /* Call the 'found in property list' callback */
if((*plist_op)(plist, name, prop, udata) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTOPERATE, FAIL, "can't operate on property")
} /* end if */
@@ -4020,7 +4020,7 @@ H5P_iterate_plist(const H5P_genplist_t *plist, hbool_t iter_all_prop, int *idx,
/* Check for iterating over all properties, or just non-default ones */
if(iter_all_prop) {
- /* Walk up the class hiearchy */
+ /* Walk up the class hierarchy */
tclass = plist->pclass;
while(tclass != NULL) {
/* Iterate over properties in property list class */
@@ -4737,7 +4737,7 @@ H5P_copy_prop_pclass(hid_t dst_id, hid_t src_id, const char *name)
/* Sanity check */
HDassert(name);
- /* Get propery list classes */
+ /* Get property list classes */
if(NULL == (src_pclass = (H5P_genclass_t *)H5I_object(src_id)))
HGOTO_ERROR(H5E_PLIST, H5E_NOTFOUND, FAIL, "source property class object doesn't exist")
if(NULL == (dst_pclass = (H5P_genclass_t *)H5I_object(dst_id)))
@@ -4854,8 +4854,8 @@ done:
GLOBAL VARIABLES
COMMENTS, BUGS, ASSUMPTIONS
The property list class 'close' callback routine is not called from
- here, it must have been check for and called properly prior to this routine
- being called
+ here; it must have been checked for and called properly prior to this routine
+ being called.
EXAMPLES
REVISION LOG
--------------------------------------------------------------------------*/
@@ -4981,7 +4981,7 @@ H5P_close(void *_plist)
tclass=tclass->parent;
} /* end while */
- /* Decrement class's dependant property list value! */
+ /* Decrement class's dependent property list value! */
if(H5P_access_class(plist->pclass,H5P_MOD_DEC_LST) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTINIT, FAIL, "Can't decrement class ref count")
diff --git a/src/H5Ppkg.h b/src/H5Ppkg.h
index 13463ae..60b2363 100644
--- a/src/H5Ppkg.h
+++ b/src/H5Ppkg.h
@@ -51,10 +51,10 @@ typedef enum {
/* Define enum for modifications to class */
typedef enum {
H5P_MOD_ERR=(-1), /* Indicate an error */
- H5P_MOD_INC_CLS, /* Increment the dependant class count*/
- H5P_MOD_DEC_CLS, /* Decrement the dependant class count*/
- H5P_MOD_INC_LST, /* Increment the dependant list count*/
- H5P_MOD_DEC_LST, /* Decrement the dependant list count*/
+ H5P_MOD_INC_CLS, /* Increment the dependent class count*/
+ H5P_MOD_DEC_CLS, /* Decrement the dependent class count*/
+ H5P_MOD_INC_LST, /* Increment the dependent list count*/
+ H5P_MOD_DEC_LST, /* Decrement the dependent list count*/
H5P_MOD_INC_REF, /* Increment the ID reference count*/
H5P_MOD_DEC_REF, /* Decrement the ID reference count*/
H5P_MOD_MAX /* Upper limit on class modifications */
@@ -89,7 +89,7 @@ struct H5P_genclass_t {
size_t nprops; /* Number of properties in class */
unsigned plists; /* Number of property lists that have been created since the last modification to the class */
unsigned classes; /* Number of classes that have been derived since the last modification to the class */
- unsigned ref_count; /* Number of oustanding ID's open on this class object */
+ unsigned ref_count; /* Number of outstanding ID's open on this class object */
hbool_t deleted; /* Whether this class has been deleted and is waiting for dependent classes & proplists to close */
unsigned revision; /* Revision number of a particular class (global) */
H5SL_t *props; /* Skip list containing properties */
diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h
index a2c0418..58f3d18 100644
--- a/src/H5Ppublic.h
+++ b/src/H5Ppublic.h
@@ -139,7 +139,7 @@ typedef enum H5D_mpio_actual_chunk_opt_mode_t {
typedef enum H5D_mpio_actual_io_mode_t {
/* The following four values are conveniently defined as a bit field so that
- * we can switch from the default to indpendent or collective and then to
+ * we can switch from the default to independent or collective and then to
* mixed without having to check the original value.
*
* NO_COLLECTIVE means that either collective I/O wasn't requested or that
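Concretely, the bit-field assignment that makes this possible (values as
defined in H5Ppublic.h) lets the library promote either pure mode to mixed
with a single OR:

    /* H5D_MPIO_NO_COLLECTIVE     = 0x0
     * H5D_MPIO_CHUNK_INDEPENDENT = 0x1
     * H5D_MPIO_CHUNK_COLLECTIVE  = 0x2
     * H5D_MPIO_CHUNK_MIXED       = 0x1 | 0x2 */
    actual_io_mode |= H5D_MPIO_CHUNK_COLLECTIVE;  /* independent -> mixed,
                                                   * no test of the old value */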
diff --git a/src/H5SMpkg.h b/src/H5SMpkg.h
index a9a4fd9..bb458a7 100644
--- a/src/H5SMpkg.h
+++ b/src/H5SMpkg.h
@@ -96,7 +96,7 @@
#define H5SM_B2_SPLIT_PERCENT 100
#define H5SM_B2_MERGE_PERCENT 40
-#define H5SM_LIST_VERSION 0 /* Verion of Shared Object Header Message List Indexes */
+#define H5SM_LIST_VERSION 0 /* Version of Shared Object Header Message List Indexes */
/****************************/
/* Package Typedefs */
diff --git a/src/H5Sall.c b/src/H5Sall.c
index 11dd8e1..28395b1 100644
--- a/src/H5Sall.c
+++ b/src/H5Sall.c
@@ -432,7 +432,7 @@ H5S__all_copy(H5S_t *dst, const H5S_t H5_ATTR_UNUSED *src, hbool_t H5_ATTR_UNUSE
TRUE if the selection fits within the extent, FALSE if it does not and
Negative on an error.
DESCRIPTION
- Determines if the current selection at the current offet fits within the
+ Determines if the current selection at the current offset fits within the
extent for the dataspace. Offset is irrelevant for this type of selection.
GLOBAL VARIABLES
COMMENTS, BUGS, ASSUMPTIONS
diff --git a/src/H5Shyper.c b/src/H5Shyper.c
index 47eb573..5653209 100644
--- a/src/H5Shyper.c
+++ b/src/H5Shyper.c
@@ -853,7 +853,7 @@ H5S__hyper_iter_next(H5S_sel_iter_t *iter, size_t nelem)
/* Check if we are finished with the spans in the tree */
if(curr_dim >= 0) {
- /* Walk back down the iterator positions, reseting them */
+ /* Walk back down the iterator positions, resetting them */
while(curr_dim < fast_dim) {
HDassert(curr_span);
HDassert(curr_span->down);
@@ -1037,7 +1037,7 @@ H5S__hyper_iter_next_block(H5S_sel_iter_t *iter)
/* Check if we are finished with the spans in the tree */
if(curr_dim >= 0) {
- /* Walk back down the iterator positions, reseting them */
+ /* Walk back down the iterator positions, resetting them */
while(curr_dim < fast_dim) {
HDassert(curr_span);
HDassert(curr_span->down);
@@ -1704,7 +1704,7 @@ done:
RETURNS
TRUE if the selection fits within the extent, FALSE if it does not
DESCRIPTION
- Determines if the current selection at the current offet fits within the
+ Determines if the current selection at the current offset fits within the
extent for the dataspace.
GLOBAL VARIABLES
COMMENTS, BUGS, ASSUMPTIONS
@@ -1762,7 +1762,7 @@ done:
TRUE if the selection fits within the extent, FALSE if it does not and
Negative on an error.
DESCRIPTION
- Determines if the current selection at the current offet fits within the
+ Determines if the current selection at the current offset fits within the
extent for the dataspace.
GLOBAL VARIABLES
COMMENTS, BUGS, ASSUMPTIONS
@@ -2052,7 +2052,7 @@ H5S__hyper_serialize_helper(const H5S_hyper_span_info_t *spans,
HDassert(rank < H5O_LAYOUT_NDIMS);
HDassert(p && pp);
- /* Walk through the list of spans, recursing or outputing them */
+ /* Walk through the list of spans, recursing or outputting them */
curr = spans->head;
while(curr != NULL) {
/* Recurse if this node has down spans */
@@ -2458,7 +2458,7 @@ H5S__hyper_span_blocklist(const H5S_hyper_span_info_t *spans, hsize_t start[],
HDassert(numblocks && *numblocks > 0);
HDassert(buf && *buf);
- /* Walk through the list of spans, recursing or outputing them */
+ /* Walk through the list of spans, recursing or outputting them */
curr = spans->head;
while(curr != NULL && *numblocks > 0) {
/* Recurse if this node has down spans */
@@ -3509,7 +3509,7 @@ done:
NAME
H5S__hyper_add_span_element_helper
PURPOSE
- Add a single elment to a span tree
+ Add a single element to a span tree
USAGE
herr_t H5S_hyper_add_span_element_helper(prev_span, span_tree, rank, coords)
H5S_hyper_span_info_t *span_tree; IN/OUT: Pointer to span tree to append to
@@ -3713,7 +3713,7 @@ done:
NAME
H5S_hyper_add_span_element
PURPOSE
- Add a single elment to a span tree
+ Add a single element to a span tree
USAGE
herr_t H5S_hyper_add_span_element(space, span_tree, rank, coords)
H5S_t *space; IN/OUT: Pointer to dataspace to add coordinate to
@@ -8101,7 +8101,7 @@ H5S__hyper_get_seq_list_gen(const H5S_t *space, H5S_sel_iter_t *iter,
/* Check if we have more spans in the tree */
if(curr_dim >= 0) {
- /* Walk back down the iterator positions, reseting them */
+ /* Walk back down the iterator positions, resetting them */
while((unsigned)curr_dim < fast_dim) {
HDassert(curr_span);
HDassert(curr_span->down);
@@ -8280,7 +8280,7 @@ H5S__hyper_get_seq_list_gen(const H5S_t *space, H5S_sel_iter_t *iter,
break;
} /* end if */
else {
- /* Walk back down the iterator positions, reseting them */
+ /* Walk back down the iterator positions, resetting them */
while((unsigned)curr_dim < fast_dim) {
HDassert(curr_span);
HDassert(curr_span->down);
diff --git a/src/H5Smpio.c b/src/H5Smpio.c
index 5c65119..db81ffc 100644
--- a/src/H5Smpio.c
+++ b/src/H5Smpio.c
@@ -909,10 +909,9 @@ H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size,
else
inner_type = outer_type;
} /* end for */
-/***************************
-* End of loop, walking
-* thru dimensions.
-***************************/
+/******************************************
+* End of loop, walking through dimensions.
+*******************************************/
/* At this point inner_type is actually the outermost type, even for 0-trip loop */
*new_type = inner_type;
diff --git a/src/H5Snone.c b/src/H5Snone.c
index 19d0b5f..a46a71a 100644
--- a/src/H5Snone.c
+++ b/src/H5Snone.c
@@ -408,7 +408,7 @@ H5S_none_copy(H5S_t *dst, const H5S_t H5_ATTR_UNUSED *src, hbool_t H5_ATTR_UNUSE
TRUE if the selection fits within the extent, FALSE if it does not and
Negative on an error.
DESCRIPTION
- Determines if the current selection at the current offet fits within the
+ Determines if the current selection at the current offset fits within the
extent for the dataspace. Offset is irrelevant for this type of selection.
GLOBAL VARIABLES
COMMENTS, BUGS, ASSUMPTIONS
diff --git a/src/H5Spoint.c b/src/H5Spoint.c
index 442bb2a..fb8e311 100644
--- a/src/H5Spoint.c
+++ b/src/H5Spoint.c
@@ -676,7 +676,7 @@ done:
TRUE if the selection fits within the extent, FALSE if it does not and
Negative on an error.
DESCRIPTION
- Determines if the current selection at the current offet fits within the
+ Determines if the current selection at the current offset fits within the
extent for the dataspace.
GLOBAL VARIABLES
COMMENTS, BUGS, ASSUMPTIONS
diff --git a/src/H5Sselect.c b/src/H5Sselect.c
index 4462295..ec74523 100644
--- a/src/H5Sselect.c
+++ b/src/H5Sselect.c
@@ -160,7 +160,7 @@ H5S_select_release(H5S_t *ds)
HDassert(ds);
/* Call the selection type's release function */
- if((ret_value = (*ds->select.type->release)(ds)) < 0)
+ if((ds->select.type) && ((ret_value = (*ds->select.type->release)(ds)) < 0))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection")
done:
@@ -361,7 +361,7 @@ H5S_get_select_npoints(const H5S_t *space)
TRUE if the selection fits within the extent, FALSE if it does not and
Negative on an error.
DESCRIPTION
- Determines if the current selection at the current offet fits within the
+ Determines if the current selection at the current offset fits within the
extent for the dataspace.
GLOBAL VARIABLES
COMMENTS, BUGS, ASSUMPTIONS
@@ -403,7 +403,7 @@ done:
TRUE if the selection fits within the extent, FALSE if it does not and
Negative on an error.
DESCRIPTION
- Determines if the current selection at the current offet fits within the
+ Determines if the current selection at the current offset fits within the
extent for the dataspace.
GLOBAL VARIABLES
COMMENTS, BUGS, ASSUMPTIONS
diff --git a/src/H5T.c b/src/H5T.c
index c220890..fa97506 100644
--- a/src/H5T.c
+++ b/src/H5T.c
@@ -309,7 +309,7 @@ static H5T_path_t *H5T__path_find_real(const H5T_t *src, const H5T_t *dst,
/* Library Private Variables */
/*****************************/
-/* The native endianess of the platform */
+/* The native endianness of the platform */
H5T_order_t H5T_native_order_g = H5T_ORDER_ERROR;
diff --git a/src/H5Tcommit.c b/src/H5Tcommit.c
index 4e4a551..1719f8f 100644
--- a/src/H5Tcommit.c
+++ b/src/H5Tcommit.c
@@ -385,7 +385,7 @@ H5T__commit(H5F_t *file, H5T_t *type, hid_t tcpl_id)
if(H5T_set_version(file, type) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set version of datatype")
- /* Calculate message size infomation, for creating object header */
+ /* Calculate message size information, for creating object header */
dtype_size = H5O_msg_size_f(file, tcpl_id, H5O_DTYPE_ID, type, (size_t)0);
HDassert(dtype_size);
diff --git a/src/H5Tconv.c b/src/H5Tconv.c
index f3af9e1..803a6da 100644
--- a/src/H5Tconv.c
+++ b/src/H5Tconv.c
@@ -109,7 +109,7 @@
* least as large as the destination. Overflows can occur when
* the destination is narrower than the source.
*
- * xF: Integers to float-point(float or double) values where the desination
+ * xF: Integers to floating-point (float or double) values where the destination
* is at least as wide as the source. This case cannot generate
* overflows.
*
@@ -2102,7 +2102,7 @@ H5T__conv_struct_subset(const H5T_cdata_t *cdata)
*
* For each element do
* For I=1..NELMTS do
- * If sizeof detination type <= sizeof source type then
+ * If sizeof destination type <= sizeof source type then
* Convert member to destination type;
* Move member as far left as possible;
*
@@ -2314,7 +2314,7 @@ done:
* function. The algorithm is basically:
*
* For each member of the struct
- * If sizeof detination type <= sizeof source type then
+ * If sizeof destination type <= sizeof source type then
* Convert member to destination type for all elements
* Move memb to BKG buffer for all elements
* Else
@@ -2421,7 +2421,7 @@ H5T__conv_struct_opt(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata,
offset -= src_memb->size;
if(dst_memb->size > src->shared->size-offset) {
cdata->priv = H5T_conv_struct_free(priv);
- HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, FAIL, "convertion is unsupported by this function")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, FAIL, "conversion is unsupported by this function")
} /* end if */
} /* end if */
} /* end for */
@@ -4250,7 +4250,7 @@ H5T__conv_f_f(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, size_t nelmts,
/*
* The exponent is too small to fit in the exponent field,
* but by shifting the mantissa to the right we can
- * accomodate that value. The mantissa of course is no
+ * accommodate that value. The mantissa of course is no
* longer normalized.
*/
mrsh += (size_t)(1 - expo);
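As a worked example of the shift above: if the computed destination exponent
is expo = -2, the increment is 1 - (-2) = 3, so the mantissa is shifted three
additional bits to the right and the value is stored denormalized.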
diff --git a/src/H5Toffset.c b/src/H5Toffset.c
index 668e730..c0e94fc 100644
--- a/src/H5Toffset.c
+++ b/src/H5Toffset.c
@@ -33,12 +33,12 @@ static herr_t H5T_set_offset(const H5T_t *dt, size_t offset);
* Function: H5Tget_offset
*
* Purpose: Retrieves the bit offset of the first significant bit. The
- * signficant bits of an atomic datum can be offset from the
+ * significant bits of an atomic datum can be offset from the
* beginning of the memory for that datum by an amount of
* padding. The `offset' property specifies the number of bits
* of padding that appear to the "right of" the value. That is,
* if we have a 32-bit datum with 16-bits of precision having
- * the value 0x1122 then it will be layed out in memory as (from
+ * the value 0x1122 then it will be laid out in memory as (from
* small byte address toward larger byte addresses):
*
* Big Big Little Little
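The layout described above can be reproduced with the datatype API; a short
sketch matching the 32-bit/16-bit example (the 8-bit offset and the 32-bit
native int are assumptions for illustration):

    hid_t dt = H5Tcopy(H5T_NATIVE_INT);  /* assume a 32-bit datum */

    H5Tset_precision(dt, 16);            /* 16 significant bits */
    H5Tset_offset(dt, 8);                /* 8 bits of padding to the right
                                          * of the value */
    /* H5Tget_offset(dt) now returns 8 */
    H5Tclose(dt);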
@@ -84,12 +84,12 @@ done:
* Function: H5T_get_offset
*
* Purpose: Retrieves the bit offset of the first significant bit. The
- * signficant bits of an atomic datum can be offset from the
+ * significant bits of an atomic datum can be offset from the
* beginning of the memory for that datum by an amount of
* padding. The `offset' property specifies the number of bits
* of padding that appear to the "right of" the value. That is,
* if we have a 32-bit datum with 16-bits of precision having
- * the value 0x1122 then it will be layed out in memory as (from
+ * the value 0x1122 then it will be laid out in memory as (from
* small byte address toward larger byte addresses):
*
* Big Big Little Little
@@ -134,12 +134,12 @@ done:
* Function: H5Tset_offset
*
* Purpose: Sets the bit offset of the first significant bit. The
- * signficant bits of an atomic datum can be offset from the
+ * significant bits of an atomic datum can be offset from the
* beginning of the memory for that datum by an amount of
* padding. The `offset' property specifies the number of bits
* of padding that appear to the "right of" the value. That is,
* if we have a 32-bit datum with 16-bits of precision having
- * the value 0x1122 then it will be layed out in memory as (from
+ * the value 0x1122 then it will be laid out in memory as (from
* small byte address toward larger byte addresses):
*
* Big Big Little Little
@@ -203,12 +203,12 @@ done:
* Function: H5T_set_offset
*
* Purpose: Sets the bit offset of the first significant bit. The
- * signficant bits of an atomic datum can be offset from the
+ * significant bits of an atomic datum can be offset from the
* beginning of the memory for that datum by an amount of
* padding. The `offset' property specifies the number of bits
* of padding that appear to the "right of" the value. That is,
* if we have a 32-bit datum with 16-bits of precision having
- * the value 0x1122 then it will be layed out in memory as (from
+ * the value 0x1122 then it will be laid out in memory as (from
* small byte address toward larger byte addresses):
*
* Big Big Little Little
diff --git a/src/H5Tpkg.h b/src/H5Tpkg.h
index 96132b0..e1b996c 100644
--- a/src/H5Tpkg.h
+++ b/src/H5Tpkg.h
@@ -295,7 +295,7 @@ typedef enum H5T_state_t {
H5T_STATE_OPEN /*named constant, open object header */
} H5T_state_t;
- /* This struct is shared between all occurances of an open named type */
+ /* This struct is shared between all occurrences of an open named type */
typedef struct H5T_shared_t {
hsize_t fo_count; /* number of references to this file object */
H5T_state_t state; /*current state of the type */
diff --git a/src/H5Tprivate.h b/src/H5Tprivate.h
index 8051b6d..89cdcfd 100644
--- a/src/H5Tprivate.h
+++ b/src/H5Tprivate.h
@@ -100,7 +100,7 @@ typedef struct H5T_subset_info_t {
/* Forward declarations for prototype arguments */
struct H5O_t;
-/* The native endianess of the platform */
+/* The native endianness of the platform */
H5_DLLVAR H5T_order_t H5T_native_order_g;
/* Private functions */
diff --git a/src/H5Znbit.c b/src/H5Znbit.c
index c0afc38..fe0041c 100644
--- a/src/H5Znbit.c
+++ b/src/H5Znbit.c
@@ -558,7 +558,7 @@ H5Z_set_parms_array(const H5T_t *type, unsigned *cd_values_index,
H5T_t *dtype_base = NULL; /* Array datatype's base datatype */
H5T_class_t dtype_base_class; /* Array datatype's base datatype's class */
size_t dtype_size; /* Array datatype's size (in bytes) */
- htri_t is_vlstring; /* flag indicating if datatype is varible-length string */
+ htri_t is_vlstring; /* flag indicating if datatype is variable-length string */
herr_t ret_value=SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
@@ -667,7 +667,7 @@ H5Z_set_parms_compound(const H5T_t *type, unsigned *cd_values_index,
size_t dtype_member_offset; /* Compound datatype's current member datatype's offset (in bytes) */
size_t dtype_next_member_offset;/* Compound datatype's next member datatype's offset (in bytes) */
size_t dtype_size; /* Compound datatype's size (in bytes) */
- htri_t is_vlstring; /* flag indicating if datatype is varible-length string */
+ htri_t is_vlstring; /* flag indicating if datatype is variable-length string */
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
diff --git a/src/H5Zpublic.h b/src/H5Zpublic.h
index fcb2d37..0df05f0 100644
--- a/src/H5Zpublic.h
+++ b/src/H5Zpublic.h
@@ -50,7 +50,7 @@ typedef int H5Z_filter_t;
* unlimited amount, but currently each
* filter uses a bit in a 32-bit field,
* so the format would have to be
- * changed to accomodate that)
+ * changed to accommodate that)
*/
/* Flags for filter definition (stored) */
diff --git a/src/H5Zscaleoffset.c b/src/H5Zscaleoffset.c
index d139696..cdf31a4 100644
--- a/src/H5Zscaleoffset.c
+++ b/src/H5Zscaleoffset.c
@@ -988,7 +988,7 @@ H5Z_set_local_scaleoffset(hid_t dcpl_id, hid_t type_id, hid_t space_id)
if(status == H5D_FILL_VALUE_UNDEFINED)
cd_values[H5Z_SCALEOFFSET_PARM_FILAVAIL] = H5Z_SCALEOFFSET_FILL_UNDEFINED;
else {
- int need_convert = FALSE; /* Flag indicating convertion of byte order */
+ int need_convert = FALSE; /* Flag indicating conversion of byte order */
cd_values[H5Z_SCALEOFFSET_PARM_FILAVAIL] = H5Z_SCALEOFFSET_FILL_DEFINED;
@@ -1047,7 +1047,7 @@ H5Z_filter_scaleoffset(unsigned flags, size_t cd_nelmts, const unsigned cd_value
uint32_t minbits = 0; /* minimum number of bits to store values */
unsigned long long minval= 0; /* minimum value of input buffer */
enum H5Z_scaleoffset_t type; /* memory type corresponding to dataset datatype */
- int need_convert = FALSE; /* flag indicating convertion of byte order */
+ int need_convert = FALSE; /* flag indicating conversion of byte order */
unsigned char *outbuf = NULL; /* pointer to new output buffer */
unsigned buf_offset = 21; /* buffer offset because of parameters stored in file */
unsigned i; /* index */
diff --git a/src/H5Ztrans.c b/src/H5Ztrans.c
index d4b59a6..67646a0 100644
--- a/src/H5Ztrans.c
+++ b/src/H5Ztrans.c
@@ -329,7 +329,7 @@ static void H5Z_print(H5Z_node *tree, FILE *stream);
}
/* The difference of this macro from H5Z_XFORM_DO_OP3 is that it handles the operations when the left operand is empty, like -x or +x.
- * The reason that it's seperated from H5Z_XFORM_DO_OP3 is because compilers don't accept operations like *x or /x. So in H5Z_do_op,
+ * The reason that it's separated from H5Z_XFORM_DO_OP3 is because compilers don't accept operations like *x or /x. So in H5Z_do_op,
* these two macros are called in different ways. (SLU 2012/3/20)
*/
#define H5Z_XFORM_DO_OP6(OP) \
@@ -967,7 +967,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5Z_new_node
- * Purpose: Create and initilize a new H5Z_node structure.
+ * Purpose: Create and initialize a new H5Z_node structure.
* Return: Success: Valid H5Z_node ptr
* NULLure: NULL
* Programmer: Bill Wendling
@@ -999,7 +999,7 @@ done:
* Purpose: If the transform is trivial, this function applies it.
* Otherwise, it calls H5Z_xform_eval_full to do the full
* transform.
- * Return: SUCCEED if transform applied succesfully, FAIL otherwise
+ * Return: SUCCEED if transform applied successfully, FAIL otherwise
* Programmer: Leon Arber
* 5/1/04
* Modifications:
@@ -1485,7 +1485,7 @@ H5Z_xform_reduce_tree(H5Z_node* tree)
* Purpose: If the root of the tree passed in points to a simple
* arithmetic operation and the left and right subtrees are both
* integer or floating point values, this function does that
- * operation, free the left and rigt subtrees, and replaces
+ * operation, frees the left and right subtrees, and replaces
* the root with the result of the operation.
* Return: None.
* Programmer: Leon Arber
diff --git a/src/H5detect.c b/src/H5detect.c
index 69e07fd..60ef656 100644
--- a/src/H5detect.c
+++ b/src/H5detect.c
@@ -43,7 +43,7 @@ static const char *FileHeader = "\n\
* system or configure has detected those Unix
* features which aren't available. We're not
* running on a Vax or other machine with mixed
- * endianess.
+ * endianness.
*
* Modifications:
*
@@ -691,7 +691,7 @@ H5T__init_native(void)\n\
FUNC_ENTER_PACKAGE\n");
for(i = 0; i < nd; i++) {
- /* The native endianess of this machine */
+ /* The native endianness of this machine */
/* The INFO.perm now contains `-1' for bytes that aren't used and
* are always zero. This happens on the Cray for `short' where
* sizeof(short) is 8, but only the low-order 4 bytes are ever used.
@@ -1075,8 +1075,8 @@ fix_order(int n, int last, int *perm, const char **mesg)
} else {
/*
* Bi-endian machines like VAX.
- * (NOTE: This is not an actual determination of the VAX-endianess.
- * It could have some other endianess and fall into this
+ * (NOTE: This is not an actual determination of the VAX-endianness.
+ * It could have some other endianness and fall into this
* case - JKM & QAK)
*/
HDassert(0 == n % 2);
@@ -1107,7 +1107,7 @@ fix_order(int n, int last, int *perm, const char **mesg)
*
* This function assumes that the exponent occupies higher
* order bits than the mantissa and that the most significant
- * bit of the mantissa is next to the least signficant bit
+ * bit of the mantissa is next to the least significant bit
* of the exponent.
*
*
@@ -1267,7 +1267,7 @@ mark. Bits of integer types are printed as\n\
If the most significant bit of the normalized\n\
mantissa (always a `1' except for `0.0') is\n\
not stored then an `implicit=yes' appears\n\
-under the field description. In thie case,\n\
+under the field description. In this case,\n\
the radix point is still assumed to be\n\
before the first `M' but after the implicit\n\
bit.\n";
diff --git a/src/H5private.h b/src/H5private.h
index 6334f39..8974e46 100644
--- a/src/H5private.h
+++ b/src/H5private.h
@@ -116,7 +116,7 @@
#endif
/*
- * flock() in sys/file.h is used for the implemention of file locking.
+ * flock() in sys/file.h is used for the implementation of file locking.
*/
#if defined(H5_HAVE_FLOCK) && defined(H5_HAVE_SYS_FILE_H)
# include <sys/file.h>
@@ -132,7 +132,7 @@
/*
* Unix ioctls. These are used by h5ls (and perhaps others) to determine a
- * resonable output width.
+ * reasonable output width.
*/
#ifdef H5_HAVE_SYS_IOCTL_H
# include <sys/ioctl.h>
diff --git a/src/H5public.h b/src/H5public.h
index 9157366..a4c80cd 100644
--- a/src/H5public.h
+++ b/src/H5public.h
@@ -115,7 +115,7 @@ extern "C" {
/*
* Status return values. Failed integer functions in HDF5 result almost
* always in a negative value (unsigned failing functions sometimes return
- * zero for failure) while successfull return is non-negative (often zero).
+ * zero for failure) while successful return is non-negative (often zero).
* The negative failure value is most commonly -1, but don't bet on it. The
* proper way to detect failure is something like:
*
@@ -298,7 +298,7 @@ typedef enum {
} H5_iter_order_t;
/* Iteration callback values */
-/* (Actually, any postive value will cause the iterator to stop and pass back
+/* (Actually, any positive value will cause the iterator to stop and pass back
* that positive value to the function that called the iterator)
*/
#define H5_ITER_ERROR (-1)
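A sketch of a link-iteration callback exploiting this convention (needs
<string.h>; the "stop at a given name" behavior is an example, not library
code):

    static herr_t
    find_link_cb(hid_t group, const char *name, const H5L_info_t *info,
                 void *op_data)
    {
        (void)group; (void)info;
        if (strcmp(name, (const char *)op_data) == 0)
            return H5_ITER_STOP;   /* positive: H5Literate() stops and
                                    * returns this value to its caller */
        return H5_ITER_CONT;       /* zero: keep iterating */
    }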
diff --git a/src/H5system.c b/src/H5system.c
index 719b7e0..186d8fa 100644
--- a/src/H5system.c
+++ b/src/H5system.c
@@ -294,13 +294,13 @@ HDfprintf(FILE *stream, const char *fmt, ...)
unsigned short x = (unsigned short)va_arg(ap, unsigned int);
n = fprintf(stream, format_templ, x);
} else if(!*modifier) {
- unsigned int x = va_arg(ap, unsigned int); /*lint !e732 Loss of sign not really occuring */
+ unsigned int x = va_arg(ap, unsigned int); /*lint !e732 Loss of sign not really occurring */
n = fprintf(stream, format_templ, x);
} else if(!HDstrcmp(modifier, "l")) {
- unsigned long x = va_arg(ap, unsigned long); /*lint !e732 Loss of sign not really occuring */
+ unsigned long x = va_arg(ap, unsigned long); /*lint !e732 Loss of sign not really occurring */
n = fprintf(stream, format_templ, x);
} else {
- uint64_t x = va_arg(ap, uint64_t); /*lint !e732 Loss of sign not really occuring */
+ uint64_t x = va_arg(ap, uint64_t); /*lint !e732 Loss of sign not really occurring */
n = fprintf(stream, format_templ, x);
}
break;
@@ -333,7 +333,7 @@ HDfprintf(FILE *stream, const char *fmt, ...)
case 'a':
{
- haddr_t x = va_arg(ap, haddr_t); /*lint !e732 Loss of sign not really occuring */
+ haddr_t x = va_arg(ap, haddr_t); /*lint !e732 Loss of sign not really occurring */
if(H5F_addr_defined(x)) {
len = 0;
@@ -387,7 +387,7 @@ HDfprintf(FILE *stream, const char *fmt, ...)
case 's':
case 'p':
{
- char *x = va_arg(ap, char*); /*lint !e64 Type mismatch not really occuring */
+ char *x = va_arg(ap, char*); /*lint !e64 Type mismatch not really occurring */
n = fprintf(stream, format_templ, x);
}
break;
diff --git a/test/cache_common.c b/test/cache_common.c
index 5596601..2cc188e 100644
--- a/test/cache_common.c
+++ b/test/cache_common.c
@@ -6413,7 +6413,7 @@ validate_mdc_config(hid_t file_id,
* the supplied external configuration.
*
* The cache also sets the initial_size field to the current
- * cache max size instead of the value initialy supplied.
+ * cache max size instead of the value initially supplied.
* Depending on circumstances, this may or may not match
* the original. Hence the compare_init parameter.
*/
diff --git a/test/cache_common.h b/test/cache_common.h
index 8999e44..9c66357 100644
--- a/test/cache_common.h
+++ b/test/cache_common.h
@@ -180,7 +180,7 @@ typedef struct flush_op
hbool_t flag; /* boolean flag passed into the
* function implementing the flush
* operation. The meaning of the
- * flag is dependant upon the flush
+ * flag is dependent upon the flush
* operation:
*
* FLUSH_OP__DIRTY: TRUE iff the
diff --git a/test/cache_image.c b/test/cache_image.c
index 58b0b8f..5967ecc 100644
--- a/test/cache_image.c
+++ b/test/cache_image.c
@@ -511,7 +511,7 @@ delete_datasets(hid_t file_id, int min_dset, int max_dset)
* FAPL entry when opening the file, and verify that the
* metadata cache is notified.
*
- * If config_fsm is TRUE, setup the persistant free space
+ * If config_fsm is TRUE, setup the persistent free space
* manager. Note that this flag may only be set if
* create_file is also TRUE.
*
@@ -654,7 +654,7 @@ open_hdf5_file(hbool_t create_file, hbool_t mdci_sbem_expected,
if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
- /* setup the persistant free space manager if indicated */
+ /* setup the persistent free space manager if indicated */
if ( ( pass ) && ( config_fsm ) ) {
fcpl_id = H5Pcreate(H5P_FILE_CREATE);
@@ -5149,9 +5149,9 @@ cache_image_smoke_check_5(void)
* 13) Close the file.
*
* 14) Get the size of the file. Verify that it is less
- * than 20 KB. Without deletions and persistant free
+ * than 20 KB. Without deletions and persistent free
* space managers, size size is about 167 MB, so this
- * is sufficient to verify that the persistant free
+ * is sufficient to verify that the persistent free
* space managers are more or less doing their job.
*
* Note that in the absence of paged allocation, file
@@ -5473,9 +5473,9 @@ cache_image_smoke_check_6(void)
/* 14) Get the size of the file. Verify that it is less
- * than 20 KB. Without deletions and persistant free
+ * than 20 KB. Without deletions and persistent free
* space managers, size size is about 167 MB, so this
- * is sufficient to verify that the persistant free
+ * is sufficient to verify that the persistent free
* space managers are more or less doing their job.
*
* Note that in the absence of paged allocation, file
@@ -7160,7 +7160,7 @@ cache_image_api_error_check_4(void)
* called before any activity on the metadata cache.
* This is a potential problem, as satisfying the
* H5Fget_free_sections() call requires access to all
- * free space managers. When persistant free space
+ * free space managers. When persistent free space
* managers are enabled, this will require calling
* H5MF_tidy_self_referential_fsm_hack(). This is a
* non issue in the absence of a cache image. However,
@@ -7179,7 +7179,7 @@ cache_image_api_error_check_4(void)
* The test is set up as follows:
*
* 1) Create a HDF5 file with a cache image requested
- * and persistant free space managers enabled.
+ * and persistent free space managers enabled.
*
* 2) Create some data sets, and then delete some of
* of those near the beginning of the file.
@@ -7273,7 +7273,7 @@ get_free_sections_test(void)
/* 1) Create a HDF5 file with a cache image requested
- * and persistant free space managers enabled.
+ * and persistent free space managers enabled.
*/
if ( pass ) {
@@ -7666,13 +7666,13 @@ get_free_sections_test(void)
* the dirty metadata, the second scan will fail, as valid
* versions of the dirty metadata will not be available.
*
- * To make the test more useful, enable persistant free
+ * To make the test more useful, enable persistent free
* space managers.
*
* The test is set up as follows:
*
* 1) Create a HDF5 file without a cache image requested
- * and persistant free space managers enabled.
+ * and persistent free space managers enabled.
*
* 2) Create some data sets and verify them.
*
@@ -7753,7 +7753,7 @@ evict_on_close_test(void)
/* 1) Create a HDF5 file without a cache image requested
- * and persistant free space managers enabled.
+ * and persistent free space managers enabled.
*/
if ( pass ) {
diff --git a/test/cache_tagging.c b/test/cache_tagging.c
index d34cad3..119ba62 100644
--- a/test/cache_tagging.c
+++ b/test/cache_tagging.c
@@ -1514,7 +1514,7 @@ check_attribute_rename_tags(hid_t fcpl, int type)
haddr_t g_tag = 0;
hsize_t dims1[2] = {DIMS, DIMS}; /* dimensions */
hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dimensions */
- hbool_t persistant_fsms = FALSE;
+ hbool_t persistent_fsms = FALSE;
/* Testing Macro */
TESTING("tag application during attribute renaming");
@@ -1523,8 +1523,8 @@ check_attribute_rename_tags(hid_t fcpl, int type)
/* Setup */
/* ===== */
- /* check to see if the FCPL specified persistant free space managers */
- if(H5Pget_file_space_strategy(fcpl, NULL, &persistant_fsms, NULL) < 0)
+ /* check to see if the FCPL specified persistent free space managers */
+ if(H5Pget_file_space_strategy(fcpl, NULL, &persistent_fsms, NULL) < 0)
TEST_ERROR;
/* Allocate array */
@@ -1621,14 +1621,14 @@ check_attribute_rename_tags(hid_t fcpl, int type)
*/
if ( verify_tag(fid, H5AC_FSPACE_HDR_ID, H5AC__FREESPACE_TAG) < 0 ) TEST_ERROR;
- /* If the free space managers are persistant, the
+ /* If the free space managers are persistent, the
* H5MF_tidy_self_referential_fsm_hack() must have been run.
* Since this function floats all self referential free space
* managers, the H5FD_MEM_SUPER FSM will not be in the metadata
* cache.
*/
- if(!persistant_fsms && verify_tag(fid, H5AC_FSPACE_HDR_ID, H5AC__FREESPACE_TAG) < 0) TEST_ERROR;
- if(!persistant_fsms && verify_tag(fid, H5AC_FSPACE_SINFO_ID, H5AC__FREESPACE_TAG) < 0) TEST_ERROR;
+ if(!persistent_fsms && verify_tag(fid, H5AC_FSPACE_HDR_ID, H5AC__FREESPACE_TAG) < 0) TEST_ERROR;
+ if(!persistent_fsms && verify_tag(fid, H5AC_FSPACE_SINFO_ID, H5AC__FREESPACE_TAG) < 0) TEST_ERROR;
/* verify btree header and leaf node belonging to group */
if ( verify_tag(fid, H5AC_BT2_HDR_ID, g_tag) < 0 ) TEST_ERROR;
@@ -1693,7 +1693,7 @@ check_attribute_delete_tags(hid_t fcpl, int type)
haddr_t g_tag = 0;
hsize_t dims1[2] = {DIMS, DIMS}; /* dimensions */
hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dimensions */
- hbool_t persistant_fsms = FALSE;
+ hbool_t persistent_fsms = FALSE;
/* Testing Macro */
TESTING("tag application during attribute delete");
@@ -1702,8 +1702,8 @@ check_attribute_delete_tags(hid_t fcpl, int type)
/* Setup */
/* ===== */
- /* check to see if the FCPL specified persistant free space managers */
- if ( H5Pget_file_space_strategy(fcpl, NULL, &persistant_fsms, NULL) < 0 )
+ /* check to see if the FCPL specified persistent free space managers */
+ if ( H5Pget_file_space_strategy(fcpl, NULL, &persistent_fsms, NULL) < 0 )
TEST_ERROR;
/* Allocate array */
@@ -1781,13 +1781,13 @@ check_attribute_delete_tags(hid_t fcpl, int type)
TEST_ERROR;
#if 0
- /* If the free space managers are persistant, the
+ /* If the free space managers are persistent, the
* H5MF_tidy_self_referential_fsm_hack() must have been run.
* Since this function floats all self referential free space
* managers, the H5FD_MEM_SUPER FSM will not be in the metadata
* cache.
*/
- if ( ( ! persistant_fsms ) &&
+ if ( ( ! persistent_fsms ) &&
( verify_tag(fid, H5AC_FSPACE_HDR_ID, H5AC__FREESPACE_TAG) < 0 ) )
TEST_ERROR;
#endif
diff --git a/test/dtransform.c b/test/dtransform.c
index 0381bb8..f022699 100644
--- a/test/dtransform.c
+++ b/test/dtransform.c
@@ -744,7 +744,7 @@ test_getset(const hid_t dxpl_id_c_to_f)
HDfree(ptrgetTest);
ptrgetTest = NULL;
- TESTING("data transform, read after reseting of transform property")
+ TESTING("data transform, read after resetting of transform property")
if(H5Pset_data_transform(dxpl_id_c_to_f, simple) < 0)
TEST_ERROR
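For context, the property being reset here attaches an arithmetic expression
to a transfer property list; every element moved through that DXPL is
transformed. A minimal sketch (the expression is an example):

    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

    H5Pset_data_transform(dxpl, "(9/5)*x + 32");  /* e.g. Celsius -> Fahrenheit */
    /* ... H5Dread(dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, buf) ... */
    H5Pclose(dxpl);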
diff --git a/test/enum.c b/test/enum.c
index 4e20713..588e9b3 100644
--- a/test/enum.c
+++ b/test/enum.c
@@ -374,7 +374,7 @@ test_tr2(hid_t file)
E1_WHITE, E1_BLACK, E1_GREEN, E1_BLUE, E1_RED};
c_e1 data2[10];
- TESTING("O(log N) converions");
+ TESTING("O(log N) conversions");
if((cwg = H5Gcreate2(file, "test_tr2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
diff --git a/test/mf.c b/test/mf.c
index 9203804..04b7c0b 100644
--- a/test/mf.c
+++ b/test/mf.c
@@ -6114,21 +6114,21 @@ test_mf_fs_persist_split(void)
TESTING("File's free-space managers are persistent for split-file");
- /* for now, we don't support persistant free space managers
+ /* for now, we don't support persistent free space managers
* with the split file driver.
*/
SKIPPED();
- HDfprintf(stdout, " Persistant FSMs disabled in multi file driver.\n");
+ HDfprintf(stdout, " Persistent FSMs disabled in multi file driver.\n");
return 0; /* <========== note return */
/* File creation property list template */
if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
- /* for now, we don't support persistant free space managers
+ /* for now, we don't support persistent free space managers
* with the split file driver.
*/
SKIPPED();
- HDfprintf(stdout, " Persistant FSMs disabled in multi file driver.\n");
+ HDfprintf(stdout, " Persistent FSMs disabled in multi file driver.\n");
return 0; /* <========== note return */
/* File creation property list template */
@@ -6436,18 +6436,18 @@ test_mf_fs_persist_multi(void)
TESTING("File's free-space managers are persistent for multi-file");
- /* for now, we don't support persistant free space managers
+ /* for now, we don't support persistent free space managers
* with the multi file driver.
*/
SKIPPED();
- HDfprintf(stdout, " Persistant FSMs disabled in multi file driver.\n");
+ HDfprintf(stdout, " Persistent FSMs disabled in multi file driver.\n");
return 0; /* <========== note return */
- /* for now, we don't support persistant free space managers
+ /* for now, we don't support persistent free space managers
* with the multi file driver.
*/
SKIPPED();
- HDfprintf(stdout, " Persistant FSMs disabled in multi file driver.\n");
+ HDfprintf(stdout, " Persistent FSMs disabled in multi file driver.\n");
return 0; /* <========== note return */
/* File creation property list template */
diff --git a/test/mtime.c b/test/mtime.c
index 2cd2d31..80730eb 100644
--- a/test/mtime.c
+++ b/test/mtime.c
@@ -139,7 +139,7 @@ main(void)
if(oi1.ctime != MTIME1) {
H5_FAILED();
/* If this fails, examine H5Omtime.c. Modification time is very
- * system dependant (e.g., on Windows DST must be hardcoded). */
+ * system dependent (e.g., on Windows DST must be hardcoded). */
puts(" Old modification time incorrect");
goto error;
}
diff --git a/test/page_buffer.c b/test/page_buffer.c
index a6e85ee..3c61ab0 100644
--- a/test/page_buffer.c
+++ b/test/page_buffer.c
@@ -619,7 +619,7 @@ test_raw_data_handling(hid_t orig_fapl, const char *env_h5_drvr)
if(HADDR_UNDEF == (addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int)*(size_t)num_elements)))
FAIL_STACK_ERROR;
- /* intialize all the elements to have a value of -1 */
+ /* initialize all the elements to have a value of -1 */
for(i=0 ; i<num_elements ; i++)
data[i] = -1;
if(H5F_block_write(f, H5FD_MEM_DRAW, addr, sizeof(int)*(size_t)num_elements, data) < 0)
@@ -902,7 +902,7 @@ test_lru_processing(hid_t orig_fapl, const char *env_h5_drvr)
if(HADDR_UNDEF == (addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int)*(size_t)num_elements)))
FAIL_STACK_ERROR;
- /* intialize all the elements to have a value of -1 */
+ /* initialize all the elements to have a value of -1 */
for(i=0 ; i<num_elements ; i++)
data[i] = -1;
@@ -1440,7 +1440,7 @@ test_min_threshold(hid_t orig_fapl, const char *env_h5_drvr)
if(HADDR_UNDEF == (raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int)*(size_t)num_elements)))
FAIL_STACK_ERROR;
- /* intialize all the elements to have a value of -1 */
+ /* initialize all the elements to have a value of -1 */
for(i=0 ; i<num_elements ; i++)
data[i] = -1;
@@ -1567,7 +1567,7 @@ test_min_threshold(hid_t orig_fapl, const char *env_h5_drvr)
if(HADDR_UNDEF == (raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int)*(size_t)num_elements)))
FAIL_STACK_ERROR;
- /* intialize all the elements to have a value of -1 */
+ /* initialize all the elements to have a value of -1 */
for(i=0 ; i<num_elements ; i++)
data[i] = -1;
@@ -1807,7 +1807,7 @@ test_stats_collection(hid_t orig_fapl, const char *env_h5_drvr)
FAIL_STACK_ERROR;
- /* intialize all the elements to have a value of -1 */
+ /* initialize all the elements to have a value of -1 */
for(i=0 ; i<num_elements ; i++)
data[i] = -1;
diff --git a/test/tmisc.c b/test/tmisc.c
index 07234e9..1e62302 100644
--- a/test/tmisc.c
+++ b/test/tmisc.c
@@ -5499,7 +5499,7 @@ test_misc33(void)
MESSAGE(5, ("Testing that bad offset into the heap returns error"));
/* Open the test file */
- fid = H5Fopen(testfile, H5F_ACC_RDWR, H5P_DEFAULT);
+ fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT);
CHECK(fid, FAIL, "H5Fopen");
/* Case (1) */
diff --git a/test/vds.c b/test/vds.c
index 3f00224..4f03bbd 100644
--- a/test/vds.c
+++ b/test/vds.c
@@ -115,7 +115,7 @@ vds_select_equal(hid_t space1, hid_t space2)
if(npoints1 != npoints2)
return FALSE;
- /* Allocate point lists. Do not return directly afer
+ /* Allocate point lists. Do not return directly after
* allocating, to make sure buffers are freed. */
if(NULL == (buf1 = (hsize_t *)HDmalloc((size_t)rank1 * (size_t)npoints1 * sizeof(hsize_t))))
TEST_ERROR
diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c
index 21a5ce0..3647732 100644
--- a/testpar/t_filters_parallel.c
+++ b/testpar/t_filters_parallel.c
@@ -31,22 +31,56 @@ char filenames[1][256];
int nerrors = 0;
+size_t cur_filter_idx = 0;
+
#define ARRAY_SIZE(a) sizeof(a) / sizeof(a[0])
-static void test_one_chunk_filtered_dataset(void);
-static void test_filtered_dataset_no_overlap(void);
-static void test_filtered_dataset_overlap(void);
-static void test_filtered_dataset_single_no_selection(void);
-static void test_filtered_dataset_all_no_selection(void);
-static void test_filtered_dataset_point_selection(void);
-static void test_filtered_dataset_interleaved_write(void);
-static void test_3d_filtered_dataset_no_overlap_separate_pages(void);
-static void test_3d_filtered_dataset_no_overlap_same_pages(void);
-static void test_3d_filtered_dataset_overlap(void);
-static void test_cmpd_filtered_dataset_no_conversion_unshared(void);
-static void test_cmpd_filtered_dataset_no_conversion_shared(void);
-static void test_cmpd_filtered_dataset_type_conversion_unshared(void);
-static void test_cmpd_filtered_dataset_type_conversion_shared(void);
+static herr_t set_dcpl_filter(hid_t dcpl);
+
+/* Tests for writing data in parallel */
+static void test_write_one_chunk_filtered_dataset(void);
+static void test_write_filtered_dataset_no_overlap(void);
+static void test_write_filtered_dataset_overlap(void);
+static void test_write_filtered_dataset_single_no_selection(void);
+static void test_write_filtered_dataset_all_no_selection(void);
+static void test_write_filtered_dataset_point_selection(void);
+static void test_write_filtered_dataset_interleaved_write(void);
+static void test_write_3d_filtered_dataset_no_overlap_separate_pages(void);
+static void test_write_3d_filtered_dataset_no_overlap_same_pages(void);
+static void test_write_3d_filtered_dataset_overlap(void);
+static void test_write_cmpd_filtered_dataset_no_conversion_unshared(void);
+static void test_write_cmpd_filtered_dataset_no_conversion_shared(void);
+static void test_write_cmpd_filtered_dataset_type_conversion_unshared(void);
+static void test_write_cmpd_filtered_dataset_type_conversion_shared(void);
+
+/* Tests for reading data in parallel */
+static void test_read_one_chunk_filtered_dataset(void);
+static void test_read_filtered_dataset_no_overlap(void);
+static void test_read_filtered_dataset_overlap(void);
+static void test_read_filtered_dataset_single_no_selection(void);
+static void test_read_filtered_dataset_all_no_selection(void);
+static void test_read_filtered_dataset_point_selection(void);
+static void test_read_filtered_dataset_interleaved_read(void);
+static void test_read_3d_filtered_dataset_no_overlap_separate_pages(void);
+static void test_read_3d_filtered_dataset_no_overlap_same_pages(void);
+static void test_read_3d_filtered_dataset_overlap(void);
+static void test_read_cmpd_filtered_dataset_no_conversion_unshared(void);
+static void test_read_cmpd_filtered_dataset_no_conversion_shared(void);
+static void test_read_cmpd_filtered_dataset_type_conversion_unshared(void);
+static void test_read_cmpd_filtered_dataset_type_conversion_shared(void);
+
+/* Other miscellaneous tests */
+static void test_shrinking_growing_chunks(void);
+
+/*
+ * Tests for attempting to round-trip the data going from
+ *
+ * written serially -> read in parallel
+ *
+ * and
+ *
+ * written in parallel -> read serially
+ */
static void test_write_serial_read_parallel(void);
static void test_write_parallel_read_serial(void);
@@ -56,25 +90,60 @@ static int mpi_rank;
static int mpi_size;
static void (*tests[])(void) = {
- test_one_chunk_filtered_dataset,
- test_filtered_dataset_no_overlap,
- test_filtered_dataset_overlap,
- test_filtered_dataset_single_no_selection,
- test_filtered_dataset_all_no_selection,
- test_filtered_dataset_point_selection,
- test_filtered_dataset_interleaved_write,
- test_3d_filtered_dataset_no_overlap_separate_pages,
- test_3d_filtered_dataset_no_overlap_same_pages,
- test_3d_filtered_dataset_overlap,
- test_cmpd_filtered_dataset_no_conversion_unshared,
- test_cmpd_filtered_dataset_no_conversion_shared,
- test_cmpd_filtered_dataset_type_conversion_unshared,
- test_cmpd_filtered_dataset_type_conversion_shared,
- test_write_serial_read_parallel,
- test_write_parallel_read_serial,
+ test_write_one_chunk_filtered_dataset,
+ test_write_filtered_dataset_no_overlap,
+ test_write_filtered_dataset_overlap,
+ test_write_filtered_dataset_single_no_selection,
+ test_write_filtered_dataset_all_no_selection,
+ test_write_filtered_dataset_point_selection,
+ test_write_filtered_dataset_interleaved_write,
+ test_write_3d_filtered_dataset_no_overlap_separate_pages,
+ test_write_3d_filtered_dataset_no_overlap_same_pages,
+ test_write_3d_filtered_dataset_overlap,
+ test_write_cmpd_filtered_dataset_no_conversion_unshared,
+ test_write_cmpd_filtered_dataset_no_conversion_shared,
+ test_write_cmpd_filtered_dataset_type_conversion_unshared,
+ test_write_cmpd_filtered_dataset_type_conversion_shared,
+ test_read_one_chunk_filtered_dataset,
+ test_read_filtered_dataset_no_overlap,
+ test_read_filtered_dataset_overlap,
+ test_read_filtered_dataset_single_no_selection,
+ test_read_filtered_dataset_all_no_selection,
+ test_read_filtered_dataset_point_selection,
+ test_read_filtered_dataset_interleaved_read,
+ test_read_3d_filtered_dataset_no_overlap_separate_pages,
+ test_read_3d_filtered_dataset_no_overlap_same_pages,
+ test_read_3d_filtered_dataset_overlap,
+ test_read_cmpd_filtered_dataset_no_conversion_unshared,
+ test_read_cmpd_filtered_dataset_no_conversion_shared,
+ test_read_cmpd_filtered_dataset_type_conversion_unshared,
+ test_read_cmpd_filtered_dataset_type_conversion_shared,
+ test_write_serial_read_parallel,
+ test_write_parallel_read_serial,
+ test_shrinking_growing_chunks,
};
/*
+ * Function to call the appropriate HDF5 filter-setting function
+ * depending on the currently set index. Used to re-run the tests
+ * with different filters to check that the data still comes back
+ * correctly under a variety of circumstances, such as the
+ * Fletcher32 checksum filter increasing the size of the chunk.
+ */
+static herr_t
+set_dcpl_filter(hid_t dcpl)
+{
+ switch (cur_filter_idx) {
+ case 0:
+ return H5Pset_deflate(dcpl, DEFAULT_DEFLATE_LEVEL);
+ case 1:
+ return H5Pset_fletcher32(dcpl);
+ default:
+ return H5Pset_deflate(dcpl, DEFAULT_DEFLATE_LEVEL);
+ }
+}
+
+/*
* Tests parallel write of filtered data in the special
* case where a dataset is composed of a single chunk.
*
@@ -82,31 +151,32 @@ static void (*tests[])(void) = {
* 02/01/2017
*/
static void
-test_one_chunk_filtered_dataset(void)
+test_write_one_chunk_filtered_dataset(void)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
- hsize_t dataset_dims[ONE_CHUNK_FILTERED_DATASET_DIMS];
- hsize_t chunk_dims[ONE_CHUNK_FILTERED_DATASET_DIMS];
- hsize_t sel_dims[ONE_CHUNK_FILTERED_DATASET_DIMS];
- hsize_t count[ONE_CHUNK_FILTERED_DATASET_DIMS];
- hsize_t stride[ONE_CHUNK_FILTERED_DATASET_DIMS];
- hsize_t block[ONE_CHUNK_FILTERED_DATASET_DIMS];
- hsize_t offset[ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t dataset_dims[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t start[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t stride[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t count[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t block[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing one-chunk filtered dataset");
+ if (MAINPROCESS) puts("Testing write to one-chunk filtered dataset");
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -114,29 +184,30 @@ test_one_chunk_filtered_dataset(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) ONE_CHUNK_FILTERED_DATASET_NROWS;
- dataset_dims[1] = (hsize_t) ONE_CHUNK_FILTERED_DATASET_NCOLS;
- chunk_dims[0] = (hsize_t) ONE_CHUNK_FILTERED_DATASET_CH_NROWS;
- chunk_dims[1] = (hsize_t) ONE_CHUNK_FILTERED_DATASET_CH_NCOLS;
- sel_dims[0] = (hsize_t) ONE_CHUNK_FILTERED_DATASET_NROWS / (hsize_t) mpi_size;
- sel_dims[1] = (hsize_t) ONE_CHUNK_FILTERED_DATASET_NCOLS;
-
- filespace = H5Screate_simple(ONE_CHUNK_FILTERED_DATASET_DIMS, dataset_dims, NULL);
+ dataset_dims[0] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS;
+ chunk_dims[0] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS;
+ chunk_dims[1] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS;
+ sel_dims[0] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS / (hsize_t) mpi_size;
+ sel_dims[1] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS;
+
+ filespace = H5Screate_simple(WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(ONE_CHUNK_FILTERED_DATASET_DIMS, sel_dims, NULL);
+ memspace = H5Screate_simple(WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS, sel_dims, NULL);
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, ONE_CHUNK_FILTERED_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
- VRFY((SET_FILTER(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(file_id, WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
@@ -148,66 +219,73 @@ test_one_chunk_filtered_dataset(void)
*/
count[0] = 1;
count[1] = 1;
- stride[0] = (hsize_t) ONE_CHUNK_FILTERED_DATASET_CH_NROWS;
- stride[1] = (hsize_t) ONE_CHUNK_FILTERED_DATASET_CH_NCOLS;
+ stride[0] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS;
+ stride[1] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS;
block[0] = sel_dims[0];
block[1] = sel_dims[1];
- offset[0] = ((hsize_t) mpi_rank * sel_dims[0]);
- offset[1] = 0;
+ start[0] = ((hsize_t) mpi_rank * sel_dims[0]);
+ start[1] = 0;
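+ /* The dataset consists of a single chunk, so each rank selects one
+ * contiguous block of sel_dims[0] rows within that chunk, offset by
+ * its rank. */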
- if (VERBOSE_MED)
- printf("Process %d: count[ %llu, %llu ], stride[ %llu, %llu ], offset[ %llu, %llu ], block size[ %llu, %llu ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]);
+ if (VERBOSE_MED) {
+ printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
/* Select hyperslab in the file */
filespace = H5Dget_space(dset_id);
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0),
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
/* Fill data buffer */
- data_size = (hsize_t) ONE_CHUNK_FILTERED_DATASET_CH_NROWS * (hsize_t) ONE_CHUNK_FILTERED_DATASET_NCOLS * sizeof(*data);
+ data_size = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS
+ * (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS * sizeof(*data);
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- data = (C_DATATYPE *) calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
- correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
data[i] = (C_DATATYPE) GEN_DATA(i);
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = ((C_DATATYPE) i % (ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * ONE_CHUNK_FILTERED_DATASET_CH_NCOLS))
- + ((C_DATATYPE) i / (ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * ONE_CHUNK_FILTERED_DATASET_CH_NCOLS));
+ correct_buf[i] = ((C_DATATYPE) i % (WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS))
+ + ((C_DATATYPE) i / (WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS));
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded");
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
- if (data) free(data);
+ if (data) HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(file_id, "/" WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
- if (correct_buf) free(correct_buf);
- if (read_buf) free(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -228,18 +306,18 @@ test_one_chunk_filtered_dataset(void)
* 02/01/2017
*/
static void
-test_filtered_dataset_no_overlap(void)
+test_write_filtered_dataset_no_overlap(void)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
- hsize_t dataset_dims[UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t chunk_dims[UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t sel_dims[UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t count[UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t stride[UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t block[UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t offset[UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t start[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
@@ -250,9 +328,11 @@ test_filtered_dataset_no_overlap(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -260,29 +340,30 @@ test_filtered_dataset_no_overlap(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_NROWS;
- dataset_dims[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_NCOLS;
- chunk_dims[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_CH_NROWS;
- chunk_dims[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
- sel_dims[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_CH_NROWS;
- sel_dims[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_NCOLS;
-
- filespace = H5Screate_simple(UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ dataset_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ sel_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ sel_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS;
+
+ filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
+ memspace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
- VRFY((SET_FILTER(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
@@ -293,66 +374,76 @@ test_filtered_dataset_no_overlap(void)
* it to the hyperslab in the file
*/
count[0] = 1;
- count[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_NCOLS / (hsize_t) UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
- stride[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_CH_NROWS;
- stride[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
- block[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_CH_NROWS;
- block[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
- offset[0] = ((hsize_t) mpi_rank * (hsize_t) UNSHARED_FILTERED_CHUNKS_CH_NROWS * count[0]);
- offset[1] = 0;
-
- if (VERBOSE_MED)
- printf("Process %d: count[ %llu, %llu ], stride[ %llu, %llu ], offset[ %llu, %llu ], block size[ %llu, %llu ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]);
+ count[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS / (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ start[0] = ((hsize_t) mpi_rank * (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS * count[0]);
+ start[1] = 0;
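+ /* Each rank writes one full row of chunks, so no chunk is shared
+ * between ranks. */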
+
+ if (VERBOSE_MED) {
+ printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
/* Select hyperslab in the file */
filespace = H5Dget_space(dset_id);
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded");
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
/* Fill data buffer */
data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- data = (C_DATATYPE *) calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
- correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
data[i] = (C_DATATYPE) GEN_DATA(i);
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE) ( (i % (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
- + (i / (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1])));
+ correct_buf[i] =
+ (C_DATATYPE) (
+ (i % (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
+ + (i / (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
+ );
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded");
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
- if (data) free(data);
+ if (data) HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
- if (correct_buf) free(correct_buf);
- if (read_buf) free(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -374,18 +465,18 @@ test_filtered_dataset_no_overlap(void)
* 02/01/2017
*/
static void
-test_filtered_dataset_overlap(void)
+test_write_filtered_dataset_overlap(void)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
- hsize_t dataset_dims[SHARED_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t chunk_dims[SHARED_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t sel_dims[SHARED_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t count[SHARED_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t stride[SHARED_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t block[SHARED_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t offset[SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t dataset_dims[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t start[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t stride[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t count[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t block[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
@@ -396,9 +487,11 @@ test_filtered_dataset_overlap(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -406,29 +499,30 @@ test_filtered_dataset_overlap(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) SHARED_FILTERED_CHUNKS_NROWS;
- dataset_dims[1] = (hsize_t) SHARED_FILTERED_CHUNKS_NCOLS;
- chunk_dims[0] = (hsize_t) SHARED_FILTERED_CHUNKS_CH_NROWS;
- chunk_dims[1] = (hsize_t) SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ dataset_dims[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS;
sel_dims[0] = (hsize_t) DIM0_SCALE_FACTOR;
- sel_dims[1] = (hsize_t) SHARED_FILTERED_CHUNKS_CH_NCOLS * (hsize_t) DIM1_SCALE_FACTOR;
+ sel_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS * (hsize_t) DIM1_SCALE_FACTOR;
- filespace = H5Screate_simple(SHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ filespace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(SHARED_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
+ memspace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, SHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
- VRFY((SET_FILTER(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(file_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
@@ -438,68 +532,77 @@ test_filtered_dataset_overlap(void)
/* Each process defines the dataset selection in memory and writes
* it to the hyperslab in the file
*/
- count[0] = (hsize_t) SHARED_FILTERED_CHUNKS_NROWS / (hsize_t) SHARED_FILTERED_CHUNKS_CH_NROWS;
- count[1] = (hsize_t) SHARED_FILTERED_CHUNKS_NCOLS / (hsize_t) SHARED_FILTERED_CHUNKS_CH_NCOLS;
- stride[0] = (hsize_t) SHARED_FILTERED_CHUNKS_CH_NROWS;
- stride[1] = (hsize_t) SHARED_FILTERED_CHUNKS_CH_NCOLS;
- block[0] = (hsize_t) SHARED_FILTERED_CHUNKS_CH_NROWS / (hsize_t) mpi_size;
- block[1] = (hsize_t) SHARED_FILTERED_CHUNKS_CH_NCOLS;
- offset[0] = (hsize_t) mpi_rank * block[0];
- offset[1] = 0;
-
- if (VERBOSE_MED)
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], offset[ %llu, %llu ], block size[ %llu, %llu ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]);
+ count[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_NROWS / (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS;
+ count[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_NCOLS / (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS / (hsize_t) mpi_size;
+ block[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ start[0] = (hsize_t) mpi_rank * block[0];
+ start[1] = 0;
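+ /* Each chunk's rows are split evenly among the ranks
+ * (block[0] = CH_NROWS / mpi_size), so every chunk is written to by
+ * every rank. */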
+
+ if (VERBOSE_MED) {
+ printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
/* Select hyperslab in the file */
filespace = H5Dget_space(dset_id);
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded");
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
/* Fill data buffer */
data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- data = (C_DATATYPE *) calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
- correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
data[i] = (C_DATATYPE) GEN_DATA(i);
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE) ((dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
- + (i % dataset_dims[1])
- + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
+ correct_buf[i] = (C_DATATYPE) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded");
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
- if (data) free(data);
+ if (data) HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(file_id, "/" WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
- if (correct_buf) free(correct_buf);
- if (read_buf) free(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -523,18 +626,18 @@ test_filtered_dataset_overlap(void)
* 02/01/2017
*/
static void
-test_filtered_dataset_single_no_selection(void)
+test_write_filtered_dataset_single_no_selection(void)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
- hsize_t dataset_dims[SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t chunk_dims[SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t sel_dims[SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t count[SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t stride[SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t block[SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t offset[SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t dataset_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t start[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t stride[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t count[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t block[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
size_t segment_length;
hid_t file_id = -1, dset_id = -1, plist_id = -1;
@@ -546,9 +649,11 @@ test_filtered_dataset_single_no_selection(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -556,32 +661,33 @@ test_filtered_dataset_single_no_selection(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS;
- dataset_dims[1] = (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
- chunk_dims[0] = (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
- chunk_dims[1] = (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
- sel_dims[0] = (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
- sel_dims[1] = (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
-
- if (mpi_rank == SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC)
+ dataset_dims[0] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ sel_dims[0] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ sel_dims[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
+
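+ /* The designated rank makes an empty selection; it must still
+ * participate in the collective write below. */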
+ if (mpi_rank == WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC)
sel_dims[0] = sel_dims[1] = 0;
- filespace = H5Screate_simple(SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ filespace = H5Screate_simple(WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
+ memspace = H5Screate_simple(WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
- VRFY((SET_FILTER(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(file_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
@@ -592,73 +698,84 @@ test_filtered_dataset_single_no_selection(void)
* it to the hyperslab in the file
*/
count[0] = 1;
- count[1] = (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
- stride[0] = (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
- stride[1] = (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
- block[0] = (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
- block[1] = (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
- offset[0] = (hsize_t) mpi_rank * (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * count[0];
- offset[1] = 0;
-
- if (VERBOSE_MED)
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], offset[ %llu, %llu ], block size[ %llu, %llu ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]);
+ count[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ start[0] = (hsize_t) mpi_rank * (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * count[0];
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
/* Select hyperslab in the file */
filespace = H5Dget_space(dset_id);
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
- if (mpi_rank == SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC)
+ if (mpi_rank == WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC)
VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded");
else
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded");
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
/* Fill data buffer */
data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- data = (C_DATATYPE *) calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
- correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
data[i] = (C_DATATYPE) GEN_DATA(i);
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE) ( (i % (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
- + (i / (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1])));
+ correct_buf[i] =
+ (C_DATATYPE) (
+ (i % (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
+ + (i / (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
+ );
/* Compute the correct offset into the buffer for the process having no selection and clear it */
segment_length = dataset_dims[0] * dataset_dims[1] / (hsize_t) mpi_size;
- HDmemset(correct_buf + ((size_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length), 0, segment_length * sizeof(*data));
+ HDmemset(correct_buf + ((size_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length),
+ 0, segment_length * sizeof(*correct_buf));
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded");
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
- if (data) free(data);
+ if (data) HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(file_id, "/" WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
- if (correct_buf) free(correct_buf);
- if (read_buf) free(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -683,14 +800,14 @@ test_filtered_dataset_single_no_selection(void)
* 02/02/2017
*/
static void
-test_filtered_dataset_all_no_selection(void)
+test_write_filtered_dataset_all_no_selection(void)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
- hsize_t dataset_dims[ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t chunk_dims[ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t sel_dims[ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t dataset_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
@@ -701,9 +818,11 @@ test_filtered_dataset_all_no_selection(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -711,28 +830,29 @@ test_filtered_dataset_all_no_selection(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS;
- dataset_dims[1] = (hsize_t) ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
- chunk_dims[0] = (hsize_t) ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
- chunk_dims[1] = (hsize_t) ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ dataset_dims[0] = (hsize_t) WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t) WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
sel_dims[0] = sel_dims[1] = 0;
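+ /* Every rank makes an empty selection; all ranks must still
+ * participate in the collective write call below. */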
- filespace = H5Screate_simple(ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ filespace = H5Screate_simple(WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
+ memspace = H5Screate_simple(WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
- VRFY((SET_FILTER(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(file_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
@@ -748,11 +868,11 @@ test_filtered_dataset_all_no_selection(void)
data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- data = (C_DATATYPE *) calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
- correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
data[i] = (C_DATATYPE) GEN_DATA(i);
@@ -761,27 +881,31 @@ test_filtered_dataset_all_no_selection(void)
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded");
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
- if (data) free(data);
+ if (data) HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(file_id, "/" WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
- if (correct_buf) free(correct_buf);
- if (read_buf) free(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -800,15 +924,15 @@ test_filtered_dataset_all_no_selection(void)
* 02/02/2017
*/
static void
-test_filtered_dataset_point_selection(void)
+test_write_filtered_dataset_point_selection(void)
{
C_DATATYPE *data = NULL;
C_DATATYPE *correct_buf = NULL;
C_DATATYPE *read_buf = NULL;
hsize_t *coords = NULL;
- hsize_t dataset_dims[POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t chunk_dims[POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t sel_dims[POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t dataset_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
size_t i, j, data_size, correct_buf_size;
size_t num_points;
hid_t file_id = -1, dset_id = -1, plist_id = -1;
@@ -820,9 +944,11 @@ test_filtered_dataset_point_selection(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -830,29 +956,30 @@ test_filtered_dataset_point_selection(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) POINT_SELECTION_FILTERED_CHUNKS_NROWS;
- dataset_dims[1] = (hsize_t) POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
- chunk_dims[0] = (hsize_t) POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS;
- chunk_dims[1] = (hsize_t) POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
- sel_dims[0] = (hsize_t) POINT_SELECTION_FILTERED_CHUNKS_NROWS / (hsize_t) mpi_size;
- sel_dims[1] = (hsize_t) POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
-
- filespace = H5Screate_simple(POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ dataset_dims[0] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ sel_dims[0] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS / (hsize_t) mpi_size;
+ sel_dims[1] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
+
+ filespace = H5Screate_simple(WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
+ memspace = H5Screate_simple(WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
- VRFY((SET_FILTER(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(file_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
@@ -863,62 +990,69 @@ test_filtered_dataset_point_selection(void)
filespace = H5Dget_space(dset_id);
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
- num_points = (hsize_t) POINT_SELECTION_FILTERED_CHUNKS_NROWS * (hsize_t) POINT_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t) mpi_size;
- coords = (hsize_t *) calloc(1, 2 * num_points * sizeof(*coords));
- VRFY((NULL != coords), "Coords calloc succeeded");
+ num_points = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS * (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t) mpi_size;
+ coords = (hsize_t *) HDcalloc(1, 2 * num_points * sizeof(*coords));
+ VRFY((NULL != coords), "Coords HDcalloc succeeded");
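+
+ /* Rank n selects every point in rows n, n + mpi_size, n + 2 * mpi_size,
+ * ..., cycling through each selected row's columns in order. */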
for (i = 0; i < num_points; i++)
- for (j = 0; j < POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS; j++)
- coords[(i * POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS) + j] = (j > 0) ? (i % (hsize_t) POINT_SELECTION_FILTERED_CHUNKS_NCOLS)
- : ((hsize_t) mpi_rank + ((hsize_t) mpi_size * (i / (hsize_t) POINT_SELECTION_FILTERED_CHUNKS_NCOLS)));
+ for (j = 0; j < WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS; j++)
+ coords[(i * WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS) + j] =
+ (j > 0) ? (i % (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS)
+ : ((hsize_t) mpi_rank + ((hsize_t) mpi_size * (i / (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS)));
- VRFY((H5Sselect_elements(filespace, H5S_SELECT_SET, (hsize_t) num_points, (const hsize_t *) coords) >= 0),
+ VRFY((H5Sselect_elements(filespace, H5S_SELECT_SET, (hsize_t) num_points, (const hsize_t *) coords) >= 0),
"Point selection succeeded");
/* Fill data buffer */
data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- data = (C_DATATYPE *) calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
- correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
data[i] = (C_DATATYPE) GEN_DATA(i);
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE) ((dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
- + (i % dataset_dims[1])
- + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
+ correct_buf[i] = (C_DATATYPE) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded");
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
- if (data) free(data);
+ if (data) HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(file_id, "/" WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
- if (coords) free(coords);
- if (correct_buf) free(correct_buf);
- if (read_buf) free(read_buf);
+ if (coords) HDfree(coords);
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -941,7 +1075,7 @@ test_filtered_dataset_point_selection(void)
* 02/02/2017
*/
static void
-test_filtered_dataset_interleaved_write(void)
+test_write_filtered_dataset_interleaved_write(void)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -949,10 +1083,10 @@ test_filtered_dataset_interleaved_write(void)
hsize_t dataset_dims[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
hsize_t chunk_dims[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
hsize_t sel_dims[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
- hsize_t count[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
+ hsize_t start[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
hsize_t stride[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
+ hsize_t count[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
hsize_t block[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
- hsize_t offset[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
@@ -963,9 +1097,11 @@ test_filtered_dataset_interleaved_write(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -990,10 +1126,11 @@ test_filtered_dataset_interleaved_write(void)
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, INTERLEAVED_WRITE_FILTERED_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, INTERLEAVED_WRITE_FILTERED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
- VRFY((SET_FILTER(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
dset_id = H5Dcreate2(file_id, INTERLEAVED_WRITE_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
@@ -1011,67 +1148,77 @@ test_filtered_dataset_interleaved_write(void)
stride[1] = (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS;
block[0] = 1;
block[1] = (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS;
- offset[0] = (hsize_t) mpi_rank;
- offset[1] = 0;
+ start[0] = (hsize_t) mpi_rank;
+ start[1] = 0;
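+ /* Each rank writes single rows (block[0] = 1) starting at row
+ * mpi_rank, so the ranks' rows interleave within every chunk. */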
- if (VERBOSE_MED)
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], offset[ %llu, %llu ], block size[ %llu, %llu ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]);
+ if (VERBOSE_MED) {
+ printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
/* Select hyperslab in the file */
filespace = H5Dget_space(dset_id);
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded");
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
/* Fill data buffer */
data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- data = (C_DATATYPE *) calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
- correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
data[i] = (C_DATATYPE) GEN_DATA(i);
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- /* Add Column Index */
- correct_buf[i] = (C_DATATYPE) ( (i % (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)
+ /* Add Column Index */
+ correct_buf[i] =
+ (C_DATATYPE) (
+ (i % (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)
- /* Add the Row Index */
- + ((i % (hsize_t) (mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)) / (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)
+ /* Add the Row Index */
+ + ((i % (hsize_t) (mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)) / (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)
- /* Add the amount that gets added when a rank moves down to its next section vertically in the dataset */
- + ((hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS * (i / (hsize_t) (mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS))));
+ /* Add the amount that gets added when a rank moves down to its next section vertically in the dataset */
+ + ((hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS * (i / (hsize_t) (mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)))
+ );
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded");
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
- if (data) free(data);
+ if (data) HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
dset_id = H5Dopen2(file_id, "/" INTERLEAVED_WRITE_FILTERED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
- if (correct_buf) free(correct_buf);
- if (read_buf) free(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -1091,18 +1238,18 @@ test_filtered_dataset_interleaved_write(void)
* 02/06/2017
*/
static void
-test_3d_filtered_dataset_no_overlap_separate_pages(void)
+test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
- hsize_t dataset_dims[UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
- hsize_t chunk_dims[UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
- hsize_t sel_dims[UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
- hsize_t count[UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
- hsize_t stride[UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
- hsize_t block[UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
- hsize_t offset[UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t start[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
@@ -1113,9 +1260,11 @@ test_3d_filtered_dataset_no_overlap_separate_pages(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -1123,32 +1272,33 @@ test_3d_filtered_dataset_no_overlap_separate_pages(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
- dataset_dims[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
- dataset_dims[2] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH;
- chunk_dims[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
- chunk_dims[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ dataset_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
+ dataset_dims[2] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH;
+ chunk_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ chunk_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
chunk_dims[2] = 1;
- sel_dims[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
- sel_dims[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
+ sel_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
+ sel_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
sel_dims[2] = 1;
- filespace = H5Screate_simple(UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, dataset_dims, NULL);
+ filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, sel_dims, NULL);
+ memspace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, sel_dims, NULL);
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
- VRFY((SET_FILTER(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
@@ -1158,38 +1308,41 @@ test_3d_filtered_dataset_no_overlap_separate_pages(void)
/* Each process defines the dataset selection in memory and writes
* it to the hyperslab in the file
*/
- count[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
- count[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ count[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ count[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
count[2] = 1;
- stride[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
- stride[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ stride[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ stride[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
stride[2] = 1;
- block[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
- block[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ block[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ block[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
block[2] = 1;
- offset[0] = 0;
- offset[1] = 0;
- offset[2] = (hsize_t) mpi_rank;
-
- if (VERBOSE_MED)
- printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], offset[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
- mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0], offset[1], offset[2], block[0], block[1], block[2]);
+ start[0] = 0;
+ start[1] = 0;
+ start[2] = (hsize_t) mpi_rank;
+
+ if (VERBOSE_MED) {
+ printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], start[2], block[0], block[1], block[2]);
+ fflush(stdout);
+ }
/* Select hyperslab in the file */
filespace = H5Dget_space(dset_id);
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded");
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
/* Fill data buffer */
data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data);
correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
- data = (C_DATATYPE *) calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
- correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
data[i] = (C_DATATYPE) GEN_DATA(i);
@@ -1201,27 +1354,31 @@ test_3d_filtered_dataset_no_overlap_separate_pages(void)
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded");
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
- if (data) free(data);
+ if (data) HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
- if (correct_buf) free(correct_buf);
- if (read_buf) free(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -1242,18 +1399,18 @@ test_3d_filtered_dataset_no_overlap_separate_pages(void)
* 02/06/2017
*/
static void
-test_3d_filtered_dataset_no_overlap_same_pages(void)
+test_write_3d_filtered_dataset_no_overlap_same_pages(void)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
- hsize_t dataset_dims[UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
- hsize_t chunk_dims[UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
- hsize_t sel_dims[UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
- hsize_t count[UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
- hsize_t stride[UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
- hsize_t block[UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
- hsize_t offset[UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t start[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
hid_t file_id, dset_id, plist_id;
hid_t filespace, memspace;
@@ -1264,9 +1421,11 @@ test_3d_filtered_dataset_no_overlap_same_pages(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -1274,32 +1433,33 @@ test_3d_filtered_dataset_no_overlap_same_pages(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS;
- dataset_dims[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
- dataset_dims[2] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
- chunk_dims[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
- chunk_dims[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ dataset_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
+ dataset_dims[2] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
+ chunk_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ chunk_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
chunk_dims[2] = 1;
- sel_dims[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
- sel_dims[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
- sel_dims[2] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
+ sel_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ sel_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
+ sel_dims[2] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
- filespace = H5Screate_simple(UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, dataset_dims, NULL);
+ filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, sel_dims, NULL);
+ memspace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, sel_dims, NULL);
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
- VRFY((SET_FILTER(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
@@ -1310,69 +1470,79 @@ test_3d_filtered_dataset_no_overlap_same_pages(void)
* it to the hyperslab in the file
*/
count[0] = 1;
- count[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ count[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
count[2] = (hsize_t) mpi_size;
- stride[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
- stride[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ stride[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ stride[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
stride[2] = 1;
- block[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
- block[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ block[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ block[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
block[2] = 1;
- offset[0] = ((hsize_t) mpi_rank * (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS * count[0]);
- offset[1] = 0;
- offset[2] = 0;
-
- if (VERBOSE_MED)
- printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], offset[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
- mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0], offset[1], offset[2], block[0], block[1], block[2]);
+ start[0] = ((hsize_t) mpi_rank * (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS * count[0]);
+ start[1] = 0;
+ start[2] = 0;
+
+ if (VERBOSE_MED) {
+ printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], start[2], block[0], block[1], block[2]);
+ fflush(stdout);
+ }
/* Select hyperslab in the file */
filespace = H5Dget_space(dset_id);
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded");
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
/* Fill data buffer */
data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data);
correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
- data = (C_DATATYPE *) calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
- correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
data[i] = (C_DATATYPE) GEN_DATA(i);
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE) ((i % (dataset_dims[0] * dataset_dims[1])) + (i / (dataset_dims[0] * dataset_dims[1])));
+ correct_buf[i] = (C_DATATYPE) (
+ (i % (dataset_dims[0] * dataset_dims[1]))
+ + (i / (dataset_dims[0] * dataset_dims[1]))
+ );
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded");
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
- if (data) free(data);
+ if (data) HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
- if (correct_buf) free(correct_buf);
- if (read_buf) free(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -1393,18 +1563,18 @@ test_3d_filtered_dataset_no_overlap_same_pages(void)
* 02/06/2017
*/
static void
-test_3d_filtered_dataset_overlap(void)
+test_write_3d_filtered_dataset_overlap(void)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
- hsize_t dataset_dims[SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
- hsize_t chunk_dims[SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
- hsize_t sel_dims[SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
- hsize_t count[SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
- hsize_t stride[SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
- hsize_t block[SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
- hsize_t offset[SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t dataset_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t start[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t stride[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t count[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t block[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
@@ -1415,9 +1585,11 @@ test_3d_filtered_dataset_overlap(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -1425,32 +1597,33 @@ test_3d_filtered_dataset_overlap(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) SHARED_FILTERED_CHUNKS_3D_NROWS;
- dataset_dims[1] = (hsize_t) SHARED_FILTERED_CHUNKS_3D_NCOLS;
- dataset_dims[2] = (hsize_t) SHARED_FILTERED_CHUNKS_3D_DEPTH;
- chunk_dims[0] = (hsize_t) SHARED_FILTERED_CHUNKS_3D_CH_NROWS;
- chunk_dims[1] = (hsize_t) SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
+ dataset_dims[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS;
+ dataset_dims[2] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH;
+ chunk_dims[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS;
+ chunk_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
chunk_dims[2] = 1;
- sel_dims[0] = (hsize_t) (SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size);
- sel_dims[1] = (hsize_t) SHARED_FILTERED_CHUNKS_3D_NCOLS;
- sel_dims[2] = (hsize_t) SHARED_FILTERED_CHUNKS_3D_DEPTH;
+ sel_dims[0] = (hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size);
+ sel_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS;
+ sel_dims[2] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH;
- filespace = H5Screate_simple(SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, dataset_dims, NULL);
+ filespace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, sel_dims, NULL);
+ memspace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, sel_dims, NULL);
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
- VRFY((SET_FILTER(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(file_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
@@ -1460,77 +1633,89 @@ test_3d_filtered_dataset_overlap(void)
/* Each process defines the dataset selection in memory and writes
* it to the hyperslab in the file
*/
- count[0] = (hsize_t) (SHARED_FILTERED_CHUNKS_3D_NROWS / SHARED_FILTERED_CHUNKS_3D_CH_NROWS);
- count[1] = (hsize_t) (SHARED_FILTERED_CHUNKS_3D_NCOLS / SHARED_FILTERED_CHUNKS_3D_CH_NCOLS);
- count[2] = (hsize_t) SHARED_FILTERED_CHUNKS_3D_DEPTH;
- stride[0] = (hsize_t) SHARED_FILTERED_CHUNKS_3D_CH_NROWS;
- stride[1] = (hsize_t) SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
+ count[0] = (hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS / WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS);
+ count[1] = (hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS / WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS);
+ count[2] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH;
+ stride[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS;
+ stride[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
stride[2] = 1;
block[0] = 1;
- block[1] = (hsize_t) SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
+ block[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
block[2] = 1;
- offset[0] = (hsize_t) mpi_rank;
- offset[1] = 0;
- offset[2] = 0;
-
- if (VERBOSE_MED)
- printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], offset[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
- mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0], offset[1], offset[2], block[0], block[1], block[2]);
+ start[0] = (hsize_t) mpi_rank;
+ start[1] = 0;
+ start[2] = 0;
+
+ if (VERBOSE_MED) {
+ printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], start[2], block[0], block[1], block[2]);
+ fflush(stdout);
+ }
/* Select hyperslab in the file */
filespace = H5Dget_space(dset_id);
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded");
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
/* Fill data buffer */
data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data);
correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
- data = (C_DATATYPE *) calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
- correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
data[i] = (C_DATATYPE) GEN_DATA(i);
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- /* Add the Column Index */
- correct_buf[i] = (C_DATATYPE) ( (i % (hsize_t) (SHARED_FILTERED_CHUNKS_3D_DEPTH * SHARED_FILTERED_CHUNKS_3D_NCOLS))
+ /* Add the Column Index */
+ correct_buf[i] =
+ (C_DATATYPE) (
+ (i % (hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))
- /* Add the Row Index */
- + ((i % (hsize_t) (mpi_size * SHARED_FILTERED_CHUNKS_3D_DEPTH * SHARED_FILTERED_CHUNKS_3D_NCOLS)) / (hsize_t) (SHARED_FILTERED_CHUNKS_3D_DEPTH * SHARED_FILTERED_CHUNKS_3D_NCOLS))
+ /* Add the Row Index */
+ + ((i % (hsize_t) (mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))
+ / (hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))
- /* Add the amount that gets added when a rank moves down to its next section vertically in the dataset */
- + ((hsize_t) (SHARED_FILTERED_CHUNKS_3D_DEPTH * SHARED_FILTERED_CHUNKS_3D_NCOLS) * (i / (hsize_t) (mpi_size * SHARED_FILTERED_CHUNKS_3D_DEPTH * SHARED_FILTERED_CHUNKS_3D_NCOLS))));
+ /* Add the amount added when a rank moves down to its next vertical section of the dataset */
+ + ((hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)
+ * (i / (hsize_t) (mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)))
+ );
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded");
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
- if (data) free(data);
+ if (data) HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(file_id, "/" WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
- if (correct_buf) free(correct_buf);
- if (read_buf) free(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -1549,21 +1734,22 @@ test_3d_filtered_dataset_overlap(void)
* Programmer: Jordan Henderson
* 02/10/2017
*/
-/* JTH: This test currently cannot be data-verified due to the floating-point data involved */
static void
-test_cmpd_filtered_dataset_no_conversion_unshared(void)
+test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
{
- cmpd_filtered_t *data = NULL;
- hsize_t dataset_dims[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t chunk_dims[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t sel_dims[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t count[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t stride[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t block[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t offset[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
- size_t i;
- hid_t file_id = -1, dset_id = -1, plist_id = -1, memtype = -1;
- hid_t filespace = -1, memspace = -1;
+ COMPOUND_C_DATATYPE *data = NULL;
+ COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t start[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ size_t i, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1, memtype = -1;
+ hid_t filespace = -1, memspace = -1;
if (MAINPROCESS) puts("Testing write to unshared filtered chunks in Compound Datatype dataset without Datatype conversion");
@@ -1571,9 +1757,11 @@ test_cmpd_filtered_dataset_no_conversion_unshared(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -1581,38 +1769,41 @@ test_cmpd_filtered_dataset_no_conversion_unshared(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS;
- dataset_dims[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS;
- chunk_dims[0] = COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
- chunk_dims[1] = COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
- sel_dims[0] = COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
- sel_dims[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
-
- filespace = H5Screate_simple(COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, dataset_dims, NULL);
+ dataset_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS;
+ chunk_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
+ chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
+ sel_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
+ sel_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+
+ filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, sel_dims, NULL);
+ memspace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, sel_dims, NULL);
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
- VRFY((SET_FILTER(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
/* Create the compound type for memory. */
- memtype = H5Tcreate(H5T_COMPOUND, sizeof(cmpd_filtered_t));
+ memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
VRFY((memtype >= 0), "Datatype creation succeeded");
- VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(cmpd_filtered_t, field1), H5T_NATIVE_SHORT) >= 0), "Datatype insertion succeeded");
- VRFY((H5Tinsert(memtype, "IntData", HOFFSET(cmpd_filtered_t, field2), H5T_NATIVE_INT) >= 0), "Datatype insertion succeeded");
- VRFY((H5Tinsert(memtype, "LongData", HOFFSET(cmpd_filtered_t, field3), H5T_NATIVE_LONG) >= 0), "Datatype insertion succeeded");
- VRFY((H5Tinsert(memtype, "DoubleData", HOFFSET(cmpd_filtered_t, field4), H5T_NATIVE_DOUBLE) >= 0), "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
+ "Datatype insertion succeeded");
- dset_id = H5Dcreate2(file_id, COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, memtype, filespace,
+ dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, memtype, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
@@ -1623,45 +1814,88 @@ test_cmpd_filtered_dataset_no_conversion_unshared(void)
* it to the hyperslab in the file
*/
count[0] = 1;
- count[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
- stride[0] = COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
- stride[1] = COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
- block[0] = COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
- block[1] = COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
- offset[0] = 0;
- offset[1] = ((hsize_t) mpi_rank * COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS);
-
- if (VERBOSE_MED)
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], offset[ %llu, %llu ], block size[ %llu, %llu ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]);
+ count[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+ stride[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
+ stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
+ block[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
+ block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
+ start[0] = 0;
+ start[1] = ((hsize_t) mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS);
+
+ if (VERBOSE_MED) {
+ printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
/* Select hyperslab in the file */
filespace = H5Dget_space(dset_id);
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded");
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ data = (COMPOUND_C_DATATYPE *) HDcalloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data));
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
- data = (COMPOUND_C_DATATYPE *) calloc(1, (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data));
- VRFY((NULL != data), "calloc succeeded");
+ correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
/* Fill data buffer */
- memset(data, 0, sizeof(cmpd_filtered_t) * (size_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC);
- for (i = 0; i < (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; i++) {
+ for (i = 0; i < (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; i++) {
data[i].field1 = (short) GEN_DATA(i);
data[i].field2 = (int) GEN_DATA(i);
data[i].field3 = (long) GEN_DATA(i);
- data[i].field4 = (double) GEN_DATA(i);
+ }
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
+ correct_buf[i].field1 = (short) (
+ (i % dataset_dims[1])
+ + (i / dataset_dims[1])
+ );
+
+ correct_buf[i].field2 = (int) (
+ (i % dataset_dims[1])
+ + (i / dataset_dims[1])
+ );
+
+ correct_buf[i].field3 = (long) (
+ (i % dataset_dims[1])
+ + (i / dataset_dims[1])
+ );
}
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
- VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded");
+ VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
- if (data) free(data);
+ if (data) HDfree(data);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -1681,31 +1915,34 @@ test_cmpd_filtered_dataset_no_conversion_unshared(void)
* Programmer: Jordan Henderson
* 02/10/2017
*/
-/* JTH: This test currently cannot be data-verified due to the floating-point data involved */
static void
-test_cmpd_filtered_dataset_no_conversion_shared(void)
+test_write_cmpd_filtered_dataset_no_conversion_shared(void)
{
- cmpd_filtered_t *data = NULL;
- hsize_t dataset_dims[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t chunk_dims[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t sel_dims[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t count[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t stride[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t block[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t offset[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
- size_t i;
- hid_t file_id, dset_id, plist_id, memtype;
- hid_t filespace, memspace;
+ COMPOUND_C_DATATYPE *data = NULL;
+ COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t start[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ size_t i, correct_buf_size;
+ hid_t file_id, dset_id, plist_id, memtype;
+ hid_t filespace, memspace;
if (MAINPROCESS) puts("Testing write to shared filtered chunks in Compound Datatype dataset without Datatype conversion");
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id>= 0), "FAPL creation succeeded");
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -1713,38 +1950,41 @@ test_cmpd_filtered_dataset_no_conversion_shared(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS;
- dataset_dims[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS;
- chunk_dims[0] = (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
- chunk_dims[1] = COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
- sel_dims[0] = (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
- sel_dims[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC;
-
- filespace = H5Screate_simple(COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL);
+ dataset_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS;
+ chunk_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
+ chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
+ sel_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
+ sel_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC;
+
+ filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, sel_dims, NULL);
+ memspace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, sel_dims, NULL);
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
- VRFY((SET_FILTER(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
/* Create the compound type for memory. */
- memtype = H5Tcreate(H5T_COMPOUND, sizeof(cmpd_filtered_t));
+ memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
VRFY((memtype >= 0), "Datatype creation succeeded");
- VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(cmpd_filtered_t, field1), H5T_NATIVE_SHORT) >= 0), "Datatype insertion succeeded");
- VRFY((H5Tinsert(memtype, "IntData", HOFFSET(cmpd_filtered_t, field2), H5T_NATIVE_INT) >= 0), "Datatype insertion succeeded");
- VRFY((H5Tinsert(memtype, "LongData", HOFFSET(cmpd_filtered_t, field3), H5T_NATIVE_LONG) >= 0), "Datatype insertion succeeded");
- VRFY((H5Tinsert(memtype, "DoubleData", HOFFSET(cmpd_filtered_t, field4), H5T_NATIVE_DOUBLE) >= 0), "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
+ "Datatype insertion succeeded");
- dset_id = H5Dcreate2(file_id, COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype, filespace,
+ dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
@@ -1755,45 +1995,91 @@ test_cmpd_filtered_dataset_no_conversion_shared(void)
* it to the hyperslab in the file
*/
count[0] = 1;
- count[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC;
- stride[0] = (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
- stride[1] = COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
- block[0] = (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
- block[1] = COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
- offset[0] = (hsize_t) mpi_rank;
- offset[1] = 0;
-
- if (VERBOSE_MED)
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], offset[ %llu, %llu ], block size[ %llu, %llu ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]);
+ count[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC;
+ stride[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
+ stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
+ block[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
+ block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
+ start[0] = (hsize_t) mpi_rank;
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
/* Select hyperslab in the file */
filespace = H5Dget_space(dset_id);
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded");
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ data = (COMPOUND_C_DATATYPE *) HDcalloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data));
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
- data = (COMPOUND_C_DATATYPE *) calloc(1, (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data));
- VRFY((NULL != data), "calloc succeeded");
+ correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
/* Fill data buffer */
- memset(data, 0, sizeof(cmpd_filtered_t) * (size_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC);
- for (i = 0; i < (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; i++) {
+ for (i = 0; i < (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; i++) {
data[i].field1 = (short) GEN_DATA(i);
data[i].field2 = (int) GEN_DATA(i);
data[i].field3 = (long) GEN_DATA(i);
- data[i].field4 = (double) GEN_DATA(i);
+ }
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
+ correct_buf[i].field1 = (short) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+
+ correct_buf[i].field2 = (int) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+
+ correct_buf[i].field3 = (long) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
}
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
+
+ if (data) HDfree(data);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
- VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded");
+ VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
- if (data) free(data);
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -1810,29 +2096,30 @@ test_cmpd_filtered_dataset_no_conversion_shared(void)
* chunks using a compound datatype which requires a
* datatype conversion.
*
- * This test currently should fail because the datatype
- * conversion causes the parallel library to break
- * to independent I/O and this isn't allowed when
+ * NOTE: This test currently should fail because the
+ * datatype conversion causes the parallel library to
+ * break to independent I/O and this isn't allowed when
* there are filters in the pipeline.
*
* Programmer: Jordan Henderson
* 02/07/2017
*/
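The failure mode in the NOTE can be reproduced in miniature: when the memory type passed to H5Dwrite differs from the dataset's file type, the required conversion drops the MPI-IO layer to independent access, which is rejected while a filter sits in the pipeline. A hedged sketch under assumed names follows -- the file name, extents, and the deflate filter are illustration only, and the failing return is the behavior this patch documents, not a guarantee for later library versions.

    #include "hdf5.h"
    #include <mpi.h>

    int main(int argc, char **argv)
    {
        MPI_Init(&argc, &argv);

        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
        hid_t file = H5Fcreate("conv_fail.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

        hsize_t dims[1] = {8}, chunk[1] = {4};
        hid_t space = H5Screate_simple(1, dims, NULL);
        hid_t dcpl  = H5Pcreate(H5P_DATASET_CREATE);
        H5Pset_chunk(dcpl, 1, chunk);
        H5Pset_deflate(dcpl, 6);   /* any filter in the pipeline will do */

        /* File type long, memory type int below => conversion required */
        hid_t dset = H5Dcreate2(file, "d", H5T_NATIVE_LONG, space,
                                H5P_DEFAULT, dcpl, H5P_DEFAULT);

        hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
        H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

        int buf[8] = {0};
        /* Expected (at the time of this patch) to fail: the conversion
         * forces independent I/O, which is disallowed with filters. */
        herr_t ret = H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, buf);
        (void)ret;   /* ret < 0 is the documented outcome here */

        H5Pclose(dxpl); H5Pclose(dcpl); H5Dclose(dset);
        H5Sclose(space); H5Fclose(file); H5Pclose(fapl);
        MPI_Finalize();
        return 0;
    }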
-/* JTH: This test currently cannot be data-verified due to the floating-point data involved */
static void
-test_cmpd_filtered_dataset_type_conversion_unshared(void)
+test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
{
- cmpd_filtered_t *data = NULL;
- hsize_t dataset_dims[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t chunk_dims[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t sel_dims[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t count[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t stride[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t block[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t offset[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
- size_t i;
- hid_t file_id = -1, dset_id = -1, plist_id = -1, filetype = -1, memtype = -1;
- hid_t filespace = -1, memspace = -1;
+ COMPOUND_C_DATATYPE *data = NULL;
+ COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t start[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ size_t i, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1, filetype = -1, memtype = -1;
+ hid_t filespace = -1, memspace = -1;
if (MAINPROCESS) puts("Testing write to unshared filtered chunks in Compound Datatype dataset with Datatype conversion");
@@ -1840,9 +2127,11 @@ test_cmpd_filtered_dataset_type_conversion_unshared(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -1850,48 +2139,53 @@ test_cmpd_filtered_dataset_type_conversion_unshared(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS;
- dataset_dims[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS;
- chunk_dims[0] = COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
- chunk_dims[1] = COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
- sel_dims[0] = COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
- sel_dims[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
-
- filespace = H5Screate_simple(COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, dataset_dims, NULL);
+ dataset_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS;
+ chunk_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
+ sel_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ sel_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+
+ filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, sel_dims, NULL);
+ memspace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, sel_dims, NULL);
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
- VRFY((SET_FILTER(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
/* Create the compound type for memory. */
- memtype = H5Tcreate(H5T_COMPOUND, sizeof(cmpd_filtered_t));
+ memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
VRFY((memtype >= 0), "Datatype creation succeeded");
- VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(cmpd_filtered_t, field1), H5T_NATIVE_SHORT) >= 0), "Datatype insertion succeeded");
- VRFY((H5Tinsert(memtype, "IntData", HOFFSET(cmpd_filtered_t, field2), H5T_NATIVE_INT) >= 0), "Datatype insertion succeeded");
- VRFY((H5Tinsert(memtype, "LongData", HOFFSET(cmpd_filtered_t, field3), H5T_NATIVE_LONG) >= 0), "Datatype insertion succeeded");
- VRFY((H5Tinsert(memtype, "DoubleData", HOFFSET(cmpd_filtered_t, field4), H5T_NATIVE_DOUBLE) >= 0), "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
+ "Datatype insertion succeeded");
/* Create the compound type for file. */
filetype = H5Tcreate(H5T_COMPOUND, 32);
VRFY((filetype >= 0), "Datatype creation succeeded");
- VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
- VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
- VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
- VRFY((H5Tinsert(filetype, "DoubleData", 24, H5T_IEEE_F64BE) >= 0), "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
- dset_id = H5Dcreate2(file_id, COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, filetype, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, filetype, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
@@ -1901,48 +2195,74 @@ test_cmpd_filtered_dataset_type_conversion_unshared(void)
* it to the hyperslab in the file
*/
count[0] = 1;
- count[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
- stride[0] = COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
- stride[1] = COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
- block[0] = COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
- block[1] = COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
- offset[0] = 0;
- offset[1] = ((hsize_t) mpi_rank * COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS);
-
- if (VERBOSE_MED)
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], offset[ %llu, %llu ], block size[ %llu, %llu ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]);
+ count[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+ stride[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
+ block[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
+ start[0] = 0;
+ start[1] = ((hsize_t) mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS);
+
+ if (VERBOSE_MED) {
+ printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
/* Select hyperslab in the file */
filespace = H5Dget_space(dset_id);
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded");
- data = (COMPOUND_C_DATATYPE *) calloc(1, (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data));
- VRFY((NULL != data), "calloc succeeded");
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ data = (COMPOUND_C_DATATYPE *) HDcalloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data));
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
+
+ correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
/* Fill data buffer */
- memset(data, 0, sizeof(cmpd_filtered_t) * (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC);
- for (i = 0; i < (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; i++) {
+ for (i = 0; i < (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; i++) {
data[i].field1 = (short) GEN_DATA(i);
data[i].field2 = (int) GEN_DATA(i);
data[i].field3 = (long) GEN_DATA(i);
- data[i].field4 = (double) GEN_DATA(i);
}
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
/* Ensure that this test currently fails since type conversions break collective mode */
H5E_BEGIN_TRY {
- VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) < 0), "Dataset write succeeded");
+ VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) < 0),
+ "Dataset write succeeded");
} H5E_END_TRY;
- if (data) free(data);
+ if (data) HDfree(data);
+
+ /* Verify that no data was written */
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
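
The NOTE on this pair of tests is the key mechanism: a memory-to-file datatype conversion forces the parallel library to drop from collective to independent I/O, which is rejected while filters are in the pipeline, so the H5Dwrite above is expected to fail. As a hedged aside (not part of this patch), a caller can confirm which condition broke collective I/O by querying the transfer property list after the write; the helper name below is hypothetical, but H5Pget_mpio_no_collective_cause() and the H5D_MPIO_DATATYPE_CONVERSION flag are standard HDF5 API:

#include "hdf5.h"
#include <stdio.h>

/* Sketch, not from this patch: report why a collective transfer fell back
 * to independent I/O. "dxpl_id" is assumed to be the DXPL that was used
 * in the failed H5Dwrite call. */
static void
check_no_collective_cause(hid_t dxpl_id)
{
    uint32_t local_cause  = 0;  /* cause flags seen on this rank        */
    uint32_t global_cause = 0;  /* cause flags OR'd across all ranks    */

    if (H5Pget_mpio_no_collective_cause(dxpl_id, &local_cause, &global_cause) < 0)
        return;

    if (global_cause & H5D_MPIO_DATATYPE_CONVERSION)
        printf("Collective I/O was broken by a datatype conversion\n");
}
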
@@ -1960,29 +2280,30 @@ test_cmpd_filtered_dataset_type_conversion_unshared(void)
* chunks using a compound datatype which requires
* a datatype conversion.
*
- * This test currently should fail because the datatype
- * conversion causes the parallel library to break
- * to independent I/O and this isn't allowed when
+ * NOTE: This test currently should fail because the
+ * datatype conversion causes the parallel library to
+ * break to independent I/O and this isn't allowed when
* there are filters in the pipeline.
*
* Programmer: Jordan Henderson
* 02/10/2017
*/
-/* JTH: This test currently cannot be data-verified due to the floating-point data involved */
static void
-test_cmpd_filtered_dataset_type_conversion_shared(void)
+test_write_cmpd_filtered_dataset_type_conversion_shared(void)
{
- cmpd_filtered_t *data = NULL;
- hsize_t dataset_dims[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t chunk_dims[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t sel_dims[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t count[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t stride[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t block[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t offset[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
- size_t i;
- hid_t file_id, dset_id, plist_id, filetype, memtype;
- hid_t filespace, memspace;
+ COMPOUND_C_DATATYPE *data = NULL;
+ COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t start[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ size_t i, correct_buf_size;
+ hid_t file_id, dset_id, plist_id, filetype, memtype;
+ hid_t filespace, memspace;
if (MAINPROCESS) puts("Testing write to shared filtered chunks in Compound Datatype dataset with Datatype conversion");
@@ -1990,9 +2311,11 @@ test_cmpd_filtered_dataset_type_conversion_shared(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -2000,47 +2323,52 @@ test_cmpd_filtered_dataset_type_conversion_shared(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS;
- dataset_dims[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS;
- chunk_dims[0] = (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
- chunk_dims[1] = COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
- sel_dims[0] = (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
- sel_dims[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
-
- filespace = H5Screate_simple(COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL);
+ dataset_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS;
+ chunk_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
+ chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+ sel_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
+ sel_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
+
+ filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, sel_dims, NULL);
+ memspace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, sel_dims, NULL);
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
- VRFY((SET_FILTER(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
/* Create the compound type for memory. */
- memtype = H5Tcreate(H5T_COMPOUND, sizeof(cmpd_filtered_t));
+ memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
VRFY((memtype >= 0), "Datatype creation succeeded");
- VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(cmpd_filtered_t, field1), H5T_NATIVE_SHORT) >= 0), "Datatype insertion succeeded");
- VRFY((H5Tinsert(memtype, "IntData", HOFFSET(cmpd_filtered_t, field2), H5T_NATIVE_INT) >= 0), "Datatype insertion succeeded");
- VRFY((H5Tinsert(memtype, "LongData", HOFFSET(cmpd_filtered_t, field3), H5T_NATIVE_LONG) >= 0), "Datatype insertion succeeded");
- VRFY((H5Tinsert(memtype, "DoubleData", HOFFSET(cmpd_filtered_t, field4), H5T_NATIVE_DOUBLE) >= 0), "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
+ "Datatype insertion succeeded");
/* Create the compound type for file. */
filetype = H5Tcreate(H5T_COMPOUND, 32);
VRFY((filetype >= 0), "Datatype creation succeeded");
- VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
- VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
- VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
- VRFY((H5Tinsert(filetype, "DoubleData", 24, H5T_IEEE_F64BE) >= 0), "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
- dset_id = H5Dcreate2(file_id, COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, filetype, filespace,
+ dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, filetype, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
@@ -2051,48 +2379,74 @@ test_cmpd_filtered_dataset_type_conversion_shared(void)
* it to the hyperslab in the file
*/
count[0] = 1;
- count[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
- stride[0] = (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
- stride[1] = COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
- block[0] = (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
- block[1] = COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
- offset[0] = (hsize_t) mpi_rank;
- offset[1] = 0;
-
- if (VERBOSE_MED)
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], offset[ %llu, %llu ], block size[ %llu, %llu ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]);
+ count[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
+ stride[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
+ stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+ block[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
+ block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+ start[0] = (hsize_t) mpi_rank;
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
/* Select hyperslab in the file */
filespace = H5Dget_space(dset_id);
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded");
- data = (COMPOUND_C_DATATYPE *) calloc(1, (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data));
- VRFY((NULL != data), "calloc succeeded");
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ data = (COMPOUND_C_DATATYPE *) HDcalloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data));
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
+
+ correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
/* Fill data buffer */
- memset(data, 0, sizeof(cmpd_filtered_t) * (size_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC);
- for (i = 0; i < (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC; i++) {
+ for (i = 0; i < (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC; i++) {
data[i].field1 = (short) GEN_DATA(i);
data[i].field2 = (int) GEN_DATA(i);
data[i].field3 = (long) GEN_DATA(i);
- data[i].field4 = (double) GEN_DATA(i);
}
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
/* Ensure that this test currently fails since type conversions break collective mode */
H5E_BEGIN_TRY {
- VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) < 0), "Dataset write succeeded");
+ VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) < 0),
+ "Dataset write succeeded");
} H5E_END_TRY;
- if (data) free(data);
+ if (data) HDfree(data);
+
+ /* Verify that no data was written */
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -2106,6 +2460,2940 @@ test_cmpd_filtered_dataset_type_conversion_shared(void)
}
/*
+ * Tests parallel read of filtered data in the special
+ * case where a dataset is composed of a single chunk.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads a part of
+ * the single chunk and contributes its piece to a
+ * global buffer that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/14/2018
+ */
+static void
+test_read_one_chunk_filtered_dataset(void)
+{
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t chunk_dims[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t sel_dims[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t start[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t stride[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t count[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t block[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+
+ dataset_dims[0] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_NROWS;
+ dataset_dims[1] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_NCOLS;
+
+ /* Set up the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] = ((C_DATATYPE) i % (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS))
+ + ((C_DATATYPE) i / (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS));
+
+ if (MAINPROCESS) {
+ puts("Testing read from one-chunk filtered dataset");
+
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_ONE_CHUNK_FILTERED_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS;
+ chunk_dims[1] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_ONE_CHUNK_FILTERED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_NROWS / (hsize_t) mpi_size;
+ sel_dims[1] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_NCOLS;
+
+ /* Set up one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it to the selection in memory
+ */
+ count[0] = 1;
+ count[1] = 1;
+ stride[0] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS;
+ stride[1] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS;
+ block[0] = sel_dims[0];
+ block[1] = sel_dims[1];
+ start[0] = ((hsize_t) mpi_rank * sel_dims[0]);
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ recvcounts[i] = (int) flat_dims[0];
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * flat_dims[0]);
+
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
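
Every read test in this patch verifies data the same way: each rank reads flat_dims[0] elements into a contiguous buffer, the pieces are gathered in rank order with MPI_Allgatherv, and the assembled buffer is memcmp'd against correct_buf. A minimal standalone sketch of that pattern follows; the helper and its parameters are hypothetical, while VRFY, HDcalloc and HDfree are the harness wrappers used throughout this file:

/* Sketch, not from this patch: the gather-and-verify step shared by the
 * read tests. Each of "nranks" ranks contributes "count" elements, and
 * the pieces are assembled in rank order. */
static void
gather_and_verify(C_DATATYPE *piece, int count, C_DATATYPE *expected,
        size_t expected_size, MPI_Comm comm, int nranks)
{
    C_DATATYPE *global = (C_DATATYPE *) HDcalloc(1, expected_size);
    int *recvcounts = (int *) HDcalloc(1, (size_t) nranks * sizeof(*recvcounts));
    int *displs = (int *) HDcalloc(1, (size_t) nranks * sizeof(*displs));
    int i;

    VRFY((NULL != global && NULL != recvcounts && NULL != displs), "HDcalloc succeeded");

    for (i = 0; i < nranks; i++) {
        recvcounts[i] = count;   /* every rank contributes the same amount   */
        displs[i] = i * count;   /* rank i's piece lands at offset i * count */
    }

    VRFY((MPI_SUCCESS == MPI_Allgatherv(piece, count, C_DATATYPE_MPI, global,
            recvcounts, displs, C_DATATYPE_MPI, comm)), "MPI_Allgatherv succeeded");

    VRFY((0 == memcmp(global, expected, expected_size)), "Data verification succeeded");

    HDfree(displs);
    HDfree(recvcounts);
    HDfree(global);
}
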
+
+/*
+ * Tests parallel read of filtered data in the case where only
+ * one process is reading from a particular chunk in the operation.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads a part of
+ * the dataset and contributes its piece to a global buffer
+ * that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/15/2018
+ */
+static void
+test_read_filtered_dataset_no_overlap(void)
+{
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t start[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t stride[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t count[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+
+ dataset_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NCOLS;
+
+ /* Set up the buffer for writing and for comparison */
+ correct_buf_size = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NROWS * (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NCOLS * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] = (C_DATATYPE) (
+ (i % (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
+ + (i / (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
+ );
+
+ if (MAINPROCESS) {
+ puts("Testing read from unshared filtered chunks");
+
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ sel_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NCOLS;
+
+ /* Set up one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and reads
+ * it to the selection in memory
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NCOLS / (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ start[0] = ((hsize_t) mpi_rank * (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS * count[0]);
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ recvcounts[i] = (int) flat_dims[0];
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * flat_dims[0]);
+
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
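
A note on the memory dataspaces used by these read tests: the file selection is two-dimensional, but it is read into a one-dimensional (flat) memory dataspace so that each rank's piece is contiguous in read_buf, which is exactly the layout the MPI_Allgatherv step needs. A hedged sketch of just that idiom, with nrows and ncols standing in for the rank's selection dimensions:

/* Sketch, not from this patch: read a 2-D file selection into a flat
 * contiguous buffer. "nrows"/"ncols" are hypothetical selection sizes. */
hsize_t nrows = 2, ncols = 8;   /* this rank's selection (assumed)    */
hsize_t flat_dims[1];
hid_t   memspace;

flat_dims[0] = nrows * ncols;   /* total elements this rank will read */

memspace = H5Screate_simple(1, flat_dims, NULL);
VRFY((memspace >= 0), "Memory dataspace creation succeeded");

/* A subsequent H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace,
 * dxpl_id, read_buf) then fills read_buf[0 .. nrows * ncols - 1] in the
 * row-major order of the file selection. */
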
+
+/*
+ * Tests parallel read of filtered data in the case where
+ * more than one process is reading from a particular chunk
+ * in the operation.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads a part of
+ * each chunk of the dataset and contributes its pieces
+ * to a global buffer that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/15/2018
+ */
+static void
+test_read_filtered_dataset_overlap(void)
+{
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t start[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t stride[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t count[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t block[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+
+ dataset_dims[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_NCOLS;
+
+ /* Set up the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] = (C_DATATYPE) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+
+ if (MAINPROCESS) {
+ puts("Testing read from shared filtered chunks");
+
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) DIM0_SCALE_FACTOR;
+ sel_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NCOLS * (hsize_t) DIM1_SCALE_FACTOR;
+
+ /* Set up one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it to the selection in memory
+ */
+ count[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_NROWS / (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NROWS;
+ count[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_NCOLS / (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NROWS / (hsize_t) mpi_size;
+ block[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ start[0] = (hsize_t) mpi_rank * block[0];
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /*
+ * Since these chunks are shared, run multiple rounds of MPI_Allgatherv
+ * to collect all of the pieces into their appropriate locations. The
+ * number of times MPI_Allgatherv is run should be equal to the number
+ * of chunks in the first dimension of the dataset.
+ */
+ {
+ size_t loop_count = count[0];
+ size_t total_recvcounts = 0;
+
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++) {
+ recvcounts[i] = (int) dataset_dims[1];
+ total_recvcounts += (size_t) recvcounts[i];
+ }
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * dataset_dims[1]);
+
+ for (; loop_count; loop_count--) {
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(&read_buf[(count[0] - loop_count) * dataset_dims[1]], recvcounts[mpi_rank], C_DATATYPE_MPI,
+ &global_buf[(count[0] - loop_count) * total_recvcounts], recvcounts, displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+ }
+ }
+
+ VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
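
To make the multi-round gather above concrete, here is a hedged worked example (the numbers are hypothetical): take mpi_size = 2, dataset_dims[1] = 4 and count[0] = 2 chunk-rows, i.e. a 4x4 dataset with chunks two rows tall, where the hyperslab parameters give rank 0 dataset rows 0 and 2 and rank 1 rows 1 and 3:

/*
 * Worked example of the MPI_Allgatherv loop (sketch, not from this patch):
 *
 *   recvcounts[i] = dataset_dims[1] = 4, so total_recvcounts = 8
 *
 *   round r = 0: rank 0 sends read_buf[0..3] (dataset row 0),
 *                rank 1 sends read_buf[0..3] (dataset row 1),
 *                received into global_buf[0..7]
 *   round r = 1: rank 0 sends read_buf[4..7] (dataset row 2),
 *                rank 1 sends read_buf[4..7] (dataset row 3),
 *                received into global_buf[8..15]
 *
 * One round per chunk-row thus re-interleaves the per-rank slices back
 * into the row-major layout of the full dataset.
 */
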
+
+/*
+ * Tests parallel read of filtered data in the case where
+ * a single process in the read operation has no selection
+ * in the dataset's dataspace.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank (except for one)
+ * reads a part of the dataset and contributes its piece
+ * to a global buffer that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/15/2018
+ */
+static void
+test_read_filtered_dataset_single_no_selection(void)
+{
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t start[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t stride[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t count[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t block[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ size_t segment_length;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+
+ dataset_dims[0] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
+
+ /* Set up the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] =
+ (C_DATATYPE) (
+ (i % (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
+ + (i / (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
+ );
+
+ /* Compute the correct offset into the buffer for the process having no selection and clear it */
+ segment_length = dataset_dims[0] * dataset_dims[1] / (hsize_t) mpi_size;
+ HDmemset(correct_buf + ((size_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length),
+ 0, segment_length * sizeof(*correct_buf));
+
+ if (MAINPROCESS) {
+ puts("Testing read from filtered chunks with a single process having no selection");
+
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ sel_dims[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
+
+ if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC)
+ sel_dims[0] = sel_dims[1] = 0;
+
+ /* Set up one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it to the selection in memory
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ start[0] = (hsize_t) mpi_rank * (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * count[0];
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
+
+ if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC)
+ VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded");
+ else
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ recvcounts[i] = (int) (READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS);
+ recvcounts[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC] = 0;
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * (size_t) (READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS));
+
+ if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC)
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, 0, C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+ else
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
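
One detail worth highlighting in the test above: the no-selection rank still takes part in both the collective H5Dread and the gather; it just contributes zero elements, and MPI_Allgatherv accepts a zero recvcount. A hedged sketch of the resulting count table, assuming 4 ranks, rank 1 as the no-selection process, and a hypothetical per-rank element count N:

/* Sketch, not from this patch: a zero recvcount is legal; the absent
 * rank's window in the output buffer simply keeps the zeros HDcalloc
 * wrote there, matching the HDmemset-cleared segment of correct_buf. */
#define N 1024  /* hypothetical per-rank element count */

int recvcounts[4] = { N, 0, N, N };          /* rank 1 has no selection      */
int displs[4]     = { 0, N, 2 * N, 3 * N };  /* rank 1's window stays zeroed */
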
+
+/*
+ * Tests parallel read of filtered data in the case where
+ * no process in the read operation has a selection in the
+ * dataset's dataspace. This test is to ensure that there
+ * are no assertion failures or similar issues due to size
+ * 0 allocations and the like.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank will simply issue
+ * a no-op read.
+ *
+ * Programmer: Jordan Henderson
+ * 05/15/2018
+ */
+static void
+test_read_filtered_dataset_all_no_selection(void)
+{
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ size_t read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ dataset_dims[0] = (hsize_t) READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
+
+ /* Set up the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ if (MAINPROCESS) {
+ puts("Testing read from filtered chunks with all processes having no selection");
+
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = sel_dims[1] = 0;
+
+ memspace = H5Screate_simple(READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
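+ /*
+ * Even though no rank has anything selected, every rank must still make
+ * the collective H5Dread call. With an empty selection the read should
+ * transfer no data and leave read_buf (sized for the full dataset) untouched.
+ */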
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data by using point
+ * selections instead of hyperslab selections.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank will read part
+ * of the dataset using a point selection and will
+ * contribute its piece to a global buffer that is
+ * checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/15/2018
+ */
+static void
+test_read_filtered_dataset_point_selection(void)
+{
+ C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
+ hsize_t *coords = NULL;
+ hsize_t dataset_dims[READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, j, read_buf_size, correct_buf_size;
+ size_t num_points;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+
+ dataset_dims[0] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
+
+ /* Setup the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
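+ /*
+ * Per element at (row, col) this works out to
+ * col + ((row % mpi_size) % dataset_dims[1]) + dataset_dims[1] * (row / mpi_size):
+ * each band of mpi_size rows repeats the same pattern, shifted up by
+ * dataset_dims[1] per band.
+ */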
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] = (C_DATATYPE) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+
+ if (MAINPROCESS) {
+ puts("Testing read from filtered chunks with point selection");
+
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS / (hsize_t) mpi_size;
+ sel_dims[1] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
+
+ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Set up point selection */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ num_points = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS * (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t) mpi_size;
+ coords = (hsize_t *) HDcalloc(1, 2 * num_points * sizeof(*coords));
+ VRFY((NULL != coords), "Coords HDcalloc succeeded");
+
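+ /*
+ * Each rank selects whole rows of the dataset in a round-robin pattern:
+ * rank r picks rows r, r + mpi_size, r + 2 * mpi_size, ..., and within
+ * each selected row the points cover every column in order.
+ */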
+ for (i = 0; i < num_points; i++)
+ for (j = 0; j < READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS; j++)
+ coords[(i * READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS) + j] =
+ (j > 0) ? (i % (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS)
+ : ((hsize_t) mpi_rank + ((hsize_t) mpi_size * (i / (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS)));
+
+ VRFY((H5Sselect_elements(filespace, H5S_SELECT_SET, (hsize_t) num_points, (const hsize_t *) coords) >= 0),
+ "Point selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /*
+ * Since these chunks are shared, run multiple rounds of MPI_Allgatherv
+ * to collect all of the pieces into their appropriate locations. The
+ * number of times MPI_Allgatherv is run should be equal to the number
+ * of chunks in the first dimension of the dataset.
+ */
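+ /*
+ * A sketch of one round, assuming mpi_size == 2 and dataset_dims[1] == 4:
+ * in round k, rank r sends its k-th local row (4 elements) and the gather
+ * deposits it at global_buf[k * 8 + r * 4], so rank r's k-th local row
+ * becomes global row (k * mpi_size) + r.
+ */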
+ {
+ size_t original_loop_count = dataset_dims[0] / (hsize_t) mpi_size;
+ size_t cur_loop_count = original_loop_count;
+ size_t total_recvcounts = 0;
+
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++) {
+ recvcounts[i] = (int) dataset_dims[1];
+ total_recvcounts += (size_t) recvcounts[i];
+ }
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * dataset_dims[1]);
+
+ for (; cur_loop_count; cur_loop_count--) {
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(&read_buf[(original_loop_count - cur_loop_count) * dataset_dims[1]], recvcounts[mpi_rank], C_DATATYPE_MPI,
+ &global_buf[(original_loop_count - cur_loop_count) * total_recvcounts], recvcounts, displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+ }
+ }
+
+ VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data in the case where
+ * each process reads an equal amount of data from each
+ * chunk in the dataset. Each chunk is distributed among the
+ * processes in round-robin fashion by single rows (blocks of
+ * size 1 in the row dimension) until the whole chunk is
+ * covered, leading to an interleaved read pattern.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank will read part
+ * of each chunk of the dataset and will contribute its
+ * pieces to a global buffer that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/15/2018
+ */
+static void
+test_read_filtered_dataset_interleaved_read(void)
+{
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
+ hsize_t chunk_dims[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
+ hsize_t sel_dims[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
+ hsize_t start[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
+ hsize_t stride[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
+ hsize_t count[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
+ hsize_t block[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+
+ dataset_dims[0] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NROWS;
+ dataset_dims[1] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NCOLS;
+
+ /* Setup the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ /* Add Column Index */
+ correct_buf[i] =
+ (C_DATATYPE) (
+ (i % (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NCOLS)
+
+ /* Add the Row Index */
+ + ((i % (hsize_t) (mpi_size * INTERLEAVED_READ_FILTERED_DATASET_NCOLS)) / (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NCOLS)
+
+ /* Add the amount that gets added when a rank moves down to its next section vertically in the dataset */
+ + ((hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NCOLS * (i / (hsize_t) (mpi_size * INTERLEAVED_READ_FILTERED_DATASET_NCOLS)))
+ );
+
+ if (MAINPROCESS) {
+ puts("Testing interleaved read from filtered chunks");
+
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(INTERLEAVED_READ_FILTERED_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS;
+ chunk_dims[1] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, INTERLEAVED_READ_FILTERED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" INTERLEAVED_READ_FILTERED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) (INTERLEAVED_READ_FILTERED_DATASET_NROWS / mpi_size);
+ sel_dims[1] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NCOLS;
+
+ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it to the selection in memory
+ */
+ count[0] = (hsize_t) (INTERLEAVED_READ_FILTERED_DATASET_NROWS / INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS);
+ count[1] = (hsize_t) (INTERLEAVED_READ_FILTERED_DATASET_NCOLS / INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS);
+ stride[0] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS;
+ stride[1] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS;
+ block[0] = 1;
+ block[1] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS;
+ start[0] = (hsize_t) mpi_rank;
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /*
+ * Since these chunks are shared, run multiple rounds of MPI_Allgatherv
+ * to collect all of the pieces into their appropriate locations. The
+ * number of times MPI_Allgatherv is run should be equal to the number
+ * of chunks in the first dimension of the dataset.
+ */
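+ /*
+ * Same gathering scheme as the point-selection test above: in round k,
+ * rank r's k-th local row lands at global row (k * mpi_size) + r.
+ */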
+ {
+ size_t loop_count = count[0];
+ size_t total_recvcounts = 0;
+
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++) {
+ recvcounts[i] = (int) dataset_dims[1];
+ total_recvcounts += (size_t) recvcounts[i];
+ }
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * dataset_dims[1]);
+
+ for (; loop_count; loop_count--) {
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(&read_buf[(count[0] - loop_count) * dataset_dims[1]], recvcounts[mpi_rank], C_DATATYPE_MPI,
+ &global_buf[(count[0] - loop_count) * total_recvcounts], recvcounts, displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+ }
+ }
+
+ VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data in the case where
+ * the dataset has 3 dimensions and each process reads from
+ * its own "page" in the 3rd dimension.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads its own "page"
+ * of the dataset and contributes its piece to a global buffer
+ * that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/16/2018
+ */
+static void
+test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
+{
+ MPI_Datatype vector_type;
+ MPI_Datatype resized_vector_type;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t chunk_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t sel_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t start[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t stride[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t count[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ dataset_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
+ dataset_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
+ dataset_dims[2] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH;
+
+ /* Setup the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
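+ /*
+ * With the page (3rd) dimension varying fastest and its depth equal to
+ * mpi_size (as the one-page-per-rank selection below implies), i % mpi_size
+ * is the page index and i / mpi_size is the flattened (row, col) position
+ * within a page.
+ */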
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] = (C_DATATYPE) ((i % (hsize_t) mpi_size) + (i / (hsize_t) mpi_size));
+
+ if (MAINPROCESS) {
+ puts("Testing read from unshared filtered chunks on separate pages in 3D dataset");
+
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ chunk_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ chunk_dims[2] = 1;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
+ sel_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
+ sel_dims[2] = 1;
+
+ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1] * sel_dims[2];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it to the selection in memory
+ */
+ count[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ count[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ count[2] = 1;
+ stride[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ stride[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ stride[2] = 1;
+ block[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ block[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ block[2] = 1;
+ start[0] = 0;
+ start[1] = 0;
+ start[2] = (hsize_t) mpi_rank;
+
+ if (VERBOSE_MED) {
+ printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /*
+ * Due to the nature of 3-dimensional reading, create an MPI vector type that places each
+ * rank's contribution at every (mpi_size)th position of the global data buffer, starting
+ * at the offset equal to that rank's number.
+ */
+ VRFY((MPI_SUCCESS == MPI_Type_vector((int) flat_dims[0], 1, mpi_size, C_DATATYPE_MPI, &vector_type)),
+ "MPI_Type_vector succeeded");
+ VRFY((MPI_SUCCESS == MPI_Type_commit(&vector_type)), "MPI_Type_commit succeeded");
+
+ /*
+ * Resize the type's extent to a single element (one long)
+ * so that contributions from consecutive ranks interleave
+ */
+ VRFY((MPI_SUCCESS == MPI_Type_create_resized(vector_type, 0, sizeof(long), &resized_vector_type)),
+ "MPI_Type_create_resized");
+ VRFY((MPI_SUCCESS == MPI_Type_commit(&resized_vector_type)), "MPI_Type_commit succeeded");
+
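+ /*
+ * A sketch of the resulting layout, assuming mpi_size == 3: the gathered
+ * buffer interleaves the pages element by element, i.e. global_buf is
+ * { r0[0], r1[0], r2[0], r0[1], r1[1], r2[1], ... }, which matches the
+ * row-major layout of the 3D dataset where the page (3rd) dimension
+ * varies fastest. This relies on C_DATATYPE being a long, the extent
+ * used in MPI_Type_create_resized above.
+ */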
+ VRFY((MPI_SUCCESS == MPI_Allgather(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, 1, resized_vector_type, comm)),
+ "MPI_Allgather succeeded");
+
+ VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ VRFY((MPI_SUCCESS == MPI_Type_free(&vector_type)), "MPI_Type_free succeeded");
+ VRFY((MPI_SUCCESS == MPI_Type_free(&resized_vector_type)), "MPI_Type_free succeeded");
+
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data in the case where
+ * the dataset has 3 dimensions and each process reads from
+ * each "page" in the 3rd dimension. However, no chunk on a
+ * given "page" is read from by more than one process.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads a part of
+ * each "page" of the dataset and contributes its piece to a
+ * global buffer that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/16/2018
+ */
+static void
+test_read_3d_filtered_dataset_no_overlap_same_pages(void)
+{
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t chunk_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t sel_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t start[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t stride[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t count[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+
+ dataset_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS;
+ dataset_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
+ dataset_dims[2] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
+
+ /* Setup the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] = (C_DATATYPE) (
+ (i % (dataset_dims[0] * dataset_dims[1]))
+ + (i / (dataset_dims[0] * dataset_dims[1]))
+ );
+
+ if (MAINPROCESS) {
+ puts("Testing read from unshared filtered chunks on the same pages in 3D dataset");
+
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ chunk_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ chunk_dims[2] = 1;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ sel_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
+ sel_dims[2] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
+
+ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1] * sel_dims[2];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it to the selection in memory
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ count[2] = (hsize_t) mpi_size;
+ stride[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ stride[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ stride[2] = 1;
+ block[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ block[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ block[2] = 1;
+ start[0] = ((hsize_t) mpi_rank * (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS * count[0]);
+ start[1] = 0;
+ start[2] = 0;
+
+ if (VERBOSE_MED) {
+ printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ recvcounts[i] = (int) flat_dims[0];
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * flat_dims[0]);
+
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data in the case where
+ * the dataset has 3 dimensions and each process reads from
+ * each "page" in the 3rd dimension. Further, each chunk in
+ * each "page" is read from equally by all processes.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads part of each
+ * chunk of each "page" and contributes its pieces to a
+ * global buffer that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/16/2018
+ */
+static void
+test_read_3d_filtered_dataset_overlap(void)
+{
+ MPI_Datatype vector_type;
+ MPI_Datatype resized_vector_type;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t chunk_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t sel_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t start[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t stride[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t count[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t block[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ dataset_dims[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_NROWS;
+ dataset_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_NCOLS;
+ dataset_dims[2] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_DEPTH;
+
+ /* Setup the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ /* Add the Column Index */
+ correct_buf[i] =
+ (C_DATATYPE) (
+ (i % (hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))
+
+ /* Add the Row Index */
+ + ((i % (hsize_t) (mpi_size * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))
+ / (hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))
+
+ /* Add the amount that gets added when a rank moves down to its next section vertically in the dataset */
+ + ((hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)
+ * (i / (hsize_t) (mpi_size * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)))
+ );
+
+ if (MAINPROCESS) {
+ puts("Testing read from shared filtered chunks in 3D dataset");
+
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS;
+ chunk_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
+ chunk_dims[2] = 1;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size);
+ sel_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_NCOLS;
+ sel_dims[2] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_DEPTH;
+
+ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1] * sel_dims[2];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it to the selection in memory
+ */
+ count[0] = (hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_NROWS / READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS);
+ count[1] = (hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_NCOLS / READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS);
+ count[2] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_DEPTH;
+ stride[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS;
+ stride[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
+ stride[2] = 1;
+ block[0] = 1;
+ block[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
+ block[2] = 1;
+ start[0] = (hsize_t) mpi_rank;
+ start[1] = 0;
+ start[2] = 0;
+
+ if (VERBOSE_MED) {
+ printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ {
+ size_t run_length = (size_t) (READ_SHARED_FILTERED_CHUNKS_3D_NCOLS * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH);
+ size_t num_blocks = (size_t) (READ_SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size);
+
+ /*
+ * Due to the nature of 3-dimensional reading, create an MPI vector type that places each
+ * rank's contribution at every (mpi_size)th block of the global data buffer, starting at
+ * the block offset equal to that rank's number.
+ */
+ VRFY((MPI_SUCCESS == MPI_Type_vector((int) num_blocks, (int) run_length, mpi_size * (int) run_length, C_DATATYPE_MPI, &vector_type)),
+ "MPI_Type_vector succeeded");
+ VRFY((MPI_SUCCESS == MPI_Type_commit(&vector_type)), "MPI_Type_commit succeeded");
+
+ /*
+ * Resize the type's extent to "run_length" elements (run_length longs)
+ * so that contributions from consecutive ranks interleave
+ */
+ VRFY((MPI_SUCCESS == MPI_Type_create_resized(vector_type, 0, (MPI_Aint) (run_length * sizeof(long)), &resized_vector_type)),
+ "MPI_Type_create_resized");
+ VRFY((MPI_SUCCESS == MPI_Type_commit(&resized_vector_type)), "MPI_Type_commit succeeded");
+ }
+
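+ /*
+ * A sketch of the layout, assuming mpi_size == 2: each rank contributes
+ * num_blocks runs of run_length elements, and the resized vector type
+ * places rank r's k-th run at global_buf[((k * mpi_size) + r) * run_length],
+ * i.e. at global row (k * mpi_size) + r of the dataset.
+ */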
+ VRFY((MPI_SUCCESS == MPI_Allgather(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, 1, resized_vector_type, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ VRFY((MPI_SUCCESS == MPI_Type_free(&vector_type)), "MPI_Type_free succeeded");
+ VRFY((MPI_SUCCESS == MPI_Type_free(&resized_vector_type)), "MPI_Type_free succeeded");
+
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data from unshared
+ * chunks using a compound datatype which doesn't
+ * require a datatype conversion.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads a part of
+ * the dataset and contributes its piece to a global
+ * buffer that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/17/2018
+ */
+static void
+test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
+{
+ COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *correct_buf = NULL;
+ COMPOUND_C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1, memtype = -1;
+ hid_t filespace = -1, memspace = -1;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+
+ dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS;
+ dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS;
+
+ /* Setup the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
+ correct_buf[i].field1 = (short) (
+ (i % dataset_dims[1])
+ + (i / dataset_dims[1])
+ );
+
+ correct_buf[i].field2 = (int) (
+ (i % dataset_dims[1])
+ + (i / dataset_dims[1])
+ );
+
+ correct_buf[i].field3 = (long) (
+ (i % dataset_dims[1])
+ + (i / dataset_dims[1])
+ );
+ }
+
+ /* Create the compound type for memory. */
+ memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
+ VRFY((memtype >= 0), "Datatype creation succeeded");
+
+ VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
+ "Datatype insertion succeeded");
+
+ if (MAINPROCESS) {
+ puts("Testing read from unshared filtered chunks in Compound Datatype dataset without Datatype conversion");
+
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
+ chunk_dims[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, memtype, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
+ sel_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+
+ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it to the selection in memory
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+ stride[0] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
+ stride[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
+ block[0] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
+ block[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
+ start[0] = 0;
+ start[1] = ((hsize_t) mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS);
+
+ if (VERBOSE_MED) {
+ printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ recvcounts[i] = (int) (flat_dims[0] * sizeof(*read_buf));
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * flat_dims[0] * sizeof(*read_buf));
+
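+ /*
+ * No MPI datatype is registered for COMPOUND_C_DATATYPE in these tests,
+ * so the gather is done in raw bytes; the recvcounts and displacements
+ * above are byte counts. This assumes every rank uses the same struct
+ * layout, which holds here since all ranks run the same executable.
+ */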
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data from shared
+ * chunks using a compound datatype which doesn't
+ * require a datatype conversion.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads a part of
+ * each chunk of the dataset and contributes its piece
+ * to a global buffer that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/17/2018
+ */
+static void
+test_read_cmpd_filtered_dataset_no_conversion_shared(void)
+{
+ COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *correct_buf = NULL;
+ COMPOUND_C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id, dset_id, plist_id, memtype;
+ hid_t filespace, memspace;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+
+ dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS;
+ dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS;
+
+ /* Set up the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
+ correct_buf[i].field1 = (short) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+
+ correct_buf[i].field2 = (int) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+
+ correct_buf[i].field3 = (long) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+ }
+
+ /* Create the compound type for memory. */
+ memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
+ VRFY((memtype >= 0), "Datatype creation succeeded");
+
+ VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
+ "Datatype insertion succeeded");
+
+ if (MAINPROCESS) {
+ puts("Testing read from shared filtered chunks in Compound Datatype dataset without Datatype conversion");
+
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
+ chunk_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
+ sel_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC;
+
+ /* Set up one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it to the selection in memory
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC;
+ stride[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
+ stride[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
+ block[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
+ block[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
+ start[0] = (hsize_t) mpi_rank;
+ start[1] = 0;
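+
+ /*
+ * Explanatory note (added for clarity): unlike the unshared case, block[0]
+ * here covers only CH_NROWS / mpi_size rows and start[0] offsets each rank
+ * by its row index within a chunk, so the ranks split every selected chunk
+ * row-wise instead of owning whole chunks (hence "shared" chunks).
+ */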
+
+ if (VERBOSE_MED) {
+ printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ recvcounts[i] = (int) (flat_dims[0] * sizeof(*read_buf));
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * flat_dims[0] * sizeof(*read_buf));
+
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data from unshared
+ * chunks using a compound datatype which requires a
+ * datatype conversion.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads a part of
+ * the dataset and contributes its piece to a global
+ * buffer that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/17/2018
+ */
+static void
+test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
+{
+ COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *correct_buf = NULL;
+ COMPOUND_C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1, filetype = -1, memtype = -1;
+ hid_t filespace = -1, memspace = -1;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+
+ dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS;
+ dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS;
+
+ /* Set up the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
+ correct_buf[i].field1 = (short) (
+ (i % dataset_dims[1])
+ + (i / dataset_dims[1])
+ );
+
+ correct_buf[i].field2 = (int) (
+ (i % dataset_dims[1])
+ + (i / dataset_dims[1])
+ );
+
+ correct_buf[i].field3 = (long) (
+ (i % dataset_dims[1])
+ + (i / dataset_dims[1])
+ );
+ }
+
+ /* Create the compound type for memory. */
+ memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
+ VRFY((memtype >= 0), "Datatype creation succeeded");
+
+ VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
+ "Datatype insertion succeeded");
+
+ /* Create the compound type for file. */
+ filetype = H5Tcreate(H5T_COMPOUND, 32);
+ VRFY((filetype >= 0), "Datatype creation succeeded");
+
+ VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
+
+ if (MAINPROCESS) {
+ puts("Testing read from unshared filtered chunks in Compound Datatype dataset with Datatype conversion");
+
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ chunk_dims[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, filetype, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ sel_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+
+ /* Set up one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it to the selection in memory
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+ stride[0] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ stride[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
+ block[0] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ block[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
+ start[0] = 0;
+ start[1] = ((hsize_t) mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS);
+
+ if (VERBOSE_MED) {
+ printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ recvcounts[i] = (int) (flat_dims[0] * sizeof(*read_buf));
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * flat_dims[0] * sizeof(*read_buf));
+
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded");
+ VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data from shared
+ * chunks using a compound datatype which requires
+ * a datatype conversion.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads a part of
+ * each chunk of the dataset and contributes its piece
+ * to a global buffer that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/17/2018
+ */
+static void
+test_read_cmpd_filtered_dataset_type_conversion_shared(void)
+{
+ COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *correct_buf = NULL;
+ COMPOUND_C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id, dset_id, plist_id, filetype, memtype;
+ hid_t filespace, memspace;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+
+ dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS;
+ dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS;
+
+ /* Set up the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
+ correct_buf[i].field1 = (short) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+
+ correct_buf[i].field2 = (int) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+
+ correct_buf[i].field3 = (long) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+ }
+
+ /* Create the compound type for memory. */
+ memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
+ VRFY((memtype >= 0), "Datatype creation succeeded");
+
+ VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
+ "Datatype insertion succeeded");
+
+ /* Create the compound type for file. */
+ filetype = H5Tcreate(H5T_COMPOUND, 32);
+ VRFY((filetype >= 0), "Datatype creation succeeded");
+
+ VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
+
+ if (MAINPROCESS) {
+ puts("Testing read from shared filtered chunks in Compound Datatype dataset with Datatype conversion");
+
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
+ chunk_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, filetype, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
+ sel_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
+
+ /* Set up one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it to the selection in memory
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
+ stride[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
+ stride[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+ block[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
+ block[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+ start[0] = (hsize_t) mpi_rank;
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ recvcounts[i] = (int) (flat_dims[0] * sizeof(*read_buf));
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * flat_dims[0] * sizeof(*read_buf));
+
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded");
+ VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
* Tests write of filtered data to a dataset
* by a single process. After the write has
* succeeded, the dataset is closed and then
@@ -2139,7 +5427,8 @@ test_write_serial_read_parallel(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -2158,10 +5447,11 @@ test_write_serial_read_parallel(void)
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
- VRFY((SET_FILTER(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
dset_id = H5Dcreate2(file_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
@@ -2172,15 +5462,16 @@ test_write_serial_read_parallel(void)
data_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*data);
- data = (C_DATATYPE *) calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
data[i] = (C_DATATYPE) GEN_DATA(i);
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) >= 0), "Dataset write succeeded");
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) >= 0),
+ "Dataset write succeeded");
- if (data) free(data);
+ if (data) HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -2188,11 +5479,11 @@ test_write_serial_read_parallel(void)
correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
- correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
- read_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
correct_buf[i] = (long) i;
@@ -2201,9 +5492,11 @@ test_write_serial_read_parallel(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -2216,14 +5509,17 @@ test_write_serial_read_parallel(void)
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
- if (correct_buf) free(correct_buf);
- if (read_buf) free(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
@@ -2265,9 +5561,11 @@ test_write_parallel_read_serial(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -2278,12 +5576,12 @@ test_write_parallel_read_serial(void)
dataset_dims[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_NROWS;
dataset_dims[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_NCOLS;
dataset_dims[2] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_DEPTH;
- chunk_dims[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS;
- chunk_dims[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS;
- chunk_dims[2] = 1;
- sel_dims[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS;
- sel_dims[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_NCOLS;
- sel_dims[2] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_DEPTH;
+ chunk_dims[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS;
+ chunk_dims[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS;
+ chunk_dims[2] = 1;
+ sel_dims[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS;
+ sel_dims[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_NCOLS;
+ sel_dims[2] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_DEPTH;
filespace = H5Screate_simple(WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
@@ -2295,10 +5593,11 @@ test_write_parallel_read_serial(void)
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
- VRFY((SET_FILTER(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
dset_id = H5Dcreate2(file_id, WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
@@ -2310,34 +5609,37 @@ test_write_parallel_read_serial(void)
/* Each process defines the dataset selection in memory and writes
* it to the hyperslab in the file
*/
- count[0] = 1;
- count[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_NCOLS / (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS;
- count[2] = (hsize_t) mpi_size;
+ count[0] = 1;
+ count[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_NCOLS / (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS;
+ count[2] = (hsize_t) mpi_size;
stride[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS;
stride[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS;
stride[2] = 1;
- block[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS;
- block[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS;
- block[2] = 1;
+ block[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS;
+ block[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS;
+ block[2] = 1;
offset[0] = ((hsize_t) mpi_rank * (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS * count[0]);
offset[1] = 0;
offset[2] = 0;
- if (VERBOSE_MED)
+ if (VERBOSE_MED) {
printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], offset[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
- mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0], offset[1], offset[2], block[0], block[1], block[2]);
+ mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0], offset[1], offset[2], block[0], block[1], block[2]);
+ fflush(stdout);
+ }
/* Select hyperslab in the file */
filespace = H5Dget_space(dset_id);
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded");
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride,
+ count, block) >= 0), "Hyperslab selection succeeded");
/* Fill data buffer */
data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data);
- data = (C_DATATYPE *) calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
data[i] = (C_DATATYPE) GEN_DATA(i);
@@ -2346,11 +5648,13 @@ test_write_parallel_read_serial(void)
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded");
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
- if (data) free(data);
+ if (data) HDfree(data);
VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -2362,7 +5666,8 @@ test_write_parallel_read_serial(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -2374,18 +5679,23 @@ test_write_parallel_read_serial(void)
correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
- correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
- read_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE) ((i % (dataset_dims[0] * dataset_dims[1])) + (i / (dataset_dims[0] * dataset_dims[1])));;
+ correct_buf[i] = (C_DATATYPE) (
+ (i % (dataset_dims[0] * dataset_dims[1]))
+ + (i / (dataset_dims[0] * dataset_dims[1]))
+ );
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf) >= 0),
+ "Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -2394,6 +5704,142 @@ test_write_parallel_read_serial(void)
return;
}
+/*
+ * Tests that causing chunks to continually grow and shrink
+ * by writing random data followed by zeroed-out data (and
+ * thus controlling the compression ratio) does not cause
+ * problems.
+ *
+ * Programmer: Jordan Henderson
+ * 06/04/2018
+ */
+static void
+test_shrinking_growing_chunks(void)
+{
+ double *data = NULL;
+ hsize_t dataset_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t start[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t stride[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t count[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t block[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ size_t i, data_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ if (MAINPROCESS) puts("Testing continually shrinking/growing chunks");
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t) SHRINKING_GROWING_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NCOLS;
+ sel_dims[0] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NROWS;
+ sel_dims[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_NCOLS;
+
+ filespace = H5Screate_simple(SHRINKING_GROWING_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ memspace = H5Screate_simple(SHRINKING_GROWING_CHUNKS_DATASET_DIMS, sel_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, SHRINKING_GROWING_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, SHRINKING_GROWING_CHUNKS_DATASET_NAME, H5T_NATIVE_DOUBLE, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /*
+ * Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_NCOLS / (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NCOLS;
+ start[0] = ((hsize_t) mpi_rank * (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NROWS * count[0]);
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((dset_id >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset write */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+
+ data = (double *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
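+ /*
+ * Explanatory note (added for clarity): alternating between random and
+ * all-zero buffers should make each chunk's compressed size swing between
+ * poorly-compressible and highly-compressible on successive iterations,
+ * which is intended to exercise repeated reallocation of file space for
+ * the filtered chunks.
+ */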
+ for (i = 0; i < SHRINKING_GROWING_CHUNKS_NLOOPS; i++) {
+ /* Continually write random data, followed by zeroed-out data */
+ if ((i % 2))
+ HDmemset(data, 0, data_size);
+ else {
+ size_t j;
+ for (j = 0; j < data_size / sizeof(*data); j++) {
+ data[j] = (double) rand() / (double) RAND_MAX;
+ }
+ }
+
+ VRFY((H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
+ }
+
+ if (data) HDfree(data);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
int
main(int argc, char** argv)
{
@@ -2416,7 +5862,9 @@ main(int argc, char** argv)
}
if (H5dont_atexit() < 0) {
- printf("Failed to turn off atexit processing. Continue.\n");
+ if (MAINPROCESS) {
+ printf("Failed to turn off atexit processing. Continue.\n");
+ }
}
H5open();
@@ -2437,19 +5885,59 @@ main(int argc, char** argv)
VRFY((H5Pset_fapl_mpio(fapl, comm, info) >= 0), "Set FAPL MPIO succeeded");
- VRFY((H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+ VRFY((H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ VRFY((h5_fixname(FILENAME[0], fapl, filenames[0], sizeof(filenames[0])) != NULL),
+ "Test file name created");
+
+ file_id = H5Fcreate(filenames[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ VRFY((file_id >= 0), "Test file creation succeeded");
+
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (MPI_SUCCESS == (mpi_code = MPI_Barrier(comm))) {
+ (*tests[i])();
+ }
+ else {
+ if (MAINPROCESS) MESG("MPI_Barrier failed");
+ nerrors++;
+ }
+ }
- VRFY((h5_fixname(FILENAME[0], fapl, filenames[0], sizeof(filenames[0])) != NULL), "Test file name created");
+ /*
+ * Increment the filter index to switch to the checksum filter
+ * and re-run the tests.
+ */
+ cur_filter_idx++;
+
+ h5_clean_files(FILENAME, fapl);
+
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(fapl, comm, info) >= 0), "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
file_id = H5Fcreate(filenames[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
VRFY((file_id >= 0), "Test file creation succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ if (MAINPROCESS) {
+ printf("\n=================================================================\n");
+ printf("Re-running Parallel Filters tests with Fletcher32 checksum filter\n");
+ printf("=================================================================\n\n");
+ }
+
for (i = 0; i < ARRAY_SIZE(tests); i++) {
if (MPI_SUCCESS == (mpi_code = MPI_Barrier(comm))) {
(*tests[i])();
- } else {
+ }
+ else {
if (MAINPROCESS) MESG("MPI_Barrier failed");
nerrors++;
}
@@ -2461,7 +5949,9 @@ main(int argc, char** argv)
exit:
if (nerrors)
- if (MAINPROCESS) printf("*** %d TEST ERROR%s OCCURRED ***\n", nerrors, nerrors > 1 ? "S" : "");
+ if (MAINPROCESS)
+ printf("*** %d TEST ERROR%s OCCURRED ***\n", nerrors,
+ nerrors > 1 ? "S" : "");
ALARM_OFF;
diff --git a/testpar/t_filters_parallel.h b/testpar/t_filters_parallel.h
index cb9a1ab..9543508 100644
--- a/testpar/t_filters_parallel.h
+++ b/testpar/t_filters_parallel.h
@@ -43,77 +43,83 @@ size_t cd_nelmts = FILTER_NUM_CDVALUES;
/* Common defines for all tests */
#define C_DATATYPE long
+#define C_DATATYPE_MPI MPI_LONG
#define COMPOUND_C_DATATYPE cmpd_filtered_t
#define C_DATATYPE_STR(type) STRINGIFY(type)
#define HDF5_DATATYPE_NAME H5T_NATIVE_LONG
+/* Macro used to generate data for datasets for later verification */
#define GEN_DATA(i) INCREMENTAL_DATA(i)
-#define INCREMENTAL_DATA(i) ((size_t) mpi_rank + i) /* Generates incremental test data */
/* For experimental purposes only, will cause tests to fail data verification phase - JTH */
/* #define GEN_DATA(i) RANK_DATA(i) */ /* Given an index value i, generates test data based upon selected mode */
-#define RANK_DATA(i) (mpi_rank) /* Generates test data to visibly show which rank wrote to which parts of the dataset */
-#ifdef DYNAMIC_FILTER
-#define SET_FILTER(dcpl) H5Pset_filter(dcpl, filter_id, flags, FILTER_NUM_CDVALUES, cd_values) /* Test other filter in parallel */
-#else
-#define SET_FILTER(dcpl) H5Pset_deflate(dcpl, 6) /* Test GZIP filter in parallel */
-#endif
+#define INCREMENTAL_DATA(i) ((size_t) mpi_rank + i) /* Generates incremental test data */
+#define RANK_DATA(i) (mpi_rank) /* Generates test data to visibly show which rank wrote to which parts of the dataset */
+
+#define DEFAULT_DEFLATE_LEVEL 6
#define DIM0_SCALE_FACTOR 4
#define DIM1_SCALE_FACTOR 2
-/* Defines for the one-chunk filtered dataset test */
-#define ONE_CHUNK_FILTERED_DATASET_NAME "one_chunk_filtered_dataset"
-#define ONE_CHUNK_FILTERED_DATASET_DIMS 2
-#define ONE_CHUNK_FILTERED_DATASET_NROWS (mpi_size * DIM0_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */
-#define ONE_CHUNK_FILTERED_DATASET_NCOLS (mpi_size * DIM1_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */
-#define ONE_CHUNK_FILTERED_DATASET_CH_NROWS ONE_CHUNK_FILTERED_DATASET_NROWS
-#define ONE_CHUNK_FILTERED_DATASET_CH_NCOLS ONE_CHUNK_FILTERED_DATASET_NCOLS
+/* Struct type for the compound datatype filtered dataset tests */
+typedef struct {
+ short field1;
+ int field2;
+ long field3;
+} COMPOUND_C_DATATYPE;
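+/*
+ * Note (added for clarity): COMPOUND_C_DATATYPE is a macro expanding to
+ * cmpd_filtered_t (see the #define above), so this typedef actually names
+ * the struct cmpd_filtered_t.
+ */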
+
+/* Defines for the one-chunk filtered dataset write test */
+#define WRITE_ONE_CHUNK_FILTERED_DATASET_NAME "one_chunk_filtered_dataset_write"
+#define WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS 2
+#define WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS (mpi_size * DIM0_SCALE_FACTOR) /* Must be an even multiple of the number of ranks so the selection divides evenly among them */
+#define WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS (mpi_size * DIM1_SCALE_FACTOR) /* Must be an even multiple of the number of ranks so the selection divides evenly among them */
+#define WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS
+#define WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS
/* Defines for the unshared filtered chunks write test */
-#define UNSHARED_FILTERED_CHUNKS_DATASET_NAME "unshared_filtered_chunks"
-#define UNSHARED_FILTERED_CHUNKS_DATASET_DIMS 2
-#define UNSHARED_FILTERED_CHUNKS_NROWS (mpi_size * DIM0_SCALE_FACTOR)
-#define UNSHARED_FILTERED_CHUNKS_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
-#define UNSHARED_FILTERED_CHUNKS_CH_NROWS (UNSHARED_FILTERED_CHUNKS_NROWS / mpi_size)
-#define UNSHARED_FILTERED_CHUNKS_CH_NCOLS (UNSHARED_FILTERED_CHUNKS_NCOLS / mpi_size)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME "unshared_filtered_chunks_write"
+#define WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS 2
+#define WRITE_UNSHARED_FILTERED_CHUNKS_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS (WRITE_UNSHARED_FILTERED_CHUNKS_NROWS / mpi_size)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS (WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS / mpi_size)
/* Defines for the shared filtered chunks write test */
-#define SHARED_FILTERED_CHUNKS_DATASET_NAME "shared_filtered_chunks"
-#define SHARED_FILTERED_CHUNKS_DATASET_DIMS 2
-#define SHARED_FILTERED_CHUNKS_CH_NROWS (mpi_size)
-#define SHARED_FILTERED_CHUNKS_CH_NCOLS (mpi_size)
-#define SHARED_FILTERED_CHUNKS_NROWS (SHARED_FILTERED_CHUNKS_CH_NROWS * DIM0_SCALE_FACTOR)
-#define SHARED_FILTERED_CHUNKS_NCOLS (SHARED_FILTERED_CHUNKS_CH_NCOLS * DIM1_SCALE_FACTOR)
+#define WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME "shared_filtered_chunks_write"
+#define WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS 2
+#define WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS (mpi_size)
+#define WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS (mpi_size)
+#define WRITE_SHARED_FILTERED_CHUNKS_NROWS (WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS * DIM0_SCALE_FACTOR)
+#define WRITE_SHARED_FILTERED_CHUNKS_NCOLS (WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS * DIM1_SCALE_FACTOR)
/* Defines for the filtered chunks write test where a process has no selection */
-#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "single_no_selection_filtered_chunks"
-#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
-#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
-#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
-#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS (SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
-#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS (SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
-#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC (mpi_size - 1)
+#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "single_no_selection_filtered_chunks_write"
+#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
+#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
+#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS (WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
+#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS (WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
+#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC (mpi_size - 1)
/* Defines for the filtered chunks write test where no process has a selection */
-#define ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "all_no_selection_filtered_chunks"
-#define ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
-#define ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
-#define ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
-#define ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS (ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
-#define ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS (ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
+#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "all_no_selection_filtered_chunks_write"
+#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
+#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
+#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS (WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
+#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS (WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
/* Defines for the filtered chunks write test with a point selection */
-#define POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME "point_selection_filtered_chunks"
-#define POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
-#define POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
-#define POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
-#define POINT_SELECTION_FILTERED_CHUNKS_NROWS (POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
-#define POINT_SELECTION_FILTERED_CHUNKS_NCOLS (POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
+#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME "point_selection_filtered_chunks_write"
+#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
+#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
+#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS (WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
+#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS (WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
/* Defines for the filtered dataset interleaved write test */
-#define INTERLEAVED_WRITE_FILTERED_DATASET_NAME "interleaved_write_filtered_dataset"
+#define INTERLEAVED_WRITE_FILTERED_DATASET_NAME "filtered_dataset_interleaved_write"
#define INTERLEAVED_WRITE_FILTERED_DATASET_DIMS 2
#define INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS (mpi_size)
#define INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS (DIM1_SCALE_FACTOR)
@@ -121,75 +127,187 @@ size_t cd_nelmts = FILTER_NUM_CDVALUES;
#define INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS (INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS * DIM1_SCALE_FACTOR)
/* Defines for the 3D unshared filtered dataset separate page write test */
-#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_separate_pages"
-#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS 3
-#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR)
-#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
-#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH (mpi_size)
-#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS (UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / mpi_size)
-#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS (UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / mpi_size)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_separate_pages_write"
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS 3
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH (mpi_size)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / mpi_size)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / mpi_size)
/* Defines for the 3D unshared filtered dataset same page write test */
-#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_same_pages"
-#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS 3
-#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR)
-#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
-#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH (mpi_size)
-#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS (UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS / mpi_size)
-#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS (UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / mpi_size)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_same_pages_write"
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS 3
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH (mpi_size)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS / mpi_size)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / mpi_size)
/* Defines for the 3d shared filtered dataset write test */
-#define SHARED_FILTERED_CHUNKS_3D_DATASET_NAME "3D_shared_filtered_chunks"
-#define SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS 3
-#define SHARED_FILTERED_CHUNKS_3D_CH_NROWS (mpi_size)
-#define SHARED_FILTERED_CHUNKS_3D_CH_NCOLS (DIM1_SCALE_FACTOR)
-#define SHARED_FILTERED_CHUNKS_3D_NROWS (SHARED_FILTERED_CHUNKS_3D_CH_NROWS * DIM0_SCALE_FACTOR)
-#define SHARED_FILTERED_CHUNKS_3D_NCOLS (SHARED_FILTERED_CHUNKS_3D_CH_NCOLS * DIM1_SCALE_FACTOR)
-#define SHARED_FILTERED_CHUNKS_3D_DEPTH (mpi_size)
-
-/* Struct type for the compound datatype filtered dataset tests */
-typedef struct {
- short field1;
- int field2;
- long field3;
- double field4;
-} COMPOUND_C_DATATYPE;
+#define WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME "3D_shared_filtered_chunks_write"
+#define WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS 3
+#define WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS (mpi_size)
+#define WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS (WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS * DIM0_SCALE_FACTOR)
+#define WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS (WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS * DIM1_SCALE_FACTOR)
+#define WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH (mpi_size)
/* Defines for the compound datatype filtered dataset no conversion write test with unshared chunks */
-#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_no_conversion"
-#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS 2
-#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS 1
-#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS mpi_size
-#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS 1
-#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS 1
-#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC (COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS / mpi_size)
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_no_conversion_write"
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS 2
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC (WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS / mpi_size)
/* Defines for the compound datatype filtered dataset no conversion write test with shared chunks */
-#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME "compound_shared_filtered_chunks_no_conversion"
-#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS 2
-#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS mpi_size
-#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS mpi_size
-#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS mpi_size
-#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS 1
-#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME "compound_shared_filtered_chunks_no_conversion_write"
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS 2
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS
/* Defines for the compound datatype filtered dataset type conversion write test with unshared chunks */
-#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_type_conversion"
-#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS 2
-#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS 1
-#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS mpi_size
-#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS 1
-#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS 1
-#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC (COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS / mpi_size)
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_type_conversion_write"
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS 2
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC (WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS / mpi_size)
/* Defines for the compound datatype filtered dataset type conversion write test with shared chunks */
-#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME "compound_shared_filtered_chunks_type_conversion"
-#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS 2
-#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS mpi_size
-#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS mpi_size
-#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS mpi_size
-#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS 1
-#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME "compound_shared_filtered_chunks_type_conversion_write"
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS 2
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS
+
+/* Defines for the one-chunk filtered dataset read test */
+#define READ_ONE_CHUNK_FILTERED_DATASET_NAME "one_chunk_filtered_dataset_read"
+#define READ_ONE_CHUNK_FILTERED_DATASET_DIMS 2
+#define READ_ONE_CHUNK_FILTERED_DATASET_NROWS (mpi_size * DIM0_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */
+#define READ_ONE_CHUNK_FILTERED_DATASET_NCOLS (mpi_size * DIM1_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */
+#define READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS READ_ONE_CHUNK_FILTERED_DATASET_NROWS
+#define READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS READ_ONE_CHUNK_FILTERED_DATASET_NCOLS
+
+/* Defines for the unshared filtered chunks read test */
+#define READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME "unshared_filtered_chunks_read"
+#define READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS 2
+#define READ_UNSHARED_FILTERED_CHUNKS_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define READ_UNSHARED_FILTERED_CHUNKS_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS (READ_UNSHARED_FILTERED_CHUNKS_NROWS / mpi_size)
+#define READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS (READ_UNSHARED_FILTERED_CHUNKS_NCOLS / mpi_size)
+
+/* Defines for the shared filtered chunks read test */
+#define READ_SHARED_FILTERED_CHUNKS_DATASET_NAME "shared_filtered_chunks_read"
+#define READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS 2
+#define READ_SHARED_FILTERED_CHUNKS_CH_NROWS (mpi_size)
+#define READ_SHARED_FILTERED_CHUNKS_CH_NCOLS (mpi_size)
+#define READ_SHARED_FILTERED_CHUNKS_NROWS (READ_SHARED_FILTERED_CHUNKS_CH_NROWS * DIM0_SCALE_FACTOR)
+#define READ_SHARED_FILTERED_CHUNKS_NCOLS (READ_SHARED_FILTERED_CHUNKS_CH_NCOLS * DIM1_SCALE_FACTOR)
+
+/* Defines for the filtered chunks read test where a process has no selection */
+#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "single_no_selection_filtered_chunks_read"
+#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
+#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
+#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS (READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
+#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS (READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
+#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC (mpi_size - 1)
+
+/* Defines for the filtered chunks read test where no process has a selection */
+#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "all_no_selection_filtered_chunks_read"
+#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
+#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
+#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS (READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
+#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS (READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
+
+/* Defines for the filtered chunks read test with a point selection */
+#define READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME "point_selection_filtered_chunks_read"
+#define READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
+#define READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
+#define READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS (READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
+#define READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS (READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
+
+/* Defines for the filtered dataset interleaved read test */
+#define INTERLEAVED_READ_FILTERED_DATASET_NAME "filtered_dataset_interleaved_read"
+#define INTERLEAVED_READ_FILTERED_DATASET_DIMS 2
+#define INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS (mpi_size)
+#define INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define INTERLEAVED_READ_FILTERED_DATASET_NROWS (INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS * DIM0_SCALE_FACTOR)
+#define INTERLEAVED_READ_FILTERED_DATASET_NCOLS (INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS * DIM1_SCALE_FACTOR)
+
+/* Defines for the 3D unshared filtered dataset separate page read test */
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_separate_pages_read"
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS 3
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH (mpi_size)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS (READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / mpi_size)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS (READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / mpi_size)
+
+/* Defines for the 3D unshared filtered dataset same page read test */
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_same_pages_read"
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS 3
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH (mpi_size)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS (READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS / mpi_size)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS (READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / mpi_size)
+
+/* Defines for the 3d shared filtered dataset read test */
+#define READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME "3D_shared_filtered_chunks_read"
+#define READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS 3
+#define READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS (mpi_size)
+#define READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define READ_SHARED_FILTERED_CHUNKS_3D_NROWS (READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS * DIM0_SCALE_FACTOR)
+#define READ_SHARED_FILTERED_CHUNKS_3D_NCOLS (READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS * DIM1_SCALE_FACTOR)
+#define READ_SHARED_FILTERED_CHUNKS_3D_DEPTH (mpi_size)
+
+/* Defines for the compound datatype filtered dataset no conversion read test with unshared chunks */
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_no_conversion_read"
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS 2
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC (READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS / mpi_size)
+
+/* Defines for the compound datatype filtered dataset no conversion read test with shared chunks */
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME "compound_shared_filtered_chunks_no_conversion_read"
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS 2
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS
+
+/* Defines for the compound datatype filtered dataset type conversion read test with unshared chunks */
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_type_conversion_read"
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS 2
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC (READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS / mpi_size)
+
+/* Defines for the compound datatype filtered dataset type conversion read test with shared chunks */
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME "compound_shared_filtered_chunks_type_conversion_read"
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS 2
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS
/* Defines for the write file serially/read in parallel test */
#define WRITE_SERIAL_READ_PARALLEL_DATASET_NAME "write_serial_read_parallel"
@@ -209,4 +327,13 @@ typedef struct {
#define WRITE_PARALLEL_READ_SERIAL_CH_NROWS (WRITE_PARALLEL_READ_SERIAL_NROWS / mpi_size)
#define WRITE_PARALLEL_READ_SERIAL_CH_NCOLS (WRITE_PARALLEL_READ_SERIAL_NCOLS / mpi_size)
+/* Defines for the shrinking/growing chunks test */
+#define SHRINKING_GROWING_CHUNKS_DATASET_NAME "shrink_grow_chunks_test"
+#define SHRINKING_GROWING_CHUNKS_DATASET_DIMS 2
+#define SHRINKING_GROWING_CHUNKS_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define SHRINKING_GROWING_CHUNKS_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define SHRINKING_GROWING_CHUNKS_CH_NROWS (SHRINKING_GROWING_CHUNKS_NROWS / mpi_size)
+#define SHRINKING_GROWING_CHUNKS_CH_NCOLS (SHRINKING_GROWING_CHUNKS_NCOLS / mpi_size)
+#define SHRINKING_GROWING_CHUNKS_NLOOPS 20
+
#endif /* TEST_PARALLEL_FILTERS_H_ */
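
The renames above give every write-test macro a WRITE_ prefix and add a matching READ_ family, so the new read tests can target their own datasets instead of reusing the write-test names. As a minimal sketch (not part of the patch) of how one READ_ family is consumed, assuming the harness's global mpi_size and the hsize_t extent arrays used throughout t_filters_parallel.c:

    #include "hdf5.h"

    extern int mpi_size; /* assumption: set by the test harness from MPI_Comm_size() */

    /* Derive dataset and chunk extents from the READ_* macros: each rank
     * owns one chunk-sized block in every dimension. */
    static void
    read_unshared_extents(hsize_t dataset_dims[2], hsize_t chunk_dims[2])
    {
        dataset_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NROWS;    /* mpi_size * DIM0_SCALE_FACTOR */
        dataset_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NCOLS;    /* mpi_size * DIM1_SCALE_FACTOR */
        chunk_dims[0]   = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS; /* NROWS / mpi_size */
        chunk_dims[1]   = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS; /* NCOLS / mpi_size */
    }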
diff --git a/tools/src/h5stat/h5stat.c b/tools/src/h5stat/h5stat.c
index da713ac..ff67cf1 100644
--- a/tools/src/h5stat/h5stat.c
+++ b/tools/src/h5stat/h5stat.c
@@ -146,7 +146,7 @@ struct handler_t {
char **obj;
};
-static const char *s_opts ="Aa:Ddm:FfhGgl:sSTO:V";
+static const char *s_opts ="Aa:Ddm:EFfhGgl:sSTO:V";
/* e.g. "filemetadata" has to precede "file"; "groupmetadata" has to precede "group" etc. */
static struct long_options l_opts[] = {
{"help", no_arg, 'h'},
@@ -224,6 +224,7 @@ static struct long_options l_opts[] = {
{ "attr", no_arg, 'A' },
{ "att", no_arg, 'A' },
{ "at", no_arg, 'A' },
+ { "enable-error-stack", no_arg, 'E' },
{ "numattrs", require_arg, 'a' },
{ "numattr", require_arg, 'a' },
{ "numatt", require_arg, 'a' },
@@ -293,6 +294,7 @@ static void usage(const char *prog)
HDfprintf(stdout, " than 0. The default threshold is 10.\n");
HDfprintf(stdout, " -s, --freespace Print free space information\n");
HDfprintf(stdout, " -S, --summary Print summary of file space information\n");
+ HDfprintf(stdout, " --enable-error-stack Prints messages from the HDF5 error stack as they occur\n");
}
@@ -378,9 +380,8 @@ attribute_stats(iter_t *iter, const H5O_info_t *oi)
*
* Purpose: Gather statistics about the group
*
- * Return: Success: 0
- *
- * Failure: -1
+ * Return: Success: 0
+ * Failure: -1
*
* Programmer: Quincey Koziol
* Tuesday, August 16, 2005
@@ -402,9 +403,9 @@ attribute_stats(iter_t *iter, const H5O_info_t *oi)
static herr_t
group_stats(iter_t *iter, const char *name, const H5O_info_t *oi)
{
- H5G_info_t ginfo; /* Group information */
- unsigned bin; /* "bin" the number of objects falls in */
- herr_t ret;
+ H5G_info_t ginfo; /* Group information */
+ unsigned bin; /* "bin" the number of objects falls in */
+ herr_t ret_value = SUCCEED; /* Return value */
/* Gather statistics about this type of object */
iter->uniq_groups++;
@@ -414,8 +415,8 @@ group_stats(iter_t *iter, const char *name, const H5O_info_t *oi)
iter->group_ohdr_info.free_size += oi->hdr.space.free;
/* Get group information */
- ret = H5Gget_info_by_name(iter->fid, name, &ginfo, H5P_DEFAULT);
- HDassert(ret >= 0);
+ if((ret_value = H5Gget_info_by_name(iter->fid, name, &ginfo, H5P_DEFAULT)) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Gget_info_by_name() failed");
/* Update link stats */
/* Collect statistics for small groups */
@@ -429,10 +430,10 @@ group_stats(iter_t *iter, const char *name, const H5O_info_t *oi)
bin = ceil_log10((unsigned long)ginfo.nlinks);
if((bin + 1) > iter->group_nbins) {
/* Allocate more storage for info about dataset's datatype */
- iter->group_bins = (unsigned long *)HDrealloc(iter->group_bins, (bin + 1) * sizeof(unsigned long));
- HDassert(iter->group_bins);
+ if((iter->group_bins = (unsigned long *)HDrealloc(iter->group_bins, (bin + 1) * sizeof(unsigned long))) == NULL)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Drealloc() failed");
- /* Initialize counts for intermediate bins */
+ /* Initialize counts for intermediate bins */
while(iter->group_nbins < bin)
iter->group_bins[iter->group_nbins++] = 0;
iter->group_nbins++;
@@ -448,10 +449,11 @@ group_stats(iter_t *iter, const char *name, const H5O_info_t *oi)
iter->groups_heap_storage_size += oi->meta_size.obj.heap_size;
/* Update attribute metadata info */
- ret = attribute_stats(iter, oi);
- HDassert(ret >= 0);
+ if((ret_value = attribute_stats(iter, oi)) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "attribute_stats failed");
- return 0;
+done:
+ return ret_value;
} /* end group_stats() */
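
The hunks above replace the release-mode no-op pattern of HDassert() on a saved return code with explicit checks that report through HGOTO_ERROR and fall through to a single done: label, so group_stats() now propagates failures instead of silently returning 0. A simplified sketch of the idiom, assuming HGOTO_ERROR prints its message, sets the return value, and jumps to done: (spelled out here with a plain goto):

    #include <stdio.h>
    #include "hdf5.h"

    static herr_t
    example_stats(hid_t fid, const char *name)
    {
        H5G_info_t ginfo;
        herr_t     ret_value = 0; /* SUCCEED */

        if (H5Gget_info_by_name(fid, name, &ginfo, H5P_DEFAULT) < 0) {
            fprintf(stderr, "H5Gget_info_by_name() failed\n");
            ret_value = -1; /* FAIL */
            goto done;
        }
        /* ...further fallible calls, each jumping to done: on error... */

    done:
        return ret_value;
    }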
@@ -461,7 +463,6 @@ group_stats(iter_t *iter, const char *name, const H5O_info_t *oi)
* Purpose: Gather statistics about the dataset
*
* Return: Success: 0
- *
* Failure: -1
*
* Programmer: Quincey Koziol
@@ -472,22 +473,22 @@ group_stats(iter_t *iter, const char *name, const H5O_info_t *oi)
static herr_t
dataset_stats(iter_t *iter, const char *name, const H5O_info_t *oi)
{
- unsigned bin; /* "bin" the number of objects falls in */
- hid_t did; /* Dataset ID */
- hid_t sid; /* Dataspace ID */
- hid_t tid; /* Datatype ID */
- hid_t dcpl; /* Dataset creation property list ID */
- hsize_t dims[H5S_MAX_RANK];/* Dimensions of dataset */
- H5D_layout_t lout; /* Layout of dataset */
- unsigned type_found; /* Whether the dataset's datatype was */
- /* already found */
- int ndims; /* Number of dimensions of dataset */
- hsize_t storage; /* Size of dataset storage */
- unsigned u; /* Local index variable */
- int num_ext; /* Number of external files for a dataset */
- int nfltr; /* Number of filters for a dataset */
- H5Z_filter_t fltr; /* Filter identifier */
- herr_t ret;
+ unsigned bin; /* "bin" the number of objects falls in */
+ hid_t did; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t tid; /* Datatype ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ hsize_t dims[H5S_MAX_RANK]; /* Dimensions of dataset */
+ H5D_layout_t lout; /* Layout of dataset */
+ unsigned type_found; /* Whether the dataset's datatype was */
+ /* already found */
+ int ndims; /* Number of dimensions of dataset */
+ hsize_t storage; /* Size of dataset storage */
+ unsigned u; /* Local index variable */
+ int num_ext; /* Number of external files for a dataset */
+ int nfltr; /* Number of filters for a dataset */
+ H5Z_filter_t fltr; /* Filter identifier */
+ herr_t ret_value = SUCCEED; /* Return value */
/* Gather statistics about this type of object */
iter->uniq_dsets++;
@@ -496,26 +497,27 @@ dataset_stats(iter_t *iter, const char *name, const H5O_info_t *oi)
iter->dset_ohdr_info.total_size += oi->hdr.space.total;
iter->dset_ohdr_info.free_size += oi->hdr.space.free;
- did = H5Dopen2(iter->fid, name, H5P_DEFAULT);
- HDassert(did > 0);
+ if((did = H5Dopen2(iter->fid, name, H5P_DEFAULT)) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Dopen() failed");
/* Update dataset metadata info */
iter->datasets_index_storage_size += oi->meta_size.obj.index_size;
iter->datasets_heap_storage_size += oi->meta_size.obj.heap_size;
/* Update attribute metadata info */
- ret = attribute_stats(iter, oi);
- HDassert(ret >= 0);
+ if((ret_value = attribute_stats(iter, oi)) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "attribute_stats() failed");
/* Get storage info */
- storage = H5Dget_storage_size(did);
+ storage = H5Dget_storage_size(did); /* no error check: returns hsize_t, which is unsigned */
/* Gather layout statistics */
- dcpl = H5Dget_create_plist(did);
- HDassert(dcpl > 0);
+ if((dcpl = H5Dget_create_plist(did)) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Dget_create_plist() failed");
- lout = H5Pget_layout(dcpl);
- HDassert(lout >= 0);
+ if((lout = H5Pget_layout(dcpl)) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Pget_layout() failed");
/* Object header's total size for H5D_COMPACT layout includes raw data size */
/* "storage" also includes H5D_COMPACT raw data size */
@@ -526,8 +528,8 @@ dataset_stats(iter_t *iter, const char *name, const H5O_info_t *oi)
(iter->dset_layouts[lout])++;
/* Get the number of external files for the dataset */
- num_ext = H5Pget_external_count(dcpl);
- assert (num_ext >= 0);
+ if((num_ext = H5Pget_external_count(dcpl)) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Pget_external_count() failed");
/* Accumulate raw data size accordingly */
if(num_ext) {
@@ -537,11 +539,11 @@ dataset_stats(iter_t *iter, const char *name, const H5O_info_t *oi)
iter->dset_storage_size += storage;
/* Gather dataspace statistics */
- sid = H5Dget_space(did);
- HDassert(sid > 0);
+ if((sid = H5Dget_space(did)) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Sget_space() failed");
- ndims = H5Sget_simple_extent_dims(sid, dims, NULL);
- HDassert(ndims >= 0);
+ if((ndims = H5Sget_simple_extent_dims(sid, dims, NULL)) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Sget_simple_extent_dims() failed");
/* Check for larger rank of dataset */
if((unsigned)ndims > iter->max_dset_rank)
@@ -552,38 +554,38 @@ dataset_stats(iter_t *iter, const char *name, const H5O_info_t *oi)
/* Only gather dim size statistics on 1-D datasets */
if(ndims == 1) {
- /* Determine maximum dimension size */
- if(dims[0] > iter->max_dset_dims)
- iter->max_dset_dims = dims[0];
- /* Collect statistics for small datasets */
- if(dims[0] < (hsize_t)sdsets_threshold)
- (iter->small_dset_dims[(size_t)dims[0]])++;
-
- /* Add dim count to proper bin */
- bin = ceil_log10((unsigned long)dims[0]);
- if((bin + 1) > iter->dset_dim_nbins) {
- /* Allocate more storage for info about dataset's datatype */
- iter->dset_dim_bins = (unsigned long *)HDrealloc(iter->dset_dim_bins, (bin + 1) * sizeof(unsigned long));
- HDassert(iter->dset_dim_bins);
-
- /* Initialize counts for intermediate bins */
- while(iter->dset_dim_nbins < bin)
- iter->dset_dim_bins[iter->dset_dim_nbins++] = 0;
- iter->dset_dim_nbins++;
-
- /* Initialize count for this bin */
- iter->dset_dim_bins[bin] = 1;
+ /* Determine maximum dimension size */
+ if(dims[0] > iter->max_dset_dims)
+ iter->max_dset_dims = dims[0];
+ /* Collect statistics for small datasets */
+ if(dims[0] < (hsize_t)sdsets_threshold)
+ (iter->small_dset_dims[(size_t)dims[0]])++;
+
+ /* Add dim count to proper bin */
+ bin = ceil_log10((unsigned long)dims[0]);
+ if((bin + 1) > iter->dset_dim_nbins) {
+ /* Allocate more storage for info about dataset's dimensions */
+ if((iter->dset_dim_bins = (unsigned long *)HDrealloc(iter->dset_dim_bins, (bin + 1) * sizeof(unsigned long))) == NULL)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Drealloc() failed");
+
+ /* Initialize counts for intermediate bins */
+ while(iter->dset_dim_nbins < bin)
+ iter->dset_dim_bins[iter->dset_dim_nbins++] = 0;
+ iter->dset_dim_nbins++;
+
+ /* Initialize count for this bin */
+ iter->dset_dim_bins[bin] = 1;
} /* end if */
else
(iter->dset_dim_bins[bin])++;
} /* end if */
- ret = H5Sclose(sid);
- HDassert(ret >= 0);
+ if(H5Sclose(sid) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Sclose() failed");
/* Gather datatype statistics */
- tid = H5Dget_type(did);
- HDassert(tid > 0);
+ if((tid = H5Dget_type(did)) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Dget_type() failed");
type_found = FALSE;
for(u = 0; u < iter->dset_ntypes; u++)
@@ -591,6 +593,7 @@ dataset_stats(iter_t *iter, const char *name, const H5O_info_t *oi)
type_found = TRUE;
break;
} /* end for */
+
if(type_found)
(iter->dset_type_info[u].count)++;
else {
@@ -600,12 +603,12 @@ dataset_stats(iter_t *iter, const char *name, const H5O_info_t *oi)
iter->dset_ntypes++;
/* Allocate more storage for info about dataset's datatype */
- iter->dset_type_info = (dtype_info_t *)HDrealloc(iter->dset_type_info, iter->dset_ntypes * sizeof(dtype_info_t));
- HDassert(iter->dset_type_info);
+ if((iter->dset_type_info = (dtype_info_t *)HDrealloc(iter->dset_type_info, iter->dset_ntypes * sizeof(dtype_info_t))) == NULL)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Drealloc() failed");
/* Initialize information about datatype */
- iter->dset_type_info[curr_ntype].tid = H5Tcopy(tid);
- HDassert(iter->dset_type_info[curr_ntype].tid > 0);
+ if((iter->dset_type_info[curr_ntype].tid = H5Tcopy(tid)) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Tcopy() failed");
iter->dset_type_info[curr_ntype].count = 1;
iter->dset_type_info[curr_ntype].named = 0;
@@ -617,8 +620,8 @@ dataset_stats(iter_t *iter, const char *name, const H5O_info_t *oi)
if(H5Tcommitted(tid) > 0)
(iter->dset_type_info[u].named)++;
- ret = H5Tclose(tid);
- HDassert(ret >= 0);
+ if(H5Tclose(tid) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Tclose() failed");
/* Track different filters */
if((nfltr = H5Pget_nfilters(dcpl)) >= 0) {
@@ -635,13 +638,14 @@ dataset_stats(iter_t *iter, const char *name, const H5O_info_t *oi)
} /* end for */
} /* endif nfltr */
- ret = H5Pclose(dcpl);
- HDassert(ret >= 0);
+ if(H5Pclose(dcpl) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Pclose() failed");
- ret = H5Dclose(did);
- HDassert(ret >= 0);
+ if(H5Dclose(did) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Dclose() failed");
- return 0;
+done:
+ return ret_value;
} /* end dataset_stats() */
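
dataset_stats() keeps its decimal-order histograms: a dimension size of 1-9 lands in one bin, 10-99 in the next, and so on; when a value needs a bin past the current end, the bin array is reallocated, the intermediate bins are zeroed, and the new bin starts at 1. A standalone sketch of that scheme, assuming ceil_log10(x) counts the decimal digits of x as the in-tree helper appears to (pow10 overflow is ignored in this sketch):

    #include <stdlib.h>

    static int
    add_to_bins(unsigned long value, unsigned long **bins, unsigned *nbins)
    {
        unsigned      bin   = 0;
        unsigned long pow10 = 1;

        while (value >= pow10) { /* digit count, a ceil_log10 stand-in */
            pow10 *= 10;
            bin++;
        }

        if (bin + 1 > *nbins) {
            unsigned long *tmp = (unsigned long *)realloc(*bins, (bin + 1) * sizeof(unsigned long));

            if (tmp == NULL)
                return -1;
            *bins = tmp;
            while (*nbins < bin) /* zero the intermediate bins */
                (*bins)[(*nbins)++] = 0;
            (*nbins)++;
            (*bins)[bin] = 1;    /* first entry in the brand-new bin */
        }
        else
            (*bins)[bin]++;

        return 0;
    }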
@@ -660,7 +664,7 @@ dataset_stats(iter_t *iter, const char *name, const H5O_info_t *oi)
static herr_t
datatype_stats(iter_t *iter, const H5O_info_t *oi)
{
- herr_t ret;
+ herr_t ret_value = SUCCEED;
/* Gather statistics about this type of object */
iter->uniq_dtypes++;
@@ -670,10 +674,10 @@ datatype_stats(iter_t *iter, const H5O_info_t *oi)
iter->dtype_ohdr_info.free_size += oi->hdr.space.free;
/* Update attribute metadata info */
- ret = attribute_stats(iter, oi);
- HDassert(ret >= 0);
-
- return 0;
+ if((ret_value = attribute_stats(iter, oi)) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "attribute_stats() failed");
+done:
+ return ret_value;
} /* end datatype_stats() */
@@ -695,6 +699,7 @@ obj_stats(const char *path, const H5O_info_t *oi, const char *already_visited,
void *_iter)
{
iter_t *iter = (iter_t *)_iter;
+ herr_t ret_value = SUCCEED;
/* If the object has already been seen then just return */
if(NULL == already_visited) {
@@ -704,15 +709,18 @@ obj_stats(const char *path, const H5O_info_t *oi, const char *already_visited,
switch(oi->type) {
case H5O_TYPE_GROUP:
- group_stats(iter, path, oi);
+ if(group_stats(iter, path, oi) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "group_stats failed");
break;
case H5O_TYPE_DATASET:
- dataset_stats(iter, path, oi);
+ if(dataset_stats(iter, path, oi) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "dataset_stats failed");
break;
case H5O_TYPE_NAMED_DATATYPE:
- datatype_stats(iter, oi);
+ if(datatype_stats(iter, oi) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "datatype_stats failed");
break;
case H5O_TYPE_UNKNOWN:
@@ -724,7 +732,8 @@ obj_stats(const char *path, const H5O_info_t *oi, const char *already_visited,
} /* end switch */
} /* end if */
- return 0;
+done:
+ return ret_value;
} /* end obj_stats() */
@@ -733,9 +742,8 @@ obj_stats(const char *path, const H5O_info_t *oi, const char *already_visited,
*
* Purpose: Gather statistics about a link
*
- * Return: Success: 0
- *
- * Failure: -1
+ * Return: Success: 0
+ * Failure: -1
*
* Programmer: Quincey Koziol
* Tuesday, November 6, 2007
@@ -892,6 +900,10 @@ parse_command_line(int argc, const char *argv[], struct handler_t **hand_ret)
goto done;
break;
+ case 'E':
+ enable_error_stack = 1;
+ break;
+
case 'F':
display_all = FALSE;
display_file_metadata = TRUE;
@@ -913,14 +925,14 @@ parse_command_line(int argc, const char *argv[], struct handler_t **hand_ret)
break;
case 'l':
- if(opt_arg) {
- sgroups_threshold = HDatoi(opt_arg);
- if(sgroups_threshold < 1) {
- error_msg("Invalid threshold for small groups\n");
- goto error;
- }
- } else
- error_msg("Missing threshold for small groups\n");
+ if(opt_arg) {
+ sgroups_threshold = HDatoi(opt_arg);
+ if(sgroups_threshold < 1) {
+ error_msg("Invalid threshold for small groups\n");
+ goto error;
+ }
+ } else
+ error_msg("Missing threshold for small groups\n");
break;
@@ -935,14 +947,14 @@ parse_command_line(int argc, const char *argv[], struct handler_t **hand_ret)
break;
case 'm':
- if(opt_arg) {
- sdsets_threshold = HDatoi(opt_arg);
- if(sdsets_threshold < 1) {
- error_msg("Invalid threshold for small datasets\n");
- goto error;
- }
- } else
- error_msg("Missing threshold for small datasets\n");
+ if(opt_arg) {
+ sdsets_threshold = HDatoi(opt_arg);
+ if(sdsets_threshold < 1) {
+ error_msg("Invalid threshold for small datasets\n");
+ goto error;
+ }
+ } else
+ error_msg("Missing threshold for small datasets\n");
break;
@@ -957,13 +969,13 @@ parse_command_line(int argc, const char *argv[], struct handler_t **hand_ret)
break;
case 'a':
- if(opt_arg) {
- sattrs_threshold = HDatoi(opt_arg);
- if(sattrs_threshold < 1) {
- error_msg("Invalid threshold for small # of attributes\n");
- goto error;
- }
- } else
+ if(opt_arg) {
+ sattrs_threshold = HDatoi(opt_arg);
+ if(sattrs_threshold < 1) {
+ error_msg("Invalid threshold for small # of attributes\n");
+ goto error;
+ }
+ } else
error_msg("Missing threshold for small # of attributes\n");
break;
@@ -1249,9 +1261,8 @@ print_group_info(const iter_t *iter)
*
* Purpose: Prints file space information for groups' metadata
*
- * Return: Success: 0
- *
- * Failure: Never fails
+ * Return: Success: 0
+ * Failure: Never fails
*
* Programmer: Vailin Choi; October 2009
*
@@ -1277,9 +1288,8 @@ print_group_metadata(const iter_t *iter)
*
* Purpose: Prints information about datasets in the file
*
- * Return: Success: 0
- *
- * Failure: Never fails
+ * Return: Success: 0
+ * Failure: Never fails
*
* Programmer: Elena Pourmal
* Saturday, August 12, 2006
@@ -1340,7 +1350,7 @@ print_dataset_info(const iter_t *iter)
printf("Dataset layout information:\n");
for(u = 0; u < H5D_NLAYOUTS; u++)
- printf("\tDataset layout counts[%s]: %lu\n", (u == H5D_COMPACT ? "COMPACT" :
+ printf("\tDataset layout counts[%s]: %lu\n", (u == H5D_COMPACT ? "COMPACT" :
(u == H5D_CONTIGUOUS ? "CONTIG" : (u == H5D_CHUNKED ? "CHUNKED" : "VIRTUAL"))), iter->dset_layouts[u]);
printf("\tNumber of external files : %lu\n", iter->nexternal);
@@ -1704,16 +1714,25 @@ main(int argc, const char *argv[])
iter_t iter;
const char *fname = NULL;
hid_t fid = -1;
+ H5E_auto2_t func;
+ H5E_auto2_t tools_func;
+ void *edata;
+ void *tools_edata;
struct handler_t *hand = NULL;
h5tools_setprogname(PROGRAMNAME);
h5tools_setstatus(EXIT_SUCCESS);
/* Disable error reporting */
+ H5Eget_auto2(H5E_DEFAULT, &func, &edata);
H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
/* Initialize h5tools lib */
h5tools_init();
+
+ /* Disable tools error reporting */
+ H5Eget_auto2(H5tools_ERR_STACK_g, &tools_func, &tools_edata);
+ H5Eset_auto2(H5tools_ERR_STACK_g, NULL, NULL);
HDmemset(&iter, 0, sizeof(iter));
@@ -1722,6 +1741,11 @@ main(int argc, const char *argv[])
fname = argv[opt_ind];
+ if(enable_error_stack > 0) {
+ H5Eset_auto2(H5E_DEFAULT, func, edata);
+ H5Eset_auto2(H5tools_ERR_STACK_g, tools_func, tools_edata);
+ }
+
/* Check for filename given */
if(fname) {
hid_t fcpl;
@@ -1788,16 +1812,18 @@ main(int argc, const char *argv[])
unsigned u;
for(u = 0; u < hand->obj_count; u++) {
- if(h5trav_visit(fid, hand->obj[u], TRUE, TRUE, obj_stats, lnk_stats, &iter, H5O_INFO_ALL) < 0)
- warn_msg("Unable to traverse object \"%s\"\n", hand->obj[u]);
- else
+ if(h5trav_visit(fid, hand->obj[u], TRUE, TRUE, obj_stats, lnk_stats, &iter, H5O_INFO_ALL) < 0) {
+ error_msg("unable to traverse object \"%s\"\n", hand->obj[u]);
+ h5tools_setstatus(EXIT_FAILURE);
+ } else
print_statistics(hand->obj[u], &iter);
} /* end for */
} /* end if */
else {
- if(h5trav_visit(fid, "/", TRUE, TRUE, obj_stats, lnk_stats, &iter, H5O_INFO_ALL) < 0)
- warn_msg("Unable to traverse objects/links in file \"%s\"\n", fname);
- else
+ if(h5trav_visit(fid, "/", TRUE, TRUE, obj_stats, lnk_stats, &iter, H5O_INFO_ALL) < 0) {
+ error_msg("unable to traverse objects/links in file \"%s\"\n", fname);
+ h5tools_setstatus(EXIT_FAILURE);
+ } else
print_statistics("/", &iter);
} /* end else */
} /* end if */
@@ -1813,6 +1839,8 @@ done:
h5tools_setstatus(EXIT_FAILURE);
} /* end if */
+ H5Eset_auto2(H5E_DEFAULT, func, edata);
+
leave(h5tools_getstatus());
} /* end main() */
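
main() now snapshots both automatic error handlers with H5Eget_auto2() before silencing them, and restores them only when --enable-error-stack was given, so the new HGOTO_ERROR paths stay quiet by default. A minimal sketch of the save/suppress/restore pattern, showing only the default stack (the tools stack H5tools_ERR_STACK_g follows the same calls):

    #include "hdf5.h"

    int
    main(void)
    {
        H5E_auto2_t func;
        void       *edata;
        int         enable_error_stack = 0; /* set to 1 by -E / --enable-error-stack */

        H5Eget_auto2(H5E_DEFAULT, &func, &edata); /* save the current handler */
        H5Eset_auto2(H5E_DEFAULT, NULL, NULL);    /* suppress automatic reporting */

        /* ...parse options; failing HDF5 calls stay silent here... */

        if (enable_error_stack > 0)
            H5Eset_auto2(H5E_DEFAULT, func, edata); /* restore on request */

        return 0;
    }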
diff --git a/tools/test/h5dump/CMakeTests.cmake b/tools/test/h5dump/CMakeTests.cmake
index cdd3e6d..132fc2b 100644
--- a/tools/test/h5dump/CMakeTests.cmake
+++ b/tools/test/h5dump/CMakeTests.cmake
@@ -21,6 +21,7 @@
# --------------------------------------------------------------------
set (HDF5_REFERENCE_FILES
${HDF5_TOOLS_DIR}/testfiles/charsets.ddl
+ ${HDF5_TOOLS_DIR}/testfiles/err_attr_dspace.ddl
${HDF5_TOOLS_DIR}/testfiles/file_space.ddl
${HDF5_TOOLS_DIR}/testfiles/filter_fail.ddl
${HDF5_TOOLS_DIR}/testfiles/non_existing.ddl
@@ -217,6 +218,7 @@
)
set (HDF5_REFERENCE_TEST_FILES
${HDF5_TOOLS_DIR}/testfiles/charsets.h5
+ ${HDF5_TOOLS_DIR}/testfiles/err_attr_dspace.h5
${HDF5_TOOLS_DIR}/testfiles/file_space.h5
${HDF5_TOOLS_DIR}/testfiles/filter_fail.h5
${HDF5_TOOLS_DIR}/testfiles/packedbits.h5
@@ -1533,6 +1535,9 @@
# test for non-existing file
ADD_H5_TEST (non_existing 1 --enable-error-stack tgroup.h5 non_existing.h5)
+ # test to verify HDFFV-10333: error similar to H5O_attr_decode in the jira issue
+ ADD_H5_TEST (err_attr_dspace 1 err_attr_dspace.h5)
+
##############################################################################
### P L U G I N T E S T S
##############################################################################
diff --git a/tools/test/h5dump/h5dumpgentest.c b/tools/test/h5dump/h5dumpgentest.c
index 9358fbb..9e9f6f1 100644
--- a/tools/test/h5dump/h5dumpgentest.c
+++ b/tools/test/h5dump/h5dumpgentest.c
@@ -113,6 +113,7 @@
#define FILE83 "tvlenstr_array.h5"
#define FILE84 "tudfilter.h5"
#define FILE85 "tgrpnullspace.h5"
+#define FILE86 "err_attr_dspace.h5"
/*-------------------------------------------------------------------------
* prototypes
@@ -10476,6 +10477,89 @@ static void gent_null_space_group(void)
H5Fclose(fid);
}
+/*-------------------------------------------------------------------------
+ * Function: gent_err_attr_dspace
+ *
+ * Purpose: Generate a file with a shared dataspace message,
+ * then write an illegal version to that shared
+ * dataspace message to trigger a decoding error.
+ * This verifies HDFFV-10333: h5dump will exit
+ * gracefully when it encounters an error similar to
+ * the H5O_attr_decode error in the JIRA issue.
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+gent_err_attr_dspace(void)
+{
+ hid_t fid = -1; /* File identifier */
+ hid_t fcpl = -1; /* File creation property list */
+ hid_t sid = -1; /* Dataspace identifier */
+ hid_t aid = -1; /* Attribute identifier */
+ hsize_t dims = 2; /* Dimension size */
+ int wdata[2] = {7, 42}; /* The buffer to write */
+ int fd = -1; /* The file descriptor */
+ char val = 6; /* An invalid version */
+
+ /* Create an fcpl */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ goto error;
+
+ /* Set up the dataspace message to be shared */
+ if(H5Pset_shared_mesg_nindexes(fcpl, 1) < 0)
+ goto error;
+ if(H5Pset_shared_mesg_index(fcpl, 0, H5O_SHMESG_SDSPACE_FLAG, 1) < 0)
+ goto error;
+
+ /* Create the file with the shared message setting */
+ if((fid = H5Fcreate(FILE86, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT)) < 0)
+ goto error;
+
+ /* Create the dataspace */
+ if((sid = H5Screate_simple(1, &dims, &dims)) < 0)
+ goto error;
+
+ /* Create an attribute with shared dataspace */
+ if((aid = H5Acreate2(fid, "attribute", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ goto error;
+ if(H5Awrite(aid, H5T_NATIVE_INT, wdata) < 0)
+ goto error;
+
+ /* Closing */
+ if(H5Aclose(aid) < 0)
+ goto error;
+ if(H5Sclose(sid) < 0)
+ goto error;
+ if(H5Pclose(fcpl) < 0)
+ goto error;
+ if(H5Fclose(fid) < 0)
+ goto error;
+
+ /* This section of code will write an illegal version to the "version" field
+ of the shared dataspace message */
+ if((fd = HDopen(FILE86, O_RDWR, 0633)) < 0)
+ goto error;
+
+ /* Offset of the "version" field to modify is as follows: */
+ /* 1916: offset of the object header containing the attribute message */
+ /* 32: offset of the attribute message in the object header */
+ /* 30: offset in the attribute message containing the version of the shared dataspace message */
+ if(HDlseek(fd, 1916+32+30, SEEK_SET) < 0)
+ goto error;
+ if(HDwrite(fd, &val, 1) < 0)
+ goto error;
+ if(HDclose(fd) < 0)
+ goto error;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(fcpl);
+ H5Aclose(aid);
+ H5Sclose(sid);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+} /* gent_err_attr_dspace() */
+
int main(void)
{
gent_group();
@@ -10569,6 +10653,8 @@ int main(void)
gent_udfilter();
+ gent_err_attr_dspace();
+
return 0;
}
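
gent_err_attr_dspace() corrupts the file only after every HDF5 handle is closed, poking a single byte at a file-format offset (1916+32+30 = 1978) with raw POSIX-style I/O; because such offsets depend on the exact layout the library wrote, the damaged .h5 file is checked in rather than regenerated everywhere. A generic sketch of the byte-patching step, assuming the HD* wrappers map to plain POSIX calls:

    #include <fcntl.h>
    #include <unistd.h>

    /* Overwrite one byte at a fixed offset; returns 0 on success, -1 on error. */
    static int
    patch_byte(const char *fname, off_t offset, unsigned char val)
    {
        int fd = open(fname, O_RDWR);

        if (fd < 0)
            return -1;
        if (lseek(fd, offset, SEEK_SET) < 0 || write(fd, &val, 1) != 1) {
            close(fd);
            return -1;
        }
        return close(fd);
    }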
diff --git a/tools/test/h5dump/testh5dump.sh.in b/tools/test/h5dump/testh5dump.sh.in
index 1935b0d..42e4b07 100644
--- a/tools/test/h5dump/testh5dump.sh.in
+++ b/tools/test/h5dump/testh5dump.sh.in
@@ -175,6 +175,7 @@ $SRC_H5DUMP_TESTFILES/tvldtypes5.h5
$SRC_H5DUMP_TESTFILES/tvlenstr_array.h5
$SRC_H5DUMP_TESTFILES/tvlstr.h5
$SRC_H5DUMP_TESTFILES/tvms.h5
+$SRC_H5DUMP_TESTFILES/err_attr_dspace.h5
"
LIST_OTHER_TEST_FILES="
@@ -360,6 +361,7 @@ $SRC_H5DUMP_TESTFILES/twithddlfile.exp
$SRC_H5DUMP_TESTFILES/h5dump-help.txt
$SRC_H5DUMP_TESTFILES/out3.h5import
$SRC_H5DUMP_TESTFILES/tbinregR.exp
+$SRC_H5DUMP_TESTFILES/err_attr_dspace.ddl
"
LIST_ERROR_TEST_FILES="
@@ -1364,6 +1366,9 @@ TOOLTEST2 tall-6.exp --enable-error-stack -y -o tall-6.txt -d /g1/g1.1/dset1.1.1
# test for non-existing file
TOOLTEST3 non_existing.ddl --enable-error-stack tgroup.h5 non_existing.h5
+# test to verify HDFFV-10333: error similar to H5O_attr_decode in the jira issue
+TOOLTEST err_attr_dspace.ddl err_attr_dspace.h5
+
# Clean up temporary files/directories
CLEAN_TESTFILES_AND_TESTDIR
diff --git a/tools/test/h5stat/CMakeTests.cmake b/tools/test/h5stat/CMakeTests.cmake
index 69bab20..bd55ac1 100644
--- a/tools/test/h5stat/CMakeTests.cmake
+++ b/tools/test/h5stat/CMakeTests.cmake
@@ -20,6 +20,9 @@
# Copy all the HDF5 files from the test directory into the source directory
# --------------------------------------------------------------------
set (HDF5_REFERENCE_FILES
+ h5stat_err_refcount
+ h5stat_err_old_layout
+ h5stat_err_old_fill
h5stat_help1
h5stat_help2
h5stat_notexist
@@ -54,6 +57,9 @@
h5stat_numattrs4
)
set (HDF5_REFERENCE_TEST_FILES
+ h5stat_err_refcount.h5
+ h5stat_err_old_layout.h5
+ h5stat_err_old_fill.h5
h5stat_filters.h5
h5stat_idx.h5
h5stat_tsohm.h5
@@ -160,9 +166,7 @@
ADD_H5_TEST (h5stat_newgrat-UG 0 -G h5stat_newgrat.h5)
ADD_H5_TEST (h5stat_newgrat-UA 0 -A h5stat_newgrat.h5)
# h5stat_idx.h5 is generated by h5stat_gentest.c
- if (HDF5_BUILD_GENERATORS)
- ADD_H5_TEST (h5stat_idx 0 h5stat_idx.h5)
- endif ()
+ ADD_H5_TEST (h5stat_idx 0 h5stat_idx.h5)
#
# Tests for -l (--links) option on h5stat_threshold.h5:
# -l 0 (incorrect threshold value)
@@ -204,3 +208,11 @@
# -A -a 100
ADD_H5_TEST (h5stat_numattrs4 0 -A -a 100 h5stat_newgrat.h5)
#
+# Tests to verify HDFFV-10333:
+# h5stat_err_refcount.h5 is generated by h5stat_gentest.c
+# h5stat_err_old_layout.h5 and h5stat_err_old_fill.h5: see explanation in h5stat_gentest.c
+ ADD_H5_TEST (h5stat_err_refcount 1 h5stat_err_refcount.h5)
+ ADD_H5_TEST (h5stat_err_old_layout 1 h5stat_err_old_layout.h5)
+ ADD_H5_TEST (h5stat_err_old_fill 1 h5stat_err_old_fill.h5)
+#
+#
diff --git a/tools/test/h5stat/h5stat_gentest.c b/tools/test/h5stat/h5stat_gentest.c
index 0f696d0..2daf24b 100644
--- a/tools/test/h5stat/h5stat_gentest.c
+++ b/tools/test/h5stat/h5stat_gentest.c
@@ -21,6 +21,7 @@
* of the expected output and update the corresponding *.ddl files.
*/
#include "hdf5.h"
+#include "H5private.h"
/* For gen_newgrat_file() */
#define NEWGRAT_FILE "h5stat_newgrat.h5"
@@ -43,6 +44,9 @@
#define THRES_NUM 10
#define THRES_NUM_25 25
+/* For gen_err_refcount() */
+#define ERR_REFCOUNT_FILE "h5stat_err_refcount.h5"
+
/*
* Generate HDF5 file with latest format with
* NUM_GRPS groups and NUM_ATTRS attributes for the dataset
@@ -434,6 +438,140 @@ error:
} /* gen_idx_file() */
+/*
+ * Function: gen_err_refcount
+ *
+ * Purpose: Create a file, then overwrite the ID of a message
+ * in a version 1 object header with the refcount
+ * message ID.
+ * This triggers an error because a version 1
+ * object header does not support refcount messages.
+ * This verifies HDFFV-10333: h5stat will exit
+ * gracefully when it encounters an error similar to
+ * the H5O_refcount_decode error in the JIRA issue.
+ *
+ */
+static void
+gen_err_refcount(const char *fname)
+{
+ hid_t fid = -1; /* File identifier */
+ hid_t sid = -1; /* Dataspace identifier */
+ hid_t did = -1; /* Dataset identifier */
+ hid_t gid = -1; /* Group identifier */
+ hid_t aid1 = -1, aid2 = -1; /* Attribute identifiers */
+ hid_t tid = -1; /* Datatype identifier */
+ int i, n; /* Local index variables */
+ int buf[10]; /* Data buffer */
+ hsize_t dims[1]; /* Dimension size */
+ int fd = -1; /* File descriptor */
+ unsigned short val = 22; /* The refcount message ID */
+
+ /* Initialize data buffer */
+ n = 0;
+ for(i = 0; i < 10; i++)
+ buf[i] = n++;
+
+ /* Create the file */
+ if((fid = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ goto error;
+
+ /* Create a group */
+ if((gid = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ goto error;
+
+ /* Create a committed datatype in the group */
+ if((tid = H5Tcopy(H5T_NATIVE_INT)) < 0)
+ goto error;
+ if(H5Tcommit2(gid, "dtype", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0)
+ goto error;
+
+ /* Create the dataspace */
+ dims[0] = 10;
+ if((sid = H5Screate_simple(1, dims, NULL)) < 0)
+ goto error;
+
+ /* Create a dataset with the committed datatype in the file */
+ if((did = H5Dcreate2(fid, "dset", tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ goto error;
+ /* Write to the dataset */
+ if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
+ goto error;
+
+ /* Attach an attribute with the committed datatype to the group */
+ if((aid1 = H5Acreate2(gid, "attr", tid, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ goto error;
+ /* Attach an attribute with the committed datatype to the dataset */
+ if((aid2 = H5Acreate2(did, "attr", tid, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ goto error;
+
+ /* Closing */
+ if(H5Aclose(aid1) < 0)
+ goto error;
+ if(H5Aclose(aid2) < 0)
+ goto error;
+ if(H5Sclose(sid) < 0)
+ goto error;
+ if(H5Dclose(did) < 0)
+ goto error;
+ if(H5Gclose(gid) < 0)
+ goto error;
+ if(H5Tclose(tid) < 0)
+ goto error;
+ if(H5Fclose(fid) < 0)
+ goto error;
+
+ /* This section of code will write a refcount message ID to a message in the
+ version 1 object header which does not support a refcount message */
+ /* Offset of the message ID to modify is as follows: */
+ /* 4520: the offset of the object header containing the attribute message
+ with the committed datatype */
+ /* 24: the offset within that object header of the message type ID
+ field to overwrite */
+ if((fd = HDopen(fname, O_RDWR, 0633)) < 0)
+ goto error;
+ if(HDlseek(fd, 4520+24, SEEK_SET) < 0)
+ goto error;
+ if(HDwrite(fd, &val, 2) < 0)
+ goto error;
+ if(HDclose(fd) < 0)
+ goto error;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Gclose(gid);
+ H5Dclose(did);
+ H5Tclose(tid);
+ H5Sclose(sid);
+ H5Aclose(aid1);
+ H5Aclose(aid2);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+} /* gen_err_refcount() */
+
+/*
+ * The following two test files are generated with older versions
+ * of the library for HDFFV-10333. They are used for testing in
+ * testh5stat.sh.in.
+ *
+ * (1) h5stat_err_old_layout.h5
+ * This file is generated with the 1.6 library so that a file
+ * with a version 2 layout message is created.
+ * Then a "0" is written to the "dimension" field in the layout
+ * message to trigger the error.
+ * This verifies HDFFV-10333: h5stat will exit gracefully
+ * when it encounters an error similar to H5O__layout_decode
+ * in the JIRA issue.
+ *
+ * (2) h5stat_err_old_fill.h5
+ * This file is generated with the 1.4 library so that a file
+ * with an old fill value message is created.
+ * Then an illegal size is written to the "size" field in the
+ * fill value message to trigger the error.
+ * This verifies HDFFV-10333: h5stat will exit gracefully
+ * when it encounters an error similar to H5O_fill_old_decode
+ * in the JIRA issue.
+ */
+
int main(void)
{
gen_newgrat_file(NEWGRAT_FILE);
@@ -442,6 +580,9 @@ int main(void)
/* Generate an HDF file to test for datasets with Fixed Array indexing */
gen_idx_file(IDX_FILE);
+ /* Generate a file with a refcount message ID */
+ gen_err_refcount(ERR_REFCOUNT_FILE);
+
return 0;
}
diff --git a/tools/test/h5stat/testfiles/h5stat_err_old_fill.ddl b/tools/test/h5stat/testfiles/h5stat_err_old_fill.ddl
new file mode 100644
index 0000000..e751b7f
--- /dev/null
+++ b/tools/test/h5stat/testfiles/h5stat_err_old_fill.ddl
@@ -0,0 +1,2 @@
+Filename: h5stat_err_old_fill.h5
+h5stat error: unable to traverse objects/links in file "h5stat_err_old_fill.h5"
diff --git a/tools/test/h5stat/testfiles/h5stat_err_old_fill.h5 b/tools/test/h5stat/testfiles/h5stat_err_old_fill.h5
new file mode 100644
index 0000000..aa164f0
--- /dev/null
+++ b/tools/test/h5stat/testfiles/h5stat_err_old_fill.h5
Binary files differ
diff --git a/tools/test/h5stat/testfiles/h5stat_err_old_layout.ddl b/tools/test/h5stat/testfiles/h5stat_err_old_layout.ddl
new file mode 100644
index 0000000..a3e27e2
--- /dev/null
+++ b/tools/test/h5stat/testfiles/h5stat_err_old_layout.ddl
@@ -0,0 +1,2 @@
+Filename: h5stat_err_old_layout.h5
+h5stat error: unable to traverse objects/links in file "h5stat_err_old_layout.h5"
diff --git a/tools/test/h5stat/testfiles/h5stat_err_old_layout.h5 b/tools/test/h5stat/testfiles/h5stat_err_old_layout.h5
new file mode 100644
index 0000000..5c0b5dc
--- /dev/null
+++ b/tools/test/h5stat/testfiles/h5stat_err_old_layout.h5
Binary files differ
diff --git a/tools/test/h5stat/testfiles/h5stat_err_refcount.ddl b/tools/test/h5stat/testfiles/h5stat_err_refcount.ddl
new file mode 100644
index 0000000..1f1b491
--- /dev/null
+++ b/tools/test/h5stat/testfiles/h5stat_err_refcount.ddl
@@ -0,0 +1,2 @@
+Filename: h5stat_err_refcount.h5
+h5stat error: unable to traverse objects/links in file "h5stat_err_refcount.h5"
diff --git a/tools/test/h5stat/testfiles/h5stat_err_refcount.h5 b/tools/test/h5stat/testfiles/h5stat_err_refcount.h5
new file mode 100644
index 0000000..5e0d5f9
--- /dev/null
+++ b/tools/test/h5stat/testfiles/h5stat_err_refcount.h5
Binary files differ
diff --git a/tools/test/h5stat/testfiles/h5stat_help1.ddl b/tools/test/h5stat/testfiles/h5stat_help1.ddl
index d2a8715..01e39af 100644
--- a/tools/test/h5stat/testfiles/h5stat_help1.ddl
+++ b/tools/test/h5stat/testfiles/h5stat_help1.ddl
@@ -22,3 +22,4 @@ Usage: h5stat [OPTIONS] file
than 0. The default threshold is 10.
-s, --freespace Print free space information
-S, --summary Print summary of file space information
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur
diff --git a/tools/test/h5stat/testfiles/h5stat_help2.ddl b/tools/test/h5stat/testfiles/h5stat_help2.ddl
index d2a8715..01e39af 100644
--- a/tools/test/h5stat/testfiles/h5stat_help2.ddl
+++ b/tools/test/h5stat/testfiles/h5stat_help2.ddl
@@ -22,3 +22,4 @@ Usage: h5stat [OPTIONS] file
than 0. The default threshold is 10.
-s, --freespace Print free space information
-S, --summary Print summary of file space information
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur
diff --git a/tools/test/h5stat/testfiles/h5stat_nofile.ddl b/tools/test/h5stat/testfiles/h5stat_nofile.ddl
index d8a8b2c..7171320 100644
--- a/tools/test/h5stat/testfiles/h5stat_nofile.ddl
+++ b/tools/test/h5stat/testfiles/h5stat_nofile.ddl
@@ -22,4 +22,5 @@ Usage: h5stat [OPTIONS] file
than 0. The default threshold is 10.
-s, --freespace Print free space information
-S, --summary Print summary of file space information
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur
h5stat error: missing file name
diff --git a/tools/test/h5stat/testh5stat.sh.in b/tools/test/h5stat/testh5stat.sh.in
index ca7ca4c..5082daf 100644
--- a/tools/test/h5stat/testh5stat.sh.in
+++ b/tools/test/h5stat/testh5stat.sh.in
@@ -69,6 +69,9 @@ test -d $TESTDIR || mkdir $TESTDIR
# Comment '#' without space can be used.
# --------------------------------------------------------------------
LIST_HDF5_TEST_FILES="
+$SRC_H5STAT_TESTFILES/h5stat_err_refcount.h5
+$SRC_H5STAT_TESTFILES/h5stat_err_old_layout.h5
+$SRC_H5STAT_TESTFILES/h5stat_err_old_fill.h5
$SRC_H5STAT_TESTFILES/h5stat_filters.h5
$SRC_H5STAT_TESTFILES/h5stat_tsohm.h5
$SRC_H5STAT_TESTFILES/h5stat_newgrat.h5
@@ -77,6 +80,9 @@ $SRC_H5STAT_TESTFILES/h5stat_threshold.h5
"
LIST_OTHER_TEST_FILES="
+$SRC_H5STAT_TESTFILES/h5stat_err_refcount.ddl
+$SRC_H5STAT_TESTFILES/h5stat_err_old_layout.ddl
+$SRC_H5STAT_TESTFILES/h5stat_err_old_fill.ddl
$SRC_H5STAT_TESTFILES/h5stat_help1.ddl
$SRC_H5STAT_TESTFILES/h5stat_help2.ddl
$SRC_H5STAT_TESTFILES/h5stat_notexist.ddl
@@ -243,6 +249,7 @@ TOOLTEST h5stat_help2.ddl --help
TOOLTEST h5stat_notexist.ddl notexist.h5
TOOLTEST h5stat_nofile.ddl ''
+
# Test file with groups, compressed datasets, user-applied filters, etc.
# h5stat_filters.h5 is a copy of ../../testfiles/tfilters.h5 as of release 1.8.0-alpha4
TOOLTEST h5stat_filters.ddl h5stat_filters.h5
@@ -304,7 +311,15 @@ TOOLTEST h5stat_numattrs3.ddl -A --numattrs=25 h5stat_threshold.h5
# -A -a 100
TOOLTEST h5stat_numattrs4.ddl -A -a 100 h5stat_newgrat.h5
#
-
+#
+# Tests to verify HDFFV-10333
+# h5stat_err_refcount.h5 is generated by h5stat_gentest.c
+# h5stat_err_old_layout.h5 and h5stat_err_old_fill.h5: see explanation in h5stat_gentest.c
+TOOLTEST h5stat_err_refcount.ddl h5stat_err_refcount.h5
+TOOLTEST h5stat_err_old_layout.ddl h5stat_err_old_layout.h5
+TOOLTEST h5stat_err_old_fill.ddl h5stat_err_old_fill.h5
+#
+#
# Clean up temporary files/directories
CLEAN_TESTFILES_AND_TESTDIR
diff --git a/tools/testfiles/err_attr_dspace.ddl b/tools/testfiles/err_attr_dspace.ddl
new file mode 100644
index 0000000..6c45322
--- /dev/null
+++ b/tools/testfiles/err_attr_dspace.ddl
@@ -0,0 +1,5 @@
+HDF5 "err_attr_dspace.h5" {
+GROUP "/" {
+}
+}
+h5dump error: error getting attribute information
diff --git a/tools/testfiles/err_attr_dspace.h5 b/tools/testfiles/err_attr_dspace.h5
new file mode 100644
index 0000000..969c328
--- /dev/null
+++ b/tools/testfiles/err_attr_dspace.h5
Binary files differ