author    Binh-Minh Ribler <bmribler@hdfgroup.org>    2019-09-04 07:09:35 (GMT)
committer Binh-Minh Ribler <bmribler@hdfgroup.org>    2019-09-04 07:09:35 (GMT)
commit    339bf241bebfc7b9634e1880899acc23b055949a (patch)
tree      ccc79bc92d7b6c62752bc0036a2e599f75b7deda
parent    714804b28018c8096f65debbbb9f57f7dab2a7f6 (diff)
parent    b1dab421dec1b7be64ef2c804a7e0e9b68630fc9 (diff)
Merge branch 'develop' of https://bitbucket.hdfgroup.org/scm/~bmribler/hdf5-bmr into develop
-rw-r--r--  MANIFEST                                             |    1
-rw-r--r--  config/cmake/CMakeFindJavaCommon.cmake               |    2
-rw-r--r--  config/cmake/UseJava.cmake                           |    2
-rw-r--r--  config/cmake/hdf5-config.cmake.in                    |    6
-rw-r--r--  config/cmake/jrunTest.cmake                          |   10
-rw-r--r--  config/cmake/vfdTest.cmake                           |    8
-rw-r--r--  config/cmake/volTest.cmake                           |    8
-rw-r--r--  config/cmake_ext_mod/grepTest.cmake                  |    8
-rw-r--r--  config/cmake_ext_mod/runTest.cmake                   |   10
-rw-r--r--  config/toolchain/build32.cmake                       |   39
-rw-r--r--  java/src/hdf/hdf5lib/structs/H5FD_hdfs_fapl_t.java   |    6
-rw-r--r--  src/CMakeLists.txt                                   |    2
-rw-r--r--  src/H5Cmpio.c                                        |    4
-rw-r--r--  src/H5Fint.c                                         |   46
-rw-r--r--  src/H5Fpkg.h                                         |   13
-rw-r--r--  src/H5Fprivate.h                                     |    4
-rw-r--r--  src/H5Fquery.c                                       |    6
-rw-r--r--  test/CMakeLists.txt                                  |   15
-rw-r--r--  test/flushrefreshTest.cmake                          |   14
-rw-r--r--  test/h5test.c                                        |    2
-rw-r--r--  testpar/CMakeLists.txt                               |    4
-rw-r--r--  testpar/CMakeTests.cmake                             |    3
-rw-r--r--  testpar/t_file.c                                     |    5
-rw-r--r--  testpar/t_mdset.c                                    |  592
-rw-r--r--  tools/lib/h5tools_dump.c                             |   26
-rw-r--r--  tools/src/h5dump/h5dump_ddl.c                        |    8
-rw-r--r--  tools/test/perform/iopipe.c                          |   14
-rw-r--r--  tools/test/perform/overhead.c                        |    6
-rw-r--r--  tools/test/perform/perf_meta.c                       |   28
-rw-r--r--  tools/test/perform/pio_engine.c                      |   90
-rw-r--r--  tools/test/perform/pio_perf.c                        |  320
-rw-r--r--  tools/test/perform/pio_standalone.h                  |    2
-rw-r--r--  tools/test/perform/sio_engine.c                      |   46
-rw-r--r--  tools/test/perform/sio_perf.c                        |  234
-rw-r--r--  tools/test/perform/sio_standalone.h                  |    2
-rw-r--r--  tools/testfiles/tudlink-2.ddl                        |    1
36 files changed, 815 insertions, 772 deletions
diff --git a/MANIFEST b/MANIFEST
index 10bffba..2e0022e 100644
--- a/MANIFEST
+++ b/MANIFEST
@@ -3264,6 +3264,7 @@
./java/lib/ext/slf4j-simple-1.7.25.jar
# CMake-specific Files
+./config/toolchain/build32.cmake
./config/toolchain/crayle.cmake
./config/toolchain/GCC.cmake
./config/toolchain/intel.cmake
diff --git a/config/cmake/CMakeFindJavaCommon.cmake b/config/cmake/CMakeFindJavaCommon.cmake
index eabb622..528791d 100644
--- a/config/cmake/CMakeFindJavaCommon.cmake
+++ b/config/cmake/CMakeFindJavaCommon.cmake
@@ -16,7 +16,7 @@ else()
set(_JAVA_HOME_EXPLICIT 1)
else()
set(_CMD_JAVA_HOME "")
- if(APPLE AND EXISTS /usr/libexec/java_home)
+ if(APPLE AND EXISTS "/usr/libexec/java_home")
execute_process(COMMAND /usr/libexec/java_home
OUTPUT_VARIABLE _CMD_JAVA_HOME OUTPUT_STRIP_TRAILING_WHITESPACE)
endif()
diff --git a/config/cmake/UseJava.cmake b/config/cmake/UseJava.cmake
index 1f6b04c..8efee11 100644
--- a/config/cmake/UseJava.cmake
+++ b/config/cmake/UseJava.cmake
@@ -634,7 +634,7 @@ function(add_jar _TARGET_NAME)
)
else ()
# create an empty java_class_filelist
- if (NOT EXISTS ${CMAKE_JAVA_CLASS_OUTPUT_PATH}/java_class_filelist)
+ if (NOT EXISTS "${CMAKE_JAVA_CLASS_OUTPUT_PATH}/java_class_filelist")
file(WRITE ${CMAKE_JAVA_CLASS_OUTPUT_PATH}/java_class_filelist "")
endif()
endif ()
diff --git a/config/cmake/hdf5-config.cmake.in b/config/cmake/hdf5-config.cmake.in
index a0d6c49..afb2a5c 100644
--- a/config/cmake/hdf5-config.cmake.in
+++ b/config/cmake/hdf5-config.cmake.in
@@ -191,16 +191,16 @@ foreach (libtype IN LISTS ${HDF5_PACKAGE_NAME}_LIB_TYPE)
else ()
set (${HDF5_PACKAGE_NAME}_${libtype}_${comp}_FOUND 1)
string(TOUPPER ${HDF5_PACKAGE_NAME}_${comp}_${libtype}_LIBRARY COMP_LIBRARY)
- set (${COMP_LIBRARY} ${${COMP_LIBRARY}} @HDF5_PACKAGE@::${hdf5_comp2}-${libtype} @HDF5_PACKAGE@::${hdf5_comp}-${libtype})
+ set (${COMP_LIBRARY} ${${COMP_LIBRARY}} @HDF_PACKAGE_NAMESPACE@${hdf5_comp2}-${libtype} @HDF_PACKAGE_NAMESPACE@${hdf5_comp}-${libtype})
endif ()
elseif (comp STREQUAL "Java")
set (${HDF5_PACKAGE_NAME}_${comp}_FOUND 1)
string(TOUPPER ${HDF5_PACKAGE_NAME}_${comp}_LIBRARY COMP_LIBRARY)
- set (${COMP_LIBRARY} ${${COMP_LIBRARY}} @HDF5_PACKAGE@::${hdf5_comp})
+ set (${COMP_LIBRARY} ${${COMP_LIBRARY}} @HDF_PACKAGE_NAMESPACE@${hdf5_comp})
else ()
set (${HDF5_PACKAGE_NAME}_${libtype}_${comp}_FOUND 1)
string(TOUPPER ${HDF5_PACKAGE_NAME}_${comp}_${libtype}_LIBRARY COMP_LIBRARY)
- set (${COMP_LIBRARY} ${${COMP_LIBRARY}} @HDF5_PACKAGE@::${hdf5_comp}-${libtype})
+ set (${COMP_LIBRARY} ${${COMP_LIBRARY}} @HDF_PACKAGE_NAMESPACE@${hdf5_comp}-${libtype})
endif ()
endif ()
endforeach ()
diff --git a/config/cmake/jrunTest.cmake b/config/cmake/jrunTest.cmake
index fa687f5..e736b7a 100644
--- a/config/cmake/jrunTest.cmake
+++ b/config/cmake/jrunTest.cmake
@@ -33,11 +33,11 @@ if (NOT TEST_CLASSPATH)
message (STATUS "Require TEST_CLASSPATH to be defined")
endif ()
-if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT})
+if (EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}")
file (REMOVE ${TEST_FOLDER}/${TEST_OUTPUT})
endif ()
-if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}.err)
+if (EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}.err")
file (REMOVE ${TEST_FOLDER}/${TEST_OUTPUT}.err)
endif ()
@@ -73,7 +73,7 @@ execute_process (
message (STATUS "COMMAND Result: ${TEST_RESULT}")
# if the .err file exists and ERRROR_APPEND is enabled
-if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}.err)
+if (EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}.err")
file (READ ${TEST_FOLDER}/${TEST_OUTPUT}.err TEST_STREAM)
if (TEST_MASK_FILE)
STRING(REGEX REPLACE "CurrentDir is [^\n]+\n" "CurrentDir is (dir name)\n" TEST_STREAM "${TEST_STREAM}")
@@ -123,7 +123,7 @@ message (STATUS "COMMAND Error: ${TEST_ERROR}")
# compare output files to references unless this must be skipped
if (NOT TEST_SKIP_COMPARE)
- if (EXISTS ${TEST_FOLDER}/${TEST_REFERENCE})
+ if (EXISTS "${TEST_FOLDER}/${TEST_REFERENCE}")
if (WIN32 OR MINGW)
configure_file(${TEST_FOLDER}/${TEST_REFERENCE} ${TEST_FOLDER}/${TEST_REFERENCE}.tmp NEWLINE_STYLE CRLF)
file(RENAME ${TEST_FOLDER}/${TEST_REFERENCE}.tmp ${TEST_FOLDER}/${TEST_REFERENCE})
@@ -254,7 +254,7 @@ if (TEST_GREP_COMPARE)
string (REGEX MATCH "${TEST_FILTER}" TEST_MATCH ${TEST_STREAM})
if (TEST_EXPECT)
- # TEST_EXPECT (1) interperts TEST_FILTER as NOT to match
+ # TEST_EXPECT (1) interprets TEST_FILTER as; NOT to match
string (LENGTH "${TEST_MATCH}" TEST_RESULT)
if (TEST_RESULT)
message (FATAL_ERROR "Failed: The output of ${TEST_PROGRAM} did contain ${TEST_FILTER}")
diff --git a/config/cmake/vfdTest.cmake b/config/cmake/vfdTest.cmake
index 3556d07..12ee40b 100644
--- a/config/cmake/vfdTest.cmake
+++ b/config/cmake/vfdTest.cmake
@@ -23,11 +23,11 @@ if (NOT TEST_VFD)
message (FATAL_ERROR "Require TEST_VFD to be defined")
endif ()
-if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT})
+if (EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}")
file (REMOVE ${TEST_FOLDER}/${TEST_OUTPUT})
endif ()
-if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}.err)
+if (EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}.err")
file (REMOVE ${TEST_FOLDER}/${TEST_OUTPUT}.err)
endif ()
@@ -54,7 +54,7 @@ execute_process (
message (STATUS "COMMAND Result: ${TEST_RESULT}")
# if the .err file exists and ERRROR_APPEND is enabled
-if (ERROR_APPEND AND EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}_${TEST_VFD}.err)
+if (ERROR_APPEND AND EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}_${TEST_VFD}.err")
file (READ ${TEST_FOLDER}/${TEST_OUTPUT}_${TEST_VFD}.err TEST_STREAM)
file (APPEND ${TEST_FOLDER}/${TEST_OUTPUT}_${TEST_VFD}.out "${TEST_STREAM}")
endif ()
@@ -62,7 +62,7 @@ endif ()
# if the return value is !=${TEST_EXPECT} bail out
if (NOT TEST_RESULT EQUAL TEST_EXPECT)
if (NOT TEST_NOERRDISPLAY)
- if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}_${TEST_VFD}.out)
+ if (EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}_${TEST_VFD}.out")
file (READ ${TEST_FOLDER}/${TEST_OUTPUT}_${TEST_VFD}.out TEST_STREAM)
message (STATUS "Output USING ${TEST_VFD}:\n${TEST_STREAM}")
endif ()
diff --git a/config/cmake/volTest.cmake b/config/cmake/volTest.cmake
index 6554f00..1dcd2b1 100644
--- a/config/cmake/volTest.cmake
+++ b/config/cmake/volTest.cmake
@@ -23,11 +23,11 @@ if (NOT TEST_VOL)
message (FATAL_ERROR "Require TEST_VOL to be defined")
endif ()
-if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT})
+if (EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}")
file (REMOVE ${TEST_FOLDER}/${TEST_OUTPUT})
endif ()
-if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}.err)
+if (EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}.err")
file (REMOVE ${TEST_FOLDER}/${TEST_OUTPUT}.err)
endif ()
@@ -54,7 +54,7 @@ execute_process (
message (STATUS "COMMAND Result: ${TEST_RESULT}")
# if the .err file exists and ERRROR_APPEND is enabled
-if (ERROR_APPEND AND EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}.err)
+if (ERROR_APPEND AND EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}.err")
file (READ ${TEST_FOLDER}/${TEST_OUTPUT}.err TEST_STREAM)
file (APPEND ${TEST_FOLDER}/${TEST_OUTPUT}.out "${TEST_STREAM}")
endif ()
@@ -62,7 +62,7 @@ endif ()
# if the return value is !=${TEST_EXPECT} bail out
if (NOT TEST_RESULT EQUAL TEST_EXPECT)
if (NOT TEST_NOERRDISPLAY)
- if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}.out)
+ if (EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}.out")
file (READ ${TEST_FOLDER}/${TEST_OUTPUT}.out TEST_STREAM)
message (STATUS "Output USING ${TEST_VOL}:\n${TEST_STREAM}")
endif ()
diff --git a/config/cmake_ext_mod/grepTest.cmake b/config/cmake_ext_mod/grepTest.cmake
index ab00e70..78ee0da 100644
--- a/config/cmake_ext_mod/grepTest.cmake
+++ b/config/cmake_ext_mod/grepTest.cmake
@@ -35,11 +35,11 @@ if (NOT TEST_REFERENCE)
message (FATAL_ERROR "Require TEST_REFERENCE to be defined")
endif ()
-if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT})
+if (EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}")
file (REMOVE ${TEST_FOLDER}/${TEST_OUTPUT})
endif ()
-if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}.err)
+if (EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}.err")
file (REMOVE ${TEST_FOLDER}/${TEST_OUTPUT}.err)
endif ()
@@ -84,7 +84,7 @@ endif ()
# if the TEST_ERRREF exists grep the error output with the error reference
if (TEST_ERRREF)
# if the .err file exists grep the error output with the error reference before comparing stdout
- if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}.err)
+ if (EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}.err")
file (READ ${TEST_FOLDER}/${TEST_OUTPUT}.err TEST_ERR_STREAM)
# TEST_ERRREF should always be matched
@@ -97,7 +97,7 @@ if (TEST_ERRREF)
#always compare output file to reference unless this must be skipped
if (NOT TEST_SKIP_COMPARE)
- if (EXISTS ${TEST_FOLDER}/${TEST_REFERENCE})
+ if (EXISTS "${TEST_FOLDER}/${TEST_REFERENCE}")
if (WIN32 OR MINGW)
configure_file(${TEST_FOLDER}/${TEST_REFERENCE} ${TEST_FOLDER}/${TEST_REFERENCE}.tmp NEWLINE_STYLE CRLF)
file(RENAME ${TEST_FOLDER}/${TEST_REFERENCE}.tmp ${TEST_FOLDER}/${TEST_REFERENCE})
diff --git a/config/cmake_ext_mod/runTest.cmake b/config/cmake_ext_mod/runTest.cmake
index 21c8c18..e601653 100644
--- a/config/cmake_ext_mod/runTest.cmake
+++ b/config/cmake_ext_mod/runTest.cmake
@@ -27,11 +27,11 @@ if (NOT TEST_EXPECT)
message (STATUS "Require TEST_EXPECT to be defined")
endif ()
-if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT})
+if (EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}")
file (REMOVE ${TEST_FOLDER}/${TEST_OUTPUT})
endif ()
-if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}.err)
+if (EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}.err")
file (REMOVE ${TEST_FOLDER}/${TEST_OUTPUT}.err)
endif ()
@@ -88,7 +88,7 @@ endif ()
message (STATUS "COMMAND Result: ${TEST_RESULT}")
# if the .err file exists and ERRROR_APPEND is enabled
-if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}.err)
+if (EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}.err")
file (READ ${TEST_FOLDER}/${TEST_OUTPUT}.err TEST_STREAM)
if (TEST_MASK_FILE)
STRING(REGEX REPLACE "CurrentDir is [^\n]+\n" "CurrentDir is (dir name)\n" TEST_STREAM "${TEST_STREAM}")
@@ -113,7 +113,7 @@ endif ()
# if the return value is !=${TEST_EXPECT} bail out
if (NOT TEST_RESULT EQUAL TEST_EXPECT)
if (NOT TEST_NOERRDISPLAY)
- if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT})
+ if (EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}")
file (READ ${TEST_FOLDER}/${TEST_OUTPUT} TEST_STREAM)
message (STATUS "Output :\n${TEST_STREAM}")
endif ()
@@ -204,7 +204,7 @@ endif ()
# compare output files to references unless this must be skipped
if (NOT TEST_SKIP_COMPARE)
- if (EXISTS ${TEST_FOLDER}/${TEST_REFERENCE})
+ if (EXISTS "${TEST_FOLDER}/${TEST_REFERENCE}")
if (WIN32 OR MINGW)
configure_file(${TEST_FOLDER}/${TEST_REFERENCE} ${TEST_FOLDER}/${TEST_REFERENCE}.tmp NEWLINE_STYLE CRLF)
file(RENAME ${TEST_FOLDER}/${TEST_REFERENCE}.tmp ${TEST_FOLDER}/${TEST_REFERENCE})
diff --git a/config/toolchain/build32.cmake b/config/toolchain/build32.cmake
new file mode 100644
index 0000000..d078956
--- /dev/null
+++ b/config/toolchain/build32.cmake
@@ -0,0 +1,39 @@
+if (WIN32)
+ set (CMAKE_SYSTEM_NAME Windows)
+ set (CMAKE_GENERATOR_PLATFORM "x86")
+elseif(APPLE)
+ set (CMAKE_OSX_ARCHITECTURES "i386")
+else ()
+ set (CMAKE_SYSTEM_NAME Linux)
+
+ set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -m32" CACHE STRING "c++ flags")
+ set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -m32" CACHE STRING "c flags")
+
+ set (LIB32 /usr/lib) # Fedora
+
+ if (EXISTS "/usr/lib32")
+ set (LIB32 /usr/lib32) # Arch, Solus
+ endif ()
+
+ set (CMAKE_SYSTEM_LIBRARY_PATH ${LIB32} CACHE STRING "system library search path" FORCE)
+ set (CMAKE_LIBRARY_PATH ${LIB32} CACHE STRING "library search path" FORCE)
+
+ # this is probably unlikely to be needed, but just in case
+ set (CMAKE_EXE_LINKER_FLAGS "-m32 -L${LIB32}" CACHE STRING "executable linker flags" FORCE)
+ set (CMAKE_SHARED_LINKER_FLAGS "-m32 -L${LIB32}" CACHE STRING "shared library linker flags" FORCE)
+ set (CMAKE_MODULE_LINKER_FLAGS "-m32 -L${LIB32}" CACHE STRING "module linker flags" FORCE)
+
+ # on Fedora and Arch and similar, point pkgconfig at 32 bit .pc files. We have
+ # to include the regular system .pc files as well (at the end), because some
+ # are not always present in the 32 bit directory
+ if (EXISTS "${LIB32}/pkgconfig")
+ set (ENV{PKG_CONFIG_LIBDIR} ${LIB32}/pkgconfig:/usr/share/pkgconfig:/usr/lib/pkgconfig:/usr/lib64/pkgconfig)
+ endif ()
+# where is the target environment
+ set (CMAKE_FIND_ROOT_PATH ${LIB32})
+# search for programs in the build host directories
+ set (CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
+# for libraries and headers in the target directories
+ set (CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
+ set (CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
+endif ()
\ No newline at end of file
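Usage note: the new config/toolchain/build32.cmake above is a standard CMake toolchain file and is selected at configure time. A minimal sketch of how it would typically be invoked (the source/build paths are illustrative and a 32-bit multilib-capable compiler is assumed; neither is part of this change):

    cmake -DCMAKE_TOOLCHAIN_FILE=/path/to/hdf5-source/config/toolchain/build32.cmake \
          -DCMAKE_BUILD_TYPE=Release /path/to/hdf5-source
    cmake --build .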
diff --git a/java/src/hdf/hdf5lib/structs/H5FD_hdfs_fapl_t.java b/java/src/hdf/hdf5lib/structs/H5FD_hdfs_fapl_t.java
index f56a038..c13473c 100644
--- a/java/src/hdf/hdf5lib/structs/H5FD_hdfs_fapl_t.java
+++ b/java/src/hdf/hdf5lib/structs/H5FD_hdfs_fapl_t.java
@@ -17,7 +17,7 @@ package hdf.hdf5lib.structs;
import java.io.Serializable;
/*
- * Java representation of the HDFS VFD file access property list (fapl)
+ * Java representation of the HDFS VFD file access property list (fapl)
* structure.
*
* Used for the access of files hosted on the Hadoop Distributed File System.
@@ -33,7 +33,7 @@ public class H5FD_hdfs_fapl_t implements Serializable {
private int namenode_port;
private int stream_buffer_size;
- /**
+ /*
* Create a fapl_t structure with the specified components.
*/
public H5FD_hdfs_fapl_t(
@@ -91,7 +91,7 @@ public class H5FD_hdfs_fapl_t implements Serializable {
public String toString() {
return "H5FD_hdfs_fapl_t (Version: " + this.version + ") {" +
"\n namenode_name: '" + this.namenode_name +
- "'\n namenode_port: " + this.namenode_port +
+ "'\n namenode_port: " + this.namenode_port +
"\n user_name: '" + this.user_name +
"'\n kerberos_ticket_cache: '" + this.kerberos_ticket_cache +
"'\n stream_buffer_size: " + this.stream_buffer_size +
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 313d231..97ae847 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -990,7 +990,7 @@ if (LOCAL_BATCH_TEST)
endif ()
endif ()
-if (NOT EXISTS ${HDF5_GENERATED_SOURCE_DIR}/H5Tinit.c)
+if (NOT EXISTS "${HDF5_GENERATED_SOURCE_DIR}/H5Tinit.c")
add_executable (H5detect ${HDF5_SRC_DIR}/H5detect.c)
target_include_directories (H5detect PRIVATE "${HDF5_SRC_DIR};${HDF5_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>")
target_compile_definitions(H5detect PUBLIC ${HDF_EXTRA_C_FLAGS} ${HDF_EXTRA_FLAGS})
diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c
index 84ec16c..e784487 100644
--- a/src/H5Cmpio.c
+++ b/src/H5Cmpio.c
@@ -222,7 +222,7 @@ H5C_apply_candidate_list(H5F_t * f,
HDfprintf(stdout, "%s", tbl_buf);
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
- if(f->coll_md_write) {
+ if(f->shared->coll_md_write) {
/* Sanity check */
HDassert(NULL == cache_ptr->coll_write_list);
@@ -386,7 +386,7 @@ H5C_apply_candidate_list(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush candidates failed")
/* If we've deferred writing to do it collectively, take care of that now */
- if(f->coll_md_write) {
+ if(f->shared->coll_md_write) {
/* Sanity check */
HDassert(cache_ptr->coll_write_list);
diff --git a/src/H5Fint.c b/src/H5Fint.c
index c56bd10..5e2cf26 100644
--- a/src/H5Fint.c
+++ b/src/H5Fint.c
@@ -246,9 +246,9 @@ H5F_get_access_plist(H5F_t *f, hbool_t app_ref)
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, H5I_INVALID_HID, "can't set minimum raw data fraction of page buffer")
} /* end if */
#ifdef H5_HAVE_PARALLEL
- if(H5P_set(new_plist, H5_COLL_MD_READ_FLAG_NAME, &(f->coll_md_read)) < 0)
+ if(H5P_set(new_plist, H5_COLL_MD_READ_FLAG_NAME, &(f->shared->coll_md_read)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, H5I_INVALID_HID, "can't set collective metadata read flag")
- if(H5P_set(new_plist, H5F_ACS_COLL_MD_WRITE_FLAG_NAME, &(f->coll_md_write)) < 0)
+ if(H5P_set(new_plist, H5F_ACS_COLL_MD_WRITE_FLAG_NAME, &(f->shared->coll_md_write)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, H5I_INVALID_HID, "can't set collective metadata read flag")
#endif /* H5_HAVE_PARALLEL */
if(H5P_set(new_plist, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_NAME, &(f->shared->mdc_initCacheImageCfg)) < 0)
@@ -1020,9 +1020,9 @@ H5F__new(H5F_shared_t *shared, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5F
if(NULL == (f->shared->efc = H5F__efc_create(efc_size)))
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "can't create external file cache")
#ifdef H5_HAVE_PARALLEL
- if(H5P_get(plist, H5_COLL_MD_READ_FLAG_NAME, &(f->coll_md_read)) < 0)
+ if(H5P_get(plist, H5_COLL_MD_READ_FLAG_NAME, &(f->shared->coll_md_read)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get collective metadata read flag")
- if(H5P_get(plist, H5F_ACS_COLL_MD_WRITE_FLAG_NAME, &(f->coll_md_write)) < 0)
+ if(H5P_get(plist, H5F_ACS_COLL_MD_WRITE_FLAG_NAME, &(f->shared->coll_md_write)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get collective metadata write flag")
#endif /* H5_HAVE_PARALLEL */
if(H5P_get(plist, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_NAME, &(f->shared->mdc_initCacheImageCfg)) < 0)
@@ -1387,6 +1387,9 @@ H5F__dest(H5F_t *f, hbool_t flush)
f->shared->mtab.child = (H5F_mount_t *)H5MM_xfree(f->shared->mtab.child);
f->shared->mtab.nalloc = 0;
+ /* Free the external link file */
+ f->shared->extpath = (char *)H5MM_xfree(f->shared->extpath);
+
/* Clean up the metadata retries array */
for(actype = 0; actype < (int)H5AC_NTYPES; actype++)
if(f->shared->retries[actype])
@@ -1407,7 +1410,6 @@ H5F__dest(H5F_t *f, hbool_t flush)
/* Free the non-shared part of the file */
f->open_name = (char *)H5MM_xfree(f->open_name);
f->actual_name = (char *)H5MM_xfree(f->actual_name);
- f->extpath = (char *)H5MM_xfree(f->extpath);
if(H5FO_top_dest(f) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "problems closing file")
f->shared = NULL;
@@ -1653,7 +1655,7 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id)
if(page_buf_size) {
#ifdef H5_HAVE_PARALLEL
/* Collective metadata writes are not supported with page buffering */
- if(file->coll_md_write)
+ if(file->shared->coll_md_write)
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "collective metadata writes are not supported with page buffering")
/* Temporary: fail file create when page buffering feature is enabled for parallel */
@@ -1753,8 +1755,10 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id)
} /* end if */
/* Formulate the absolute path for later search of target file for external links */
- if(H5_build_extpath(name, &file->extpath) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "unable to build extpath")
+ if(shared->nrefs == 1) {
+ if(H5_build_extpath(name, &file->shared->extpath) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "unable to build extpath")
+ }
/* Formulate the actual file name, after following symlinks, etc. */
if(H5F__build_actual_name(file, a_plist, name, &file->actual_name) < 0)
@@ -2227,7 +2231,6 @@ H5F__reopen(H5F_t *f)
/* Duplicate old file's names */
ret_value->open_name = H5MM_xstrdup(f->open_name);
ret_value->actual_name = H5MM_xstrdup(f->actual_name);
- ret_value->extpath = H5MM_xstrdup(f->extpath);
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -3155,31 +3158,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5F__get_max_eof_eoa() */
-#ifdef H5_HAVE_PARALLEL
-
-/*-------------------------------------------------------------------------
- * Function: H5F_set_coll_md_read
- *
- * Purpose: Set the coll_md_read field with a new value.
- *
- * Return: SUCCEED/FAIL
- *-------------------------------------------------------------------------
- */
-void
-H5F_set_coll_md_read(H5F_t *f, H5P_coll_md_read_flag_t cmr)
-{
- /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- /* Sanity check */
- HDassert(f);
-
- f->coll_md_read = cmr;
-
- FUNC_LEAVE_NOAPI_VOID
-} /* H5F_set_coll_md_read() */
-#endif /* H5_HAVE_PARALLEL */
-
/*-------------------------------------------------------------------------
* Function: H5F_get_metadata_read_retry_info
diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h
index 7e89111..6cd2d3c 100644
--- a/src/H5Fpkg.h
+++ b/src/H5Fpkg.h
@@ -358,6 +358,14 @@ struct H5F_shared_t {
/* Object flush info */
H5F_object_flush_t object_flush; /* Information for object flush callback */
hbool_t crt_dset_min_ohdr_flag; /* flag to minimize created dataset object header */
+
+ char *extpath; /* Path for searching target external link file */
+
+#ifdef H5_HAVE_PARALLEL
+ H5P_coll_md_read_flag_t coll_md_read; /* Do all metadata reads collectively */
+ hbool_t coll_md_write; /* Do all metadata writes collectively */
+#endif /* H5_HAVE_PARALLEL */
+
};
/*
@@ -368,7 +376,6 @@ struct H5F_shared_t {
struct H5F_t {
char *open_name; /* Name used to open file */
char *actual_name; /* Actual name of the file, after resolving symlinks, etc. */
- char *extpath; /* Path for searching target external link file */
H5F_shared_t *shared; /* The shared file info */
unsigned nopen_objs; /* Number of open object headers */
H5FO_t *obj_count; /* # of time each object is opened through top file structure */
@@ -376,10 +383,6 @@ struct H5F_t {
hbool_t closing; /* File is in the process of being closed */
struct H5F_t *parent; /* Parent file that this file is mounted to */
unsigned nmounts; /* Number of children mounted to this file */
-#ifdef H5_HAVE_PARALLEL
- H5P_coll_md_read_flag_t coll_md_read; /* Do all metadata reads collectively */
- hbool_t coll_md_write; /* Do all metadata writes collectively */
-#endif /* H5_HAVE_PARALLEL */
};
/*****************************/
diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h
index 0a7daa0..8c70663 100644
--- a/src/H5Fprivate.h
+++ b/src/H5Fprivate.h
@@ -282,7 +282,7 @@ typedef struct H5F_t H5F_t;
#define H5F_INTENT(F) ((F)->shared->flags)
#define H5F_OPEN_NAME(F) ((F)->open_name)
#define H5F_ACTUAL_NAME(F) ((F)->actual_name)
-#define H5F_EXTPATH(F) ((F)->extpath)
+#define H5F_EXTPATH(F) ((F)->shared->extpath)
#define H5F_SHARED(F) ((F)->shared)
#define H5F_SAME_SHARED(F1, F2) ((F1)->shared == (F2)->shared)
#define H5F_NOPEN_OBJS(F) ((F)->nopen_objs)
@@ -323,7 +323,7 @@ typedef struct H5F_t H5F_t;
#define H5F_USE_TMP_SPACE(F) ((F)->shared->fs.use_tmp_space)
#define H5F_IS_TMP_ADDR(F, ADDR) (H5F_addr_le((F)->shared->fs.tmp_addr, (ADDR)))
#ifdef H5_HAVE_PARALLEL
-#define H5F_COLL_MD_READ(F) ((F)->coll_md_read)
+#define H5F_COLL_MD_READ(F) ((F)->shared->coll_md_read)
#endif /* H5_HAVE_PARALLEL */
#define H5F_USE_MDC_LOGGING(F) ((F)->shared->use_mdc_logging)
#define H5F_START_MDC_LOG_ON_ACCESS(F) ((F)->shared->start_mdc_log_on_access)
diff --git a/src/H5Fquery.c b/src/H5Fquery.c
index c885561..f36f348 100644
--- a/src/H5Fquery.c
+++ b/src/H5Fquery.c
@@ -229,9 +229,9 @@ H5F_get_extpath(const H5F_t *f)
FUNC_ENTER_NOAPI_NOINIT_NOERR
HDassert(f);
- HDassert(f->extpath);
+ HDassert(f->shared->extpath);
- FUNC_LEAVE_NOAPI(f->extpath)
+ FUNC_LEAVE_NOAPI(f->shared->extpath)
} /* end H5F_get_extpath() */
@@ -1095,7 +1095,7 @@ H5F_coll_md_read(const H5F_t *f)
HDassert(f);
- FUNC_LEAVE_NOAPI(f->coll_md_read)
+ FUNC_LEAVE_NOAPI(f->shared->coll_md_read)
} /* end H5F_coll_md_read() */
#endif /* H5_HAVE_PARALLEL */
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index c1ac44c..8184d7f 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -242,7 +242,7 @@ set (H5_TESTS
page_buffer
dtypes
dsets
- chunk_info
+ chunk_info # compression lib link
cmpd_dset
filter_fail
extend
@@ -302,6 +302,7 @@ macro (ADD_H5_EXE file)
endmacro ()
set (H5_TESTS_MULTIPLE
+ chunk_info
direct_chunk
testhdf5
cache_image
@@ -318,6 +319,18 @@ endforeach ()
### M U L T I P L E S O U R C E T E S T S ###
##############################################################################
######### Also special handling of link libs #############
+#-- Adding test for chunk_info
+add_executable (chunk_info ${HDF5_TEST_SOURCE_DIR}/chunk_info.c)
+target_include_directories (chunk_info PRIVATE "${HDF5_SRC_DIR};${HDF5_BINARY_DIR};${HDF5_TEST_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>")
+if (NOT BUILD_SHARED_LIBS)
+ TARGET_C_PROPERTIES (chunk_info STATIC)
+ target_link_libraries (chunk_info PRIVATE ${HDF5_TEST_LIB_TARGET} ${LINK_COMP_LIBS})
+else ()
+ TARGET_C_PROPERTIES (chunk_info SHARED)
+ target_link_libraries (chunk_info PRIVATE ${HDF5_TEST_LIBSH_TARGET} ${LINK_COMP_LIBS})
+endif ()
+set_target_properties (chunk_info PROPERTIES FOLDER test)
+
#-- Adding test for direct_chunk
add_executable (direct_chunk ${HDF5_TEST_SOURCE_DIR}/direct_chunk.c)
target_include_directories (direct_chunk PRIVATE "${HDF5_SRC_DIR};${HDF5_BINARY_DIR};${HDF5_TEST_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>")
diff --git a/test/flushrefreshTest.cmake b/test/flushrefreshTest.cmake
index 01faad5..6faf37b 100644
--- a/test/flushrefreshTest.cmake
+++ b/test/flushrefreshTest.cmake
@@ -30,11 +30,11 @@ if (NOT PERL_EXECUTABLE)
message (STATUS "Require PERL_EXECUTABLE to be defined")
endif ()
-if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT})
+if (EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}")
file (REMOVE ${TEST_FOLDER}/${TEST_OUTPUT})
endif ()
-if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}.err)
+if (EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}.err")
file (REMOVE ${TEST_FOLDER}/${TEST_OUTPUT}.err)
endif ()
@@ -69,10 +69,10 @@ endif ()
set (verification_done "0")
while (verification_done LESS "1")
message (STATUS "checking first stage:${TEST_FOLDER}/${TEST_ARGS1}")
- if (EXISTS ${TEST_FOLDER}/${TEST_ERR})
+ if (EXISTS "${TEST_FOLDER}/${TEST_ERR}")
# Error exit script
set (verification_done "3")
- elseif (EXISTS ${TEST_FOLDER}/${TEST_ARGS1})
+ elseif (EXISTS "${TEST_FOLDER}/${TEST_ARGS1}")
file (STRINGS ${TEST_FOLDER}/${TEST_ARGS1} v1)
list (LENGTH v1 len_v1)
message (STATUS "v1:${v1} len_v1:${len_v1}")
@@ -112,10 +112,10 @@ endwhile ()
while (verification_done LESS "2")
message (STATUS "checking second stage:${TEST_FOLDER}/${TEST_ARGS1}")
- if (EXISTS ${TEST_FOLDER}/${TEST_ERR})
+ if (EXISTS "${TEST_FOLDER}/${TEST_ERR}")
# Error exit script
set (verification_done "3")
- elseif (EXISTS ${TEST_FOLDER}/${TEST_ARGS1})
+ elseif (EXISTS "${TEST_FOLDER}/${TEST_ARGS1}")
file (STRINGS ${TEST_FOLDER}/${TEST_ARGS1} v1)
list (LENGTH v1 len_v1)
message (STATUS "v1:${v1} len_v1:${len_v1}")
@@ -158,7 +158,7 @@ message (STATUS "COMMAND Result: ${TEST_RESULT}")
# if the return value is !=${TEST_EXPECT} bail out
if (NOT TEST_RESULT EQUAL TEST_EXPECT)
if (NOT TEST_NOERRDISPLAY)
- if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT})
+ if (EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}")
file (READ ${TEST_FOLDER}/${TEST_OUTPUT} TEST_STREAM)
message (STATUS "Output :\n${TEST_STREAM}")
endif ()
diff --git a/test/h5test.c b/test/h5test.c
index 7cb5345..dd8d906 100644
--- a/test/h5test.c
+++ b/test/h5test.c
@@ -1193,7 +1193,7 @@ h5_show_hostname(void)
#endif
#ifdef H5_HAVE_GETHOSTNAME
- if (gethostname(hostname, (size_t)80) < 0)
+ if (HDgethostname(hostname, (size_t)80) < 0)
HDprintf(" gethostname failed\n");
else
HDprintf(" hostname=%s\n", hostname);
diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt
index 1aecef6..51c3420 100644
--- a/testpar/CMakeLists.txt
+++ b/testpar/CMakeLists.txt
@@ -47,11 +47,13 @@ MACRO (ADD_H5P_EXE file)
TARGET_C_PROPERTIES (${file} STATIC)
target_link_libraries (${file}
PRIVATE ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET} "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_LIBRARIES}>"
+ $<$<OR:$<PLATFORM_ID:Windows>,$<PLATFORM_ID:MinGW>>:ws2_32.lib>
)
else ()
TARGET_C_PROPERTIES (${file} SHARED)
target_link_libraries (${file}
PRIVATE ${HDF5_TEST_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_LIBRARIES}>"
+ $<$<OR:$<PLATFORM_ID:Windows>,$<PLATFORM_ID:MinGW>>:ws2_32.lib>
)
endif ()
set_target_properties (${file} PROPERTIES FOLDER test/par)
@@ -61,7 +63,7 @@ set (H5P_TESTS
t_mpi
t_bigio
t_cache
- #t_cache_image
+ t_cache_image
t_pflush1
t_pflush2
t_pread
diff --git a/testpar/CMakeTests.cmake b/testpar/CMakeTests.cmake
index ad244b5..5848c60 100644
--- a/testpar/CMakeTests.cmake
+++ b/testpar/CMakeTests.cmake
@@ -72,6 +72,9 @@ endforeach ()
# list (REMOVE_ITEM H5P_TESTS t_shapesame)
#endif ()
+# do not test until new version is added
+list (REMOVE_ITEM H5P_TESTS t_cache_image)
+
set (test_par_CLEANFILES
t_cache_image_00.h5
t_cache_image_01.h5
diff --git a/testpar/t_file.c b/testpar/t_file.c
index f1aff19..204095b 100644
--- a/testpar/t_file.c
+++ b/testpar/t_file.c
@@ -736,6 +736,11 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
return nerrors;
}
+/*
+ * NOTE: See HDFFV-10894 and add tests later to verify MPI-specific properties in the
+ * incoming fapl that could conflict with the existing values in H5F_shared_t on
+ * multiple opens of the same file.
+ */
void
test_file_properties(void)
{
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index e9f4101..63ac8d3 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -852,18 +852,18 @@ void collective_group_write_independent_group_read(void)
*/
void collective_group_write(void)
{
- int mpi_rank, mpi_size, size;
- int i, j, m;
- char gname[64], dname[32];
+ int mpi_rank, mpi_size, size;
+ int i, j, m;
+ char gname[64], dname[32];
hid_t fid, gid, did, plist, dcpl, memspace, filespace;
- DATATYPE * outme = NULL;
- hsize_t chunk_origin[DIM];
- hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
- hsize_t chunk_size[2]; /* Chunk dimensions - computed shortly */
- herr_t ret1, ret2;
+ DATATYPE *outme = NULL;
+ hsize_t chunk_origin[DIM];
+ hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
+ hsize_t chunk_size[2]; /* Chunk dimensions - computed shortly */
+ herr_t ret1, ret2;
const H5Ptest_param_t *pt;
- char *filename;
- int ngroups;
+ char *filename;
+ int ngroups;
pt = GetTestParameters();
filename = pt->name;
@@ -920,8 +920,7 @@ void collective_group_write(void)
for(j = 0; j < size; j++)
outme[(i * size) + j] =(i + j) * 1000 + mpi_rank;
- H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT,
- outme);
+ H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme);
H5Dclose(did);
H5Gclose(gid);
@@ -951,7 +950,7 @@ void independent_group_read(void)
hid_t plist, fid;
const H5Ptest_param_t *pt;
char *filename;
- int ngroups;
+ int ngroups;
pt = GetTestParameters();
filename = pt->name;
@@ -1071,16 +1070,16 @@ group_dataset_read(hid_t fid, int mpi_rank, int m)
*/
void multiple_group_write(void)
{
- int mpi_rank, mpi_size, size;
- int m;
- char gname[64];
- hid_t fid, gid, plist, memspace, filespace;
+ int mpi_rank, mpi_size, size;
+ int m;
+ char gname[64];
+ hid_t fid, gid, plist, memspace, filespace;
hsize_t chunk_origin[DIM];
hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
- herr_t ret;
+ herr_t ret;
const H5Ptest_param_t *pt;
- char *filename;
- int ngroups;
+ char *filename;
+ int ngroups;
pt = GetTestParameters();
filename = pt->name;
@@ -1162,11 +1161,11 @@ void multiple_group_write(void)
static void
write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
{
- int i, j, n, size;
- int mpi_rank, mpi_size;
- char dname[32];
- DATATYPE * outme = NULL;
- hid_t did;
+ int i, j, n, size;
+ int mpi_rank, mpi_size;
+ char dname[32];
+ DATATYPE *outme = NULL;
+ hid_t did;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
@@ -1248,7 +1247,7 @@ void multiple_group_read(void)
hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
const H5Ptest_param_t *pt;
char *filename;
- int ngroups;
+ int ngroups;
pt = GetTestParameters();
filename = pt->name;
@@ -1323,10 +1322,10 @@ void multiple_group_read(void)
static int
read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
{
- int i, j, n, mpi_rank, mpi_size, size, attr_errors=0, vrfy_errors=0;
- char dname[32];
+ int i, j, n, mpi_rank, mpi_size, size, attr_errors=0, vrfy_errors=0;
+ char dname[32];
DATATYPE *outdata = NULL, *indata = NULL;
- hid_t did;
+ hid_t did;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
@@ -1344,8 +1343,7 @@ read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
did = H5Dopen2(gid, dname, H5P_DEFAULT);
VRFY((did>0), dname);
- H5Dread(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT,
- indata);
+ H5Dread(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, indata);
/* this is the original value */
for(i=0; i<size; i++)
@@ -1451,7 +1449,7 @@ read_attribute(hid_t obj_id, int this_type, int num)
if(MAINPROCESS) {
H5Aread(aid, H5T_NATIVE_INT, &in_num);
vrfy_errors = dataset_vrfy(NULL, NULL, NULL, group_block, &in_num, &num);
- }
+ }
H5Aclose(aid);
}
else if(this_type == is_dset) {
@@ -1462,7 +1460,7 @@ read_attribute(hid_t obj_id, int this_type, int num)
if(MAINPROCESS) {
H5Aread(aid, H5T_NATIVE_INT, in_data);
vrfy_errors = dataset_vrfy(NULL, NULL, NULL, dset_block, in_data, out_data);
- }
+ }
H5Aclose(aid);
}
@@ -1481,9 +1479,9 @@ read_attribute(hid_t obj_id, int this_type, int num)
static int
check_value(DATATYPE *indata, DATATYPE *outdata, int size)
{
- int mpi_rank, mpi_size, err_num=0;
- hsize_t i, j;
- hsize_t chunk_origin[DIM];
+ int mpi_rank, mpi_size, err_num=0;
+ hsize_t i, j;
+ hsize_t chunk_origin[DIM];
hsize_t chunk_dims[DIM], count[DIM];
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -1494,11 +1492,11 @@ check_value(DATATYPE *indata, DATATYPE *outdata, int size)
indata += chunk_origin[0]*size;
outdata += chunk_origin[0]*size;
for(i=chunk_origin[0]; i<(chunk_origin[0]+chunk_dims[0]); i++)
- for(j=chunk_origin[1]; j<(chunk_origin[1]+chunk_dims[1]); j++) {
- if(*indata != *outdata )
- if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col%lu): expect %d, got %d\n",(unsigned long)i,(unsigned long)j,(unsigned long)i,(unsigned long)j, *outdata, *indata);
- }
+ for(j=chunk_origin[1]; j<(chunk_origin[1]+chunk_dims[1]); j++) {
+ if(*indata != *outdata )
+ if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col%lu): expect %d, got %d\n",(unsigned long)i,(unsigned long)j,(unsigned long)i,(unsigned long)j, *outdata, *indata);
+ }
if(err_num > MAX_ERR_REPORT && !VERBOSE_MED)
HDprintf("[more errors ...]\n");
if(err_num)
@@ -1740,8 +1738,8 @@ void io_mode_confusion(void)
status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
VRFY((status >= 0 ), "H5Pset_dxpl_mpio() failed");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- status = H5Pset_dxpl_mpio_collective_opt(plist_id, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((status>= 0),"set independent IO collectively succeeded");
+ status = H5Pset_dxpl_mpio_collective_opt(plist_id, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((status>= 0),"set independent IO collectively succeeded");
}
@@ -1967,10 +1965,10 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
int steps_done = 0;
/* test bed related variables */
- const char * fcn_name = "rr_obj_hdr_flush_confusion_writer";
- const hbool_t verbose = FALSE;
- const H5Ptest_param_t * pt;
- char * filename;
+ const char *fcn_name = "rr_obj_hdr_flush_confusion_writer";
+ const hbool_t verbose = FALSE;
+ const H5Ptest_param_t *pt;
+ char *filename;
/*
* setup test bed related variables:
@@ -2067,19 +2065,19 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
}
for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start,
- NULL, disk_count, NULL);
+ err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start,
+ NULL, disk_count, NULL);
VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n");
mem_space[i] = H5Screate_simple(1, mem_size, NULL);
- VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n");
- err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET,
- mem_start, NULL, mem_count, NULL);
+ VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n");
+ err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET,
+ mem_start, NULL, mem_count, NULL);
VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n");
- err = H5Dwrite(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i],
- disk_space[i], dxpl_id, data);
+ err = H5Dwrite(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i],
+ disk_space[i], dxpl_id, data);
VRFY((err >= 0), "H5Dwrite(1) failed.\n");
for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
- data[j] *= 10.0;
+ data[j] *= 10.0;
}
/*
@@ -2127,7 +2125,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
for ( i = 0; i < NUM_DATA_SETS; i++ ) {
att_space[i] = H5Screate_simple(1, att_size, NULL);
VRFY((att_space[i] >= 0), "H5Screate_simple(3) failed.\n");
- att_id[i] = H5Acreate2(dataset[i], att_name[i], H5T_NATIVE_DOUBLE,
+ att_id[i] = H5Acreate2(dataset[i], att_name[i], H5T_NATIVE_DOUBLE,
att_space[i], H5P_DEFAULT, H5P_DEFAULT);
VRFY((att_id[i] >= 0), "H5Acreate(1) failed.\n");
err = H5Awrite(att_id[i], H5T_NATIVE_DOUBLE, att);
@@ -2146,7 +2144,6 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
mpi_rank, fcn_name);
for ( i = 0; i < NUM_DATA_SETS; i++ ) {
-
err = H5Sclose(att_space[i]);
VRFY((err >= 0), "H5Sclose(att_space[i]) failed.\n");
err = H5Aclose(att_id[i]);
@@ -2350,20 +2347,20 @@ void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
/* MPI variables */
/* world communication size and rank */
- int mpi_world_size;
- int mpi_world_rank;
+ int mpi_world_size;
+ int mpi_world_rank;
/* private communicator size and rank */
- int mpi_size;
- int mpi_rank;
- int mrc; /* mpi error code */
+ int mpi_size;
+ int mpi_rank;
+ int mrc; /* mpi error code */
int steps = -1; /* How far (steps) to verify the file */
int steps_done = -1; /* How far (steps) have been verified */
/* test bed related variables */
- const char * fcn_name = "rr_obj_hdr_flush_confusion_reader";
- const hbool_t verbose = FALSE;
- const H5Ptest_param_t * pt;
- char * filename;
+ const char *fcn_name = "rr_obj_hdr_flush_confusion_reader";
+ const hbool_t verbose = FALSE;
+ const H5Ptest_param_t *pt;
+ char *filename;
/*
* setup test bed related variables:
@@ -2380,291 +2377,290 @@ void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
/* Repeatedly re-open the file and verify its contents until it is */
/* told to end (when steps=0). */
while (steps_done != 0){
- Reader_wait(mrc, steps);
- VRFY((mrc >= 0), "Reader_wait failed");
- steps_done = 0;
+ Reader_wait(mrc, steps);
+ VRFY((mrc >= 0), "Reader_wait failed");
+ steps_done = 0;
- if (steps > 0 ){
- /*
- * Set up file access property list with parallel I/O access
- */
+ if (steps > 0 ){
+ /*
+ * Set up file access property list with parallel I/O access
+ */
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Setting up property list.\n",
- mpi_rank, fcn_name);
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: Setting up property list.\n",
+ mpi_rank, fcn_name);
- fapl_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed");
- err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL);
- VRFY((err >= 0 ), "H5Pset_fapl_mpio() failed");
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed");
+ err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL);
+ VRFY((err >= 0 ), "H5Pset_fapl_mpio() failed");
- /*
- * Create a new file collectively and release property list identifier.
- */
+ /*
+ * Create a new file collectively and release property list identifier.
+ */
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Re-open file \"%s\".\n",
- mpi_rank, fcn_name, filename);
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: Re-open file \"%s\".\n",
+ mpi_rank, fcn_name, filename);
- file_id = H5Fopen(filename, H5F_ACC_RDONLY, fapl_id);
- VRFY((file_id >= 0 ), "H5Fopen() failed");
- err = H5Pclose(fapl_id);
- VRFY((err >= 0 ), "H5Pclose(fapl_id) failed");
+ file_id = H5Fopen(filename, H5F_ACC_RDONLY, fapl_id);
+ VRFY((file_id >= 0 ), "H5Fopen() failed");
+ err = H5Pclose(fapl_id);
+ VRFY((err >= 0 ), "H5Pclose(fapl_id) failed");
#if 1
- if (steps >= 1){
- /*=====================================================*
- * Step 1: open the data sets and read data.
- *=====================================================*/
+ if (steps >= 1){
+ /*=====================================================*
+ * Step 1: open the data sets and read data.
+ *=====================================================*/
- if(verbose )
- HDfprintf(stdout, "%0d:%s: opening the datasets.\n",
- mpi_rank, fcn_name);
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: opening the datasets.\n",
+ mpi_rank, fcn_name);
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- dataset[i] = -1;
- }
+ for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ dataset[i] = -1;
+ }
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- dataset[i] = H5Dopen2(file_id, dataset_name[i], H5P_DEFAULT);
- VRFY((dataset[i] >= 0), "H5Dopen(1) failed.\n");
- disk_space[i] = H5Dget_space(dataset[i]);
- VRFY((disk_space[i] >= 0), "H5Dget_space failed.\n");
- }
+ for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ dataset[i] = H5Dopen2(file_id, dataset_name[i], H5P_DEFAULT);
+ VRFY((dataset[i] >= 0), "H5Dopen(1) failed.\n");
+ disk_space[i] = H5Dget_space(dataset[i]);
+ VRFY((disk_space[i] >= 0), "H5Dget_space failed.\n");
+ }
- /*
- * setup data transfer property list
- */
+ /*
+ * setup data transfer property list
+ */
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name);
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name);
- dxpl_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n");
- err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
- VRFY((err >= 0),
- "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n");
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n");
+ err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
+ VRFY((err >= 0),
+ "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n");
- /*
- * read data from the data sets
- */
+ /*
+ * read data from the data sets
+ */
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Reading datasets.\n", mpi_rank, fcn_name);
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: Reading datasets.\n", mpi_rank, fcn_name);
- disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
- disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank);
+ disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
+ disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank);
- mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE);
+ mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE);
- mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
- mem_start[0] = (hsize_t)(0);
+ mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
+ mem_start[0] = (hsize_t)(0);
- /* set up expected data for verification */
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
- data[j] = (double)(mpi_rank + 1);
- }
+ /* set up expected data for verification */
+ for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
+ data[j] = (double)(mpi_rank + 1);
+ }
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start,
- NULL, disk_count, NULL);
- VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n");
- mem_space[i] = H5Screate_simple(1, mem_size, NULL);
- VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n");
- err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET,
- mem_start, NULL, mem_count, NULL);
- VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n");
- err = H5Dread(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i],
- disk_space[i], dxpl_id, data_read);
- VRFY((err >= 0), "H5Dread(1) failed.\n");
-
- /* compare read data with expected data */
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
- if (data_read[j] != data[j]){
- HDfprintf(stdout,
- "%0d:%s: Reading datasets value failed in "
- "Dataset %d, at position %d: expect %f, got %f.\n",
- mpi_rank, fcn_name, i, j, data[j], data_read[j]);
- nerrors++;
- }
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
- data[j] *= 10.0;
- }
+ for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start,
+ NULL, disk_count, NULL);
+ VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n");
+ mem_space[i] = H5Screate_simple(1, mem_size, NULL);
+ VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n");
+ err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET,
+ mem_start, NULL, mem_count, NULL);
+ VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n");
+ err = H5Dread(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i],
+ disk_space[i], dxpl_id, data_read);
+ VRFY((err >= 0), "H5Dread(1) failed.\n");
+
+ /* compare read data with expected data */
+ for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
+ if (data_read[j] != data[j]){
+ HDfprintf(stdout,
+ "%0d:%s: Reading datasets value failed in "
+ "Dataset %d, at position %d: expect %f, got %f.\n",
+ mpi_rank, fcn_name, i, j, data[j], data_read[j]);
+ nerrors++;
+ }
+ for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
+ data[j] *= 10.0;
+ }
- /*
- * close the data spaces
- */
+ /*
+ * close the data spaces
+ */
- if(verbose )
- HDfprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name);
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name);
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- err = H5Sclose(disk_space[i]);
- VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n");
- err = H5Sclose(mem_space[i]);
- VRFY((err >= 0), "H5Sclose(mem_space[i]) failed.\n");
- }
- steps_done++;
- }
- /* End of Step 1: open the data sets and read data. */
+ for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ err = H5Sclose(disk_space[i]);
+ VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n");
+ err = H5Sclose(mem_space[i]);
+ VRFY((err >= 0), "H5Sclose(mem_space[i]) failed.\n");
+ }
+ steps_done++;
+ }
+ /* End of Step 1: open the data sets and read data. */
#endif
#if 1
- /*=====================================================*
- * Step 2: reading attributes from each dataset
- *=====================================================*/
-
- if (steps >= 2){
- if(verbose )
- HDfprintf(stdout, "%0d:%s: reading attributes.\n", mpi_rank, fcn_name);
-
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
+ /*=====================================================*
+ * Step 2: reading attributes from each dataset
+ *=====================================================*/
- att[j] = (double)(j + 1);
- }
+ if (steps >= 2){
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: reading attributes.\n", mpi_rank, fcn_name);
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- hid_t att_id, att_type;
-
- att_id = H5Aopen(dataset[i], att_name[i], H5P_DEFAULT);
- VRFY((att_id >= 0), "H5Aopen failed.\n");
- att_type = H5Aget_type(att_id);
- VRFY((att_type >= 0), "H5Aget_type failed.\n");
- tri_err = H5Tequal(att_type, H5T_NATIVE_DOUBLE);
- VRFY((tri_err >= 0), "H5Tequal failed.\n");
- if (tri_err==0){
- HDfprintf(stdout,
- "%0d:%s: Mismatched Attribute type of Dataset %d.\n",
- mpi_rank, fcn_name, i);
- nerrors++;
- }else{
- /* should verify attribute size before H5Aread */
- err = H5Aread(att_id, H5T_NATIVE_DOUBLE, att_read);
- VRFY((err >= 0), "H5Aread failed.\n");
- /* compare read attribute data with expected data */
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
- if (att_read[j] != att[j]){
- HDfprintf(stdout,
- "%0d:%s: Mismatched attribute data read in Dataset %d, at position %d: expect %f, got %f.\n",
- mpi_rank, fcn_name, i, j, att[j], att_read[j]);
- nerrors++;
+ for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
+ att[j] = (double)(j + 1);
}
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
- att[j] /= 10.0;
- }
+ for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ hid_t att_id, att_type;
+
+ att_id = H5Aopen(dataset[i], att_name[i], H5P_DEFAULT);
+ VRFY((att_id >= 0), "H5Aopen failed.\n");
+ att_type = H5Aget_type(att_id);
+ VRFY((att_type >= 0), "H5Aget_type failed.\n");
+ tri_err = H5Tequal(att_type, H5T_NATIVE_DOUBLE);
+ VRFY((tri_err >= 0), "H5Tequal failed.\n");
+ if (tri_err==0){
+ HDfprintf(stdout,
+ "%0d:%s: Mismatched Attribute type of Dataset %d.\n",
+ mpi_rank, fcn_name, i);
+ nerrors++;
+ }
+ else {
+ /* should verify attribute size before H5Aread */
+ err = H5Aread(att_id, H5T_NATIVE_DOUBLE, att_read);
+ VRFY((err >= 0), "H5Aread failed.\n");
+ /* compare read attribute data with expected data */
+ for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
+ if (att_read[j] != att[j]){
+ HDfprintf(stdout,
+ "%0d:%s: Mismatched attribute data read in Dataset %d, at position %d: expect %f, got %f.\n",
+ mpi_rank, fcn_name, i, j, att[j], att_read[j]);
+ nerrors++;
+ }
+ for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
+ att[j] /= 10.0;
+ }
+ }
+ err = H5Aclose(att_id);
+ VRFY((err >= 0), "H5Aclose failed.\n");
+ }
+ steps_done++;
}
- err = H5Aclose(att_id);
- VRFY((err >= 0), "H5Aclose failed.\n");
- }
- steps_done++;
- }
- /* End of Step 2: reading attributes from each dataset */
+ /* End of Step 2: reading attributes from each dataset */
#endif
#if 1
- /*=====================================================*
- * Step 3 or 4: read large attributes from each dataset.
- * Step 4 has different attribute value from step 3.
- *=====================================================*/
-
- if (steps >= 3){
- if(verbose )
- HDfprintf(stdout, "%0d:%s: reading large attributes.\n", mpi_rank, fcn_name);
+ /*=====================================================*
+ * Step 3 or 4: read large attributes from each dataset.
+ * Step 4 has different attribute value from step 3.
+ *=====================================================*/
- for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
+ if (steps >= 3){
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: reading large attributes.\n", mpi_rank, fcn_name);
- lg_att[j] = (steps==3) ? (double)(j + 1) : (double)(j+2);
- }
-
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- lg_att_id[i] = H5Aopen(dataset[i], lg_att_name[i], H5P_DEFAULT);
- VRFY((lg_att_id[i] >= 0), "H5Aopen(2) failed.\n");
- lg_att_type[i] = H5Aget_type(lg_att_id[i]);
- VRFY((err >= 0), "H5Aget_type failed.\n");
- tri_err = H5Tequal(lg_att_type[i], H5T_NATIVE_DOUBLE);
- VRFY((tri_err >= 0), "H5Tequal failed.\n");
- if (tri_err==0){
- HDfprintf(stdout,
- "%0d:%s: Mismatched Large attribute type of Dataset %d.\n",
- mpi_rank, fcn_name, i);
- nerrors++;
- }else{
- /* should verify large attribute size before H5Aread */
- err = H5Aread(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att_read);
- VRFY((err >= 0), "H5Aread failed.\n");
- /* compare read attribute data with expected data */
- for ( j = 0; j < LARGE_ATTR_SIZE; j++ )
- if (lg_att_read[j] != lg_att[j]){
- HDfprintf(stdout,
- "%0d:%s: Mismatched large attribute data read in Dataset %d, at position %d: expect %f, got %f.\n",
- mpi_rank, fcn_name, i, j, lg_att[j], lg_att_read[j]);
- nerrors++;
+ for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
+ lg_att[j] = (steps==3) ? (double)(j + 1) : (double)(j+2);
}
- for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
- lg_att[j] /= 10.0;
- }
+ for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ lg_att_id[i] = H5Aopen(dataset[i], lg_att_name[i], H5P_DEFAULT);
+ VRFY((lg_att_id[i] >= 0), "H5Aopen(2) failed.\n");
+ lg_att_type[i] = H5Aget_type(lg_att_id[i]);
+ VRFY((err >= 0), "H5Aget_type failed.\n");
+ tri_err = H5Tequal(lg_att_type[i], H5T_NATIVE_DOUBLE);
+ VRFY((tri_err >= 0), "H5Tequal failed.\n");
+ if (tri_err==0){
+ HDfprintf(stdout,
+ "%0d:%s: Mismatched Large attribute type of Dataset %d.\n",
+ mpi_rank, fcn_name, i);
+ nerrors++;
+ }
+ else{
+ /* should verify large attribute size before H5Aread */
+ err = H5Aread(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att_read);
+ VRFY((err >= 0), "H5Aread failed.\n");
+ /* compare read attribute data with expected data */
+ for ( j = 0; j < LARGE_ATTR_SIZE; j++ )
+ if (lg_att_read[j] != lg_att[j]){
+ HDfprintf(stdout,
+ "%0d:%s: Mismatched large attribute data read in Dataset %d, at position %d: expect %f, got %f.\n",
+ mpi_rank, fcn_name, i, j, lg_att[j], lg_att_read[j]);
+ nerrors++;
+ }
+ for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
+
+ lg_att[j] /= 10.0;
+ }
+ }
+ err = H5Tclose(lg_att_type[i]);
+ VRFY((err >= 0), "H5Tclose failed.\n");
+ err = H5Aclose(lg_att_id[i]);
+ VRFY((err >= 0), "H5Aclose failed.\n");
+ }
+ /* Both step 3 and 4 use this same read checking code. */
+ steps_done = (steps==3) ? 3 : 4;
}
- err = H5Tclose(lg_att_type[i]);
- VRFY((err >= 0), "H5Tclose failed.\n");
- err = H5Aclose(lg_att_id[i]);
- VRFY((err >= 0), "H5Aclose failed.\n");
- }
- /* Both step 3 and 4 use this same read checking code. */
- steps_done = (steps==3) ? 3 : 4;
- }
- /* End of Step 3 or 4: read large attributes from each dataset */
+ /* End of Step 3 or 4: read large attributes from each dataset */
#endif
- /*=====================================================*
- * Step 5: read all objects from the file
- *=====================================================*/
- if (steps>=5){
- /* nothing extra to verify. The file is closed normally. */
- /* Just increment steps_done */
- steps_done++;
- }
+ /*=====================================================*
+ * Step 5: read all objects from the file
+ *=====================================================*/
+ if (steps>=5){
+ /* nothing extra to verify. The file is closed normally. */
+ /* Just increment steps_done */
+ steps_done++;
+ }
- /*
- * Close the data sets
- */
+ /*
+ * Close the data sets
+ */
- if(verbose )
- HDfprintf(stdout, "%0d:%s: closing datasets again.\n",
- mpi_rank, fcn_name);
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: closing datasets again.\n",
+ mpi_rank, fcn_name);
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- if ( dataset[i] >= 0 ) {
- err = H5Dclose(dataset[i]);
- VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n");
- }
- }
+ for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ if ( dataset[i] >= 0 ) {
+ err = H5Dclose(dataset[i]);
+ VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n");
+ }
+ }
- /*
- * close the data transfer property list.
- */
+ /*
+ * close the data transfer property list.
+ */
- if(verbose )
- HDfprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name);
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name);
- err = H5Pclose(dxpl_id);
- VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n");
+ err = H5Pclose(dxpl_id);
+ VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n");
- /*
- * Close the file
- */
- if(verbose)
- HDfprintf(stdout, "%0d:%s: closing file again.\n",
- mpi_rank, fcn_name);
- err = H5Fclose(file_id);
- VRFY((err >= 0 ), "H5Fclose(1) failed");
+ /*
+ * Close the file
+ */
+ if(verbose)
+ HDfprintf(stdout, "%0d:%s: closing file again.\n",
+ mpi_rank, fcn_name);
+ err = H5Fclose(file_id);
+ VRFY((err >= 0 ), "H5Fclose(1) failed");
- } /* else if (steps_done==0) */
- Reader_result(mrc, steps_done);
+ } /* else if (steps_done==0) */
+ Reader_result(mrc, steps_done);
} /* end while(1) */
if(verbose )
diff --git a/tools/lib/h5tools_dump.c b/tools/lib/h5tools_dump.c
index a8b7e30..beb397c 100644
--- a/tools/lib/h5tools_dump.c
+++ b/tools/lib/h5tools_dump.c
@@ -94,7 +94,7 @@ h5tool_format_t h5tools_dataformat = {
const h5tools_dump_header_t h5tools_standardformat = {
"standardformat", /*name */
-"HDF5", /*fileebgin */
+"HDF5", /*filebegin */
"", /*fileend */
SUPER_BLOCK, /*bootblockbegin */
"", /*bootblockend */
@@ -2277,18 +2277,26 @@ h5tools_print_datatype(FILE *stream, h5tools_str_t *buffer, const h5tool_format_
h5tools_render_element(stream, info, ctx, buffer, &curr_pos, (size_t)ncols, (hsize_t)0, (hsize_t)0);
ctx->indent_level++;
{
- char *ttag;
+ char *ttag;
- if(NULL == (ttag = H5Tget_tag(type)))
- H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_tag failed")
+ if(NULL == (ttag = H5Tget_tag(type)))
+ H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_tag failed")
- ctx->need_prefix = TRUE;
+ ctx->need_prefix = TRUE;
+
+ h5tools_str_reset(buffer);
+ h5tools_str_append(buffer, "OPAQUE_TAG \"%s\";", ttag);
+ h5tools_render_element(stream, info, ctx, buffer, &curr_pos, (size_t)ncols, (hsize_t)0, (hsize_t)0);
+
+ H5free_memory(ttag);
- h5tools_str_reset(buffer);
- h5tools_str_append(buffer, "OPAQUE_TAG \"%s\";", ttag);
- h5tools_render_element(stream, info, ctx, buffer, &curr_pos, (size_t)ncols, (hsize_t)0, (hsize_t)0);
+ if((size = H5Tget_size(type)) > 0) {
+ ctx->need_prefix = TRUE;
- H5free_memory(ttag);
+ h5tools_str_reset(buffer);
+ h5tools_str_append(buffer, "OPAQUE_SIZE \"%s\";", size);
+ h5tools_render_element(stream, info, ctx, buffer, &curr_pos, (size_t)ncols, (hsize_t)0, (hsize_t)0);
+ }
}
ctx->indent_level--;
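For readers following the OPAQUE_TAG/OPAQUE_SIZE hunk above, this is a minimal standalone sketch of the same queries against the public HDF5 API. The opaque type, its tag, and the example program are illustrative assumptions, not code from h5tools_dump.c.

    #include <stdio.h>
    #include "hdf5.h"

    int main(void)
    {
        hid_t  type = H5Tcreate(H5T_OPAQUE, (size_t)8);   /* hypothetical 8-byte opaque type */
        char  *ttag;
        size_t size;

        if (type < 0)
            return 1;
        if (H5Tset_tag(type, "example-tag") < 0)
            return 1;

        if (NULL == (ttag = H5Tget_tag(type)))            /* tag string is library-allocated */
            return 1;
        printf("OPAQUE_TAG \"%s\";\n", ttag);
        H5free_memory(ttag);

        if ((size = H5Tget_size(type)) > 0)               /* 0 means the size query failed */
            printf("OPAQUE_SIZE \"%lu\";\n", (unsigned long)size);

        H5Tclose(type);
        return 0;
    }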
diff --git a/tools/src/h5dump/h5dump_ddl.c b/tools/src/h5dump/h5dump_ddl.c
index e66a1c5..d7c7b1a 100644
--- a/tools/src/h5dump/h5dump_ddl.c
+++ b/tools/src/h5dump/h5dump_ddl.c
@@ -1943,9 +1943,6 @@ handle_links(hid_t fid, const char *links, void H5_ATTR_UNUSED * data, int H5_AT
break;
case H5L_TYPE_EXTERNAL:
- begin_obj(h5tools_dump_header_format->udlinkbegin, links, h5tools_dump_header_format->udlinkblockbegin);
- PRINTVALSTREAM(rawoutstream, "\n");
- indentation(COL);
begin_obj(h5tools_dump_header_format->extlinkbegin, links, h5tools_dump_header_format->extlinkblockbegin);
PRINTVALSTREAM(rawoutstream, "\n");
if(H5Lget_val(fid, links, buf, linfo.u.val_size, H5P_DEFAULT) >= 0) {
@@ -1954,8 +1951,6 @@ handle_links(hid_t fid, const char *links, void H5_ATTR_UNUSED * data, int H5_AT
if(H5Lunpack_elink_val(buf, linfo.u.val_size, NULL, &elink_file, &elink_path)>=0) {
indentation(COL);
- PRINTSTREAM(rawoutstream, "LINKCLASS %d\n", linfo.type);
- indentation(COL);
PRINTSTREAM(rawoutstream, "TARGETFILE \"%s\"\n", elink_file);
indentation(COL);
PRINTSTREAM(rawoutstream, "TARGETPATH \"%s\"\n", elink_path);
@@ -1979,9 +1974,6 @@ handle_links(hid_t fid, const char *links, void H5_ATTR_UNUSED * data, int H5_AT
begin_obj(h5tools_dump_header_format->udlinkbegin, links, h5tools_dump_header_format->udlinkblockbegin);
PRINTVALSTREAM(rawoutstream, "\n");
indentation(COL);
- begin_obj(h5tools_dump_header_format->udlinkbegin, links, h5tools_dump_header_format->udlinkblockbegin);
- PRINTVALSTREAM(rawoutstream, "\n");
- indentation(COL);
PRINTSTREAM(rawoutstream, "LINKCLASS %d\n", linfo.type);
end_obj(h5tools_dump_header_format->udlinkend, h5tools_dump_header_format->udlinkblockend);
break;
diff --git a/tools/test/perform/iopipe.c b/tools/test/perform/iopipe.c
index eac099b..2d9c44f 100644
--- a/tools/test/perform/iopipe.c
+++ b/tools/test/perform/iopipe.c
@@ -224,7 +224,7 @@ main (void)
_ftime(tbstart);
#endif
#endif
- fprintf (stderr, HEADING, "fill raw");
+ HDfprintf (stderr, HEADING, "fill raw");
for(u = 0; u < nwrite; u++) {
putc (PROGRESS, stderr);
HDfflush(stderr);
@@ -262,7 +262,7 @@ main (void)
_ftime(tbstart);
#endif
#endif
- fprintf (stderr, HEADING, "fill hdf5");
+ HDfprintf (stderr, HEADING, "fill hdf5");
for(u = 0; u < nread; u++) {
putc (PROGRESS, stderr);
HDfflush(stderr);
@@ -301,7 +301,7 @@ main (void)
_ftime(tbstart);
#endif
#endif
- fprintf (stderr, HEADING, "out raw");
+ HDfprintf (stderr, HEADING, "out raw");
for(u = 0; u < nwrite; u++) {
putc (PROGRESS, stderr);
HDfflush(stderr);
@@ -341,7 +341,7 @@ main (void)
_ftime(tbstart);
#endif
#endif
- fprintf (stderr, HEADING, "out hdf5");
+ HDfprintf (stderr, HEADING, "out hdf5");
for(u = 0; u < nwrite; u++) {
putc (PROGRESS, stderr);
HDfflush(stderr);
@@ -380,7 +380,7 @@ main (void)
_ftime(tbstart);
#endif
#endif
- fprintf (stderr, HEADING, "in raw");
+ HDfprintf (stderr, HEADING, "in raw");
for(u = 0; u < nread; u++) {
putc (PROGRESS, stderr);
HDfflush(stderr);
@@ -421,7 +421,7 @@ main (void)
_ftime(tbstart);
#endif
#endif
- fprintf (stderr, HEADING, "in hdf5");
+ HDfprintf (stderr, HEADING, "in hdf5");
for(u = 0; u < nread; u++) {
putc (PROGRESS, stderr);
HDfflush(stderr);
@@ -465,7 +465,7 @@ main (void)
_ftime(tbstart);
#endif
#endif
- fprintf (stderr, HEADING, "in hdf5 partial");
+ HDfprintf (stderr, HEADING, "in hdf5 partial");
for(u = 0; u < nread; u++) {
putc (PROGRESS, stderr);
HDfflush(stderr);
diff --git a/tools/test/perform/overhead.c b/tools/test/perform/overhead.c
index 81f9de6..108d9e4 100644
--- a/tools/test/perform/overhead.c
+++ b/tools/test/perform/overhead.c
@@ -84,9 +84,9 @@ typedef enum fill_t {
static void
usage(const char *prog)
{
- fprintf(stderr, "usage: %s [STYLE|cache] [LEFT [MIDDLE [RIGHT]]]\n",
+ HDfprintf(stderr, "usage: %s [STYLE|cache] [LEFT [MIDDLE [RIGHT]]]\n",
prog);
- fprintf(stderr, "\
+ HDfprintf(stderr, "\
STYLE is the order that the dataset is filled and should be one of:\n\
forward -- Fill the dataset from lowest address to highest\n\
address. This style tests the right split ratio.\n\
@@ -401,6 +401,6 @@ main(int argc, char *argv[])
return 0;
error:
- fprintf(stderr, "*** ERRORS DETECTED ***\n");
+ HDfprintf(stderr, "*** ERRORS DETECTED ***\n");
return 1;
}
diff --git a/tools/test/perform/perf_meta.c b/tools/test/perform/perf_meta.c
index c24e598..b56f074 100644
--- a/tools/test/perform/perf_meta.c
+++ b/tools/test/perform/perf_meta.c
@@ -313,7 +313,7 @@ create_dsets(hid_t file)
* Create a dataset using the default dataset creation properties.
*/
for(i = 0; i < NUM_DSETS; i++) {
- sprintf(dset_name, "dataset %d", i);
+ HDsprintf(dset_name, "dataset %d", i);
if((dataset = H5Dcreate2(file, dset_name, H5T_NATIVE_DOUBLE, space,
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
goto error;
@@ -378,14 +378,14 @@ create_attrs_1(void)
* Create all(user specifies the number) attributes for each dataset
*/
for(i = 0; i < NUM_DSETS; i++) {
- sprintf(dset_name, "dataset %d", i);
+ HDsprintf(dset_name, "dataset %d", i);
open_t.start = retrieve_time();
if((dataset = H5Dopen2(file, dset_name, H5P_DEFAULT)) < 0)
goto error;
perf(&open_t, open_t.start, retrieve_time());
for(j = 0; j < NUM_ATTRS; j++) {
- sprintf(attr_name, "all attrs for each dset %d", j);
+ HDsprintf(attr_name, "all attrs for each dset %d", j);
attr_t.start = retrieve_time();
if((attr = H5Acreate2(dataset, attr_name, H5T_NATIVE_DOUBLE,
small_space, H5P_DEFAULT, H5P_DEFAULT)) < 0)
@@ -422,7 +422,7 @@ create_attrs_1(void)
attr_t.avg = attr_t.total / (NUM_ATTRS*NUM_DSETS);
/* Print out the performance result */
- fprintf(stderr, "1. Create %d attributes for each of %d existing datasets\n",
+ HDfprintf(stderr, "1. Create %d attributes for each of %d existing datasets\n",
NUM_ATTRS, NUM_DSETS);
print_perf(open_t, close_t, attr_t);
}
@@ -480,7 +480,7 @@ create_attrs_2(void)
* Create all(user specifies the number) attributes for each new dataset
*/
for(i = 0; i < NUM_DSETS; i++) {
- sprintf(dset_name, "dataset %d", i);
+ HDsprintf(dset_name, "dataset %d", i);
create_t.start = retrieve_time();
if((dataset = H5Dcreate2(file, dset_name, H5T_NATIVE_DOUBLE,
space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
@@ -488,7 +488,7 @@ create_attrs_2(void)
perf(&create_t, create_t.start, retrieve_time());
for(j = 0; j < NUM_ATTRS; j++) {
- sprintf(attr_name, "all attrs for each dset %d", j);
+ HDsprintf(attr_name, "all attrs for each dset %d", j);
attr_t.start = retrieve_time();
if((attr = H5Acreate2(dataset, attr_name, H5T_NATIVE_DOUBLE,
small_space, H5P_DEFAULT, H5P_DEFAULT)) < 0)
@@ -525,7 +525,7 @@ create_attrs_2(void)
attr_t.avg = attr_t.total / (NUM_ATTRS*NUM_DSETS);
/* Print out the performance result */
- fprintf(stderr, "2. Create %d attributes for each of %d new datasets\n",
+ HDfprintf(stderr, "2. Create %d attributes for each of %d new datasets\n",
NUM_ATTRS, NUM_DSETS);
print_perf(create_t, close_t, attr_t);
}
@@ -593,14 +593,14 @@ create_attrs_3(void)
for(i = 0; i < loop_num; i++) {
for(j = 0; j < NUM_DSETS; j++) {
- sprintf(dset_name, "dataset %d", j);
+ HDsprintf(dset_name, "dataset %d", j);
open_t.start = retrieve_time();
if((dataset = H5Dopen2(file, dset_name, H5P_DEFAULT)) < 0)
goto error;
perf(&open_t, open_t.start, retrieve_time());
for(k = 0; k < BATCH_ATTRS; k++) {
- sprintf(attr_name, "some attrs for each dset %d %d", i, k);
+ HDsprintf(attr_name, "some attrs for each dset %d %d", i, k);
attr_t.start = retrieve_time();
if((attr = H5Acreate2(dataset, attr_name, H5T_NATIVE_DOUBLE,
small_space, H5P_DEFAULT, H5P_DEFAULT)) < 0)
@@ -637,7 +637,7 @@ create_attrs_3(void)
attr_t.avg = attr_t.total / (NUM_ATTRS*NUM_DSETS);
/* Print out the performance result */
- fprintf(stderr, "3. Create %d attributes for each of %d existing datasets for %d times\n",
+ HDfprintf(stderr, "3. Create %d attributes for each of %d existing datasets for %d times\n",
BATCH_ATTRS, NUM_DSETS, loop_num);
print_perf(open_t, close_t, attr_t);
}
@@ -750,12 +750,12 @@ void perf(p_time *perf_t, double start_t, double end_t)
*/
void print_perf(p_time open_t, p_time close_t, p_time attr_t)
{
- fprintf(stderr, "\t%s:\t\tavg=%.6fs;\tmax=%.6fs;\tmin=%.6fs\n",
+ HDfprintf(stderr, "\t%s:\t\tavg=%.6fs;\tmax=%.6fs;\tmin=%.6fs\n",
open_t.func, open_t.avg, open_t.max, open_t.min);
- fprintf(stderr, "\tH5Dclose:\t\tavg=%.6fs;\tmax=%.6fs;\tmin=%.6fs\n",
+ HDfprintf(stderr, "\tH5Dclose:\t\tavg=%.6fs;\tmax=%.6fs;\tmin=%.6fs\n",
close_t.avg, close_t.max, close_t.min);
if(NUM_ATTRS)
- fprintf(stderr, "\tH5A(create & close):\tavg=%.6fs;\tmax=%.6fs;\tmin=%.6fs\n",
+ HDfprintf(stderr, "\tH5A(create & close):\tavg=%.6fs;\tmax=%.6fs;\tmin=%.6fs\n",
attr_t.avg, attr_t.max, attr_t.min);
}
@@ -799,7 +799,7 @@ main(int argc, char **argv)
#ifdef H5_HAVE_PARALLEL
if (facc_type == FACC_DEFAULT || (facc_type != FACC_DEFAULT && MAINPROCESS))
#endif /*H5_HAVE_PARALLEL*/
- fprintf(stderr, "\t\tPerformance result of metadata for datasets and attributes\n\n");
+ HDfprintf(stderr, "\t\tPerformance result of metadata for datasets and attributes\n\n");
fapl = H5Pcreate (H5P_FILE_ACCESS);
#ifdef H5_HAVE_PARALLEL
diff --git a/tools/test/perform/pio_engine.c b/tools/test/perform/pio_engine.c
index d47cd43..798e32e 100644
--- a/tools/test/perform/pio_engine.c
+++ b/tools/test/perform/pio_engine.c
@@ -61,14 +61,14 @@
#define GOTOERROR(errcode) { ret_code = errcode; goto done; }
#define GOTODONE { goto done; }
#define ERRMSG(mesg) { \
- fprintf(stderr, "Proc %d: ", pio_mpi_rank_g); \
- fprintf(stderr, "*** Assertion failed (%s) at line %4d in %s\n", \
+ HDfprintf(stderr, "Proc %d: ", pio_mpi_rank_g); \
+ HDfprintf(stderr, "*** Assertion failed (%s) at line %4d in %s\n", \
mesg, (int)__LINE__, __FILE__); \
}
#define MSG(mesg) { \
- fprintf(stderr, "Proc %d: ", pio_mpi_rank_g); \
- fprintf(stderr, "(%s) at line %4d in %s\n", \
+ HDfprintf(stderr, "Proc %d: ", pio_mpi_rank_g); \
+ HDfprintf(stderr, "(%s) at line %4d in %s\n", \
mesg, (int)__LINE__, __FILE__); \
}
@@ -194,7 +194,7 @@ do_pio(parameters param)
break;
default:
/* unknown request */
- fprintf(stderr, "Unknown IO type request (%d)\n", iot);
+ HDfprintf(stderr, "Unknown IO type request (%d)\n", iot);
GOTOERROR(FAIL);
}
@@ -213,21 +213,21 @@ do_pio(parameters param)
}
if (param.num_files < 0 ) {
- fprintf(stderr,
+ HDfprintf(stderr,
"number of files must be >= 0 (%ld)\n",
param.num_files);
GOTOERROR(FAIL);
}
if (ndsets < 0 ) {
- fprintf(stderr,
+ HDfprintf(stderr,
"number of datasets per file must be >= 0 (%ld)\n",
ndsets);
GOTOERROR(FAIL);
}
if (param.num_procs <= 0 ) {
- fprintf(stderr,
+ HDfprintf(stderr,
"maximum number of process to use must be > 0 (%d)\n",
param.num_procs);
GOTOERROR(FAIL);
@@ -292,7 +292,7 @@ do_pio(parameters param)
/* output all of the times for all iterations */
if (myrank == 0)
- fprintf(output, "Timer details:\n");
+ HDfprintf(output, "Timer details:\n");
}
for (nf = 1; nf <= param.num_files; nf++) {
@@ -302,7 +302,7 @@ do_pio(parameters param)
/* Open file for write */
char base_name[256];
- sprintf(base_name, "#pio_tmp_%lu", nf);
+ HDsprintf(base_name, "#pio_tmp_%lu", nf);
pio_create_filename(iot, base_name, fname, sizeof(fname));
if (pio_debug_level > 0)
HDfprintf(output, "rank %d: data filename=%s\n",
@@ -385,7 +385,7 @@ done:
/* release generic resources */
if(buffer)
- free(buffer);
+ HDfree(buffer);
res.ret_code = ret_code;
return res;
}
@@ -620,7 +620,7 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
/* Prepare buffer for verifying data */
if (parms->verify)
- memset(buffer,pio_mpi_rank_g+1,buf_size*blk_size);
+ HDmemset(buffer,pio_mpi_rank_g+1,buf_size*blk_size);
} /* end else */
@@ -853,7 +853,7 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
/* Create the dataset transfer property list */
h5dxpl = H5Pcreate(H5P_DATASET_XFER);
if (h5dxpl < 0) {
- fprintf(stderr, "HDF5 Property List Create failed\n");
+ HDfprintf(stderr, "HDF5 Property List Create failed\n");
GOTOERROR(FAIL);
}
@@ -861,7 +861,7 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
if(parms->collective) {
hrc = H5Pset_dxpl_mpio(h5dxpl, H5FD_MPIO_COLLECTIVE);
if (hrc < 0) {
- fprintf(stderr, "HDF5 Property List Set failed\n");
+ HDfprintf(stderr, "HDF5 Property List Set failed\n");
GOTOERROR(FAIL);
} /* end if */
} /* end if */
@@ -883,7 +883,7 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
case PHDF5:
h5dcpl = H5Pcreate(H5P_DATASET_CREATE);
if (h5dcpl < 0) {
- fprintf(stderr, "HDF5 Property List Create failed\n");
+ HDfprintf(stderr, "HDF5 Property List Create failed\n");
GOTOERROR(FAIL);
}
/* 1D dataspace */
@@ -894,7 +894,7 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
h5dims[0] = blk_size;
hrc = H5Pset_chunk(h5dcpl, 1, h5dims);
if (hrc < 0) {
- fprintf(stderr, "HDF5 Property List Set failed\n");
+ HDfprintf(stderr, "HDF5 Property List Set failed\n");
GOTOERROR(FAIL);
} /* end if */
} /* end if */
@@ -907,25 +907,25 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
h5dims[1] = blk_size;
hrc = H5Pset_chunk(h5dcpl, 2, h5dims);
if (hrc < 0) {
- fprintf(stderr, "HDF5 Property List Set failed\n");
+ HDfprintf(stderr, "HDF5 Property List Set failed\n");
GOTOERROR(FAIL);
} /* end if */
} /* end if */
}/* end else */
- sprintf(dname, "Dataset_%ld", ndset);
+ HDsprintf(dname, "Dataset_%ld", ndset);
h5ds_id = H5DCREATE(fd->h5fd, dname, ELMT_H5_TYPE,
h5dset_space_id, h5dcpl);
if (h5ds_id < 0) {
- fprintf(stderr, "HDF5 Dataset Create failed\n");
+ HDfprintf(stderr, "HDF5 Dataset Create failed\n");
GOTOERROR(FAIL);
}
hrc = H5Pclose(h5dcpl);
/* verifying the close of the dcpl */
if (hrc < 0) {
- fprintf(stderr, "HDF5 Property List Close failed\n");
+ HDfprintf(stderr, "HDF5 Property List Close failed\n");
GOTOERROR(FAIL);
}
break;
@@ -1398,7 +1398,7 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
hrc = H5Dclose(h5ds_id);
if (hrc < 0) {
- fprintf(stderr, "HDF5 Dataset Close failed\n");
+ HDfprintf(stderr, "HDF5 Dataset Close failed\n");
GOTOERROR(FAIL);
}
@@ -1455,7 +1455,7 @@ done:
if (h5dset_space_id != -1) {
hrc = H5Sclose(h5dset_space_id);
if (hrc < 0){
- fprintf(stderr, "HDF5 Dataset Space Close failed\n");
+ HDfprintf(stderr, "HDF5 Dataset Space Close failed\n");
ret_code = FAIL;
} else {
h5dset_space_id = -1;
@@ -1465,7 +1465,7 @@ done:
if (h5mem_space_id != -1) {
hrc = H5Sclose(h5mem_space_id);
if (hrc < 0) {
- fprintf(stderr, "HDF5 Memory Space Close failed\n");
+ HDfprintf(stderr, "HDF5 Memory Space Close failed\n");
ret_code = FAIL;
} else {
h5mem_space_id = -1;
@@ -1475,7 +1475,7 @@ done:
if (h5dxpl != -1) {
hrc = H5Pclose(h5dxpl);
if (hrc < 0) {
- fprintf(stderr, "HDF5 Dataset Transfer Property List Close failed\n");
+ HDfprintf(stderr, "HDF5 Dataset Transfer Property List Close failed\n");
ret_code = FAIL;
} else {
h5dxpl = -1;
@@ -1824,7 +1824,7 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
/* Create the dataset transfer property list */
h5dxpl = H5Pcreate(H5P_DATASET_XFER);
if (h5dxpl < 0) {
- fprintf(stderr, "HDF5 Property List Create failed\n");
+ HDfprintf(stderr, "HDF5 Property List Create failed\n");
GOTOERROR(FAIL);
}
@@ -1832,7 +1832,7 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
if(parms->collective) {
hrc = H5Pset_dxpl_mpio(h5dxpl, H5FD_MPIO_COLLECTIVE);
if (hrc < 0) {
- fprintf(stderr, "HDF5 Property List Set failed\n");
+ HDfprintf(stderr, "HDF5 Property List Set failed\n");
GOTOERROR(FAIL);
} /* end if */
} /* end if */
@@ -1852,10 +1852,10 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
break;
case PHDF5:
- sprintf(dname, "Dataset_%ld", ndset);
+ HDsprintf(dname, "Dataset_%ld", ndset);
h5ds_id = H5DOPEN(fd->h5fd, dname);
if (h5ds_id < 0) {
- fprintf(stderr, "HDF5 Dataset open failed\n");
+ HDfprintf(stderr, "HDF5 Dataset open failed\n");
GOTOERROR(FAIL);
}
@@ -2353,7 +2353,7 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
hrc = H5Dclose(h5ds_id);
if (hrc < 0) {
- fprintf(stderr, "HDF5 Dataset Close failed\n");
+ HDfprintf(stderr, "HDF5 Dataset Close failed\n");
GOTOERROR(FAIL);
}
@@ -2410,7 +2410,7 @@ done:
if (h5dset_space_id != -1) {
hrc = H5Sclose(h5dset_space_id);
if (hrc < 0){
- fprintf(stderr, "HDF5 Dataset Space Close failed\n");
+ HDfprintf(stderr, "HDF5 Dataset Space Close failed\n");
ret_code = FAIL;
} else {
h5dset_space_id = -1;
@@ -2420,7 +2420,7 @@ done:
if (h5mem_space_id != -1) {
hrc = H5Sclose(h5mem_space_id);
if (hrc < 0) {
- fprintf(stderr, "HDF5 Memory Space Close failed\n");
+ HDfprintf(stderr, "HDF5 Memory Space Close failed\n");
ret_code = FAIL;
} else {
h5mem_space_id = -1;
@@ -2430,7 +2430,7 @@ done:
if (h5dxpl != -1) {
hrc = H5Pclose(h5dxpl);
if (hrc < 0) {
- fprintf(stderr, "HDF5 Dataset Transfer Property List Close failed\n");
+ HDfprintf(stderr, "HDF5 Dataset Transfer Property List Close failed\n");
ret_code = FAIL;
} else {
h5dxpl = -1;
@@ -2461,7 +2461,7 @@ do_fopen(parameters *param, char *fname, file_descr *fd /*out*/, int flags)
fd->posixfd = POSIXOPEN(fname, O_RDONLY);
if (fd->posixfd < 0 ) {
- fprintf(stderr, "POSIX File Open failed(%s)\n", fname);
+ HDfprintf(stderr, "POSIX File Open failed(%s)\n", fname);
GOTOERROR(FAIL);
}
@@ -2485,7 +2485,7 @@ do_fopen(parameters *param, char *fname, file_descr *fd /*out*/, int flags)
h5_io_info_g, &fd->mpifd);
if (mrc != MPI_SUCCESS) {
- fprintf(stderr, "MPI File Open failed(%s)\n", fname);
+ HDfprintf(stderr, "MPI File Open failed(%s)\n", fname);
GOTOERROR(FAIL);
}
@@ -2493,13 +2493,13 @@ do_fopen(parameters *param, char *fname, file_descr *fd /*out*/, int flags)
/* filesize, set size to 0 explicitly. */
mrc = MPI_File_set_size(fd->mpifd, (MPI_Offset)0);
if (mrc != MPI_SUCCESS) {
- fprintf(stderr, "MPI_File_set_size failed\n");
+ HDfprintf(stderr, "MPI_File_set_size failed\n");
GOTOERROR(FAIL);
}
} else {
mrc = MPI_File_open(pio_comm_g, fname, MPI_MODE_RDONLY, h5_io_info_g, &fd->mpifd);
if (mrc != MPI_SUCCESS) {
- fprintf(stderr, "MPI File Open failed(%s)\n", fname);
+ HDfprintf(stderr, "MPI File Open failed(%s)\n", fname);
GOTOERROR(FAIL);
}
}
@@ -2508,19 +2508,19 @@ do_fopen(parameters *param, char *fname, file_descr *fd /*out*/, int flags)
case PHDF5:
if ((acc_tpl = H5Pcreate(H5P_FILE_ACCESS)) < 0) {
- fprintf(stderr, "HDF5 Property List Create failed\n");
+ HDfprintf(stderr, "HDF5 Property List Create failed\n");
GOTOERROR(FAIL);
}
/* Set the file driver to the MPI-IO driver */
if (H5Pset_fapl_mpio(acc_tpl, pio_comm_g, h5_io_info_g) < 0) {
- fprintf(stderr, "HDF5 Property List Set failed\n");
+ HDfprintf(stderr, "HDF5 Property List Set failed\n");
GOTOERROR(FAIL);
}
/* Set the alignment of objects in HDF5 file */
if (H5Pset_alignment(acc_tpl, param->h5_thresh, param->h5_align) < 0) {
- fprintf(stderr, "HDF5 Property List Set failed\n");
+ HDfprintf(stderr, "HDF5 Property List Set failed\n");
GOTOERROR(FAIL);
}
@@ -2530,13 +2530,13 @@ do_fopen(parameters *param, char *fname, file_descr *fd /*out*/, int flags)
else
fd->h5fd = H5Fopen(fname, H5F_ACC_RDONLY, acc_tpl);
if (fd->h5fd < 0) {
- fprintf(stderr, "HDF5 File Create failed(%s)\n", fname);
+ HDfprintf(stderr, "HDF5 File Create failed(%s)\n", fname);
GOTOERROR(FAIL);
}
/* verifying the close of the acc_tpl */
if (H5Pclose(acc_tpl) < 0) {
- fprintf(stderr, "HDF5 Property List Close failed\n");
+ HDfprintf(stderr, "HDF5 Property List Close failed\n");
GOTOERROR(FAIL);
}
@@ -2565,7 +2565,7 @@ do_fclose(iotype iot, file_descr *fd /*out*/)
rc = POSIXCLOSE(fd->posixfd);
if (rc != 0){
- fprintf(stderr, "POSIX File Close failed\n");
+ HDfprintf(stderr, "POSIX File Close failed\n");
GOTOERROR(FAIL);
}
@@ -2576,7 +2576,7 @@ do_fclose(iotype iot, file_descr *fd /*out*/)
mrc = MPI_File_close(&fd->mpifd);
if (mrc != MPI_SUCCESS){
- fprintf(stderr, "MPI File close failed\n");
+ HDfprintf(stderr, "MPI File close failed\n");
GOTOERROR(FAIL);
}
@@ -2587,7 +2587,7 @@ do_fclose(iotype iot, file_descr *fd /*out*/)
hrc = H5Fclose(fd->h5fd);
if (hrc < 0) {
- fprintf(stderr, "HDF5 File Close failed\n");
+ HDfprintf(stderr, "HDF5 File Close failed\n");
GOTOERROR(FAIL);
}
@@ -2621,7 +2621,7 @@ do_cleanupfile(iotype iot, char *fname)
if (clean_file_g){
switch (iot){
case POSIXIO:
- remove(fname);
+ HDremove(fname);
break;
case MPIO:
case PHDF5:
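The pio_engine.c hunks above all route failures through the GOTOERROR macro into a single done: cleanup block. A minimal sketch of that pattern follows; the function name, dataset name, and file name are hypothetical, not taken from the benchmark.

    #include "hdf5.h"

    #define GOTOERROR(code) do { ret = (code); goto done; } while (0)

    static int make_file(const char *fname)
    {
        int     ret     = 0;
        hid_t   fid     = -1;
        hid_t   space   = -1;
        hid_t   dset    = -1;
        hsize_t dims[1] = {16};

        if ((fid = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
            GOTOERROR(-1);
        if ((space = H5Screate_simple(1, dims, NULL)) < 0)
            GOTOERROR(-1);
        if ((dset = H5Dcreate2(fid, "Dataset_0", H5T_NATIVE_INT, space,
                               H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
            GOTOERROR(-1);

    done:
        /* Close in reverse creation order; skip ids that were never opened. */
        if (dset >= 0)  H5Dclose(dset);
        if (space >= 0) H5Sclose(space);
        if (fid >= 0)   H5Fclose(fid);
        return ret;
    }

    int main(void) { return make_file("pio_sketch.h5"); }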
diff --git a/tools/test/perform/pio_perf.c b/tools/test/perform/pio_perf.c
index 597629e..93741c4 100644
--- a/tools/test/perform/pio_perf.c
+++ b/tools/test/perform/pio_perf.c
@@ -277,13 +277,13 @@ struct options {
unsigned interleaved; /* Interleaved vs. contiguous blocks */
unsigned collective; /* Collective vs. independent I/O */
unsigned dim2d; /* 1D vs. 2D geometry */
- int print_times; /* print times as well as throughputs */
- int print_raw; /* print raw data throughput info */
+ int print_times; /* print times as well as throughputs */
+ int print_raw; /* print raw data throughput info */
off_t h5_alignment; /* alignment in HDF5 file */
off_t h5_threshold; /* threshold for alignment in HDF5 file */
- int h5_use_chunks; /* Make HDF5 dataset chunked */
- int h5_write_only; /* Perform the write tests only */
- int verify; /* Verify data correctness */
+ int h5_use_chunks; /* Make HDF5 dataset chunked */
+ int h5_write_only; /* Perform the write tests only */
+ int verify; /* Verify data correctness */
};
typedef struct _minmax {
@@ -339,12 +339,12 @@ main(int argc, char **argv)
ret = MPI_Comm_size(MPI_COMM_WORLD, &comm_world_nprocs_g);
if (ret != MPI_SUCCESS) {
- fprintf(stderr, "%s: MPI_Comm_size call failed\n", progname);
+ HDfprintf(stderr, "%s: MPI_Comm_size call failed\n", progname);
if (ret == MPI_ERR_COMM)
- fprintf(stderr, "invalid MPI communicator\n");
+ HDfprintf(stderr, "invalid MPI communicator\n");
else
- fprintf(stderr, "invalid argument\n");
+ HDfprintf(stderr, "invalid argument\n");
exit_value = EXIT_FAILURE;
goto finish;
@@ -353,12 +353,12 @@ main(int argc, char **argv)
ret = MPI_Comm_rank(MPI_COMM_WORLD, &comm_world_rank_g);
if (ret != MPI_SUCCESS) {
- fprintf(stderr, "%s: MPI_Comm_rank call failed\n", progname);
+ HDfprintf(stderr, "%s: MPI_Comm_rank call failed\n", progname);
if (ret == MPI_ERR_COMM)
- fprintf(stderr, "invalid MPI communicator\n");
+ HDfprintf(stderr, "invalid MPI communicator\n");
else
- fprintf(stderr, "invalid argument\n");
+ HDfprintf(stderr, "invalid argument\n");
exit_value = EXIT_FAILURE;
goto finish;
@@ -376,7 +376,7 @@ main(int argc, char **argv)
if (opts->output_file) {
if ((output = HDfopen(opts->output_file, "w")) == NULL) {
- fprintf(stderr, "%s: cannot open output file\n", progname);
+ HDfprintf(stderr, "%s: cannot open output file\n", progname);
perror(opts->output_file);
goto finish;
}
@@ -667,7 +667,7 @@ run_test(iotype iot, parameters parms, struct options *opts)
/*
* Show various statistics
*/
- /* Write statistics */
+ /* Write statistics */
/* Print the raw data throughput if desired */
if (opts->print_raw) {
/* accumulate and output the max, min, and average "raw write" times */
@@ -733,7 +733,7 @@ run_test(iotype iot, parameters parms, struct options *opts)
}
if (!parms.h5_write_only) {
- /* Read statistics */
+ /* Read statistics */
/* Print the raw data throughput if desired */
if (opts->print_raw) {
/* accumulate and output the max, min, and average "raw read" times */
@@ -925,7 +925,7 @@ create_comm_world(int num_procs, int *doing_pio)
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
if (num_procs > nprocs) {
- fprintf(stderr,
+ HDfprintf(stderr,
"number of process(%d) must be <= number of processes in MPI_COMM_WORLD(%d)\n",
num_procs, nprocs);
goto error_done;
@@ -936,7 +936,7 @@ create_comm_world(int num_procs, int *doing_pio)
mrc = MPI_Comm_split(MPI_COMM_WORLD, color, myrank, &pio_comm_g);
if (mrc != MPI_SUCCESS) {
- fprintf(stderr, "MPI_Comm_split failed\n");
+ HDfprintf(stderr, "MPI_Comm_split failed\n");
goto error_done;
}
@@ -1065,9 +1065,9 @@ output_report(const char *fmt, ...)
if (myrank == 0) {
va_list ap;
- va_start(ap, fmt);
- vfprintf(output, fmt, ap);
- va_end(ap);
+ HDva_start(ap, fmt);
+ HDvfprintf(output, fmt, ap);
+ HDva_end(ap);
}
}
@@ -1087,10 +1087,10 @@ print_indent(register int indent)
MPI_Comm_rank(pio_comm_g, &myrank);
if (myrank == 0) {
- indent *= TAB_SPACE;
+ indent *= TAB_SPACE;
- for (; indent > 0; --indent)
- fputc(' ', output);
+ for (; indent > 0; --indent)
+ HDfputc(' ', output);
}
}
@@ -1115,11 +1115,11 @@ static void
print_io_api(long io_types)
{
if (io_types & PIO_POSIX)
- HDfprintf(output, "posix ");
+ HDfprintf(output, "posix ");
if (io_types & PIO_MPI)
- HDfprintf(output, "mpiio ");
+ HDfprintf(output, "mpiio ");
if (io_types & PIO_HDF5)
- HDfprintf(output, "phdf5 ");
+ HDfprintf(output, "phdf5 ");
HDfprintf(output, "\n");
}
@@ -1128,7 +1128,7 @@ report_parameters(struct options *opts)
{
int rank = comm_world_rank_g;
- print_version("HDF5 Library"); /* print library version */
+ print_version("HDF5 Library"); /* print library version */
HDfprintf(output, "rank %d: ==== Parameters ====\n", rank);
HDfprintf(output, "rank %d: IO API=", rank);
@@ -1303,9 +1303,9 @@ parse_command_line(int argc, char *argv[])
} else if (!HDstrcasecmp(buf, "posix")) {
cl_opts->io_types |= PIO_POSIX;
} else {
- fprintf(stderr, "pio_perf: invalid --api option %s\n",
+ HDfprintf(stderr, "pio_perf: invalid --api option %s\n",
buf);
- exit(EXIT_FAILURE);
+ HDexit(EXIT_FAILURE);
}
if (*end == '\0')
@@ -1345,17 +1345,17 @@ parse_command_line(int argc, char *argv[])
HDmemset(buf, '\0', sizeof(buf));
for (i = 0; *end != '\0' && *end != ','; ++end)
- if (isalnum(*end) && i < 10)
+ if (HDisalnum(*end) && i < 10)
buf[i++] = *end;
- if (strlen(buf) > 1 || isdigit(buf[0])) {
+ if (HDstrlen(buf) > 1 || HDisdigit(buf[0])) {
size_t j;
for (j = 0; j < 10 && buf[j] != '\0'; ++j)
if (!isdigit(buf[j])) {
- fprintf(stderr, "pio_perf: invalid --debug option %s\n",
+ HDfprintf(stderr, "pio_perf: invalid --debug option %s\n",
buf);
- exit(EXIT_FAILURE);
+ HDexit(EXIT_FAILURE);
}
pio_debug_level = atoi(buf);
@@ -1379,8 +1379,8 @@ parse_command_line(int argc, char *argv[])
cl_opts->verify = TRUE;
break;
default:
- fprintf(stderr, "pio_perf: invalid --debug option %s\n", buf);
- exit(EXIT_FAILURE);
+ HDfprintf(stderr, "pio_perf: invalid --debug option %s\n", buf);
+ HDexit(EXIT_FAILURE);
}
}
@@ -1396,13 +1396,13 @@ parse_command_line(int argc, char *argv[])
cl_opts->num_bpp = parse_size_directive(opt_arg);
break;
case 'F':
- cl_opts->num_files = atoi(opt_arg);
+ cl_opts->num_files = HDatoi(opt_arg);
break;
case 'g':
cl_opts->dim2d = 1;
break;
case 'i':
- cl_opts->num_iters = atoi(opt_arg);
+ cl_opts->num_iters = HDatoi(opt_arg);
break;
case 'I':
cl_opts->interleaved = 1;
@@ -1411,10 +1411,10 @@ parse_command_line(int argc, char *argv[])
cl_opts->output_file = opt_arg;
break;
case 'p':
- cl_opts->min_num_procs = atoi(opt_arg);
+ cl_opts->min_num_procs = HDatoi(opt_arg);
break;
case 'P':
- cl_opts->max_num_procs = atoi(opt_arg);
+ cl_opts->max_num_procs = HDatoi(opt_arg);
break;
case 'T':
cl_opts->h5_threshold = parse_size_directive(opt_arg);
@@ -1432,7 +1432,7 @@ parse_command_line(int argc, char *argv[])
case '?':
default:
usage(progname);
- free(cl_opts);
+ HDfree(cl_opts);
return NULL;
}
}
@@ -1526,8 +1526,8 @@ parse_size_directive(const char *size)
s *= ONE_GB;
break;
default:
- fprintf(stderr, "Illegal size specifier '%c'\n", *endptr);
- exit(EXIT_FAILURE);
+ HDfprintf(stderr, "Illegal size specifier '%c'\n", *endptr);
+ HDexit(EXIT_FAILURE);
}
}
@@ -1540,7 +1540,7 @@ parse_size_directive(const char *size)
* Return: Nothing
* Programmer: Bill Wendling, 31. October 2001
* Modifications:
- * Added 2D testing (Christian Chilan, 10. August 2005)
+ * Added 2D testing (Christian Chilan, 10. August 2005)
*/
static void
usage(const char *prog)
@@ -1550,125 +1550,125 @@ usage(const char *prog)
MPI_Comm_rank(pio_comm_g, &myrank);
if (myrank == 0) {
- print_version(prog);
- printf("usage: %s [OPTIONS]\n", prog);
- printf(" OPTIONS\n");
- printf(" -h, --help Print a usage message and exit\n");
- printf(" -a S, --align=S Alignment of objects in HDF5 file [default: 1]\n");
- printf(" -A AL, --api=AL Which APIs to test [default: all of them]\n");
+ print_version(prog);
+ HDprintf("usage: %s [OPTIONS]\n", prog);
+ HDprintf(" OPTIONS\n");
+ HDprintf(" -h, --help Print a usage message and exit\n");
+ HDprintf(" -a S, --align=S Alignment of objects in HDF5 file [default: 1]\n");
+ HDprintf(" -A AL, --api=AL Which APIs to test [default: all of them]\n");
#if 0
- printf(" -b, --binary The elusive binary option\n");
+ HDprintf(" -b, --binary The elusive binary option\n");
#endif /* 0 */
- printf(" -B S, --block-size=S Block size within transfer buffer\n");
- printf(" (see below for description)\n");
- printf(" [default: half the number of bytes per process\n");
- printf(" per dataset]\n");
- printf(" -c, --chunk Create HDF5 datasets using chunked storage\n");
- printf(" [default: contiguous storage]\n");
- printf(" -C, --collective Use collective I/O for MPI and HDF5 APIs\n");
- printf(" [default: independent I/O)\n");
- printf(" -d N, --num-dsets=N Number of datasets per file [default: 1]\n");
- printf(" -D DL, --debug=DL Indicate the debugging level\n");
- printf(" [default: no debugging]\n");
- printf(" -e S, --num-bytes=S Number of bytes per process per dataset\n");
- printf(" (see below for description)\n");
- printf(" [default: 256K for 1D, 8K for 2D]\n");
- printf(" -F N, --num-files=N Number of files [default: 1]\n");
- printf(" -g, --geometry Use 2D geometry [default: 1D geometry]\n");
- printf(" -i N, --num-iterations=N Number of iterations to perform [default: 1]\n");
- printf(" -I, --interleaved Interleaved access pattern\n");
- printf(" (see below for example)\n");
- printf(" [default: Contiguous access pattern]\n");
- printf(" -o F, --output=F Output raw data into file F [default: none]\n");
- printf(" -p N, --min-num-processes=N Minimum number of processes to use [default: 1]\n");
- printf(" -P N, --max-num-processes=N Maximum number of processes to use\n");
- printf(" [default: all MPI_COMM_WORLD processes ]\n");
- printf(" -T S, --threshold=S Threshold for alignment of objects in HDF5 file\n");
- printf(" [default: 1]\n");
- printf(" -w, --write-only Perform write tests not the read tests\n");
- printf(" -x S, --min-xfer-size=S Minimum transfer buffer size\n");
- printf(" (see below for description)\n");
- printf(" [default: half the number of bytes per process\n");
- printf(" per dataset]\n");
- printf(" -X S, --max-xfer-size=S Maximum transfer buffer size\n");
- printf(" [default: the number of bytes per process per\n");
- printf(" dataset]\n");
- printf("\n");
- printf(" F - is a filename.\n");
- printf(" N - is an integer >=0.\n");
- printf(" S - is a size specifier, an integer >=0 followed by a size indicator:\n");
- printf(" K - Kilobyte (%d)\n", ONE_KB);
- printf(" M - Megabyte (%d)\n", ONE_MB);
- printf(" G - Gigabyte (%d)\n", ONE_GB);
- printf("\n");
- printf(" Example: '37M' is 37 megabytes or %d bytes\n", 37*ONE_MB);
- printf("\n");
- printf(" AL - is an API list. Valid values are:\n");
- printf(" phdf5 - Parallel HDF5\n");
- printf(" mpiio - MPI-I/O\n");
- printf(" posix - POSIX\n");
- printf("\n");
- printf(" Example: --api=mpiio,phdf5\n");
- printf("\n");
- printf(" Dataset size:\n");
- printf(" Depending on the selected geometry, each test dataset is either a linear\n");
- printf(" array of size bytes-per-process * num-processes, or a square array of size\n");
- printf(" (bytes-per-process * num-processes) x (bytes-per-process * num-processes).\n");
- printf("\n");
- printf(" Block size vs. Transfer buffer size:\n");
- printf(" buffer-size controls the size of the memory buffer, which is broken into\n");
- printf(" blocks and written to the file. Depending on the selected geometry, each\n");
- printf(" block can be a linear array of size block-size or a square array of size\n");
- printf(" block-size x block-size. The arrangement in which blocks are written is\n");
- printf(" determined by the access pattern.\n");
- printf("\n");
- printf(" In 1D geometry, the transfer buffer is a linear array of size buffer-size.\n");
- printf(" In 2D geometry, it is a rectangular array of size block-size x buffer-size\n");
- printf(" or buffer-size x block-size if interleaved pattern is selected.\n");
- printf("\n");
- printf(" Interleaved and Contiguous patterns in 1D geometry:\n");
- printf(" When contiguous access pattern is chosen, the dataset is evenly divided\n");
- printf(" into num-processes regions and each process writes data to its own region.\n");
- printf(" When interleaved blocks are written to a dataset, space for the first\n");
- printf(" block of the first process is allocated in the dataset, then space is\n");
- printf(" allocated for the first block of the second process, etc. until space is\n");
- printf(" allocated for the first block of each process, then space is allocated for\n");
- printf(" the second block of the first process, the second block of the second\n");
- printf(" process, etc.\n");
- printf("\n");
- printf(" For example, with a 3 process run, 512KB bytes-per-process, 256KB transfer\n");
- printf(" buffer size, and 64KB block size, each process must issue 2 transfer\n");
- printf(" requests to complete access to the dataset.\n");
- printf(" Contiguous blocks of the first transfer request are written like so:\n");
- printf(" 1111----2222----3333----\n");
- printf(" Interleaved blocks of the first transfer request are written like so:\n");
- printf(" 123123123123------------\n");
- printf(" The actual number of I/O operations involved in a transfer request\n");
- printf(" depends on the access pattern and communication mode.\n");
- printf(" When using independent I/O with interleaved pattern, each process\n");
- printf(" performs 4 small non-contiguous I/O operations per transfer request.\n");
- printf(" If collective I/O is turned on, the combined content of the buffers of\n");
- printf(" the 3 processes will be written using one collective I/O operation\n");
- printf(" per transfer request.\n");
- printf("\n");
- printf(" For information about access patterns in 2D geometry, please refer to the\n");
- printf(" HDF5 Reference Manual.\n");
- printf("\n");
- printf(" DL - is a list of debugging flags. Valid values are:\n");
- printf(" 1 - Minimal\n");
- printf(" 2 - Not quite everything\n");
- printf(" 3 - Everything\n");
- printf(" 4 - The kitchen sink\n");
- printf(" r - Raw data I/O throughput information\n");
- printf(" t - Times as well as throughputs\n");
- printf(" v - Verify data correctness\n");
- printf("\n");
- printf(" Example: --debug=2,r,t\n");
- printf("\n");
- printf(" Environment variables:\n");
- printf(" HDF5_NOCLEANUP Do not remove data files if set [default remove]\n");
- printf(" HDF5_MPI_INFO MPI INFO object key=value separated by ;\n");
- printf(" HDF5_PARAPREFIX Paralllel data files prefix\n");
+ HDprintf(" -B S, --block-size=S Block size within transfer buffer\n");
+ HDprintf(" (see below for description)\n");
+ HDprintf(" [default: half the number of bytes per process\n");
+ HDprintf(" per dataset]\n");
+ HDprintf(" -c, --chunk Create HDF5 datasets using chunked storage\n");
+ HDprintf(" [default: contiguous storage]\n");
+ HDprintf(" -C, --collective Use collective I/O for MPI and HDF5 APIs\n");
+ HDprintf(" [default: independent I/O)\n");
+ HDprintf(" -d N, --num-dsets=N Number of datasets per file [default: 1]\n");
+ HDprintf(" -D DL, --debug=DL Indicate the debugging level\n");
+ HDprintf(" [default: no debugging]\n");
+ HDprintf(" -e S, --num-bytes=S Number of bytes per process per dataset\n");
+ HDprintf(" (see below for description)\n");
+ HDprintf(" [default: 256K for 1D, 8K for 2D]\n");
+ HDprintf(" -F N, --num-files=N Number of files [default: 1]\n");
+ HDprintf(" -g, --geometry Use 2D geometry [default: 1D geometry]\n");
+ HDprintf(" -i N, --num-iterations=N Number of iterations to perform [default: 1]\n");
+ HDprintf(" -I, --interleaved Interleaved access pattern\n");
+ HDprintf(" (see below for example)\n");
+ HDprintf(" [default: Contiguous access pattern]\n");
+ HDprintf(" -o F, --output=F Output raw data into file F [default: none]\n");
+ HDprintf(" -p N, --min-num-processes=N Minimum number of processes to use [default: 1]\n");
+ HDprintf(" -P N, --max-num-processes=N Maximum number of processes to use\n");
+ HDprintf(" [default: all MPI_COMM_WORLD processes ]\n");
+ HDprintf(" -T S, --threshold=S Threshold for alignment of objects in HDF5 file\n");
+ HDprintf(" [default: 1]\n");
+ HDprintf(" -w, --write-only Perform write tests not the read tests\n");
+ HDprintf(" -x S, --min-xfer-size=S Minimum transfer buffer size\n");
+ HDprintf(" (see below for description)\n");
+ HDprintf(" [default: half the number of bytes per process\n");
+ HDprintf(" per dataset]\n");
+ HDprintf(" -X S, --max-xfer-size=S Maximum transfer buffer size\n");
+ HDprintf(" [default: the number of bytes per process per\n");
+ HDprintf(" dataset]\n");
+ HDprintf("\n");
+ HDprintf(" F - is a filename.\n");
+ HDprintf(" N - is an integer >=0.\n");
+ HDprintf(" S - is a size specifier, an integer >=0 followed by a size indicator:\n");
+ HDprintf(" K - Kilobyte (%d)\n", ONE_KB);
+ HDprintf(" M - Megabyte (%d)\n", ONE_MB);
+ HDprintf(" G - Gigabyte (%d)\n", ONE_GB);
+ HDprintf("\n");
+ HDprintf(" Example: '37M' is 37 megabytes or %d bytes\n", 37*ONE_MB);
+ HDprintf("\n");
+ HDprintf(" AL - is an API list. Valid values are:\n");
+ HDprintf(" phdf5 - Parallel HDF5\n");
+ HDprintf(" mpiio - MPI-I/O\n");
+ HDprintf(" posix - POSIX\n");
+ HDprintf("\n");
+ HDprintf(" Example: --api=mpiio,phdf5\n");
+ HDprintf("\n");
+ HDprintf(" Dataset size:\n");
+ HDprintf(" Depending on the selected geometry, each test dataset is either a linear\n");
+ HDprintf(" array of size bytes-per-process * num-processes, or a square array of size\n");
+ HDprintf(" (bytes-per-process * num-processes) x (bytes-per-process * num-processes).\n");
+ HDprintf("\n");
+ HDprintf(" Block size vs. Transfer buffer size:\n");
+ HDprintf(" buffer-size controls the size of the memory buffer, which is broken into\n");
+ HDprintf(" blocks and written to the file. Depending on the selected geometry, each\n");
+ HDprintf(" block can be a linear array of size block-size or a square array of size\n");
+ HDprintf(" block-size x block-size. The arrangement in which blocks are written is\n");
+ HDprintf(" determined by the access pattern.\n");
+ HDprintf("\n");
+ HDprintf(" In 1D geometry, the transfer buffer is a linear array of size buffer-size.\n");
+ HDprintf(" In 2D geometry, it is a rectangular array of size block-size x buffer-size\n");
+ HDprintf(" or buffer-size x block-size if interleaved pattern is selected.\n");
+ HDprintf("\n");
+ HDprintf(" Interleaved and Contiguous patterns in 1D geometry:\n");
+ HDprintf(" When contiguous access pattern is chosen, the dataset is evenly divided\n");
+ HDprintf(" into num-processes regions and each process writes data to its own region.\n");
+ HDprintf(" When interleaved blocks are written to a dataset, space for the first\n");
+ HDprintf(" block of the first process is allocated in the dataset, then space is\n");
+ HDprintf(" allocated for the first block of the second process, etc. until space is\n");
+ HDprintf(" allocated for the first block of each process, then space is allocated for\n");
+ HDprintf(" the second block of the first process, the second block of the second\n");
+ HDprintf(" process, etc.\n");
+ HDprintf("\n");
+ HDprintf(" For example, with a 3 process run, 512KB bytes-per-process, 256KB transfer\n");
+ HDprintf(" buffer size, and 64KB block size, each process must issue 2 transfer\n");
+ HDprintf(" requests to complete access to the dataset.\n");
+ HDprintf(" Contiguous blocks of the first transfer request are written like so:\n");
+ HDprintf(" 1111----2222----3333----\n");
+ HDprintf(" Interleaved blocks of the first transfer request are written like so:\n");
+ HDprintf(" 123123123123------------\n");
+ HDprintf(" The actual number of I/O operations involved in a transfer request\n");
+ HDprintf(" depends on the access pattern and communication mode.\n");
+ HDprintf(" When using independent I/O with interleaved pattern, each process\n");
+ HDprintf(" performs 4 small non-contiguous I/O operations per transfer request.\n");
+ HDprintf(" If collective I/O is turned on, the combined content of the buffers of\n");
+ HDprintf(" the 3 processes will be written using one collective I/O operation\n");
+ HDprintf(" per transfer request.\n");
+ HDprintf("\n");
+ HDprintf(" For information about access patterns in 2D geometry, please refer to the\n");
+ HDprintf(" HDF5 Reference Manual.\n");
+ HDprintf("\n");
+ HDprintf(" DL - is a list of debugging flags. Valid values are:\n");
+ HDprintf(" 1 - Minimal\n");
+ HDprintf(" 2 - Not quite everything\n");
+ HDprintf(" 3 - Everything\n");
+ HDprintf(" 4 - The kitchen sink\n");
+ HDprintf(" r - Raw data I/O throughput information\n");
+ HDprintf(" t - Times as well as throughputs\n");
+ HDprintf(" v - Verify data correctness\n");
+ HDprintf("\n");
+ HDprintf(" Example: --debug=2,r,t\n");
+ HDprintf("\n");
+ HDprintf(" Environment variables:\n");
+ HDprintf(" HDF5_NOCLEANUP Do not remove data files if set [default remove]\n");
+ HDprintf(" HDF5_MPI_INFO MPI INFO object key=value separated by ;\n");
+ HDprintf(" HDF5_PARAPREFIX Paralllel data files prefix\n");
fflush(stdout);
} /* end if */
} /* end usage() */
@@ -1685,7 +1685,7 @@ usage(const char *prog)
int
main(void)
{
- printf("No parallel IO performance because parallel is not configured\n");
+ HDprintf("No parallel IO performance because parallel is not configured\n");
return EXIT_SUCCESS;
} /* end main */
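output_report() in pio_perf.c (and its sio_perf.c counterpart) is a thin variadic forwarder, which is why the hunks above only needed to swap va_start/vfprintf/va_end for their HD-prefixed forms. A self-contained sketch of the pattern, assuming output is stdout:

    #include <stdarg.h>
    #include <stdio.h>

    static FILE *output;

    /* Forward a printf-style argument list to the chosen output stream. */
    static void output_report(const char *fmt, ...)
    {
        va_list ap;

        va_start(ap, fmt);
        vfprintf(output, fmt, ap);
        va_end(ap);
    }

    int main(void)
    {
        output = stdout;
        output_report("wrote %d datasets in %.3f s\n", 4, 0.125);
        return 0;
    }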
diff --git a/tools/test/perform/pio_standalone.h b/tools/test/perform/pio_standalone.h
index a40cff8..e6db2e8 100644
--- a/tools/test/perform/pio_standalone.h
+++ b/tools/test/perform/pio_standalone.h
@@ -294,6 +294,7 @@ H5_DLL int HDfprintf (FILE *stream, const char *fmt, ...);
#define HDpipe(F) pipe(F)
#define HDpow(X,Y) pow(X,Y)
/* printf() variable arguments */
+#define HDprintf(...) HDfprintf(stdout, __VA_ARGS__)
#define HDputc(C,F) putc(C,F)
#define HDputchar(C) putchar(C)
#define HDputs(S) puts(S)
@@ -355,6 +356,7 @@ H5_DLL int c99_snprintf(char* str, size_t size, const char* format, ...);
#define HDsnprintf snprintf /*varargs*/
#endif
/* sprintf() variable arguments */
+#define HDsprintf sprintf /*varargs*/
#define HDsqrt(X) sqrt(X)
#ifdef H5_HAVE_RAND_R
H5_DLL void HDsrand(unsigned int seed);
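The two macros added above are what allow the rest of the patch to replace bare printf/sprintf calls in the standalone tools build. The sketch below shows how the HD wrappers simply forward to the C library; the #defines here are simplified assumptions (the real HDfprintf is a custom function with extra format specifiers), so only the forwarding idea carries over.

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-ins for the pio_standalone.h / sio_standalone.h wrappers. */
    #define HDfprintf     fprintf
    #define HDprintf(...) HDfprintf(stdout, __VA_ARGS__)
    #define HDsprintf     sprintf
    #define HDexit(N)     exit(N)

    int main(void)
    {
        char name[32];

        HDsprintf(name, "Dataset_%d", 3);          /* formats into name[] */
        HDprintf("created %s\n", name);            /* routed to stdout */
        HDfprintf(stderr, "no errors detected\n"); /* routed to stderr */
        HDexit(EXIT_SUCCESS);
    }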
diff --git a/tools/test/perform/sio_engine.c b/tools/test/perform/sio_engine.c
index aefd2b3..488f575 100644
--- a/tools/test/perform/sio_engine.c
+++ b/tools/test/perform/sio_engine.c
@@ -41,7 +41,7 @@
#define GOTOERROR(errcode) { ret_code = errcode; goto done; }
#define ERRMSG(mesg) { \
- fprintf(stderr, "*** Assertion failed (%s) at line %4d in %s\n", \
+ HDfprintf(stderr, "*** Assertion failed (%s) at line %4d in %s\n", \
mesg, (int)__LINE__, __FILE__); \
}
@@ -198,7 +198,7 @@ do_sio(parameters param, results *res)
if (sio_debug_level >= 4)
/* output all of the times for all iterations */
- fprintf(output, "Timer details:\n");
+ HDfprintf(output, "Timer details:\n");
/*
* Write performance measurement
@@ -490,7 +490,7 @@ do_write(results *res, file_descr *fd, parameters *parms, void *buffer)
/* Create the dataset transfer property list */
h5dxpl = H5Pcreate(H5P_DATASET_XFER);
if (h5dxpl < 0) {
- fprintf(stderr, "HDF5 Property List Create failed\n");
+ HDfprintf(stderr, "HDF5 Property List Create failed\n");
GOTOERROR(FAIL);
}
@@ -512,7 +512,7 @@ do_write(results *res, file_descr *fd, parameters *parms, void *buffer)
h5dcpl = H5Pcreate(H5P_DATASET_CREATE);
if (h5dcpl < 0) {
- fprintf(stderr, "HDF5 Property List Create failed\n");
+ HDfprintf(stderr, "HDF5 Property List Create failed\n");
GOTOERROR(FAIL);
}
@@ -520,12 +520,12 @@ do_write(results *res, file_descr *fd, parameters *parms, void *buffer)
/* Set the chunk size to be the same as the buffer size */
hrc = H5Pset_chunk(h5dcpl, rank, h5chunk);
if (hrc < 0) {
- fprintf(stderr, "HDF5 Property List Set failed\n");
+ HDfprintf(stderr, "HDF5 Property List Set failed\n");
GOTOERROR(FAIL);
} /* end if */
} /* end if */
- sprintf(dname, "Dataset_%ld", (unsigned long)parms->num_bytes);
+ HDsprintf(dname, "Dataset_%ld", (unsigned long)parms->num_bytes);
h5ds_id = H5Dcreate2(fd->h5fd, dname, ELMT_H5_TYPE,
h5dset_space_id, H5P_DEFAULT, h5dcpl, H5P_DEFAULT);
@@ -556,7 +556,7 @@ do_write(results *res, file_descr *fd, parameters *parms, void *buffer)
hrc = dset_write(rank-1, fd, parms, buffer);
if (hrc < 0) {
- fprintf(stderr, "Error in dataset write\n");
+ HDfprintf(stderr, "Error in dataset write\n");
GOTOERROR(FAIL);
}
@@ -571,7 +571,7 @@ do_write(results *res, file_descr *fd, parameters *parms, void *buffer)
hrc = H5Dclose(h5ds_id);
if (hrc < 0) {
- fprintf(stderr, "HDF5 Dataset Close failed\n");
+ HDfprintf(stderr, "HDF5 Dataset Close failed\n");
GOTOERROR(FAIL);
}
@@ -584,7 +584,7 @@ done:
if (h5dset_space_id != -1) {
hrc = H5Sclose(h5dset_space_id);
if (hrc < 0){
- fprintf(stderr, "HDF5 Dataset Space Close failed\n");
+ HDfprintf(stderr, "HDF5 Dataset Space Close failed\n");
ret_code = FAIL;
} else {
h5dset_space_id = -1;
@@ -594,7 +594,7 @@ done:
if (h5mem_space_id != -1) {
hrc = H5Sclose(h5mem_space_id);
if (hrc < 0) {
- fprintf(stderr, "HDF5 Memory Space Close failed\n");
+ HDfprintf(stderr, "HDF5 Memory Space Close failed\n");
ret_code = FAIL;
} else {
h5mem_space_id = -1;
@@ -604,7 +604,7 @@ done:
if (h5dxpl != -1) {
hrc = H5Pclose(h5dxpl);
if (hrc < 0) {
- fprintf(stderr, "HDF5 Dataset Transfer Property List Close failed\n");
+ HDfprintf(stderr, "HDF5 Dataset Transfer Property List Close failed\n");
ret_code = FAIL;
} else {
h5dxpl = -1;
@@ -837,7 +837,7 @@ do_read(results *res, file_descr *fd, parameters *parms, void *buffer)
/* Create the dataset transfer property list */
h5dxpl = H5Pcreate(H5P_DATASET_XFER);
if (h5dxpl < 0) {
- fprintf(stderr, "HDF5 Property List Create failed\n");
+ HDfprintf(stderr, "HDF5 Property List Create failed\n");
GOTOERROR(FAIL);
}
break;
@@ -856,7 +856,7 @@ do_read(results *res, file_descr *fd, parameters *parms, void *buffer)
break;
case HDF5:
- sprintf(dname, "Dataset_%ld", (long)parms->num_bytes);
+ HDsprintf(dname, "Dataset_%ld", (long)parms->num_bytes);
h5ds_id = H5Dopen2(fd->h5fd, dname, H5P_DEFAULT);
if (h5ds_id < 0) {
HDfprintf(stderr, "HDF5 Dataset open failed\n");
@@ -876,7 +876,7 @@ do_read(results *res, file_descr *fd, parameters *parms, void *buffer)
hrc = dset_read(rank-1, fd, parms, buffer, buffer2);
if (hrc < 0) {
- fprintf(stderr, "Error in dataset read\n");
+ HDfprintf(stderr, "Error in dataset read\n");
GOTOERROR(FAIL);
}
@@ -890,7 +890,7 @@ do_read(results *res, file_descr *fd, parameters *parms, void *buffer)
hrc = H5Dclose(h5ds_id);
if (hrc < 0) {
- fprintf(stderr, "HDF5 Dataset Close failed\n");
+ HDfprintf(stderr, "HDF5 Dataset Close failed\n");
GOTOERROR(FAIL);
}
@@ -903,7 +903,7 @@ done:
if (h5dset_space_id != -1) {
hrc = H5Sclose(h5dset_space_id);
if (hrc < 0){
- fprintf(stderr, "HDF5 Dataset Space Close failed\n");
+ HDfprintf(stderr, "HDF5 Dataset Space Close failed\n");
ret_code = FAIL;
} else {
h5dset_space_id = -1;
@@ -913,7 +913,7 @@ done:
if (h5mem_space_id != -1) {
hrc = H5Sclose(h5mem_space_id);
if (hrc < 0) {
- fprintf(stderr, "HDF5 Memory Space Close failed\n");
+ HDfprintf(stderr, "HDF5 Memory Space Close failed\n");
ret_code = FAIL;
} else {
h5mem_space_id = -1;
@@ -923,7 +923,7 @@ done:
if (h5dxpl != -1) {
hrc = H5Pclose(h5dxpl);
if (hrc < 0) {
- fprintf(stderr, "HDF5 Dataset Transfer Property List Close failed\n");
+ HDfprintf(stderr, "HDF5 Dataset Transfer Property List Close failed\n");
ret_code = FAIL;
} else {
h5dxpl = -1;
@@ -1089,7 +1089,7 @@ do_fopen(parameters *param, char *fname, file_descr *fd /*out*/, int flags)
fapl = set_vfd(param);
if (fapl < 0) {
- fprintf(stderr, "HDF5 Property List Create failed\n");
+ HDfprintf(stderr, "HDF5 Property List Create failed\n");
GOTOERROR(FAIL);
}
@@ -1110,7 +1110,7 @@ do_fopen(parameters *param, char *fname, file_descr *fd /*out*/, int flags)
if (fd->h5fd < 0) {
- fprintf(stderr, "HDF5 File Create failed(%s)\n", fname);
+ HDfprintf(stderr, "HDF5 File Create failed(%s)\n", fname);
GOTOERROR(FAIL);
}
break;
@@ -1176,7 +1176,7 @@ set_vfd(parameters *param)
HDassert(HDstrlen(multi_letters)==H5FD_MEM_NTYPES);
for (mt=H5FD_MEM_DEFAULT; mt<H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t,mt)) {
memb_fapl[mt] = H5P_DEFAULT;
- sprintf(sv[mt], "%%s-%c.h5", multi_letters[mt]);
+ HDsprintf(sv[mt], "%%s-%c.h5", multi_letters[mt]);
memb_name[mt] = sv[mt];
memb_addr[mt] = (haddr_t)MAX(mt - 1,0) * (HADDR_MAX / 10);
}
@@ -1225,7 +1225,7 @@ do_fclose(iotype iot, file_descr *fd /*out*/)
rc = POSIXCLOSE(fd->posixfd);
if (rc != 0){
- fprintf(stderr, "POSIX File Close failed\n");
+ HDfprintf(stderr, "POSIX File Close failed\n");
GOTOERROR(FAIL);
}
@@ -1236,7 +1236,7 @@ do_fclose(iotype iot, file_descr *fd /*out*/)
hrc = H5Fclose(fd->h5fd);
if (hrc < 0) {
- fprintf(stderr, "HDF5 File Close failed\n");
+ HDfprintf(stderr, "HDF5 File Close failed\n");
GOTOERROR(FAIL);
}
diff --git a/tools/test/perform/sio_perf.c b/tools/test/perform/sio_perf.c
index 731be47..ff9e2b4 100644
--- a/tools/test/perform/sio_perf.c
+++ b/tools/test/perform/sio_perf.c
@@ -281,14 +281,14 @@ struct options {
int buf_rank; /* Rank */
int order_rank; /* Rank */
int chk_rank; /* Rank */
- int print_times; /* print times as well as throughputs */
- int print_raw; /* print raw data throughput info */
+ int print_times; /* print times as well as throughputs */
+ int print_raw; /* print raw data throughput info */
hsize_t h5_alignment; /* alignment in HDF5 file */
hsize_t h5_threshold; /* threshold for alignment in HDF5 file */
- int h5_use_chunks; /* Make HDF5 dataset chunked */
- int h5_write_only; /* Perform the write tests only */
- int h5_extendable; /* Perform the write tests only */
- int verify; /* Verify data correctness */
+ int h5_use_chunks; /* Make HDF5 dataset chunked */
+ int h5_write_only; /* Perform the write tests only */
+ int h5_extendable; /* Make HDF5 dataset extendable */
+ int verify; /* Verify data correctness */
vfdtype vfd; /* File driver */
size_t page_buffer_size;
size_t page_size;
@@ -333,7 +333,7 @@ main(int argc, char **argv)
/* Initialize h5tools lib */
h5tools_init();
#endif
-
+
output = stdout;
opts = parse_command_line(argc, argv);
@@ -345,8 +345,8 @@ main(int argc, char **argv)
if (opts->output_file) {
if ((output = HDfopen(opts->output_file, "w")) == NULL) {
- fprintf(stderr, "%s: cannot open output file\n", progname);
- perror(opts->output_file);
+ HDfprintf(stderr, "%s: cannot open output file\n", progname);
+ HDperror(opts->output_file);
goto finish;
}
}
@@ -356,7 +356,7 @@ main(int argc, char **argv)
run_test_loop(opts);
finish:
- free(opts);
+ HDfree(opts);
return exit_value;
}
@@ -385,8 +385,8 @@ static void
run_test_loop(struct options *opts)
{
parameters parms;
- int i;
- size_t buf_bytes;
+ int i;
+ size_t buf_bytes;
/* load options into parameter structure */
parms.num_files = opts->num_files;
@@ -552,7 +552,7 @@ run_test(iotype iot, parameters parms, struct options *opts)
/*
* Show various statistics
*/
- /* Write statistics */
+ /* Write statistics */
/* Print the raw data throughput if desired */
if (opts->print_raw) {
/* accumulate and output the max, min, and average "raw write" times */
@@ -598,7 +598,7 @@ run_test(iotype iot, parameters parms, struct options *opts)
output_results(opts,"Write Open-Close",write_gross_mm_table,parms.num_iters,raw_size);
if (!parms.h5_write_only) {
- /* Read statistics */
+ /* Read statistics */
/* Print the raw data throughput if desired */
if (opts->print_raw) {
/* accumulate and output the max, min, and average "raw read" times */
@@ -647,16 +647,16 @@ run_test(iotype iot, parameters parms, struct options *opts)
}
/* clean up our mess */
- free(write_sys_mm_table);
- free(write_mm_table);
- free(write_gross_mm_table);
- free(write_raw_mm_table);
+ HDfree(write_sys_mm_table);
+ HDfree(write_mm_table);
+ HDfree(write_gross_mm_table);
+ HDfree(write_raw_mm_table);
if (!parms.h5_write_only) {
- free(read_sys_mm_table);
- free(read_mm_table);
- free(read_gross_mm_table);
- free(read_raw_mm_table);
+ HDfree(read_sys_mm_table);
+ HDfree(read_mm_table);
+ HDfree(read_gross_mm_table);
+ HDfree(read_raw_mm_table);
}
return ret_value;
@@ -790,9 +790,9 @@ output_report(const char *fmt, ...)
{
va_list ap;
- va_start(ap, fmt);
- vfprintf(output, fmt, ap);
- va_end(ap);
+ HDva_start(ap, fmt);
+ HDvfprintf(output, fmt, ap);
+ HDva_end(ap);
}
/*
@@ -806,10 +806,10 @@ output_report(const char *fmt, ...)
static void
print_indent(register int indent)
{
- indent *= TAB_SPACE;
+ indent *= TAB_SPACE;
- for (; indent > 0; --indent)
- fputc(' ', output);
+ for (; indent > 0; --indent)
+ HDfputc(' ', output);
}
static void
@@ -833,9 +833,9 @@ static void
print_io_api(long io_types)
{
if (io_types & SIO_POSIX)
- HDfprintf(output, "posix ");
+ HDfprintf(output, "posix ");
if (io_types & SIO_HDF5)
- HDfprintf(output, "hdf5 ");
+ HDfprintf(output, "hdf5 ");
HDfprintf(output, "\n");
}
@@ -845,7 +845,7 @@ report_parameters(struct options *opts)
int i, rank;
rank = opts->dset_rank;
- print_version("HDF5 Library"); /* print library version */
+ print_version("HDF5 Library"); /* print library version */
HDfprintf(output, "==== Parameters ====\n");
HDfprintf(output, "IO API=");
@@ -1004,7 +1004,7 @@ parse_command_line(int argc, char *argv[])
HDmemset(buf, '\0', sizeof(buf));
for (i = 0; *end != '\0' && *end != ','; ++end)
- if (isalnum(*end) && i < 10)
+ if (HDisalnum(*end) && i < 10)
buf[i++] = *end;
if (!HDstrcasecmp(buf, "hdf5")) {
@@ -1012,9 +1012,9 @@ parse_command_line(int argc, char *argv[])
} else if (!HDstrcasecmp(buf, "posix")) {
cl_opts->io_types |= SIO_POSIX;
} else {
- fprintf(stderr, "sio_perf: invalid --api option %s\n",
+ HDfprintf(stderr, "sio_perf: invalid --api option %s\n",
buf);
- exit(EXIT_FAILURE);
+ HDexit(EXIT_FAILURE);
}
if (*end == '\0')
@@ -1043,7 +1043,7 @@ parse_command_line(int argc, char *argv[])
HDmemset(buf, '\0', sizeof(buf));
for (i = 0; *end != '\0' && *end != ','; ++end)
- if (isalnum(*end) && i < 10)
+ if (HDisalnum(*end) && i < 10)
buf[i++] = *end;
cl_opts->chk_size[j] = parse_size_directive(buf);
@@ -1071,17 +1071,17 @@ parse_command_line(int argc, char *argv[])
HDmemset(buf, '\0', sizeof(buf));
for (i = 0; *end != '\0' && *end != ','; ++end)
- if (isalnum(*end) && i < 10)
+ if (HDisalnum(*end) && i < 10)
buf[i++] = *end;
- if (strlen(buf) > 1 || isdigit(buf[0])) {
+ if (HDstrlen(buf) > 1 || HDisdigit(buf[0])) {
size_t j;
for (j = 0; j < 10 && buf[j] != '\0'; ++j)
- if (!isdigit(buf[j])) {
- fprintf(stderr, "sio_perf: invalid --debug option %s\n",
+ if (!HDisdigit(buf[j])) {
+ HDfprintf(stderr, "sio_perf: invalid --debug option %s\n",
buf);
- exit(EXIT_FAILURE);
+ HDexit(EXIT_FAILURE);
}
sio_debug_level = atoi(buf);
@@ -1105,8 +1105,8 @@ parse_command_line(int argc, char *argv[])
cl_opts->verify = TRUE;
break;
default:
- fprintf(stderr, "sio_perf: invalid --debug option %s\n", buf);
- exit(EXIT_FAILURE);
+ HDfprintf(stderr, "sio_perf: invalid --debug option %s\n", buf);
+ HDexit(EXIT_FAILURE);
}
}
@@ -1129,7 +1129,7 @@ parse_command_line(int argc, char *argv[])
HDmemset(buf, '\0', sizeof(buf));
for (i = 0; *end != '\0' && *end != ','; ++end)
- if (isalnum(*end) && i < 10)
+ if (HDisalnum(*end) && i < 10)
buf[i++] = *end;
cl_opts->dset_size[j] = parse_size_directive(buf);
@@ -1147,7 +1147,7 @@ parse_command_line(int argc, char *argv[])
break;
case 'i':
- cl_opts->num_iters = atoi(opt_arg);
+ cl_opts->num_iters = HDatoi(opt_arg);
break;
case 'o':
cl_opts->output_file = opt_arg;
@@ -1171,9 +1171,9 @@ parse_command_line(int argc, char *argv[])
} else if (!HDstrcasecmp(opt_arg, "direct")) {
cl_opts->vfd=direct;
} else {
- fprintf(stderr, "sio_perf: invalid --api option %s\n",
+ HDfprintf(stderr, "sio_perf: invalid --api option %s\n",
opt_arg);
- exit(EXIT_FAILURE);
+ HDexit(EXIT_FAILURE);
}
break;
case 'w':
@@ -1193,7 +1193,7 @@ parse_command_line(int argc, char *argv[])
HDmemset(buf, '\0', sizeof(buf));
for (i = 0; *end != '\0' && *end != ','; ++end)
- if (isalnum(*end) && i < 10)
+ if (HDisalnum(*end) && i < 10)
buf[i++] = *end;
cl_opts->buf_size[j] = parse_size_directive(buf);
@@ -1221,7 +1221,7 @@ parse_command_line(int argc, char *argv[])
HDmemset(buf, '\0', sizeof(buf));
for (i = 0; *end != '\0' && *end != ','; ++end)
- if (isalnum(*end) && i < 10)
+ if (HDisalnum(*end) && i < 10)
buf[i++] = *end;
cl_opts->order[j] = (int)parse_size_directive(buf);
@@ -1243,7 +1243,7 @@ parse_command_line(int argc, char *argv[])
case '?':
default:
usage(progname);
- free(cl_opts);
+ HDfree(cl_opts);
return NULL;
}
}
@@ -1339,8 +1339,8 @@ parse_size_directive(const char *size)
break;
default:
- fprintf(stderr, "Illegal size specifier '%c'\n", *endptr);
- exit(EXIT_FAILURE);
+ HDfprintf(stderr, "Illegal size specifier '%c'\n", *endptr);
+ HDexit(EXIT_FAILURE);
}
}
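(The parse_size_directive() hunk above only touches its error path; for context, the size strings accepted by these options use the K/M/G suffix scheme described in usage() below. The following is a rough, hypothetical sketch of that suffix handling, not the tool's actual implementation; the MY_ONE_* constants stand in for the tool's ONE_KB/ONE_MB/ONE_GB.)

#include <stdio.h>
#include <stdlib.h>

#define MY_ONE_KB (1024LL)                    /* hypothetical constants */
#define MY_ONE_MB (1024LL * 1024LL)
#define MY_ONE_GB (1024LL * 1024LL * 1024LL)

/* Turn a size string such as "37M" into a byte count */
static long long size_from_string(const char *size)
{
    char      *endptr = NULL;
    long long  s      = strtoll(size, &endptr, 10);

    if (endptr != NULL && *endptr != '\0') {
        switch (*endptr) {
            case 'K': case 'k': s *= MY_ONE_KB; break;
            case 'M': case 'm': s *= MY_ONE_MB; break;
            case 'G': case 'g': s *= MY_ONE_GB; break;
            default:
                fprintf(stderr, "Illegal size specifier '%c'\n", *endptr);
                exit(EXIT_FAILURE);
        }
    }
    return s;
}

int main(void)
{
    printf("37M -> %lld bytes\n", size_from_string("37M"));
    return 0;
}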
@@ -1357,72 +1357,72 @@ parse_size_directive(const char *size)
static void
usage(const char *prog)
{
- print_version(prog);
- printf("usage: %s [OPTIONS]\n", prog);
- printf(" OPTIONS\n");
- printf(" -h Print an usage message and exit\n");
- printf(" -A AL Which APIs to test\n");
- printf(" [default: all of them]\n");
- printf(" -c SL Selects chunked storage and defines chunks dimensions\n");
- printf(" and sizes\n");
- printf(" [default: Off]\n");
- printf(" -e SL Dimensions and sizes of dataset\n");
- printf(" [default: 100,200]\n");
- printf(" -i N Number of iterations to perform\n");
- printf(" [default: 1]\n");
- printf(" -r NL Dimension access order (see below for description)\n");
- printf(" [default: 1,2]\n");
- printf(" -t Selects extendable dimensions for HDF5 dataset\n");
- printf(" [default: Off]\n");
- printf(" -v VFD Selects file driver for HDF5 access\n");
- printf(" [default: sec2]\n");
- printf(" -w Perform write tests, not the read tests\n");
- printf(" [default: Off]\n");
- printf(" -x SL Dimensions and sizes of the transfer buffer\n");
- printf(" [default: 10,20]\n");
- printf("\n");
- printf(" N - is an integer > 0.\n");
- printf("\n");
- printf(" S - is a size specifier, an integer > 0 followed by a size indicator:\n");
- printf(" K - Kilobyte (%d)\n", ONE_KB);
- printf(" M - Megabyte (%d)\n", ONE_MB);
- printf(" G - Gigabyte (%d)\n", ONE_GB);
- printf("\n");
- printf(" Example: '37M' is 37 megabytes or %d bytes\n", 37*ONE_MB);
- printf("\n");
- printf(" AL - is an API list. Valid values are:\n");
- printf(" hdf5 - HDF5\n");
- printf(" posix - POSIX\n");
- printf("\n");
- printf(" Example: -A posix,hdf5\n");
- printf("\n");
- printf(" NL - is list of integers (N) separated by commas.\n");
- printf("\n");
- printf(" Example: 1,2,3\n");
- printf("\n");
- printf(" SL - is list of size specifiers (S) separated by commas.\n");
- printf("\n");
- printf(" Example: 2K,2K,3K\n");
- printf("\n");
- printf(" The example defines an object (dataset, tranfer buffer) with three\n");
- printf(" dimensions. Be aware that as the number of dimensions increases, the\n");
- printf(" the total size of the object increases exponentially.\n");
- printf("\n");
- printf(" VFD - is an HDF5 file driver specifier. Valid values are:\n");
- printf(" sec2, stdio, core, split, multi, family, direct\n");
- printf("\n");
- printf(" Dimension access order:\n");
- printf(" Data access starts at the cardinal origin of the dataset using the\n");
- printf(" transfer buffer. The next access occurs on a dataset region next to\n");
- printf(" the previous one. For a multidimensional dataset, there are several\n");
- printf(" directions as to where to proceed. This can be specified in the dimension\n");
- printf(" access order. For example, -r 1,2 states that the tool should traverse\n");
- printf(" dimension 1 first, and then dimension 2.\n");
- printf("\n");
- printf(" Environment variables:\n");
- printf(" HDF5_NOCLEANUP Do not remove data files if set [default remove]\n");
- printf(" HDF5_PREFIX Data file prefix\n");
- printf("\n");
+ print_version(prog);
+ HDprintf("usage: %s [OPTIONS]\n", prog);
+ HDprintf(" OPTIONS\n");
+ HDprintf(" -h Print an usage message and exit\n");
+ HDprintf(" -A AL Which APIs to test\n");
+ HDprintf(" [default: all of them]\n");
+ HDprintf(" -c SL Selects chunked storage and defines chunks dimensions\n");
+ HDprintf(" and sizes\n");
+ HDprintf(" [default: Off]\n");
+ HDprintf(" -e SL Dimensions and sizes of dataset\n");
+ HDprintf(" [default: 100,200]\n");
+ HDprintf(" -i N Number of iterations to perform\n");
+ HDprintf(" [default: 1]\n");
+ HDprintf(" -r NL Dimension access order (see below for description)\n");
+ HDprintf(" [default: 1,2]\n");
+ HDprintf(" -t Selects extendable dimensions for HDF5 dataset\n");
+ HDprintf(" [default: Off]\n");
+ HDprintf(" -v VFD Selects file driver for HDF5 access\n");
+ HDprintf(" [default: sec2]\n");
+ HDprintf(" -w Perform write tests, not the read tests\n");
+ HDprintf(" [default: Off]\n");
+ HDprintf(" -x SL Dimensions and sizes of the transfer buffer\n");
+ HDprintf(" [default: 10,20]\n");
+ HDprintf("\n");
+ HDprintf(" N - is an integer > 0.\n");
+ HDprintf("\n");
+ HDprintf(" S - is a size specifier, an integer > 0 followed by a size indicator:\n");
+ HDprintf(" K - Kilobyte (%d)\n", ONE_KB);
+ HDprintf(" M - Megabyte (%d)\n", ONE_MB);
+ HDprintf(" G - Gigabyte (%d)\n", ONE_GB);
+ HDprintf("\n");
+ HDprintf(" Example: '37M' is 37 megabytes or %d bytes\n", 37*ONE_MB);
+ HDprintf("\n");
+ HDprintf(" AL - is an API list. Valid values are:\n");
+ HDprintf(" hdf5 - HDF5\n");
+ HDprintf(" posix - POSIX\n");
+ HDprintf("\n");
+ HDprintf(" Example: -A posix,hdf5\n");
+ HDprintf("\n");
+ HDprintf(" NL - is list of integers (N) separated by commas.\n");
+ HDprintf("\n");
+ HDprintf(" Example: 1,2,3\n");
+ HDprintf("\n");
+ HDprintf(" SL - is list of size specifiers (S) separated by commas.\n");
+ HDprintf("\n");
+ HDprintf(" Example: 2K,2K,3K\n");
+ HDprintf("\n");
+ HDprintf(" The example defines an object (dataset, tranfer buffer) with three\n");
+ HDprintf(" dimensions. Be aware that as the number of dimensions increases, the\n");
+ HDprintf(" the total size of the object increases exponentially.\n");
+ HDprintf("\n");
+ HDprintf(" VFD - is an HDF5 file driver specifier. Valid values are:\n");
+ HDprintf(" sec2, stdio, core, split, multi, family, direct\n");
+ HDprintf("\n");
+ HDprintf(" Dimension access order:\n");
+ HDprintf(" Data access starts at the cardinal origin of the dataset using the\n");
+ HDprintf(" transfer buffer. The next access occurs on a dataset region next to\n");
+ HDprintf(" the previous one. For a multidimensional dataset, there are several\n");
+ HDprintf(" directions as to where to proceed. This can be specified in the dimension\n");
+ HDprintf(" access order. For example, -r 1,2 states that the tool should traverse\n");
+ HDprintf(" dimension 1 first, and then dimension 2.\n");
+ HDprintf("\n");
+ HDprintf(" Environment variables:\n");
+ HDprintf(" HDF5_NOCLEANUP Do not remove data files if set [default remove]\n");
+ HDprintf(" HDF5_PREFIX Data file prefix\n");
+ HDprintf("\n");
fflush(stdout);
} /* end usage() */
diff --git a/tools/test/perform/sio_standalone.h b/tools/test/perform/sio_standalone.h
index e73a6b5..5ce922b 100644
--- a/tools/test/perform/sio_standalone.h
+++ b/tools/test/perform/sio_standalone.h
@@ -316,6 +316,7 @@ H5_DLL int HDfprintf (FILE *stream, const char *fmt, ...);
#define HDpipe(F) pipe(F)
#define HDpow(X,Y) pow(X,Y)
/* printf() variable arguments */
+#define HDprintf(...) HDfprintf(stdout, __VA_ARGS__)
#define HDputc(C,F) putc(C,F)
#define HDputchar(C) putchar(C)
#define HDputs(S) puts(S)
@@ -377,6 +378,7 @@ H5_DLL int c99_snprintf(char* str, size_t size, const char* format, ...);
#define HDsnprintf snprintf /*varargs*/
#endif
/* sprintf() variable arguments */
+#define HDsprintf sprintf /*varargs*/
#define HDsqrt(X) sqrt(X)
#ifdef H5_HAVE_RAND_R
H5_DLL void HDsrand(unsigned int seed);
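(The HDprintf macro added in this header simply forwards its arguments to HDfprintf(stdout, ...), which is what lets the printf() calls in sio_perf.c's usage() be rewritten as HDprintf() while the standalone build stays self-contained. Below is a toy illustration of the same forwarding pattern; the my_ prefixes mark these names as stand-ins for the header's macros, and the stand-in HDfprintf() is just vfprintf(), whereas the real one adds extra conversion specifiers.)

#include <stdarg.h>
#include <stdio.h>

/* Stand-in for the standalone HDfprintf(): format to the given stream */
static int my_HDfprintf(FILE *stream, const char *fmt, ...)
{
    va_list ap;
    int     n;

    va_start(ap, fmt);
    n = vfprintf(stream, fmt, ap);
    va_end(ap);
    return n;
}

/* Same shape as the macro added above: forward everything to stdout */
#define my_HDprintf(...) my_HDfprintf(stdout, __VA_ARGS__)

int main(void)
{
    my_HDprintf("usage: %s [OPTIONS]\n", "sio_perf");
    return 0;
}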
diff --git a/tools/testfiles/tudlink-2.ddl b/tools/testfiles/tudlink-2.ddl
index 7f4281a..82b9f8e 100644
--- a/tools/testfiles/tudlink-2.ddl
+++ b/tools/testfiles/tudlink-2.ddl
@@ -1,6 +1,5 @@
HDF5 "tudlink.h5" {
USERDEFINED_LINK "udlink2" {
- USERDEFINED_LINK "udlink2" {
LINKCLASS 187
}
}