path: root/testpar
author    jhendersonHDF <jhenderson@hdfgroup.org>  2023-11-13 19:49:38 (GMT)
committer GitHub <noreply@github.com>  2023-11-13 19:49:38 (GMT)
commit    28d2b6771f41396f1e243e00cb9dd57c4c891613 (patch)
tree      238e77e247ac6b688d0eea0a6f81df95e47448dc /testpar
parent    8b3ffdef3099d2699ec71a5f855966132b3d3c25 (diff)
HDF5 API test updates (#3835)
* HDF5 API test updates
  - Removed the test duplication introduced by bringing the API tests back into the library from the external VOL tests repository
  - Synced changes between the API tests and the library's existing tests
  - Updated the API tests' CMake code to directly use and install testhdf5, testphdf5, etc. instead of creating duplicate binaries
  - Added a new h5_using_native_vol() test function to determine whether the VOL connector being used is (or the VOL connector stack being used resolves to) the native VOL connector (a usage sketch follows below)
* Remove duplicate variable
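
As a minimal sketch (not taken from this commit), a parallel test could use the new helper roughly as follows. The prototype is assumed to be along the lines of herr_t h5_using_native_vol(hid_t fapl_id, hid_t obj_id, bool *is_native_vol), declared in the test library; the maybe_run_native_only_check() wrapper below is hypothetical:

    #include <stdio.h>
    #include <stdbool.h>

    #include "hdf5.h"
    #include "h5test.h" /* assumed location of the h5_using_native_vol() declaration */

    /* Hypothetical wrapper: skip a check that only makes sense for the
     * native VOL connector when another connector (e.g. one selected via
     * the HDF5_VOL_CONNECTOR environment variable) is in use. */
    static int
    maybe_run_native_only_check(hid_t fapl_id, hid_t file_id)
    {
        bool is_native = false;

        if (h5_using_native_vol(fapl_id, file_id, &is_native) < 0)
            return -1;

        if (!is_native) {
            printf("  -- SKIPPED (VOL connector stack does not resolve to the native connector)\n");
            return 0;
        }

        /* ... native-VOL-only verification would go here ... */
        return 0;
    }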
Diffstat (limited to 'testpar')
-rw-r--r--  testpar/API/CMakeLists.txt  528
-rw-r--r--  testpar/API/H5_api_dataset_test_parallel.c  3
-rw-r--r--  testpar/API/t_bigio.c  1938
-rw-r--r--  testpar/API/t_chunk_alloc.c  507
-rw-r--r--  testpar/API/t_coll_chunk.c  1345
-rw-r--r--  testpar/API/t_coll_md_read.c  624
-rw-r--r--  testpar/API/t_dset.c  4317
-rw-r--r--  testpar/API/t_file.c  1044
-rw-r--r--  testpar/API/t_file_image.c  385
-rw-r--r--  testpar/API/t_filter_read.c  532
-rw-r--r--  testpar/API/t_mdset.c  2827
-rw-r--r--  testpar/API/t_ph5basic.c  188
-rw-r--r--  testpar/API/t_prop.c  646
-rw-r--r--  testpar/API/t_pshutdown.c  147
-rw-r--r--  testpar/API/t_shapesame.c  4484
-rw-r--r--  testpar/API/t_span_tree.c  2588
-rw-r--r--  testpar/API/testphdf5.c  1006
-rw-r--r--  testpar/API/testphdf5.h  342
-rw-r--r--  testpar/t_bigio.c  46
-rw-r--r--  testpar/t_chunk_alloc.c  108
-rw-r--r--  testpar/t_coll_chunk.c  166
-rw-r--r--  testpar/t_coll_md.c  52
-rw-r--r--  testpar/t_dset.c  204
-rw-r--r--  testpar/t_file.c  40
-rw-r--r--  testpar/t_file_image.c  14
-rw-r--r--  testpar/t_filter_read.c  59
-rw-r--r--  testpar/t_mdset.c  210
-rw-r--r--  testpar/t_prop.c  1
-rw-r--r--  testpar/t_pshutdown.c  19
-rw-r--r--  testpar/t_shapesame.c  132
-rw-r--r--  testpar/t_span_tree.c  127
-rw-r--r--  testpar/testphdf5.c  14
32 files changed, 1298 insertions, 23345 deletions
diff --git a/testpar/API/CMakeLists.txt b/testpar/API/CMakeLists.txt
index 869a925..818bee6 100644
--- a/testpar/API/CMakeLists.txt
+++ b/testpar/API/CMakeLists.txt
@@ -13,7 +13,7 @@ cmake_minimum_required (VERSION 3.18)
project (HDF5_TEST_PAR_API C)
#------------------------------------------------------------------------------
-# Define for API tests
+# Variables, definitions, etc. for API tests
#------------------------------------------------------------------------------
set (HDF5_API_TESTS
@@ -34,7 +34,9 @@ if (HDF5_TEST_API_ENABLE_ASYNC)
)
endif ()
-# Ported HDF5 tests
+# Extra HDF5 tests to run. Each entry in the list
+# must be a CMake target name for a test executable
+# that was added elsewhere in the project
set (HDF5_API_PAR_TESTS_EXTRA
t_bigio
t_pshutdown
@@ -43,9 +45,12 @@ set (HDF5_API_PAR_TESTS_EXTRA
)
# List of files generated by the HDF5 API tests which
-# should be cleaned up in case the test failed to remove
-# them
+# we should attempt to clean up in case the tests failed
+# to remove them
+# TODO: Run h5delete tool with appropriate env. vars for
+# connectors to remove these files
set (HDF5_API_PAR_TESTS_FILES
+ # TODO
H5_api_test_parallel.h5
H5_api_async_test_parallel.h5
H5_api_async_test_parallel_0.h5
@@ -96,22 +101,23 @@ target_compile_definitions (
PRIVATE
"$<$<CONFIG:Developer>:${HDF5_DEVELOPER_DEFS}>"
)
-if (NOT BUILD_SHARED_LIBS)
- TARGET_C_PROPERTIES (h5_api_test_parallel STATIC)
+# Always prefer linking the shared HDF5 library by default
+if (BUILD_SHARED_LIBS)
+ TARGET_C_PROPERTIES (h5_api_test_parallel SHARED)
target_link_libraries (
h5_api_test_parallel
PRIVATE
- ${HDF5_TEST_LIB_TARGET}
- ${HDF5_LIB_TARGET}
+ ${HDF5_TEST_LIBSH_TARGET}
+ ${HDF5_LIBSH_TARGET}
"$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:MPI::MPI_C>"
)
else ()
- TARGET_C_PROPERTIES (h5_api_test_parallel SHARED)
+ TARGET_C_PROPERTIES (h5_api_test_parallel STATIC)
target_link_libraries (
h5_api_test_parallel
PRIVATE
- ${HDF5_TEST_LIBSH_TARGET}
- ${HDF5_LIBSH_TARGET}
+ ${HDF5_TEST_LIB_TARGET}
+ ${HDF5_LIB_TARGET}
"$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:MPI::MPI_C>"
)
endif ()
@@ -125,120 +131,15 @@ if (HDF5_ENABLE_FORMATTERS)
clang_format (HDF5_TEST_h5_api_test_parallel_FORMAT h5_api_test_parallel)
endif ()
-if (HDF5_TEST_API_INSTALL)
- install (
- TARGETS
- h5_api_test_parallel
- EXPORT
- ${HDF5_EXPORTED_TARGETS}
- DESTINATION
- ${HDF5_INSTALL_BIN_DIR}
- PERMISSIONS
- OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE
- COMPONENT
- tests
- )
-endif ()
-
-#-----------------------------------------------------------------------------
-# Build the ported HDF5 test executables
-#-----------------------------------------------------------------------------
-foreach (api_test_extra ${HDF5_API_PAR_TESTS_EXTRA})
- unset (HDF5_API_PAR_TEST_EXTRA_SRCS)
-
- set (HDF5_API_PAR_TEST_EXTRA_SRCS
- ${HDF5_API_PAR_TEST_EXTRA_SRCS}
- ${CMAKE_CURRENT_SOURCE_DIR}/${api_test_extra}.c
- )
-
- if (${api_test_extra} STREQUAL "testphdf5")
- set (HDF5_API_PAR_TEST_EXTRA_SRCS
- ${HDF5_API_PAR_TEST_EXTRA_SRCS}
- ${CMAKE_CURRENT_SOURCE_DIR}/t_ph5basic.c
- ${CMAKE_CURRENT_SOURCE_DIR}/t_file.c
- ${CMAKE_CURRENT_SOURCE_DIR}/t_dset.c
- ${CMAKE_CURRENT_SOURCE_DIR}/t_mdset.c
- ${CMAKE_CURRENT_SOURCE_DIR}/t_coll_chunk.c
- ${CMAKE_CURRENT_SOURCE_DIR}/t_span_tree.c
- ${CMAKE_CURRENT_SOURCE_DIR}/t_prop.c
- ${CMAKE_CURRENT_SOURCE_DIR}/t_file_image.c
- ${CMAKE_CURRENT_SOURCE_DIR}/t_coll_md_read.c
- ${CMAKE_CURRENT_SOURCE_DIR}/t_chunk_alloc.c
- ${CMAKE_CURRENT_SOURCE_DIR}/t_filter_read.c
- )
- endif ()
-
- add_executable (h5_api_test_parallel_${api_test_extra} ${HDF5_API_PAR_TEST_EXTRA_SRCS})
- target_include_directories (
- h5_api_test_parallel_${api_test_extra}
- PRIVATE
- "${HDF5_SRC_INCLUDE_DIRS}"
- "${HDF5_TEST_PAR_DIR}"
- "${HDF5_TEST_API_SRC_DIR}"
- "${HDF5_TEST_API_PAR_SRC_DIR}"
- "${HDF5_SRC_BINARY_DIR}"
- "${HDF5_TEST_BINARY_DIR}"
- "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>"
- )
- target_compile_options (
- h5_api_test_parallel_${api_test_extra}
- PRIVATE
- "${HDF5_CMAKE_C_FLAGS}"
- )
- target_compile_definitions (
- h5_api_test_parallel_${api_test_extra}
- PRIVATE
- "$<$<CONFIG:Developer>:${HDF5_DEVELOPER_DEFS}>"
- )
- if (NOT BUILD_SHARED_LIBS)
- TARGET_C_PROPERTIES (h5_api_test_parallel_${api_test_extra} STATIC)
- target_link_libraries (
- h5_api_test_parallel_${api_test_extra}
- PRIVATE
- ${HDF5_TEST_LIB_TARGET}
- ${HDF5_LIB_TARGET}
- "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:MPI::MPI_C>"
- )
- else ()
- TARGET_C_PROPERTIES (h5_api_test_parallel_${api_test_extra} SHARED)
- target_link_libraries (
- h5_api_test_parallel_${api_test_extra}
- PRIVATE
- ${HDF5_TEST_LIBSH_TARGET}
- ${HDF5_LIBSH_TARGET}
- "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:MPI::MPI_C>"
- )
- endif ()
- set_target_properties (
- h5_api_test_parallel_${api_test_extra}
- PROPERTIES
- FOLDER test/par/API
- )
- # Add Target to clang-format
- if (HDF5_ENABLE_FORMATTERS)
- clang_format (HDF5_TEST_h5_api_test_parallel_${api_test_extra}_FORMAT h5_api_test_parallel_${api_test_extra})
- endif ()
-
- if (HDF5_TEST_API_INSTALL)
- install (
- TARGETS
- h5_api_test_parallel_${api_test_extra}
- EXPORT
- ${HDF5_EXPORTED_TARGETS}
- DESTINATION
- ${HDF5_INSTALL_BIN_DIR}
- PERMISSIONS
- OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE
- COMPONENT
- tests
- )
- endif ()
-endforeach ()
-
#-----------------------------------------------------------------------------
# Add tests if HDF5 parallel testing is enabled
#-----------------------------------------------------------------------------
if (HDF5_TEST_PARALLEL)
+ # Setup working directories for any external VOL connectors to be tested
+ foreach (external_vol_tgt ${HDF5_EXTERNAL_VOL_TARGETS})
+ file (MAKE_DIRECTORY "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}")
+ endforeach ()
+
if (HDF5_TEST_API_ENABLE_DRIVER)
if ("${HDF5_TEST_API_SERVER}" STREQUAL "")
message (FATAL_ERROR "Please set HDF5_TEST_API_SERVER to point to a server executable for the test driver program.")
@@ -259,6 +160,7 @@ if (HDF5_TEST_PARALLEL)
)
endif ()
+ # Add main API tests to test suite
set (last_api_test "")
foreach (api_test ${HDF5_API_TESTS})
add_test (
@@ -275,17 +177,6 @@ if (HDF5_TEST_PARALLEL)
set (last_api_test "h5_api_test_parallel_${api_test}")
endforeach ()
- foreach (hdf5_test ${HDF5_API_PAR_TESTS_EXTRA})
- add_test (
- NAME "h5_api_test_parallel_${hdf5_test}"
- COMMAND $<TARGET_FILE:h5_api_test_driver>
- --server ${HDF5_TEST_API_SERVER}
- --client $<TARGET_FILE:h5_api_test_parallel_${hdf5_test}>
- --serial
- ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS}
- )
- endforeach ()
-
# Hook external tests to same test suite
foreach (ext_api_test ${HDF5_API_EXT_PARALLEL_TESTS})
add_test (
@@ -298,97 +189,103 @@ if (HDF5_TEST_PARALLEL)
)
endforeach ()
- # Add tests for each external VOL connector that was built
- foreach (external_vol_tgt ${HDF5_EXTERNAL_VOL_TARGETS})
- # Determine whether connector should be tested with parallel tests
- get_target_property (vol_test_parallel "${external_vol_tgt}" HDF5_VOL_TEST_PARALLEL)
- if (${vol_test_parallel})
- # Determine environment variables that need to be set for testing
- set (vol_test_env "")
- set (vol_plugin_paths "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}")
-
- get_target_property (vol_test_string "${external_vol_tgt}" HDF5_VOL_NAME)
- list (APPEND vol_test_env "HDF5_VOL_CONNECTOR=${vol_test_string}")
-
- get_target_property (vol_lib_targets "${external_vol_tgt}" HDF5_VOL_TARGETS)
- foreach (lib_target ${vol_lib_targets})
- get_target_property (lib_target_output_dir "${lib_target}" LIBRARY_OUTPUT_DIRECTORY)
- if (NOT "${lib_target_output_dir}" STREQUAL "lib_target_output_dir-NOTFOUND"
- AND NOT "${lib_target_output_dir}" STREQUAL ""
- AND NOT "${lib_target_output_dir}" STREQUAL "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}")
- set (vol_plugin_paths "${vol_plugin_paths}${CMAKE_SEP}${lib_target_output_dir}")
- endif ()
- endforeach ()
-
- list (APPEND vol_test_env "HDF5_PLUGIN_PATH=${vol_plugin_paths}")
-
- # Add main API tests
- set (last_api_test "")
- foreach (api_test ${HDF5_API_TESTS})
- add_test (
- NAME "${external_vol_tgt}-h5_api_test_parallel_${api_test}"
- COMMAND $<TARGET_FILE:h5_api_test_driver>
- --server ${HDF5_TEST_API_SERVER}
- --client $<TARGET_FILE:h5_api_test_parallel> "${api_test}"
- --serial
- ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS}
- )
- set_tests_properties (
- "${external_vol_tgt}-h5_api_test_parallel_${api_test}"
- PROPERTIES
- ENVIRONMENT
- "${vol_test_env}"
- WORKING_DIRECTORY
- "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}"
- DEPENDS
- "${last_api_test}"
- )
-
- set (last_api_test "${external_vol_tgt}-h5_api_test_parallel_${api_test}")
- endforeach ()
-
- # Add any extra HDF5 tests
- foreach (hdf5_test ${HDF5_API_PAR_TESTS_EXTRA})
- add_test (
- NAME "${external_vol_tgt}-h5_api_test_parallel_${hdf5_test}"
- COMMAND $<TARGET_FILE:h5_api_test_driver>
- --server ${HDF5_TEST_API_SERVER}
- --client $<TARGET_FILE:h5_api_test_parallel_${hdf5_test}>
- --serial
- ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS}
- )
- set_tests_properties (
- "${external_vol_tgt}-h5_api_test_parallel_${hdf5_test}"
- PROPERTIES
- ENVIRONMENT
- "${vol_test_env}"
- WORKING_DIRECTORY
- "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}"
- )
- endforeach ()
-
- # Hook external tests to same test suite
- foreach (ext_api_test ${HDF5_API_EXT_PARALLEL_TESTS})
- add_test (
- NAME "${external_vol_tgt}-h5_api_ext_test_parallel_${ext_api_test}"
- COMMAND $<TARGET_FILE:h5_api_test_driver>
- --server ${HDF5_TEST_API_SERVER}
- --client $<TARGET_FILE:${ext_api_test}>
- --serial
- ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS}
- )
- set_tests_properties (
- "${external_vol_tgt}-h5_api_ext_test_parallel_${ext_api_test}"
- PROPERTIES
- ENVIRONMENT
- "${vol_test_env}"
- WORKING_DIRECTORY
- "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}"
- )
- endforeach ()
- endif ()
- endforeach ()
+ if (BUILD_SHARED_LIBS)
+ # Add tests for each external VOL connector that was built,
+ # but only if executables that were linked to a shared HDF5
+ # library are available, since static executables will cause
+ # issues when VOL connectors are loaded dynamically
+ foreach (external_vol_tgt ${HDF5_EXTERNAL_VOL_TARGETS})
+ # Determine whether connector should be tested with parallel tests
+ get_target_property (vol_test_parallel "${external_vol_tgt}" HDF5_VOL_TEST_PARALLEL)
+ if (${vol_test_parallel})
+ # Determine environment variables that need to be set for testing
+ set (vol_test_env "")
+ set (vol_plugin_paths "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}")
+
+ get_target_property (vol_test_string "${external_vol_tgt}" HDF5_VOL_NAME)
+ list (APPEND vol_test_env "HDF5_VOL_CONNECTOR=${vol_test_string}")
+
+ get_target_property (vol_lib_targets "${external_vol_tgt}" HDF5_VOL_TARGETS)
+ foreach (lib_target ${vol_lib_targets})
+ get_target_property (lib_target_output_dir "${lib_target}" LIBRARY_OUTPUT_DIRECTORY)
+ if (NOT "${lib_target_output_dir}" STREQUAL "lib_target_output_dir-NOTFOUND"
+ AND NOT "${lib_target_output_dir}" STREQUAL ""
+ AND NOT "${lib_target_output_dir}" STREQUAL "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}")
+ set (vol_plugin_paths "${vol_plugin_paths}${CMAKE_SEP}${lib_target_output_dir}")
+ endif ()
+ endforeach ()
+
+ list (APPEND vol_test_env "HDF5_PLUGIN_PATH=${vol_plugin_paths}")
+
+ # Add main API tests to test suite
+ set (last_api_test "")
+ foreach (api_test ${HDF5_API_TESTS})
+ add_test (
+ NAME "${external_vol_tgt}-h5_api_test_parallel_${api_test}"
+ COMMAND $<TARGET_FILE:h5_api_test_driver>
+ --server ${HDF5_TEST_API_SERVER}
+ --client $<TARGET_FILE:h5_api_test_parallel> "${api_test}"
+ --serial
+ ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS}
+ )
+ set_tests_properties (
+ "${external_vol_tgt}-h5_api_test_parallel_${api_test}"
+ PROPERTIES
+ ENVIRONMENT
+ "${vol_test_env}"
+ WORKING_DIRECTORY
+ "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}"
+ DEPENDS
+ "${last_api_test}"
+ )
+
+ set (last_api_test "${external_vol_tgt}-h5_api_test_parallel_${api_test}")
+ endforeach ()
+
+ # Add any extra HDF5 tests to test suite
+ foreach (hdf5_test ${HDF5_API_PAR_TESTS_EXTRA})
+ add_test (
+ NAME "${external_vol_tgt}-h5_api_test_parallel_${hdf5_test}"
+ COMMAND $<TARGET_FILE:h5_api_test_driver>
+ --server ${HDF5_TEST_API_SERVER}
+ --client $<TARGET_FILE:${hdf5_test}>
+ --serial
+ ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS}
+ )
+ set_tests_properties (
+ "${external_vol_tgt}-h5_api_test_parallel_${hdf5_test}"
+ PROPERTIES
+ ENVIRONMENT
+ "${vol_test_env}"
+ WORKING_DIRECTORY
+ "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}"
+ )
+ endforeach ()
+
+ # Hook external tests to same test suite
+ foreach (ext_api_test ${HDF5_API_EXT_PARALLEL_TESTS})
+ add_test (
+ NAME "${external_vol_tgt}-h5_api_ext_test_parallel_${ext_api_test}"
+ COMMAND $<TARGET_FILE:h5_api_test_driver>
+ --server ${HDF5_TEST_API_SERVER}
+ --client $<TARGET_FILE:${ext_api_test}>
+ --serial
+ ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS}
+ )
+ set_tests_properties (
+ "${external_vol_tgt}-h5_api_ext_test_parallel_${ext_api_test}"
+ PROPERTIES
+ ENVIRONMENT
+ "${vol_test_env}"
+ WORKING_DIRECTORY
+ "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}"
+ )
+ endforeach ()
+ endif ()
+ endforeach ()
+ endif ()
else ()
+ # Add main API tests to test suite
set (last_api_test "")
foreach (api_test ${HDF5_API_TESTS})
add_test (
@@ -403,80 +300,117 @@ if (HDF5_TEST_PARALLEL)
set (last_api_test "h5_api_test_parallel_${api_test}")
endforeach ()
- foreach (hdf5_test ${HDF5_API_PAR_TESTS_EXTRA})
- add_test (
- NAME "h5_api_test_parallel_${hdf5_test}"
- COMMAND ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS}
- ${MPIEXEC_PREFLAGS} $<TARGET_FILE:h5_api_test_parallel_${hdf5_test}>
- ${MPIEXEC_POSTFLAGS}
- )
- endforeach ()
-
- # Add tests for each external VOL connector that was built
- foreach (external_vol_tgt ${HDF5_EXTERNAL_VOL_TARGETS})
- # Determine whether connector should be tested with parallel tests
- get_target_property (vol_test_parallel "${external_vol_tgt}" HDF5_VOL_TEST_PARALLEL)
- if (${vol_test_parallel})
- # Determine environment variables that need to be set for testing
- set (vol_test_env "")
- set (vol_plugin_paths "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}")
-
- get_target_property (vol_test_string "${external_vol_tgt}" HDF5_VOL_NAME)
- list (APPEND vol_test_env "HDF5_VOL_CONNECTOR=${vol_test_string}")
-
- get_target_property (vol_lib_targets "${external_vol_tgt}" HDF5_VOL_TARGETS)
- foreach (lib_target ${vol_lib_targets})
- get_target_property (lib_target_output_dir "${lib_target}" LIBRARY_OUTPUT_DIRECTORY)
- if (NOT "${lib_target_output_dir}" STREQUAL "lib_target_output_dir-NOTFOUND"
- AND NOT "${lib_target_output_dir}" STREQUAL ""
- AND NOT "${lib_target_output_dir}" STREQUAL "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}")
- set (vol_plugin_paths "${vol_plugin_paths}${CMAKE_SEP}${lib_target_output_dir}")
- endif ()
- endforeach ()
-
- list (APPEND vol_test_env "HDF5_PLUGIN_PATH=${vol_plugin_paths}")
-
- # Add main API tests
- set (last_api_test "")
- foreach (api_test ${HDF5_API_TESTS})
- add_test (
- NAME "${external_vol_tgt}-h5_api_test_parallel_${api_test}"
- COMMAND ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS}
- ${MPIEXEC_PREFLAGS} $<TARGET_FILE:h5_api_test_parallel> "${api_test}"
- ${MPIEXEC_POSTFLAGS}
- )
- set_tests_properties (
- "${external_vol_tgt}-h5_api_test_parallel_${api_test}"
- PROPERTIES
- ENVIRONMENT
- "${vol_test_env}"
- WORKING_DIRECTORY
- "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}"
- DEPENDS
- "${last_api_test}"
- )
-
- set (last_api_test "${external_vol_tgt}-h5_api_test_parallel_${api_test}")
- endforeach ()
-
- # Add any extra HDF5 tests
- foreach (hdf5_test ${HDF5_API_PAR_TESTS_EXTRA})
- add_test (
- NAME "${external_vol_tgt}-h5_api_test_parallel_${hdf5_test}"
- COMMAND ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS}
- ${MPIEXEC_PREFLAGS} $<TARGET_FILE:h5_api_test_parallel_${hdf5_test}>
- ${MPIEXEC_POSTFLAGS}
- )
- set_tests_properties (
- "${external_vol_tgt}-h5_api_test_parallel_${hdf5_test}"
- PROPERTIES
- ENVIRONMENT
- "${vol_test_env}"
- WORKING_DIRECTORY
- "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}"
- )
- endforeach ()
- endif ()
- endforeach ()
+ if (BUILD_SHARED_LIBS)
+ # Add tests for each external VOL connector that was built,
+ # but only if executables that were linked to a shared HDF5
+ # library are available, since static executables will cause
+ # issues when VOL connectors are loaded dynamically
+ foreach (external_vol_tgt ${HDF5_EXTERNAL_VOL_TARGETS})
+ # Determine whether connector should be tested with parallel tests
+ get_target_property (vol_test_parallel "${external_vol_tgt}" HDF5_VOL_TEST_PARALLEL)
+ if (${vol_test_parallel})
+ # Determine environment variables that need to be set for testing
+ set (vol_test_env "")
+ set (vol_plugin_paths "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}")
+
+ get_target_property (vol_test_string "${external_vol_tgt}" HDF5_VOL_NAME)
+ list (APPEND vol_test_env "HDF5_VOL_CONNECTOR=${vol_test_string}")
+
+ get_target_property (vol_lib_targets "${external_vol_tgt}" HDF5_VOL_TARGETS)
+ foreach (lib_target ${vol_lib_targets})
+ get_target_property (lib_target_output_dir "${lib_target}" LIBRARY_OUTPUT_DIRECTORY)
+ if (NOT "${lib_target_output_dir}" STREQUAL "lib_target_output_dir-NOTFOUND"
+ AND NOT "${lib_target_output_dir}" STREQUAL ""
+ AND NOT "${lib_target_output_dir}" STREQUAL "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}")
+ set (vol_plugin_paths "${vol_plugin_paths}${CMAKE_SEP}${lib_target_output_dir}")
+ endif ()
+ endforeach ()
+
+ list (APPEND vol_test_env "HDF5_PLUGIN_PATH=${vol_plugin_paths}")
+
+ # Add main API tests to test suite
+ set (last_api_test "")
+ foreach (api_test ${HDF5_API_TESTS})
+ add_test (
+ NAME "${external_vol_tgt}-h5_api_test_parallel_${api_test}"
+ COMMAND ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS}
+ ${MPIEXEC_PREFLAGS} $<TARGET_FILE:h5_api_test_parallel> "${api_test}"
+ ${MPIEXEC_POSTFLAGS}
+ )
+ set_tests_properties (
+ "${external_vol_tgt}-h5_api_test_parallel_${api_test}"
+ PROPERTIES
+ ENVIRONMENT
+ "${vol_test_env}"
+ WORKING_DIRECTORY
+ "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}"
+ DEPENDS
+ "${last_api_test}"
+ )
+
+ set (last_api_test "${external_vol_tgt}-h5_api_test_parallel_${api_test}")
+ endforeach ()
+
+ # Add any extra HDF5 tests to test suite
+ foreach (hdf5_test ${HDF5_API_PAR_TESTS_EXTRA})
+ add_test (
+ NAME "${external_vol_tgt}-h5_api_test_parallel_${hdf5_test}"
+ COMMAND ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS}
+ ${MPIEXEC_PREFLAGS} $<TARGET_FILE:${hdf5_test}>
+ ${MPIEXEC_POSTFLAGS}
+ )
+ set_tests_properties (
+ "${external_vol_tgt}-h5_api_test_parallel_${hdf5_test}"
+ PROPERTIES
+ ENVIRONMENT
+ "${vol_test_env}"
+ WORKING_DIRECTORY
+ "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}"
+ )
+ endforeach ()
+ endif ()
+ endforeach ()
+ endif ()
endif ()
endif ()
+
+#-----------------------------------------------------------------------------
+# Install the main API test executable and any
+# extra HDF5 tests if requested
+#-----------------------------------------------------------------------------
+if (HDF5_EXPORTED_TARGETS AND HDF5_TEST_API_INSTALL)
+ install (
+ TARGETS
+ h5_api_test_parallel
+ EXPORT
+ ${HDF5_EXPORTED_TARGETS}
+ DESTINATION
+ ${HDF5_INSTALL_BIN_DIR}
+ PERMISSIONS
+ OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE
+ COMPONENT
+ tests
+ )
+
+ foreach (api_test_extra ${HDF5_API_PAR_TESTS_EXTRA})
+ if (TARGET ${api_test_extra})
+ set_target_properties (
+ ${api_test_extra}
+ PROPERTIES
+ OUTPUT_NAME "h5_api_test_parallel_${api_test_extra}"
+ )
+ install (
+ TARGETS
+ ${api_test_extra}
+ EXPORT
+ ${HDF5_EXPORTED_TARGETS}
+ DESTINATION
+ ${HDF5_INSTALL_BIN_DIR}
+ PERMISSIONS
+ OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE
+ COMPONENT
+ tests
+ )
+ endif ()
+ endforeach ()
+endif ()
diff --git a/testpar/API/H5_api_dataset_test_parallel.c b/testpar/API/H5_api_dataset_test_parallel.c
index 0d53d44..169d594 100644
--- a/testpar/API/H5_api_dataset_test_parallel.c
+++ b/testpar/API/H5_api_dataset_test_parallel.c
@@ -82,7 +82,6 @@ static int (*par_dataset_tests[])(void) = {
* hyperslab selections and point selections.
*/
#define DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK 3
-#define DATASET_WRITE_DATA_VERIFY_TEST_NUM_POINTS 10
#define DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE H5T_NATIVE_INT
#define DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE sizeof(int)
#define DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME "dataset_write_data_verification_test"
@@ -2142,11 +2141,13 @@ error:
*
* XXX: Currently pulls from invalid memory locations.
*/
+#ifdef BROKEN
#define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK 2
#define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT
#define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DTYPE_SIZE sizeof(int)
#define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_GROUP_NAME "hyper_sel_file_all_sel_mem_write_test"
#define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME "hyper_sel_file_all_sel_mem_dset"
+#endif
static int
test_write_dataset_hyper_file_all_mem(void)
{
diff --git a/testpar/API/t_bigio.c b/testpar/API/t_bigio.c
deleted file mode 100644
index e7bdfb0..0000000
--- a/testpar/API/t_bigio.c
+++ /dev/null
@@ -1,1938 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-#include "hdf5.h"
-#include "testphdf5.h"
-
-#if 0
-#include "H5Dprivate.h" /* For Chunk tests */
-#endif
-
-/* FILENAME and filenames must have the same number of names */
-const char *FILENAME[3] = {"bigio_test.h5", "single_rank_independent_io.h5", NULL};
-
-/* Constants definitions */
-#define MAX_ERR_REPORT 10 /* Maximum number of errors reported */
-
-/* Define some handy debugging shorthands, routines, ... */
-/* debugging tools */
-
-#define MAIN_PROCESS (mpi_rank_g == 0) /* define process 0 as main process */
-
-/* Constants definitions */
-#define RANK 2
-
-#define IN_ORDER 1
-#define OUT_OF_ORDER 2
-
-#define DATASET1 "DSET1"
-#define DATASET2 "DSET2"
-#define DATASET3 "DSET3"
-#define DATASET4 "DSET4"
-#define DXFER_COLLECTIVE_IO 0x1 /* Collective IO*/
-#define DXFER_INDEPENDENT_IO 0x2 /* Independent IO collectively */
-#define DXFER_BIGCOUNT (1 << 29)
-
-#define HYPER 1
-#define POINT 2
-#define ALL 3
-
-/* Dataset data type. Int's can be easily octo dumped. */
-typedef hsize_t B_DATATYPE;
-
-int facc_type = FACC_MPIO; /*Test file access type */
-int dxfer_coll_type = DXFER_COLLECTIVE_IO;
-size_t bigcount = (size_t) /* DXFER_BIGCOUNT */ 1310720;
-int nerrors = 0;
-static int mpi_size_g, mpi_rank_g;
-
-hsize_t space_dim1 = SPACE_DIM1 * 256; // 4096
-hsize_t space_dim2 = SPACE_DIM2;
-
-static void coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option,
- int file_selection, int mem_selection, int mode);
-
-/*
- * Setup the coordinates for point selection.
- */
-static void
-set_coords(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points,
- hsize_t coords[], int order)
-{
- hsize_t i, j, k = 0, m, n, s1, s2;
-
- if (OUT_OF_ORDER == order)
- k = (num_points * RANK) - 1;
- else if (IN_ORDER == order)
- k = 0;
-
- s1 = start[0];
- s2 = start[1];
-
- for (i = 0; i < count[0]; i++)
- for (j = 0; j < count[1]; j++)
- for (m = 0; m < block[0]; m++)
- for (n = 0; n < block[1]; n++)
- if (OUT_OF_ORDER == order) {
- coords[k--] = s2 + (stride[1] * j) + n;
- coords[k--] = s1 + (stride[0] * i) + m;
- }
- else if (IN_ORDER == order) {
- coords[k++] = s1 + stride[0] * i + m;
- coords[k++] = s2 + stride[1] * j + n;
- }
-}
-
-/*
- * Fill the dataset with trivial data for testing.
- * Assume dimension rank is 2 and data is stored contiguous.
- */
-static void
-fill_datasets(hsize_t start[], hsize_t block[], B_DATATYPE *dataset)
-{
- B_DATATYPE *dataptr = dataset;
- hsize_t i, j;
-
- /* put some trivial data in the data_array */
- for (i = 0; i < block[0]; i++) {
- for (j = 0; j < block[1]; j++) {
- *dataptr = (B_DATATYPE)((i + start[0]) * 100 + (j + start[1] + 1));
- dataptr++;
- }
- }
-}
-
-/*
- * Setup the coordinates for point selection.
- */
-void
-point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points,
- hsize_t coords[], int order)
-{
- hsize_t i, j, k = 0, m, n, s1, s2;
-
- HDcompile_assert(RANK == 2);
-
- if (OUT_OF_ORDER == order)
- k = (num_points * RANK) - 1;
- else if (IN_ORDER == order)
- k = 0;
-
- s1 = start[0];
- s2 = start[1];
-
- for (i = 0; i < count[0]; i++)
- for (j = 0; j < count[1]; j++)
- for (m = 0; m < block[0]; m++)
- for (n = 0; n < block[1]; n++)
- if (OUT_OF_ORDER == order) {
- coords[k--] = s2 + (stride[1] * j) + n;
- coords[k--] = s1 + (stride[0] * i) + m;
- }
- else if (IN_ORDER == order) {
- coords[k++] = s1 + stride[0] * i + m;
- coords[k++] = s2 + stride[1] * j + n;
- }
-
- if (VERBOSE_MED) {
- printf("start[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
- "count[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
- "stride[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
- "block[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
- "total datapoints=%" PRIuHSIZE "\n",
- start[0], start[1], count[0], count[1], stride[0], stride[1], block[0], block[1],
- block[0] * block[1] * count[0] * count[1]);
- k = 0;
- for (i = 0; i < num_points; i++) {
- printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
- k += 2;
- }
- }
-}
-
-/*
- * Print the content of the dataset.
- */
-static void
-dataset_print(hsize_t start[], hsize_t block[], B_DATATYPE *dataset)
-{
- B_DATATYPE *dataptr = dataset;
- hsize_t i, j;
-
- /* print the column heading */
- printf("%-8s", "Cols:");
- for (j = 0; j < block[1]; j++) {
- printf("%3" PRIuHSIZE " ", start[1] + j);
- }
- printf("\n");
-
- /* print the slab data */
- for (i = 0; i < block[0]; i++) {
- printf("Row %2" PRIuHSIZE ": ", i + start[0]);
- for (j = 0; j < block[1]; j++) {
- printf("%" PRIuHSIZE " ", *dataptr++);
- }
- printf("\n");
- }
-}
-
-/*
- * Print the content of the dataset.
- */
-static int
-verify_data(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], B_DATATYPE *dataset,
- B_DATATYPE *original)
-{
- hsize_t i, j;
- int vrfyerrs;
-
- /* print it if VERBOSE_MED */
- if (VERBOSE_MED) {
- printf("verify_data dumping:::\n");
- printf("start(%" PRIuHSIZE ", %" PRIuHSIZE "), "
- "count(%" PRIuHSIZE ", %" PRIuHSIZE "), "
- "stride(%" PRIuHSIZE ", %" PRIuHSIZE "), "
- "block(%" PRIuHSIZE ", %" PRIuHSIZE ")\n",
- start[0], start[1], count[0], count[1], stride[0], stride[1], block[0], block[1]);
- printf("original values:\n");
- dataset_print(start, block, original);
- printf("compared values:\n");
- dataset_print(start, block, dataset);
- }
-
- vrfyerrs = 0;
- for (i = 0; i < block[0]; i++) {
- for (j = 0; j < block[1]; j++) {
- if (*dataset != *original) {
- if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
- printf("Dataset Verify failed at [%" PRIuHSIZE "][%" PRIuHSIZE "]"
- "(row %" PRIuHSIZE ", col %" PRIuHSIZE "): "
- "expect %" PRIuHSIZE ", got %" PRIuHSIZE "\n",
- i, j, i + start[0], j + start[1], *(original), *(dataset));
- }
- dataset++;
- original++;
- }
- }
- }
- if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("[more errors ...]\n");
- if (vrfyerrs)
- printf("%d errors found in verify_data\n", vrfyerrs);
- return (vrfyerrs);
-}
-
-/* Set up the selection */
-static void
-ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
- int mode)
-{
-
- switch (mode) {
-
- case BYROW_CONT:
- /* Each process takes a slabs of rows. */
- block[0] = 1;
- block[1] = 1;
- stride[0] = 1;
- stride[1] = 1;
- count[0] = space_dim1;
- count[1] = space_dim2;
- start[0] = (hsize_t)mpi_rank * count[0];
- start[1] = 0;
-
- break;
-
- case BYROW_DISCONT:
- /* Each process takes several disjoint blocks. */
- block[0] = 1;
- block[1] = 1;
- stride[0] = 3;
- stride[1] = 3;
- count[0] = space_dim1 / (stride[0] * block[0]);
- count[1] = (space_dim2) / (stride[1] * block[1]);
- start[0] = space_dim1 * (hsize_t)mpi_rank;
- start[1] = 0;
-
- break;
-
- case BYROW_SELECTNONE:
- /* Each process takes a slabs of rows, there are
- no selections for the last process. */
- block[0] = 1;
- block[1] = 1;
- stride[0] = 1;
- stride[1] = 1;
- count[0] = ((mpi_rank >= MAX(1, (mpi_size - 2))) ? 0 : space_dim1);
- count[1] = space_dim2;
- start[0] = (hsize_t)mpi_rank * count[0];
- start[1] = 0;
-
- break;
-
- case BYROW_SELECTUNBALANCE:
- /* The first one-third of the number of processes only
- select top half of the domain, The rest will select the bottom
- half of the domain. */
-
- block[0] = 1;
- count[0] = 2;
- stride[0] = (hsize_t)(space_dim1 * (hsize_t)mpi_size / 4 + 1);
- block[1] = space_dim2;
- count[1] = 1;
- start[1] = 0;
- stride[1] = 1;
- if ((mpi_rank * 3) < (mpi_size * 2))
- start[0] = (hsize_t)mpi_rank;
- else
- start[0] = 1 + space_dim1 * (hsize_t)mpi_size / 2 + (hsize_t)(mpi_rank - 2 * mpi_size / 3);
- break;
-
- case BYROW_SELECTINCHUNK:
- /* Each process will only select one chunk */
-
- block[0] = 1;
- count[0] = 1;
- start[0] = (hsize_t)mpi_rank * space_dim1;
- stride[0] = 1;
- block[1] = space_dim2;
- count[1] = 1;
- stride[1] = 1;
- start[1] = 0;
-
- break;
-
- default:
- /* Unknown mode. Set it to cover the whole dataset. */
- block[0] = space_dim1 * (hsize_t)mpi_size;
- block[1] = space_dim2;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = 0;
-
- break;
- }
- if (VERBOSE_MED) {
- printf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total "
- "datapoints=%lu\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
- (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
- (unsigned long)block[0], (unsigned long)block[1],
- (unsigned long)(block[0] * block[1] * count[0] * count[1]));
- }
-}
-
-/*
- * Fill the dataset with trivial data for testing.
- * Assume dimension rank is 2.
- */
-static void
-ccdataset_fill(hsize_t start[], hsize_t stride[], hsize_t count[], hsize_t block[], DATATYPE *dataset,
- int mem_selection)
-{
- DATATYPE *dataptr = dataset;
- DATATYPE *tmptr;
- hsize_t i, j, k1, k2, k = 0;
- /* put some trivial data in the data_array */
- tmptr = dataptr;
-
- /* assign the disjoint block (two-dimensional)data array value
- through the pointer */
-
- for (k1 = 0; k1 < count[0]; k1++) {
- for (i = 0; i < block[0]; i++) {
- for (k2 = 0; k2 < count[1]; k2++) {
- for (j = 0; j < block[1]; j++) {
-
- if (ALL != mem_selection) {
- dataptr = tmptr + ((start[0] + k1 * stride[0] + i) * space_dim2 + start[1] +
- k2 * stride[1] + j);
- }
- else {
- dataptr = tmptr + k;
- k++;
- }
-
- *dataptr = (DATATYPE)(k1 + k2 + i + j);
- }
- }
- }
- }
-}
-
-/*
- * Print the first block of the content of the dataset.
- */
-static void
-ccdataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset)
-
-{
- DATATYPE *dataptr = dataset;
- hsize_t i, j;
-
- /* print the column heading */
- printf("Print only the first block of the dataset\n");
- printf("%-8s", "Cols:");
- for (j = 0; j < block[1]; j++) {
- printf("%3lu ", (unsigned long)(start[1] + j));
- }
- printf("\n");
-
- /* print the slab data */
- for (i = 0; i < block[0]; i++) {
- printf("Row %2lu: ", (unsigned long)(i + start[0]));
- for (j = 0; j < block[1]; j++) {
- printf("%03d ", *dataptr++);
- }
- printf("\n");
- }
-}
-
-/*
- * Print the content of the dataset.
- */
-static int
-ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset,
- DATATYPE *original, int mem_selection)
-{
- hsize_t i, j, k1, k2, k = 0;
- int vrfyerrs;
- DATATYPE *dataptr, *oriptr;
-
- /* print it if VERBOSE_MED */
- if (VERBOSE_MED) {
- printf("dataset_vrfy dumping:::\n");
- printf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
- (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
- (unsigned long)block[0], (unsigned long)block[1]);
- printf("original values:\n");
- ccdataset_print(start, block, original);
- printf("compared values:\n");
- ccdataset_print(start, block, dataset);
- }
-
- vrfyerrs = 0;
-
- for (k1 = 0; k1 < count[0]; k1++) {
- for (i = 0; i < block[0]; i++) {
- for (k2 = 0; k2 < count[1]; k2++) {
- for (j = 0; j < block[1]; j++) {
- if (ALL != mem_selection) {
- dataptr = dataset + ((start[0] + k1 * stride[0] + i) * space_dim2 + start[1] +
- k2 * stride[1] + j);
- oriptr = original + ((start[0] + k1 * stride[0] + i) * space_dim2 + start[1] +
- k2 * stride[1] + j);
- }
- else {
- dataptr = dataset + k;
- oriptr = original + k;
- k++;
- }
- if (*dataptr != *oriptr) {
- if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
- printf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n",
- (unsigned long)i, (unsigned long)j, *(oriptr), *(dataptr));
- }
- }
- }
- }
- }
- }
- if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("[more errors ...]\n");
- if (vrfyerrs)
- printf("%d errors found in ccdataset_vrfy\n", vrfyerrs);
- return (vrfyerrs);
-}
-
-/*
- * Example of using the parallel HDF5 library to create two datasets
- * in one HDF5 file with collective parallel access support.
- * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
- * Each process controls only a slab of size dim0 x dim1 within each
- * dataset. [Note: not so yet. Datasets are of sizes dim0xdim1 and
- * each process controls a hyperslab within.]
- */
-
-static void
-dataset_big_write(void)
-{
-
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset;
- hsize_t dims[RANK]; /* dataset dim sizes */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
- hsize_t *coords = NULL;
- herr_t ret; /* Generic return value */
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- size_t num_points;
- B_DATATYPE *wdata;
-
- /* allocate memory for data buffer */
- wdata = (B_DATATYPE *)malloc(bigcount * sizeof(B_DATATYPE));
- VRFY_G((wdata != NULL), "wdata malloc succeeded");
-
- /* setup file access template */
- acc_tpl = H5Pcreate(H5P_FILE_ACCESS);
- VRFY_G((acc_tpl >= 0), "H5P_FILE_ACCESS");
- H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL);
-
- /* create the file collectively */
- fid = H5Fcreate(FILENAME[0], H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
- VRFY_G((fid >= 0), "H5Fcreate succeeded");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl);
- VRFY_G((ret >= 0), "");
-
- /* Each process takes a slabs of rows. */
- if (mpi_rank_g == 0)
- printf("\nTesting Dataset1 write by ROW\n");
- /* Create a large dataset */
- dims[0] = bigcount;
- dims[1] = (hsize_t)mpi_size_g;
-
- sid = H5Screate_simple(RANK, dims, NULL);
- VRFY_G((sid >= 0), "H5Screate_simple succeeded");
- dataset = H5Dcreate2(fid, DATASET1, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
- H5Sclose(sid);
-
- block[0] = dims[0] / (hsize_t)mpi_size_g;
- block[1] = dims[1];
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = (hsize_t)mpi_rank_g * block[0];
- start[1] = 0;
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset);
- VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, block, NULL);
- VRFY_G((mem_dataspace >= 0), "");
-
- /* fill the local slab with some trivial data */
- fill_datasets(start, block, wdata);
- MESG("data_array initialized");
- if (VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, wdata);
- }
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY_G((ret >= 0), "set independent IO collectively succeeded");
- }
-
- ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata);
- VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
-
- /* release all temporary handles. */
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
-
- ret = H5Dclose(dataset);
- VRFY_G((ret >= 0), "H5Dclose1 succeeded");
-
- /* Each process takes a slabs of cols. */
- if (mpi_rank_g == 0)
- printf("\nTesting Dataset2 write by COL\n");
- /* Create a large dataset */
- dims[0] = bigcount;
- dims[1] = (hsize_t)mpi_size_g;
-
- sid = H5Screate_simple(RANK, dims, NULL);
- VRFY_G((sid >= 0), "H5Screate_simple succeeded");
- dataset = H5Dcreate2(fid, DATASET2, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
- H5Sclose(sid);
-
- block[0] = dims[0];
- block[1] = dims[1] / (hsize_t)mpi_size_g;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = (hsize_t)mpi_rank_g * block[1];
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset);
- VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, block, NULL);
- VRFY_G((mem_dataspace >= 0), "");
-
- /* fill the local slab with some trivial data */
- fill_datasets(start, block, wdata);
- MESG("data_array initialized");
- if (VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, wdata);
- }
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY_G((ret >= 0), "set independent IO collectively succeeded");
- }
-
- ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata);
- VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
-
- /* release all temporary handles. */
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
-
- ret = H5Dclose(dataset);
- VRFY_G((ret >= 0), "H5Dclose1 succeeded");
-
- /* ALL selection */
- if (mpi_rank_g == 0)
- printf("\nTesting Dataset3 write select ALL proc 0, NONE others\n");
- /* Create a large dataset */
- dims[0] = bigcount;
- dims[1] = 1;
-
- sid = H5Screate_simple(RANK, dims, NULL);
- VRFY_G((sid >= 0), "H5Screate_simple succeeded");
- dataset = H5Dcreate2(fid, DATASET3, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
- H5Sclose(sid);
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset);
- VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
- if (mpi_rank_g == 0) {
- ret = H5Sselect_all(file_dataspace);
- VRFY_G((ret >= 0), "H5Sset_all succeeded");
- }
- else {
- ret = H5Sselect_none(file_dataspace);
- VRFY_G((ret >= 0), "H5Sset_none succeeded");
- }
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, dims, NULL);
- VRFY_G((mem_dataspace >= 0), "");
- if (mpi_rank_g != 0) {
- ret = H5Sselect_none(mem_dataspace);
- VRFY_G((ret >= 0), "H5Sset_none succeeded");
- }
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY_G((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* fill the local slab with some trivial data */
- fill_datasets(start, dims, wdata);
- MESG("data_array initialized");
- if (VERBOSE_MED) {
- MESG("data_array created");
- }
-
- ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata);
- VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
-
- /* release all temporary handles. */
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
-
- ret = H5Dclose(dataset);
- VRFY_G((ret >= 0), "H5Dclose1 succeeded");
-
- /* Point selection */
- if (mpi_rank_g == 0)
- printf("\nTesting Dataset4 write point selection\n");
- /* Create a large dataset */
- dims[0] = bigcount;
- dims[1] = (hsize_t)(mpi_size_g * 4);
-
- sid = H5Screate_simple(RANK, dims, NULL);
- VRFY_G((sid >= 0), "H5Screate_simple succeeded");
- dataset = H5Dcreate2(fid, DATASET4, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
- H5Sclose(sid);
-
- block[0] = dims[0] / 2;
- block[1] = 2;
- stride[0] = dims[0] / 2;
- stride[1] = 2;
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = dims[1] / (hsize_t)mpi_size_g * (hsize_t)mpi_rank_g;
-
- num_points = bigcount;
-
- coords = (hsize_t *)malloc(num_points * RANK * sizeof(hsize_t));
- VRFY_G((coords != NULL), "coords malloc succeeded");
-
- set_coords(start, count, stride, block, num_points, coords, IN_ORDER);
- /* create a file dataspace */
- file_dataspace = H5Dget_space(dataset);
- VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY_G((ret >= 0), "H5Sselect_elements succeeded");
-
- if (coords)
- free(coords);
-
- fill_datasets(start, block, wdata);
- MESG("data_array initialized");
- if (VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, wdata);
- }
-
- /* create a memory dataspace */
- /* Warning: H5Screate_simple requires an array of hsize_t elements
- * even if we only pass only a single value. Attempting anything else
- * appears to cause problems with 32 bit compilers.
- */
- mem_dataspace = H5Screate_simple(1, dims, NULL);
- VRFY_G((mem_dataspace >= 0), "");
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY_G((ret >= 0), "set independent IO collectively succeeded");
- }
-
- ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata);
- VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
-
- /* release all temporary handles. */
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
-
- ret = H5Dclose(dataset);
- VRFY_G((ret >= 0), "H5Dclose1 succeeded");
-
- free(wdata);
- H5Fclose(fid);
-}
-
-/*
- * Example of using the parallel HDF5 library to read two datasets
- * in one HDF5 file with collective parallel access support.
- * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
- * Each process controls only a slab of size dim0 x dim1 within each
- * dataset. [Note: not so yet. Datasets are of sizes dim0xdim1 and
- * each process controls a hyperslab within.]
- */
-
-static void
-dataset_big_read(void)
-{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset;
- B_DATATYPE *rdata = NULL; /* data buffer */
- B_DATATYPE *wdata = NULL; /* expected data buffer */
- hsize_t dims[RANK]; /* dataset dim sizes */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
- size_t num_points;
- hsize_t *coords = NULL;
- herr_t ret; /* Generic return value */
-
- /* allocate memory for data buffer */
- rdata = (B_DATATYPE *)malloc(bigcount * sizeof(B_DATATYPE));
- VRFY_G((rdata != NULL), "rdata malloc succeeded");
- wdata = (B_DATATYPE *)malloc(bigcount * sizeof(B_DATATYPE));
- VRFY_G((wdata != NULL), "wdata malloc succeeded");
-
- memset(rdata, 0, bigcount * sizeof(B_DATATYPE));
-
- /* setup file access template */
- acc_tpl = H5Pcreate(H5P_FILE_ACCESS);
- VRFY_G((acc_tpl >= 0), "H5P_FILE_ACCESS");
- H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL);
-
- /* open the file collectively */
- fid = H5Fopen(FILENAME[0], H5F_ACC_RDONLY, acc_tpl);
- VRFY_G((fid >= 0), "H5Fopen succeeded");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl);
- VRFY_G((ret >= 0), "");
-
- if (mpi_rank_g == 0)
- printf("\nRead Testing Dataset1 by COL\n");
-
- dataset = H5Dopen2(fid, DATASET1, H5P_DEFAULT);
- VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
-
- dims[0] = bigcount;
- dims[1] = (hsize_t)mpi_size_g;
- /* Each process takes a slabs of cols. */
- block[0] = dims[0];
- block[1] = dims[1] / (hsize_t)mpi_size_g;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = (hsize_t)mpi_rank_g * block[1];
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset);
- VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, block, NULL);
- VRFY_G((mem_dataspace >= 0), "");
-
- /* fill dataset with test data */
- fill_datasets(start, block, wdata);
- MESG("data_array initialized");
- if (VERBOSE_MED) {
- MESG("data_array created");
- }
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY_G((xfer_plist >= 0), "");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY_G((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* read data collectively */
- ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata);
- VRFY_G((ret >= 0), "H5Dread dataset1 succeeded");
-
- /* verify the read data with original expected data */
- ret = verify_data(start, count, stride, block, rdata, wdata);
- if (ret) {
- fprintf(stderr, "verify failed\n");
- exit(1);
- }
-
- /* release all temporary handles. */
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
- ret = H5Dclose(dataset);
- VRFY_G((ret >= 0), "H5Dclose1 succeeded");
-
- if (mpi_rank_g == 0)
- printf("\nRead Testing Dataset2 by ROW\n");
- memset(rdata, 0, bigcount * sizeof(B_DATATYPE));
- dataset = H5Dopen2(fid, DATASET2, H5P_DEFAULT);
- VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
-
- dims[0] = bigcount;
- dims[1] = (hsize_t)mpi_size_g;
- /* Each process takes a slabs of rows. */
- block[0] = dims[0] / (hsize_t)mpi_size_g;
- block[1] = dims[1];
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = (hsize_t)mpi_rank_g * block[0];
- start[1] = 0;
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset);
- VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, block, NULL);
- VRFY_G((mem_dataspace >= 0), "");
-
- /* fill dataset with test data */
- fill_datasets(start, block, wdata);
- MESG("data_array initialized");
- if (VERBOSE_MED) {
- MESG("data_array created");
- }
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY_G((xfer_plist >= 0), "");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY_G((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* read data collectively */
- ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata);
- VRFY_G((ret >= 0), "H5Dread dataset2 succeeded");
-
- /* verify the read data with original expected data */
- ret = verify_data(start, count, stride, block, rdata, wdata);
- if (ret) {
- fprintf(stderr, "verify failed\n");
- exit(1);
- }
-
- /* release all temporary handles. */
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
- ret = H5Dclose(dataset);
- VRFY_G((ret >= 0), "H5Dclose1 succeeded");
-
- if (mpi_rank_g == 0)
- printf("\nRead Testing Dataset3 read select ALL proc 0, NONE others\n");
- memset(rdata, 0, bigcount * sizeof(B_DATATYPE));
- dataset = H5Dopen2(fid, DATASET3, H5P_DEFAULT);
- VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
-
- dims[0] = bigcount;
- dims[1] = 1;
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset);
- VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
- if (mpi_rank_g == 0) {
- ret = H5Sselect_all(file_dataspace);
- VRFY_G((ret >= 0), "H5Sset_all succeeded");
- }
- else {
- ret = H5Sselect_none(file_dataspace);
- VRFY_G((ret >= 0), "H5Sset_none succeeded");
- }
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, dims, NULL);
- VRFY_G((mem_dataspace >= 0), "");
- if (mpi_rank_g != 0) {
- ret = H5Sselect_none(mem_dataspace);
- VRFY_G((ret >= 0), "H5Sset_none succeeded");
- }
-
- /* fill dataset with test data */
- fill_datasets(start, dims, wdata);
- MESG("data_array initialized");
- if (VERBOSE_MED) {
- MESG("data_array created");
- }
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY_G((xfer_plist >= 0), "");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY_G((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* read data collectively */
- ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata);
- VRFY_G((ret >= 0), "H5Dread dataset3 succeeded");
-
- if (mpi_rank_g == 0) {
- /* verify the read data with original expected data */
- ret = verify_data(start, count, stride, block, rdata, wdata);
- if (ret) {
- fprintf(stderr, "verify failed\n");
- exit(1);
- }
- }
-
- /* release all temporary handles. */
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
- ret = H5Dclose(dataset);
- VRFY_G((ret >= 0), "H5Dclose1 succeeded");
-
- if (mpi_rank_g == 0)
- printf("\nRead Testing Dataset4 with Point selection\n");
- dataset = H5Dopen2(fid, DATASET4, H5P_DEFAULT);
- VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
-
- dims[0] = bigcount;
- dims[1] = (hsize_t)(mpi_size_g * 4);
-
- block[0] = dims[0] / 2;
- block[1] = 2;
- stride[0] = dims[0] / 2;
- stride[1] = 2;
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = dims[1] / (hsize_t)mpi_size_g * (hsize_t)mpi_rank_g;
-
- fill_datasets(start, block, wdata);
- MESG("data_array initialized");
- if (VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, wdata);
- }
-
- num_points = bigcount;
-
- coords = (hsize_t *)malloc(num_points * RANK * sizeof(hsize_t));
- VRFY_G((coords != NULL), "coords malloc succeeded");
-
- set_coords(start, count, stride, block, num_points, coords, IN_ORDER);
- /* create a file dataspace */
- file_dataspace = H5Dget_space(dataset);
- VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY_G((ret >= 0), "H5Sselect_elements succeeded");
-
- if (coords)
- free(coords);
-
- /* create a memory dataspace */
- /* Warning: H5Screate_simple requires an array of hsize_t elements
- * even if we only pass only a single value. Attempting anything else
- * appears to cause problems with 32 bit compilers.
- */
- mem_dataspace = H5Screate_simple(1, dims, NULL);
- VRFY_G((mem_dataspace >= 0), "");
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY_G((xfer_plist >= 0), "");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY_G((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* read data collectively */
- ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata);
- VRFY_G((ret >= 0), "H5Dread dataset1 succeeded");
-
- ret = verify_data(start, count, stride, block, rdata, wdata);
- if (ret) {
- fprintf(stderr, "verify failed\n");
- exit(1);
- }
-
- /* release all temporary handles. */
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
- ret = H5Dclose(dataset);
- VRFY_G((ret >= 0), "H5Dclose1 succeeded");
-
- free(wdata);
- free(rdata);
-
- wdata = NULL;
- rdata = NULL;
- /* We never wrote Dataset5 in the write section, so we can't
- * expect to read it...
- */
- file_dataspace = -1;
- mem_dataspace = -1;
- xfer_plist = -1;
- dataset = -1;
-
- /* release all temporary handles. */
- if (file_dataspace != -1)
- H5Sclose(file_dataspace);
- if (mem_dataspace != -1)
- H5Sclose(mem_dataspace);
- if (xfer_plist != -1)
- H5Pclose(xfer_plist);
- if (dataset != -1) {
- ret = H5Dclose(dataset);
- VRFY_G((ret >= 0), "H5Dclose1 succeeded");
- }
- H5Fclose(fid);
-
- /* release data buffers */
- if (rdata)
- free(rdata);
- if (wdata)
- free(wdata);
-
-} /* dataset_large_readAll */
-
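For reference, H5Sselect_elements() expects a flat array of num_points * rank coordinates listed point by point, which is what the set_coords() helper above builds. Below is a minimal hand-rolled sketch of the same idea for a 2-D dataspace; it is not taken from the test above, and it assumes hdf5.h and stdlib.h are included and that `fspace` is an existing 2-D file dataspace.

/* Sketch: select an nrows x ncols block starting at row `row0` as
 * individual points rather than as a hyperslab. */
static herr_t
select_block_as_points(hid_t fspace, hsize_t row0, hsize_t nrows, hsize_t ncols)
{
    hsize_t *coords = malloc(nrows * ncols * 2 * sizeof(hsize_t));
    size_t   np     = 0;
    herr_t   status;

    if (coords == NULL)
        return -1;

    for (hsize_t r = 0; r < nrows; r++)
        for (hsize_t c = 0; c < ncols; c++) {
            coords[2 * np]     = row0 + r; /* coordinate in dimension 0 */
            coords[2 * np + 1] = c;        /* coordinate in dimension 1 */
            np++;
        }

    status = H5Sselect_elements(fspace, H5S_SELECT_SET, np, coords);
    free(coords);
    return status;
}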
-static void
-single_rank_independent_io(void)
-{
- if (mpi_rank_g == 0)
- printf("single_rank_independent_io\n");
-
- if (MAIN_PROCESS) {
- hsize_t dims[1];
- hid_t file_id = -1;
- hid_t fapl_id = -1;
- hid_t dset_id = -1;
- hid_t fspace_id = -1;
- herr_t ret;
- int *data = NULL;
- uint64_t i;
-
- fapl_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY_G((fapl_id >= 0), "H5P_FILE_ACCESS");
-
- H5Pset_fapl_mpio(fapl_id, MPI_COMM_SELF, MPI_INFO_NULL);
- file_id = H5Fcreate(FILENAME[1], H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
- VRFY_G((file_id >= 0), "H5Dcreate2 succeeded");
-
- /*
- * Calculate the number of elements needed to exceed
- * MPI's INT_MAX limitation
- */
- dims[0] = (INT_MAX / sizeof(int)) + 10;
-
- fspace_id = H5Screate_simple(1, dims, NULL);
- VRFY_G((fspace_id >= 0), "H5Screate_simple fspace_id succeeded");
-
- /*
- * Create and write to a >2GB dataset from a single rank.
- */
- dset_id = H5Dcreate2(file_id, "test_dset", H5T_NATIVE_INT, fspace_id, H5P_DEFAULT, H5P_DEFAULT,
- H5P_DEFAULT);
-
- VRFY_G((dset_id >= 0), "H5Dcreate2 succeeded");
-
- data = malloc(dims[0] * sizeof(int));
-
- /* Initialize data */
- for (i = 0; i < dims[0]; i++)
- data[i] = (int)(i % (uint64_t)DXFER_BIGCOUNT);
-
- /* Write data */
- ret = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_BLOCK, fspace_id, H5P_DEFAULT, data);
- VRFY_G((ret >= 0), "H5Dwrite succeeded");
-
- /* Wipe buffer */
- memset(data, 0, dims[0] * sizeof(int));
-
- /* Read data back */
- ret = H5Dread(dset_id, H5T_NATIVE_INT, H5S_BLOCK, fspace_id, H5P_DEFAULT, data);
- VRFY_G((ret >= 0), "H5Dread succeeded");
-
- /* Verify data */
- for (i = 0; i < dims[0]; i++)
- if (data[i] != (int)(i % (uint64_t)DXFER_BIGCOUNT)) {
- fprintf(stderr, "verify failed\n");
- exit(1);
- }
-
- free(data);
- H5Sclose(fspace_id);
- H5Dclose(dset_id);
- H5Fclose(file_id);
-
- H5Fdelete(FILENAME[1], fapl_id);
-
- H5Pclose(fapl_id);
- }
- MPI_Barrier(MPI_COMM_WORLD);
-}
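The routine above leans on the special H5S_BLOCK dataspace id, which tells H5Dwrite/H5Dread to treat the memory buffer as one contiguous block with exactly as many elements as the current file selection, so no separate memory dataspace is needed. A minimal sketch of the explicit equivalent, assuming `dset_id`, `fspace_id`, `dims` and `data` as in the routine above:

/* Equivalent to the H5S_BLOCK write above, spelled out with an explicit
 * 1-D memory dataspace of the same number of elements. */
hid_t  mspace_id = H5Screate_simple(1, dims, NULL);
herr_t status    = H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, H5P_DEFAULT, data);

if (status < 0)
    fprintf(stderr, "explicit-memory-space H5Dwrite failed\n");
H5Sclose(mspace_id);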
-
-/*
- * Create the appropriate File access property list
- */
-hid_t
-create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
-{
- hid_t ret_pl = -1;
- herr_t ret; /* generic return value */
- int mpi_rank; /* mpi variables */
-
- /* need the rank for error checking macros */
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- ret_pl = H5Pcreate(H5P_FILE_ACCESS);
- VRFY_G((ret_pl >= 0), "H5P_FILE_ACCESS");
-
- if (l_facc_type == FACC_DEFAULT)
- return (ret_pl);
-
- if (l_facc_type == FACC_MPIO) {
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(ret_pl, comm, info);
- VRFY_G((ret >= 0), "");
- ret = H5Pset_all_coll_metadata_ops(ret_pl, true);
- VRFY_G((ret >= 0), "");
- ret = H5Pset_coll_metadata_write(ret_pl, true);
- VRFY_G((ret >= 0), "");
- return (ret_pl);
- }
-
- if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) {
- hid_t mpio_pl;
-
- mpio_pl = H5Pcreate(H5P_FILE_ACCESS);
- VRFY_G((mpio_pl >= 0), "");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
- VRFY_G((ret >= 0), "");
-
- /* setup file access template */
- ret_pl = H5Pcreate(H5P_FILE_ACCESS);
- VRFY_G((ret_pl >= 0), "");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
- VRFY_G((ret >= 0), "H5Pset_fapl_split succeeded");
- H5Pclose(mpio_pl);
- return (ret_pl);
- }
-
- /* unknown file access types */
- return (ret_pl);
-}
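A short usage sketch for the helper above; FACC_MPIO and create_faccess_plist() are defined in this test, while the file name here is just a placeholder:

hid_t fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, FACC_MPIO);
hid_t fid  = H5Fopen("example.h5", H5F_ACC_RDWR, fapl);

/* ... collective metadata operations and raw data I/O on fid ... */

H5Fclose(fid);
H5Pclose(fapl);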
-
-/*-------------------------------------------------------------------------
- * Function: coll_chunk1
- *
- * Purpose: Wrapper to test the collective chunk IO for regular JOINT
- selection with a single chunk
- *
- * Return: Success: 0
- *
- * Failure: -1
- *
- *-------------------------------------------------------------------------
- */
-
-/* ------------------------------------------------------------------------
- * Descriptions for the selection: One big singular selection inside one chunk
- * Two dimensions,
- *
- * dim1 = space_dim1(5760)*mpi_size
- * dim2 = space_dim2(3)
- * chunk_dim1 = dim1
- * chunk_dim2 = dim2
- * block = 1 for all dimensions
- * stride = 1 for all dimensions
- * count0 = space_dim1(5760)
- * count1 = space_dim2(3)
- * start0 = mpi_rank*space_dim1
- * start1 = 0
- * ------------------------------------------------------------------------
- */
-
-void
-coll_chunk1(void)
-{
- const char *filename = FILENAME[0];
- if (mpi_rank_g == 0)
- printf("coll_chunk1\n");
-
- coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
- coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
- coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
-
- coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
- coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
- coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
-}
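The parameters listed in the selection description above map onto a single H5Sselect_hyperslab() call per rank. A minimal sketch, assuming space_dim1 (5760), space_dim2 (3), mpi_rank_g and a `file_dataspace` id as defined elsewhere in this file:

hsize_t start[2], stride[2], count[2], block[2];
herr_t  status;

start[0]  = (hsize_t)mpi_rank_g * space_dim1; /* each rank owns its own row band */
start[1]  = 0;
stride[0] = 1;
stride[1] = 1;
count[0]  = space_dim1; /* 5760 rows per rank */
count[1]  = space_dim2; /* 3 columns */
block[0]  = 1;
block[1]  = 1;

status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);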
-
-/*-------------------------------------------------------------------------
- * Function: coll_chunk2
- *
- * Purpose: Wrapper to test the collective chunk IO for regular DISJOINT
- selection with a single chunk
- *
- * Return: Success: 0
- *
- * Failure: -1
- *-------------------------------------------------------------------------
- */
-
-/* ------------------------------------------------------------------------
- * Descriptions for the selection: many disjoint selections inside one chunk
- * Two dimensions,
- *
- * dim1 = space_dim1*mpi_size(5760)
- * dim2 = space_dim2(3)
- * chunk_dim1 = dim1
- * chunk_dim2 = dim2
- * block = 1 for all dimensions
- * stride = 3 for all dimensions
- * count0 = space_dim1/stride0(5760/3)
- * count1 = space_dim2/stride(3/3 = 1)
- * start0 = mpi_rank*space_dim1
- * start1 = 0
- *
- * ------------------------------------------------------------------------
- */
-void
-coll_chunk2(void)
-{
- const char *filename = FILENAME[0];
- if (mpi_rank_g == 0)
- printf("coll_chunk2\n");
-
- coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
- coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
- coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
-
- coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, IN_ORDER);
- coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, IN_ORDER);
- coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, IN_ORDER);
-}
-
-/*-------------------------------------------------------------------------
- * Function: coll_chunk3
- *
- * Purpose:     Wrapper to test the collective chunk IO for regular JOINT
-               selection with at least 2*mpi_size chunks
- *
- * Return: Success: 0
- *
- * Failure: -1
- *
- *-------------------------------------------------------------------------
- */
-
-/* ------------------------------------------------------------------------
- * Descriptions for the selection: one singular selection across many chunks
- * Two dimensions, Num of chunks = 2* mpi_size
- *
- * dim1 = space_dim1*mpi_size
- * dim2 = space_dim2(3)
- * chunk_dim1 = space_dim1
- * chunk_dim2 = dim2/2
- * block = 1 for all dimensions
- * stride = 1 for all dimensions
- * count0 = space_dim1
- * count1 = space_dim2(3)
- * start0 = mpi_rank*space_dim1
- * start1 = 0
- *
- * ------------------------------------------------------------------------
- */
-
-void
-coll_chunk3(void)
-{
- const char *filename = FILENAME[0];
- if (mpi_rank_g == 0)
- printf("coll_chunk3\n");
-
- coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
- coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
- coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
-
- coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
- coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
- coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
-}
-
-//-------------------------------------------------------------------------
-// Borrowed/Modified (slightly) from t_coll_chunk.c
-/*-------------------------------------------------------------------------
- * Function: coll_chunktest
- *
- * Purpose:     The real testing routine for regular selections with collective
-                chunked storage. Both write and read are tested; if anything
-                fails, the failure may be in either phase, since read and write
-                are not verified separately.
- *
- * Return: Success: 0
- *
- * Failure: -1
- *
- *-------------------------------------------------------------------------
- */
-
-static void
-coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option, int file_selection,
- int mem_selection, int mode)
-{
- hid_t file, dataset, file_dataspace, mem_dataspace;
- hid_t acc_plist, xfer_plist, crp_plist;
-
- hsize_t dims[RANK], chunk_dims[RANK];
- int *data_array1 = NULL;
- int *data_origin1 = NULL;
-
- hsize_t start[RANK], count[RANK], stride[RANK], block[RANK];
-
-#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- unsigned prop_value;
-#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
-
- herr_t status;
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- size_t num_points; /* for point selection */
- hsize_t *coords = NULL; /* for point selection */
-
- /* Create the data space */
-
- acc_plist = create_faccess_plist(comm, info, facc_type);
- VRFY_G((acc_plist >= 0), "");
-
- file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_plist);
- VRFY_G((file >= 0), "H5Fcreate succeeded");
-
- status = H5Pclose(acc_plist);
- VRFY_G((status >= 0), "");
-
- /* setup dimensionality object */
- dims[0] = space_dim1 * (hsize_t)mpi_size_g;
- dims[1] = space_dim2;
-
- /* allocate memory for data buffer */
- data_array1 = (int *)malloc(dims[0] * dims[1] * sizeof(int));
- VRFY_G((data_array1 != NULL), "data_array1 malloc succeeded");
-
- /* set up dimensions of the slab this process accesses */
- ccslab_set(mpi_rank_g, mpi_size_g, start, count, stride, block, select_factor);
-
- /* set up the coords array selection */
- num_points = block[0] * block[1] * count[0] * count[1];
- coords = (hsize_t *)malloc(num_points * RANK * sizeof(hsize_t));
- VRFY_G((coords != NULL), "coords malloc succeeded");
- point_set(start, count, stride, block, num_points, coords, mode);
-
- /* Warning: H5Screate_simple requires an array of hsize_t elements
-     * even if we only pass a single value. Attempting anything else
- * appears to cause problems with 32 bit compilers.
- */
- file_dataspace = H5Screate_simple(2, dims, NULL);
- VRFY_G((file_dataspace >= 0), "file dataspace created succeeded");
-
- if (ALL != mem_selection) {
- mem_dataspace = H5Screate_simple(2, dims, NULL);
- VRFY_G((mem_dataspace >= 0), "mem dataspace created succeeded");
- }
- else {
- /* Putting the warning about H5Screate_simple (above) into practice... */
- hsize_t dsdims[1] = {num_points};
- mem_dataspace = H5Screate_simple(1, dsdims, NULL);
- VRFY_G((mem_dataspace >= 0), "mem_dataspace create succeeded");
- }
-
- crp_plist = H5Pcreate(H5P_DATASET_CREATE);
- VRFY_G((crp_plist >= 0), "");
-
- /* Set up chunk information. */
- chunk_dims[0] = dims[0] / (hsize_t)chunk_factor;
-
- /* to decrease the testing time, maintain bigger chunk size */
- (chunk_factor == 1) ? (chunk_dims[1] = space_dim2) : (chunk_dims[1] = space_dim2 / 2);
- status = H5Pset_chunk(crp_plist, 2, chunk_dims);
- VRFY_G((status >= 0), "chunk creation property list succeeded");
-
- dataset = H5Dcreate2(file, DSET_COLLECTIVE_CHUNK_NAME, H5T_NATIVE_INT, file_dataspace, H5P_DEFAULT,
- crp_plist, H5P_DEFAULT);
- VRFY_G((dataset >= 0), "dataset created succeeded");
-
- status = H5Pclose(crp_plist);
- VRFY_G((status >= 0), "");
-
- /*put some trivial data in the data array */
- ccdataset_fill(start, stride, count, block, data_array1, mem_selection);
-
- MESG("data_array initialized");
-
- switch (file_selection) {
- case HYPER:
- status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY_G((status >= 0), "hyperslab selection succeeded");
- break;
-
- case POINT:
- if (num_points) {
- status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY_G((status >= 0), "Element selection succeeded");
- }
- else {
- status = H5Sselect_none(file_dataspace);
- VRFY_G((status >= 0), "none selection succeeded");
- }
- break;
-
- case ALL:
- status = H5Sselect_all(file_dataspace);
- VRFY_G((status >= 0), "H5Sselect_all succeeded");
- break;
- }
-
- switch (mem_selection) {
- case HYPER:
- status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY_G((status >= 0), "hyperslab selection succeeded");
- break;
-
- case POINT:
- if (num_points) {
- status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY_G((status >= 0), "Element selection succeeded");
- }
- else {
- status = H5Sselect_none(mem_dataspace);
- VRFY_G((status >= 0), "none selection succeeded");
- }
- break;
-
- case ALL:
- status = H5Sselect_all(mem_dataspace);
- VRFY_G((status >= 0), "H5Sselect_all succeeded");
- break;
- }
-
- /* set up the collective transfer property list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY_G((xfer_plist >= 0), "");
-
- status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY_G((status >= 0), "MPIO collective transfer property succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY_G((status >= 0), "set independent IO collectively succeeded");
- }
-
- switch (api_option) {
- case API_LINK_HARD:
- status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_ONE_IO);
- VRFY_G((status >= 0), "collective chunk optimization succeeded");
- break;
-
- case API_MULTI_HARD:
- status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_MULTI_IO);
- VRFY_G((status >= 0), "collective chunk optimization succeeded ");
- break;
-
- case API_LINK_TRUE:
- status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 2);
- VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded");
- break;
-
- case API_LINK_FALSE:
- status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 6);
- VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded");
- break;
-
- case API_MULTI_COLL:
- status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */
- VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded");
- status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 50);
- VRFY_G((status >= 0), "collective chunk optimization set chunk ratio succeeded");
- break;
-
- case API_MULTI_IND:
- status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */
- VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded");
- status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 100);
- VRFY_G((status >= 0), "collective chunk optimization set chunk ratio succeeded");
- break;
-
- default:;
- }
-
-#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- if (facc_type == FACC_MPIO) {
- switch (api_option) {
- case API_LINK_HARD:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE,
- &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY_G((status >= 0), "testing property list inserted succeeded");
- break;
-
- case API_MULTI_HARD:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE,
- &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY_G((status >= 0), "testing property list inserted succeeded");
- break;
-
- case API_LINK_TRUE:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status =
- H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE,
- &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY_G((status >= 0), "testing property list inserted succeeded");
- break;
-
- case API_LINK_FALSE:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status =
- H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE,
- &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY_G((status >= 0), "testing property list inserted succeeded");
- break;
-
- case API_MULTI_COLL:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status =
- H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME,
- H5D_XFER_COLL_CHUNK_SIZE, &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY_G((status >= 0), "testing property list inserted succeeded");
- break;
-
- case API_MULTI_IND:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status =
- H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE,
- &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY_G((status >= 0), "testing property list inserted succeeded");
- break;
-
- default:;
- }
- }
-#endif
-
- /* write data collectively */
- status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY_G((status >= 0), "dataset write succeeded");
-
-#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- if (facc_type == FACC_MPIO) {
- switch (api_option) {
- case API_LINK_HARD:
- status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &prop_value);
- VRFY_G((status >= 0), "testing property list get succeeded");
- VRFY_G((prop_value == 0), "API to set LINK COLLECTIVE IO directly succeeded");
- break;
-
- case API_MULTI_HARD:
- status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, &prop_value);
- VRFY_G((status >= 0), "testing property list get succeeded");
- VRFY_G((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded");
- break;
-
- case API_LINK_TRUE:
- status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, &prop_value);
- VRFY_G((status >= 0), "testing property list get succeeded");
- VRFY_G((prop_value == 0), "API to set LINK COLLECTIVE IO succeeded");
- break;
-
- case API_LINK_FALSE:
- status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, &prop_value);
- VRFY_G((status >= 0), "testing property list get succeeded");
- VRFY_G((prop_value == 0), "API to set LINK IO transferring to multi-chunk IO succeeded");
- break;
-
- case API_MULTI_COLL:
- status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, &prop_value);
- VRFY_G((status >= 0), "testing property list get succeeded");
- VRFY_G((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded");
- break;
-
- case API_MULTI_IND:
- status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, &prop_value);
- VRFY_G((status >= 0), "testing property list get succeeded");
- VRFY_G((prop_value == 0),
- "API to set MULTI-CHUNK IO transferring to independent IO succeeded");
- break;
-
- default:;
- }
- }
-#endif
-
- status = H5Dclose(dataset);
- VRFY_G((status >= 0), "");
-
- status = H5Pclose(xfer_plist);
- VRFY_G((status >= 0), "property list closed");
-
- status = H5Sclose(file_dataspace);
- VRFY_G((status >= 0), "");
-
- status = H5Sclose(mem_dataspace);
- VRFY_G((status >= 0), "");
-
- status = H5Fclose(file);
- VRFY_G((status >= 0), "");
-
- if (data_array1)
- free(data_array1);
-
- /* Use collective read to verify the correctness of collective write. */
-
- /* allocate memory for data buffer */
- data_array1 = (int *)malloc(dims[0] * dims[1] * sizeof(int));
- VRFY_G((data_array1 != NULL), "data_array1 malloc succeeded");
-
- /* allocate memory for data buffer */
- data_origin1 = (int *)malloc(dims[0] * dims[1] * sizeof(int));
- VRFY_G((data_origin1 != NULL), "data_origin1 malloc succeeded");
-
- acc_plist = create_faccess_plist(comm, info, facc_type);
- VRFY_G((acc_plist >= 0), "MPIO creation property list succeeded");
-
- file = H5Fopen(FILENAME[0], H5F_ACC_RDONLY, acc_plist);
- VRFY_G((file >= 0), "H5Fcreate succeeded");
-
- status = H5Pclose(acc_plist);
- VRFY_G((status >= 0), "");
-
- /* open the collective dataset*/
- dataset = H5Dopen2(file, DSET_COLLECTIVE_CHUNK_NAME, H5P_DEFAULT);
- VRFY_G((dataset >= 0), "");
-
- /* set up dimensions of the slab this process accesses */
- ccslab_set(mpi_rank_g, mpi_size_g, start, count, stride, block, select_factor);
-
- /* obtain the file and mem dataspace*/
- file_dataspace = H5Dget_space(dataset);
- VRFY_G((file_dataspace >= 0), "");
-
- if (ALL != mem_selection) {
- mem_dataspace = H5Dget_space(dataset);
- VRFY_G((mem_dataspace >= 0), "");
- }
- else {
- /* Warning: H5Screate_simple requires an array of hsize_t elements
-     * even if we only pass a single value. Attempting anything else
- * appears to cause problems with 32 bit compilers.
- */
- hsize_t dsdims[1] = {num_points};
- mem_dataspace = H5Screate_simple(1, dsdims, NULL);
- VRFY_G((mem_dataspace >= 0), "mem_dataspace create succeeded");
- }
-
- switch (file_selection) {
- case HYPER:
- status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY_G((status >= 0), "hyperslab selection succeeded");
- break;
-
- case POINT:
- if (num_points) {
- status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY_G((status >= 0), "Element selection succeeded");
- }
- else {
- status = H5Sselect_none(file_dataspace);
- VRFY_G((status >= 0), "none selection succeeded");
- }
- break;
-
- case ALL:
- status = H5Sselect_all(file_dataspace);
- VRFY_G((status >= 0), "H5Sselect_all succeeded");
- break;
- }
-
- switch (mem_selection) {
- case HYPER:
- status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY_G((status >= 0), "hyperslab selection succeeded");
- break;
-
- case POINT:
- if (num_points) {
- status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY_G((status >= 0), "Element selection succeeded");
- }
- else {
- status = H5Sselect_none(mem_dataspace);
- VRFY_G((status >= 0), "none selection succeeded");
- }
- break;
-
- case ALL:
- status = H5Sselect_all(mem_dataspace);
- VRFY_G((status >= 0), "H5Sselect_all succeeded");
- break;
- }
-
- /* fill dataset with test data */
- ccdataset_fill(start, stride, count, block, data_origin1, mem_selection);
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY_G((xfer_plist >= 0), "");
-
- status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY_G((status >= 0), "MPIO collective transfer property succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY_G((status >= 0), "set independent IO collectively succeeded");
- }
-
- status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY_G((status >= 0), "dataset read succeeded");
-
- /* verify the read data with original expected data */
- status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1, mem_selection);
- if (status)
- nerrors++;
-
- status = H5Pclose(xfer_plist);
- VRFY_G((status >= 0), "property list closed");
-
- /* close dataset collectively */
- status = H5Dclose(dataset);
- VRFY_G((status >= 0), "H5Dclose");
-
- /* release all IDs created */
- status = H5Sclose(file_dataspace);
- VRFY_G((status >= 0), "H5Sclose");
-
- status = H5Sclose(mem_dataspace);
- VRFY_G((status >= 0), "H5Sclose");
-
- /* close the file collectively */
- status = H5Fclose(file);
- VRFY_G((status >= 0), "H5Fclose");
-
- /* release data buffers */
- if (coords)
- free(coords);
- if (data_array1)
- free(data_array1);
- if (data_origin1)
- free(data_origin1);
-}
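For reference, the api_option switch above boils down to a few DXPL knobs. A minimal sketch of forcing the two main collective chunk I/O modes, assuming `xfer_plist` is a dataset transfer property list (error checks omitted for brevity):

herr_t status;

status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);

/* Route every chunk through one linked collective operation ... */
status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_ONE_IO);

/* ... or force one collective operation per chunk instead. */
status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_MULTI_IO);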
-
-int
-main(int argc, char **argv)
-{
- hid_t acc_plist = H5I_INVALID_HID;
-
- MPI_Init(&argc, &argv);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size_g);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank_g);
-
- /* Attempt to turn off atexit post processing so that in case errors
- * happen during the test and the process is aborted, it will not get
- * hung in the atexit post processing in which it may try to make MPI
- * calls. By then, MPI calls may not work.
- */
- if (H5dont_atexit() < 0)
- printf("Failed to turn off atexit processing. Continue.\n");
-
- /* set alarm. */
- /* TestAlarmOn(); */
-
- acc_plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
-
- /* Get the capability flag of the VOL connector being used */
- if (H5Pget_vol_cap_flags(acc_plist, &vol_cap_flags_g) < 0) {
- if (MAIN_PROCESS)
- printf("Failed to get the capability flag of the VOL connector being used\n");
-
- MPI_Finalize();
- return 0;
- }
-
- /* Make sure the connector supports the API functions being tested. This test only
- * uses a few API functions, such as H5Fcreate/open/close/delete, H5Dcreate/write/read/close,
- * and H5Dget_space. */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAIN_PROCESS)
- printf(
- "API functions for basic file, dataset basic or more aren't supported with this connector\n");
-
- MPI_Finalize();
- return 0;
- }
-
- dataset_big_write();
- MPI_Barrier(MPI_COMM_WORLD);
-
- dataset_big_read();
- MPI_Barrier(MPI_COMM_WORLD);
-
- coll_chunk1();
- MPI_Barrier(MPI_COMM_WORLD);
- coll_chunk2();
- MPI_Barrier(MPI_COMM_WORLD);
- coll_chunk3();
- MPI_Barrier(MPI_COMM_WORLD);
-
- single_rank_independent_io();
-
- /* turn off alarm */
- /* TestAlarmOff(); */
-
- if (mpi_rank_g == 0) {
- hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
-
- H5Pset_fapl_mpio(fapl_id, MPI_COMM_SELF, MPI_INFO_NULL);
-
- H5E_BEGIN_TRY
- {
- H5Fdelete(FILENAME[0], fapl_id);
- H5Fdelete(FILENAME[1], fapl_id);
- }
- H5E_END_TRY
-
- H5Pclose(fapl_id);
- }
-
- H5Pclose(acc_plist);
-
- /* close HDF5 library */
- H5close();
-
- MPI_Finalize();
-
- return 0;
-}
diff --git a/testpar/API/t_chunk_alloc.c b/testpar/API/t_chunk_alloc.c
deleted file mode 100644
index 673563b..0000000
--- a/testpar/API/t_chunk_alloc.c
+++ /dev/null
@@ -1,507 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
- * This verifies if the storage space allocation methods are compatible between
- * serial and parallel modes.
- */
-
-#include "hdf5.h"
-#include "testphdf5.h"
-static int mpi_size, mpi_rank;
-
-#define DSET_NAME "ExtendibleArray"
-#define CHUNK_SIZE 1000 /* #elements per chunk */
-#define CHUNK_FACTOR 200 /* default dataset size in terms of chunks */
-#define CLOSE 1
-#define NO_CLOSE 0
-
-#if 0
-static MPI_Offset
-get_filesize(const char *filename)
-{
- int mpierr;
- MPI_File fd;
- MPI_Offset filesize;
-
- mpierr = MPI_File_open(MPI_COMM_SELF, filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &fd);
- VRFY((mpierr == MPI_SUCCESS), "");
-
- mpierr = MPI_File_get_size(fd, &filesize);
- VRFY((mpierr == MPI_SUCCESS), "");
-
- mpierr = MPI_File_close(&fd);
- VRFY((mpierr == MPI_SUCCESS), "");
-
- return (filesize);
-}
-#endif
-
-typedef enum write_pattern { none, sec_last, all } write_type;
-
-typedef enum access_ { write_all, open_only, extend_only } access_type;
-
-/*
- * This creates a dataset serially with chunks, each of CHUNK_SIZE
- * elements. The allocation time is set to H5D_ALLOC_TIME_EARLY. Another
- * routine will open this in parallel for extension test.
- */
-static void
-create_chunked_dataset(const char *filename, int chunk_factor, write_type write_pattern)
-{
- hid_t file_id, dataset; /* handles */
- hid_t dataspace, memspace;
- hid_t cparms;
- hsize_t dims[1];
- hsize_t maxdims[1] = {H5S_UNLIMITED};
-
- hsize_t chunk_dims[1] = {CHUNK_SIZE};
- hsize_t count[1];
- hsize_t stride[1];
- hsize_t block[1];
- hsize_t offset[1]; /* Selection offset within dataspace */
- /* Variables used in reading data back */
- char buffer[CHUNK_SIZE];
- long nchunks;
- herr_t hrc;
-#if 0
- MPI_Offset filesize, /* actual file size */
- est_filesize; /* estimated file size */
-#endif
- /* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Only MAINPROCESS should create the file. Others just wait. */
- if (MAINPROCESS) {
- nchunks = chunk_factor * mpi_size;
- dims[0] = (hsize_t)(nchunks * CHUNK_SIZE);
- /* Create the data space with unlimited dimensions. */
- dataspace = H5Screate_simple(1, dims, maxdims);
- VRFY((dataspace >= 0), "");
-
- memspace = H5Screate_simple(1, chunk_dims, NULL);
- VRFY((memspace >= 0), "");
-
- /* Create a new file. If file exists its contents will be overwritten. */
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((file_id >= 0), "H5Fcreate");
-
- /* Modify dataset creation properties, i.e. enable chunking */
- cparms = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((cparms >= 0), "");
-
- hrc = H5Pset_alloc_time(cparms, H5D_ALLOC_TIME_EARLY);
- VRFY((hrc >= 0), "");
-
- hrc = H5Pset_chunk(cparms, 1, chunk_dims);
- VRFY((hrc >= 0), "");
-
- /* Create a new dataset within the file using cparms creation properties. */
- dataset =
- H5Dcreate2(file_id, DSET_NAME, H5T_NATIVE_UCHAR, dataspace, H5P_DEFAULT, cparms, H5P_DEFAULT);
- VRFY((dataset >= 0), "");
-
- if (write_pattern == sec_last) {
- memset(buffer, 100, CHUNK_SIZE);
-
- count[0] = 1;
- stride[0] = 1;
- block[0] = chunk_dims[0];
- offset[0] = (hsize_t)(nchunks - 2) * chunk_dims[0];
-
- hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
- VRFY((hrc >= 0), "");
-
- /* Write sec_last chunk */
- hrc = H5Dwrite(dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer);
- VRFY((hrc >= 0), "H5Dwrite");
- } /* end if */
-
- /* Close resources */
- hrc = H5Dclose(dataset);
- VRFY((hrc >= 0), "");
- dataset = -1;
-
- hrc = H5Sclose(dataspace);
- VRFY((hrc >= 0), "");
-
- hrc = H5Sclose(memspace);
- VRFY((hrc >= 0), "");
-
- hrc = H5Pclose(cparms);
- VRFY((hrc >= 0), "");
-
- hrc = H5Fclose(file_id);
- VRFY((hrc >= 0), "");
- file_id = -1;
-
-#if 0
- /* verify file size */
- filesize = get_filesize(filename);
- est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char);
- VRFY((filesize >= est_filesize), "file size check");
-#endif
- }
-
- /* Make sure all processes are done before exiting this routine. Otherwise,
- * other tests may start and change the test data file before some processes
- * of this test are still accessing the file.
- */
-
- MPI_Barrier(MPI_COMM_WORLD);
-}
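A condensed sketch of the creation properties that make this test work: chunked layout plus H5D_ALLOC_TIME_EARLY, so every chunk exists on disk right after H5Dcreate2() and unwritten chunks read back as the default (zero) fill value in the verification step. CHUNK_SIZE is the per-chunk element count defined above.

hid_t   dcpl          = H5Pcreate(H5P_DATASET_CREATE);
hsize_t chunk_dims[1] = {CHUNK_SIZE};

H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);
H5Pset_chunk(dcpl, 1, chunk_dims);

/* pass dcpl as the DCPL argument of H5Dcreate2(), then H5Pclose(dcpl) */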
-
-/*
- * This routine performs one of three types of parallel access, depending on
- * 'action': it writes to the entire dataset, extends the dataset to
- * nchunks*CHUNK_SIZE elements, or only opens the dataset. At the end, it
- * verifies that the size of the dataset is consistent with argument
- * 'chunk_factor'.
- */
-static void
-parallel_access_dataset(const char *filename, int chunk_factor, access_type action, hid_t *file_id,
- hid_t *dataset)
-{
- hid_t memspace, dataspace; /* HDF5 file identifier */
- hid_t access_plist; /* HDF5 ID for file access property list */
- herr_t hrc; /* HDF5 return code */
- hsize_t size[1];
-
- hsize_t chunk_dims[1] = {CHUNK_SIZE};
- hsize_t count[1];
- hsize_t stride[1];
- hsize_t block[1];
- hsize_t offset[1]; /* Selection offset within dataspace */
- hsize_t dims[1];
- hsize_t maxdims[1];
-
- /* Variables used in reading data back */
- char buffer[CHUNK_SIZE];
- int i;
- long nchunks;
-#if 0
- /* MPI Gubbins */
- MPI_Offset filesize, /* actual file size */
- est_filesize; /* estimated file size */
-#endif
-
- /* Initialize MPI */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- nchunks = chunk_factor * mpi_size;
-
- /* Set up MPIO file access property lists */
- access_plist = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((access_plist >= 0), "");
-
- hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL);
- VRFY((hrc >= 0), "");
-
- /* Open the file */
- if (*file_id < 0) {
- *file_id = H5Fopen(filename, H5F_ACC_RDWR, access_plist);
- VRFY((*file_id >= 0), "");
- }
-
- /* Open dataset*/
- if (*dataset < 0) {
- *dataset = H5Dopen2(*file_id, DSET_NAME, H5P_DEFAULT);
- VRFY((*dataset >= 0), "");
- }
-
- /* Make sure all processes are done before continuing. Otherwise, one
- * process could change the dataset extent before another finishes opening
- * it, resulting in only some of the processes calling H5Dset_extent(). */
- MPI_Barrier(MPI_COMM_WORLD);
-
- memspace = H5Screate_simple(1, chunk_dims, NULL);
- VRFY((memspace >= 0), "");
-
- dataspace = H5Dget_space(*dataset);
- VRFY((dataspace >= 0), "");
-
- size[0] = (hsize_t)nchunks * CHUNK_SIZE;
-
- switch (action) {
-
- /* all chunks are written by all the processes in an interleaved way*/
- case write_all:
-
- memset(buffer, mpi_rank + 1, CHUNK_SIZE);
- count[0] = 1;
- stride[0] = 1;
- block[0] = chunk_dims[0];
- for (i = 0; i < nchunks / mpi_size; i++) {
- offset[0] = (hsize_t)(i * mpi_size + mpi_rank) * chunk_dims[0];
-
- hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
- VRFY((hrc >= 0), "");
-
- /* Write the buffer out */
- hrc = H5Dwrite(*dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer);
- VRFY((hrc >= 0), "H5Dwrite");
- }
-
- break;
-
- /* only extends the dataset */
- case extend_only:
- /* check if new size is larger than old size */
- hrc = H5Sget_simple_extent_dims(dataspace, dims, maxdims);
- VRFY((hrc >= 0), "");
-
- /* Extend dataset*/
- if (size[0] > dims[0]) {
- hrc = H5Dset_extent(*dataset, size);
- VRFY((hrc >= 0), "");
- }
- break;
-
- /* only opens the *dataset */
- case open_only:
- break;
- default:
- assert(0);
- }
-
- /* Close up */
- hrc = H5Dclose(*dataset);
- VRFY((hrc >= 0), "");
- *dataset = -1;
-
- hrc = H5Sclose(dataspace);
- VRFY((hrc >= 0), "");
-
- hrc = H5Sclose(memspace);
- VRFY((hrc >= 0), "");
-
- hrc = H5Fclose(*file_id);
- VRFY((hrc >= 0), "");
- *file_id = -1;
-
-#if 0
- /* verify file size */
- filesize = get_filesize(filename);
- est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char);
- VRFY((filesize >= est_filesize), "file size check");
-#endif
-
- /* Can close some plists */
- hrc = H5Pclose(access_plist);
- VRFY((hrc >= 0), "");
-
- /* Make sure all processes are done before exiting this routine. Otherwise,
- * other tests may start and change the test data file before some processes
- * of this test are still accessing the file.
- */
- MPI_Barrier(MPI_COMM_WORLD);
-}
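The write_all branch above distributes chunks round-robin: chunk j is written by rank j % mpi_size. A minimal sketch of the offset arithmetic, assuming nchunks, mpi_size, mpi_rank and chunk_dims as in the routine above:

for (long i = 0; i < nchunks / mpi_size; i++) {
    /* The i-th chunk owned by this rank starts at this element offset. */
    hsize_t offset = (hsize_t)(i * mpi_size + mpi_rank) * chunk_dims[0];

    /* select [offset, offset + chunk_dims[0]) in the file dataspace
     * and write one chunk's worth of data */
    (void)offset;
}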
-
-/*
- * This routine verifies the data written in the dataset. Depending on the
- * value of parameter `write_pattern', it checks one of three cases:
- * 1. the read returns the correct fill values even though the dataset has
- *    not been written;
- * 2. the read still returns the correct fill values when only a small part
- *    has been written;
- * 3. the read returns the correct values when the whole dataset has been
- *    written in an interleaved pattern.
- */
-static void
-verify_data(const char *filename, int chunk_factor, write_type write_pattern, int vclose, hid_t *file_id,
- hid_t *dataset)
-{
- hid_t dataspace, memspace; /* HDF5 file identifier */
- hid_t access_plist; /* HDF5 ID for file access property list */
- herr_t hrc; /* HDF5 return code */
-
- hsize_t chunk_dims[1] = {CHUNK_SIZE};
- hsize_t count[1];
- hsize_t stride[1];
- hsize_t block[1];
- hsize_t offset[1]; /* Selection offset within dataspace */
- /* Variables used in reading data back */
- char buffer[CHUNK_SIZE];
- int value, i;
- int index_l;
- long nchunks;
- /* Initialize MPI */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- nchunks = chunk_factor * mpi_size;
-
- /* Set up MPIO file access property lists */
- access_plist = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((access_plist >= 0), "");
-
- hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL);
- VRFY((hrc >= 0), "");
-
- /* Open the file */
- if (*file_id < 0) {
- *file_id = H5Fopen(filename, H5F_ACC_RDWR, access_plist);
- VRFY((*file_id >= 0), "");
- }
-
- /* Open dataset*/
- if (*dataset < 0) {
- *dataset = H5Dopen2(*file_id, DSET_NAME, H5P_DEFAULT);
- VRFY((*dataset >= 0), "");
- }
-
- memspace = H5Screate_simple(1, chunk_dims, NULL);
- VRFY((memspace >= 0), "");
-
- dataspace = H5Dget_space(*dataset);
- VRFY((dataspace >= 0), "");
-
- /* all processes check all chunks. */
- count[0] = 1;
- stride[0] = 1;
- block[0] = chunk_dims[0];
- for (i = 0; i < nchunks; i++) {
- /* reset buffer values */
- memset(buffer, -1, CHUNK_SIZE);
-
- offset[0] = (hsize_t)i * chunk_dims[0];
-
- hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
- VRFY((hrc >= 0), "");
-
- /* Read the chunk */
- hrc = H5Dread(*dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer);
- VRFY((hrc >= 0), "H5Dread");
-
- /* set expected value according the write pattern */
- switch (write_pattern) {
- case all:
- value = i % mpi_size + 1;
- break;
- case none:
- value = 0;
- break;
- case sec_last:
- if (i == nchunks - 2)
- value = 100;
- else
- value = 0;
- break;
- default:
- assert(0);
- }
-
- /* verify content of the chunk */
- for (index_l = 0; index_l < CHUNK_SIZE; index_l++)
- VRFY((buffer[index_l] == value), "data verification");
- }
-
- hrc = H5Sclose(dataspace);
- VRFY((hrc >= 0), "");
-
- hrc = H5Sclose(memspace);
- VRFY((hrc >= 0), "");
-
- /* Can close some plists */
- hrc = H5Pclose(access_plist);
- VRFY((hrc >= 0), "");
-
- /* Close up */
- if (vclose) {
- hrc = H5Dclose(*dataset);
- VRFY((hrc >= 0), "");
- *dataset = -1;
-
- hrc = H5Fclose(*file_id);
- VRFY((hrc >= 0), "");
- *file_id = -1;
- }
-
- /* Make sure all processes are done before exiting this routine. Otherwise,
- * other tests may start and change the test data file before some processes
- * of this test are still accessing the file.
- */
- MPI_Barrier(MPI_COMM_WORLD);
-}
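As a side note, the expected fill value does not have to be hard-coded to 0; a small sketch (not part of the removed test) of querying it from the dataset's creation property list, assuming `dset` is an open dataset id and stdio.h is included:

unsigned char fill = 0;
hid_t         dcpl = H5Dget_create_plist(dset);

if (H5Pget_fill_value(dcpl, H5T_NATIVE_UCHAR, &fill) < 0)
    fprintf(stderr, "H5Pget_fill_value failed\n");
H5Pclose(dcpl);

/* chunks that were never written are expected to read back as `fill` */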
-
-/*
- * Test the following possible scenarios:
- * Case 1:
- *    Sequentially create a file and dataset with H5D_ALLOC_TIME_EARLY and large
- *    size, no write, close, reopen in parallel, read to verify all chunks return
- *    the fill value.
- * Case 2:
- *    Sequentially create a file and dataset with H5D_ALLOC_TIME_EARLY but small
- *    size, no write, close, reopen in parallel, extend to large size, then close,
- *    then reopen in parallel and read to verify all chunks return the fill value.
- * Case 3:
- *    Sequentially create a file and dataset with H5D_ALLOC_TIME_EARLY and large
- *    size, write just a small part of the dataset (the second-to-last chunk),
- *    close, then reopen in parallel, read to verify all chunks return the fill
- *    value except the small portion that has been written. Without closing,
- *    write all parts of the dataset in an interleaved pattern, close it, reopen
- *    it, and read to verify all data are as written.
- */
-void
-test_chunk_alloc(void)
-{
- const char *filename;
- hid_t file_id, dataset;
-
- file_id = dataset = -1;
-
- /* Initialize MPI */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, dataset, or dataset more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- filename = (const char *)PARATESTFILE /* GetTestParameters() */;
- if (VERBOSE_MED)
- printf("Extend Chunked allocation test on file %s\n", filename);
-
- /* Case 1 */
- /* Create chunked dataset without writing anything.*/
- create_chunked_dataset(filename, CHUNK_FACTOR, none);
- /* reopen dataset in parallel and check for file size */
- parallel_access_dataset(filename, CHUNK_FACTOR, open_only, &file_id, &dataset);
- /* reopen dataset in parallel, read and verify the data */
- verify_data(filename, CHUNK_FACTOR, none, CLOSE, &file_id, &dataset);
-
- /* Case 2 */
- /* Create chunked dataset without writing anything */
- create_chunked_dataset(filename, 20, none);
- /* reopen dataset in parallel and only extend it */
- parallel_access_dataset(filename, CHUNK_FACTOR, extend_only, &file_id, &dataset);
- /* reopen dataset in parallel, read and verify the data */
- verify_data(filename, CHUNK_FACTOR, none, CLOSE, &file_id, &dataset);
-
- /* Case 3 */
- /* Create chunked dataset and write in the second to last chunk */
- create_chunked_dataset(filename, CHUNK_FACTOR, sec_last);
- /* Reopen dataset in parallel, read and verify the data. The file and dataset are not closed*/
- verify_data(filename, CHUNK_FACTOR, sec_last, NO_CLOSE, &file_id, &dataset);
-    /* All processes write to all the chunks in an interleaved way */
- parallel_access_dataset(filename, CHUNK_FACTOR, write_all, &file_id, &dataset);
- /* reopen dataset in parallel, read and verify the data */
- verify_data(filename, CHUNK_FACTOR, all, CLOSE, &file_id, &dataset);
-}
diff --git a/testpar/API/t_coll_chunk.c b/testpar/API/t_coll_chunk.c
deleted file mode 100644
index 99f845f..0000000
--- a/testpar/API/t_coll_chunk.c
+++ /dev/null
@@ -1,1345 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-#include "hdf5.h"
-#include "testphdf5.h"
-
-#define HYPER 1
-#define POINT 2
-#define ALL 3
-
-/* some commonly used routines for collective chunk IO tests*/
-
-static void ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[],
- hsize_t block[], int mode);
-
-static void ccdataset_fill(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
- DATATYPE *dataset, int mem_selection);
-
-static void ccdataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset);
-
-static int ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
- DATATYPE *dataset, DATATYPE *original, int mem_selection);
-
-static void coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option,
- int file_selection, int mem_selection, int mode);
-
-/*-------------------------------------------------------------------------
- * Function: coll_chunk1
- *
- * Purpose: Wrapper to test the collective chunk IO for regular JOINT
- selection with a single chunk
- *
- * Return: Success: 0
- *
- * Failure: -1
- *-------------------------------------------------------------------------
- */
-
-/* ------------------------------------------------------------------------
- * Descriptions for the selection: One big singular selection inside one chunk
- * Two dimensions,
- *
- * dim1 = SPACE_DIM1(5760)*mpi_size
- * dim2 = SPACE_DIM2(3)
- * chunk_dim1 = dim1
- * chunk_dim2 = dim2
- * block = 1 for all dimensions
- * stride = 1 for all dimensions
- * count0 = SPACE_DIM1(5760)
- * count1 = SPACE_DIM2(3)
- * start0 = mpi_rank*SPACE_DIM1
- * start1 = 0
- * ------------------------------------------------------------------------
- */
-
-void
-coll_chunk1(void)
-{
- const char *filename = PARATESTFILE /* GetTestParameters() */;
- int mpi_rank;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, dataset or dataset more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
- coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
- coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
-
- coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
- coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
- coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
-}
-
-/*-------------------------------------------------------------------------
- * Function: coll_chunk2
- *
- * Purpose: Wrapper to test the collective chunk IO for regular DISJOINT
- selection with a single chunk
- *
- * Return: Success: 0
- *
- * Failure: -1
- *-------------------------------------------------------------------------
- */
-
-/* ------------------------------------------------------------------------
- * Descriptions for the selection: many disjoint selections inside one chunk
- * Two dimensions,
- *
- * dim1 = SPACE_DIM1*mpi_size(5760)
- * dim2 = SPACE_DIM2(3)
- * chunk_dim1 = dim1
- * chunk_dim2 = dim2
- * block = 1 for all dimensions
- * stride = 3 for all dimensions
- * count0 = SPACE_DIM1/stride0(5760/3)
- * count1 = SPACE_DIM2/stride(3/3 = 1)
- * start0 = mpi_rank*SPACE_DIM1
- * start1 = 0
- *
- * ------------------------------------------------------------------------
- */
-void
-coll_chunk2(void)
-{
- const char *filename = PARATESTFILE /* GetTestParameters() */;
- int mpi_rank;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, dataset or dataset more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
- coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
- coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
-
- coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, IN_ORDER);
- coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, IN_ORDER);
- coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, IN_ORDER);
-}
-
-/*-------------------------------------------------------------------------
- * Function: coll_chunk3
- *
- * Purpose:    Wrapper to test the collective chunk IO for regular JOINT
-               selection with at least 2*mpi_size chunks
- *
- * Return: Success: 0
- *
- * Failure: -1
- *-------------------------------------------------------------------------
- */
-
-/* ------------------------------------------------------------------------
- * Descriptions for the selection: one singular selection across many chunks
- * Two dimensions, Num of chunks = 2* mpi_size
- *
- * dim1 = SPACE_DIM1*mpi_size
- * dim2 = SPACE_DIM2(3)
- * chunk_dim1 = SPACE_DIM1
- * chunk_dim2 = dim2/2
- * block = 1 for all dimensions
- * stride = 1 for all dimensions
- * count0 = SPACE_DIM1
- * count1 = SPACE_DIM2(3)
- * start0 = mpi_rank*SPACE_DIM1
- * start1 = 0
- *
- * ------------------------------------------------------------------------
- */
-
-void
-coll_chunk3(void)
-{
- const char *filename = PARATESTFILE /* GetTestParameters() */;
- int mpi_size;
- int mpi_rank;
-
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, dataset or dataset more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
-
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
-}
-
-/*-------------------------------------------------------------------------
- * Function: coll_chunk4
- *
- * Purpose:    Wrapper to test the collective chunk IO for regular JOINT
-               selection with at least 2*mpi_size chunks
- *
- * Return: Success: 0
- *
- * Failure: -1
- *-------------------------------------------------------------------------
- */
-
-/* ------------------------------------------------------------------------
- * Descriptions for the selection: one singular selection across many chunks
- * Two dimensions, Num of chunks = 2* mpi_size
- *
- * dim1 = SPACE_DIM1*mpi_size
- * dim2 = SPACE_DIM2
- * chunk_dim1 = dim1
- * chunk_dim2 = dim2
- * block = 1 for all dimensions
- * stride = 1 for all dimensions
- * count0 = SPACE_DIM1
- * count1 = SPACE_DIM2(3)
- * start0 = mpi_rank*SPACE_DIM1
- * start1 = 0
- *
- * ------------------------------------------------------------------------
- */
-
-void
-coll_chunk4(void)
-{
- const char *filename = PARATESTFILE /* GetTestParameters() */;
- int mpi_rank;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, dataset or dataset more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
- coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, HYPER, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, ALL, OUT_OF_ORDER);
- coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, HYPER, OUT_OF_ORDER);
-
- coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, ALL, IN_ORDER);
- coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, POINT, IN_ORDER);
- coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, HYPER, IN_ORDER);
-}
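coll_chunk4() exercises BYROW_SELECTNONE, where some ranks end up with an empty selection. The key detail, visible in coll_chunktest(), is that such ranks still make the collective H5Dwrite()/H5Dread() call, just with H5Sselect_none() on both dataspaces. A minimal sketch, assuming `num_points`, `fspace`, `mspace`, `dset`, `dxpl` and `buf` exist:

herr_t status;

if (num_points == 0) {
    H5Sselect_none(fspace);
    H5Sselect_none(mspace);
}

/* every rank participates in the collective call, even with nothing selected */
status = H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, dxpl, buf);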
-
-/*-------------------------------------------------------------------------
- * Function:    coll_chunk5
- *
- * Purpose:    Wrapper to test the collective chunk IO for regular JOINT
-               selection with at least 2*mpi_size chunks
- *
- * Return: Success: 0
- *
- * Failure: -1
- *-------------------------------------------------------------------------
- */
-
-/* ------------------------------------------------------------------------
- * Descriptions for the selection: one singular selection across many chunks
- * Two dimensions, Num of chunks = 2* mpi_size
- *
- * dim1 = SPACE_DIM1*mpi_size
- * dim2 = SPACE_DIM2
- * chunk_dim1 = dim1
- * chunk_dim2 = dim2
- * block = 1 for all dimensions
- * stride = 1 for all dimensions
- * count0 = SPACE_DIM1
- * count1 = SPACE_DIM2(3)
- * start0 = mpi_rank*SPACE_DIM1
- * start1 = 0
- *
- * ------------------------------------------------------------------------
- */
-
-void
-coll_chunk5(void)
-{
- const char *filename = PARATESTFILE /* GetTestParameters() */;
- int mpi_rank;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, dataset or dataset more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, HYPER, HYPER, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, HYPER, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, ALL, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, HYPER, OUT_OF_ORDER);
-
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, ALL, IN_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, POINT, IN_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, HYPER, IN_ORDER);
-}
-
-/*-------------------------------------------------------------------------
- * Function: coll_chunk6
- *
- * Purpose:     Test a direct request for multi-chunk I/O.
- *              Wrapper to test the collective chunk IO for regular JOINT
- *              selection with at least 2*mpi_size chunks, forcing the
- *              direct-to-multi-chunk I/O path.
- *
- * Return: Success: 0
- *
- * Failure: -1
- *-------------------------------------------------------------------------
- */
-
-/* ------------------------------------------------------------------------
- * Descriptions for the selection: one singular selection across many chunks
- * Two dimensions, Num of chunks = 2* mpi_size
- *
- * dim1 = SPACE_DIM1*mpi_size
- * dim2 = SPACE_DIM2
- * chunk_dim1 = dim1
- * chunk_dim2 = dim2
- * block = 1 for all dimensions
- * stride = 1 for all dimensions
- * count0 = SPACE_DIM1
- * count1 = SPACE_DIM2(3)
- * start0 = mpi_rank*SPACE_DIM1
- * start1 = 0
- *
- * ------------------------------------------------------------------------
- */
-
-void
-coll_chunk6(void)
-{
- const char *filename = PARATESTFILE /* GetTestParameters() */;
- int mpi_rank;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, dataset or dataset more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, HYPER, HYPER, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, HYPER, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, ALL, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, HYPER, OUT_OF_ORDER);
-
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, ALL, IN_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, POINT, IN_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, HYPER, IN_ORDER);
-}
-
-/*-------------------------------------------------------------------------
- * Function: coll_chunk7
- *
- * Purpose:     Wrapper to test the collective chunk I/O for a regular JOINT
- *              selection with at least 2*mpi_size chunks
- *
- * Return: Success: 0
- *
- * Failure: -1
- *-------------------------------------------------------------------------
- */
-
-/* ------------------------------------------------------------------------
- * Descriptions for the selection: one singular selection across many chunks
- * Two dimensions, Num of chunks = 2* mpi_size
- *
- * dim1 = SPACE_DIM1*mpi_size
- * dim2 = SPACE_DIM2
- * chunk_dim1 = dim1
- * chunk_dim2 = dim2
- * block = 1 for all dimensions
- * stride = 1 for all dimensions
- * count0 = SPACE_DIM1
- * count1 = SPACE_DIM2(3)
- * start0 = mpi_rank*SPACE_DIM1
- * start1 = 0
- *
- * ------------------------------------------------------------------------
- */
-
-void
-coll_chunk7(void)
-{
- const char *filename = PARATESTFILE /* GetTestParameters() */;
- int mpi_rank;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, dataset or dataset more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, HYPER, HYPER, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, HYPER, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, ALL, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, HYPER, OUT_OF_ORDER);
-
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, ALL, IN_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, POINT, IN_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, HYPER, IN_ORDER);
-}
-
-/*-------------------------------------------------------------------------
- * Function: coll_chunk8
- *
- * Purpose:     Wrapper to test the collective chunk I/O for a regular JOINT
- *              selection with at least 2*mpi_size chunks
- *
- * Return: Success: 0
- *
- * Failure: -1
- *-------------------------------------------------------------------------
- */
-
-/* ------------------------------------------------------------------------
- * Descriptions for the selection: one singular selection across many chunks
- * Two dimensions, Num of chunks = 2* mpi_size
- *
- * dim1 = SPACE_DIM1*mpi_size
- * dim2 = SPACE_DIM2
- * chunk_dim1 = dim1
- * chunk_dim2 = dim2
- * block = 1 for all dimensions
- * stride = 1 for all dimensions
- * count0 = SPACE_DIM1
- * count1 = SPACE_DIM2(3)
- * start0 = mpi_rank*SPACE_DIM1
- * start1 = 0
- *
- * ------------------------------------------------------------------------
- */
-
-void
-coll_chunk8(void)
-{
- const char *filename = PARATESTFILE /* GetTestParameters() */;
- int mpi_rank;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, dataset or dataset more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, HYPER, HYPER, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, HYPER, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, ALL, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, HYPER, OUT_OF_ORDER);
-
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, ALL, IN_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, POINT, IN_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, HYPER, IN_ORDER);
-}
-
-/*-------------------------------------------------------------------------
- * Function: coll_chunk9
- *
- * Purpose:     Wrapper to test the collective chunk I/O for a regular JOINT
- *              selection with at least 2*mpi_size chunks
- *
- * Return: Success: 0
- *
- * Failure: -1
- *-------------------------------------------------------------------------
- */
-
-/* ------------------------------------------------------------------------
- * Descriptions for the selection: one singular selection across many chunks
- * Two dimensions, Num of chunks = 2* mpi_size
- *
- * dim1 = SPACE_DIM1*mpi_size
- * dim2 = SPACE_DIM2
- * chunk_dim1 = dim1
- * chunk_dim2 = dim2
- * block = 1 for all dimensions
- * stride = 1 for all dimensions
- * count0 = SPACE_DIM1
- * count1 = SPACE_DIM2(3)
- * start0 = mpi_rank*SPACE_DIM1
- * start1 = 0
- *
- * ------------------------------------------------------------------------
- */
-
-void
-coll_chunk9(void)
-{
- const char *filename = PARATESTFILE /* GetTestParameters() */;
- int mpi_rank;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, dataset or dataset more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, HYPER, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, ALL, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, HYPER, OUT_OF_ORDER);
-
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, ALL, IN_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, POINT, IN_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, HYPER, IN_ORDER);
-}
-
-/*-------------------------------------------------------------------------
- * Function: coll_chunk10
- *
- * Purpose:     Wrapper to test the collective chunk I/O for a regular JOINT
- *              selection with at least 2*mpi_size chunks
- *
- * Return: Success: 0
- *
- * Failure: -1
- *-------------------------------------------------------------------------
- */
-
-/* ------------------------------------------------------------------------
- * Descriptions for the selection: one singular selection across many chunks
- * Two dimensions, Num of chunks = 2* mpi_size
- *
- * dim1 = SPACE_DIM1*mpi_size
- * dim2 = SPACE_DIM2
- * chunk_dim1 = dim1
- * chunk_dim2 = dim2
- * block = 1 for all dimensions
- * stride = 1 for all dimensions
- * count0 = SPACE_DIM1
- * count1 = SPACE_DIM2(3)
- * start0 = mpi_rank*SPACE_DIM1
- * start1 = 0
- *
- * ------------------------------------------------------------------------
- */
-
-void
-coll_chunk10(void)
-{
- const char *filename = PARATESTFILE /* GetTestParameters() */;
- int mpi_rank;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, dataset or dataset more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, HYPER, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, ALL, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, HYPER, OUT_OF_ORDER);
-
- coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, ALL, IN_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, POINT, IN_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, HYPER, IN_ORDER);
-}
-
-/*-------------------------------------------------------------------------
- * Function: coll_chunktest
- *
- * Purpose:     The actual testing routine for regular selections with
- *              collective chunked storage. Both the write and the read are
- *              tested; a failure may come from either, since the two are
- *              not tested separately.
- *
- * Return: Success: 0
- *
- * Failure: -1
- *-------------------------------------------------------------------------
- */
-
-static void
-coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option, int file_selection,
- int mem_selection, int mode)
-{
- hid_t file, dataset, file_dataspace, mem_dataspace;
- hid_t acc_plist, xfer_plist, crp_plist;
-
- hsize_t dims[RANK], chunk_dims[RANK];
- int *data_array1 = NULL;
- int *data_origin1 = NULL;
-
- hsize_t start[RANK], count[RANK], stride[RANK], block[RANK];
-
-#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- unsigned prop_value;
-#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
-
- int mpi_size, mpi_rank;
-
- herr_t status;
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- size_t num_points; /* for point selection */
- hsize_t *coords = NULL; /* for point selection */
- hsize_t current_dims; /* for point selection */
-
- /* set up MPI parameters */
- MPI_Comm_size(comm, &mpi_size);
- MPI_Comm_rank(comm, &mpi_rank);
-
- /* Create the data space */
-
- acc_plist = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_plist >= 0), "");
-
- file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_plist);
- VRFY((file >= 0), "H5Fcreate succeeded");
-
- status = H5Pclose(acc_plist);
- VRFY((status >= 0), "");
-
- /* setup dimensionality object */
- dims[0] = (hsize_t)(SPACE_DIM1 * mpi_size);
- dims[1] = SPACE_DIM2;
-
- /* allocate memory for data buffer */
- data_array1 = (int *)malloc(dims[0] * dims[1] * sizeof(int));
- VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
-
- /* set up dimensions of the slab this process accesses */
- ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor);
-
- /* set up the coords array selection */
- num_points = block[0] * block[1] * count[0] * count[1];
- coords = (hsize_t *)malloc(num_points * RANK * sizeof(hsize_t));
- VRFY((coords != NULL), "coords malloc succeeded");
- point_set(start, count, stride, block, num_points, coords, mode);
-
- file_dataspace = H5Screate_simple(2, dims, NULL);
- VRFY((file_dataspace >= 0), "file dataspace created succeeded");
-
- if (ALL != mem_selection) {
- mem_dataspace = H5Screate_simple(2, dims, NULL);
- VRFY((mem_dataspace >= 0), "mem dataspace created succeeded");
- }
- else {
- current_dims = num_points;
- mem_dataspace = H5Screate_simple(1, &current_dims, NULL);
- VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
- }
-
- crp_plist = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((crp_plist >= 0), "");
-
- /* Set up chunk information. */
- chunk_dims[0] = dims[0] / (hsize_t)chunk_factor;
-
-    /* to decrease the testing time, maintain a bigger chunk size */
-    chunk_dims[1] = (chunk_factor == 1) ? SPACE_DIM2 : SPACE_DIM2 / 2;
- status = H5Pset_chunk(crp_plist, 2, chunk_dims);
- VRFY((status >= 0), "chunk creation property list succeeded");
-
- dataset = H5Dcreate2(file, DSET_COLLECTIVE_CHUNK_NAME, H5T_NATIVE_INT, file_dataspace, H5P_DEFAULT,
- crp_plist, H5P_DEFAULT);
- VRFY((dataset >= 0), "dataset created succeeded");
-
- status = H5Pclose(crp_plist);
- VRFY((status >= 0), "");
-
- /*put some trivial data in the data array */
- ccdataset_fill(start, stride, count, block, data_array1, mem_selection);
-
- MESG("data_array initialized");
-
- switch (file_selection) {
- case HYPER:
- status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((status >= 0), "hyperslab selection succeeded");
- break;
-
- case POINT:
- if (num_points) {
- status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((status >= 0), "Element selection succeeded");
- }
- else {
- status = H5Sselect_none(file_dataspace);
- VRFY((status >= 0), "none selection succeeded");
- }
- break;
-
- case ALL:
- status = H5Sselect_all(file_dataspace);
- VRFY((status >= 0), "H5Sselect_all succeeded");
- break;
- }
-
- switch (mem_selection) {
- case HYPER:
- status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((status >= 0), "hyperslab selection succeeded");
- break;
-
- case POINT:
- if (num_points) {
- status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((status >= 0), "Element selection succeeded");
- }
- else {
- status = H5Sselect_none(mem_dataspace);
- VRFY((status >= 0), "none selection succeeded");
- }
- break;
-
- case ALL:
- status = H5Sselect_all(mem_dataspace);
- VRFY((status >= 0), "H5Sselect_all succeeded");
- break;
- }
-
- /* set up the collective transfer property list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
-
- status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((status >= 0), "MPIO collective transfer property succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((status >= 0), "set independent IO collectively succeeded");
- }
-
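-    /* A note on the api_option settings below (summarizing the DXPL calls used):
-     * H5FD_MPIO_CHUNK_ONE_IO directly requests link-chunk (single collective I/O)
-     * mode and H5FD_MPIO_CHUNK_MULTI_IO directly requests multi-chunk mode, while
-     * H5Pset_dxpl_mpio_chunk_opt_num() and H5Pset_dxpl_mpio_chunk_opt_ratio()
-     * instead steer the library's own mode decision by adjusting its chunk-count
-     * and per-chunk percentage thresholds. */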
- switch (api_option) {
- case API_LINK_HARD:
- status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_ONE_IO);
- VRFY((status >= 0), "collective chunk optimization succeeded");
- break;
-
- case API_MULTI_HARD:
- status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_MULTI_IO);
- VRFY((status >= 0), "collective chunk optimization succeeded ");
- break;
-
- case API_LINK_TRUE:
- status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 2);
- VRFY((status >= 0), "collective chunk optimization set chunk number succeeded");
- break;
-
- case API_LINK_FALSE:
- status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 6);
- VRFY((status >= 0), "collective chunk optimization set chunk number succeeded");
- break;
-
- case API_MULTI_COLL:
- status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */
- VRFY((status >= 0), "collective chunk optimization set chunk number succeeded");
- status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 50);
- VRFY((status >= 0), "collective chunk optimization set chunk ratio succeeded");
- break;
-
- case API_MULTI_IND:
- status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */
- VRFY((status >= 0), "collective chunk optimization set chunk number succeeded");
- status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 100);
- VRFY((status >= 0), "collective chunk optimization set chunk ratio succeeded");
- break;
-
- default:;
- }
-
-#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- if (facc_type == FACC_MPIO) {
- switch (api_option) {
- case API_LINK_HARD:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE,
- &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0), "testing property list inserted succeeded");
- break;
-
- case API_MULTI_HARD:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE,
- &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0), "testing property list inserted succeeded");
- break;
-
- case API_LINK_TRUE:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status =
- H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE,
- &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0), "testing property list inserted succeeded");
- break;
-
- case API_LINK_FALSE:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status =
- H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE,
- &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0), "testing property list inserted succeeded");
- break;
-
- case API_MULTI_COLL:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status =
- H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME,
- H5D_XFER_COLL_CHUNK_SIZE, &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0), "testing property list inserted succeeded");
- break;
-
- case API_MULTI_IND:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status =
- H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE,
- &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0), "testing property list inserted succeeded");
- break;
-
- default:;
- }
- }
-#endif
-
- /* write data collectively */
- status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((status >= 0), "dataset write succeeded");
-
-#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- /* Only check chunk optimization mode if selection I/O is not being used -
- * selection I/O bypasses this IO mode decision - it's effectively always
- * multi chunk currently */
- if (facc_type == FACC_MPIO && /* !H5_use_selection_io_g */ true) {
- switch (api_option) {
- case API_LINK_HARD:
- status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &prop_value);
- VRFY((status >= 0), "testing property list get succeeded");
- VRFY((prop_value == 0), "API to set LINK COLLECTIVE IO directly succeeded");
- break;
-
- case API_MULTI_HARD:
- status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, &prop_value);
- VRFY((status >= 0), "testing property list get succeeded");
- VRFY((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded");
- break;
-
- case API_LINK_TRUE:
- status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, &prop_value);
- VRFY((status >= 0), "testing property list get succeeded");
- VRFY((prop_value == 0), "API to set LINK COLLECTIVE IO succeeded");
- break;
-
- case API_LINK_FALSE:
- status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, &prop_value);
- VRFY((status >= 0), "testing property list get succeeded");
- VRFY((prop_value == 0), "API to set LINK IO transferring to multi-chunk IO succeeded");
- break;
-
- case API_MULTI_COLL:
- status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, &prop_value);
- VRFY((status >= 0), "testing property list get succeeded");
- VRFY((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded");
- break;
-
- case API_MULTI_IND:
- status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, &prop_value);
- VRFY((status >= 0), "testing property list get succeeded");
- VRFY((prop_value == 0),
- "API to set MULTI-CHUNK IO transferring to independent IO succeeded");
- break;
-
- default:;
- }
- }
-#endif
-
- status = H5Dclose(dataset);
- VRFY((status >= 0), "");
-
- status = H5Pclose(xfer_plist);
- VRFY((status >= 0), "property list closed");
-
- status = H5Sclose(file_dataspace);
- VRFY((status >= 0), "");
-
- status = H5Sclose(mem_dataspace);
- VRFY((status >= 0), "");
-
- status = H5Fclose(file);
- VRFY((status >= 0), "");
-
- if (data_array1)
- free(data_array1);
-
- /* Use collective read to verify the correctness of collective write. */
-
- /* allocate memory for data buffer */
- data_array1 = (int *)malloc(dims[0] * dims[1] * sizeof(int));
- VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
-
- /* allocate memory for data buffer */
- data_origin1 = (int *)malloc(dims[0] * dims[1] * sizeof(int));
- VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
-
- acc_plist = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_plist >= 0), "MPIO creation property list succeeded");
-
- file = H5Fopen(filename, H5F_ACC_RDONLY, acc_plist);
-    VRFY((file >= 0), "H5Fopen succeeded");
-
- status = H5Pclose(acc_plist);
- VRFY((status >= 0), "");
-
- /* open the collective dataset*/
- dataset = H5Dopen2(file, DSET_COLLECTIVE_CHUNK_NAME, H5P_DEFAULT);
- VRFY((dataset >= 0), "");
-
- /* set up dimensions of the slab this process accesses */
- ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor);
-
- /* obtain the file and mem dataspace*/
- file_dataspace = H5Dget_space(dataset);
- VRFY((file_dataspace >= 0), "");
-
- if (ALL != mem_selection) {
- mem_dataspace = H5Dget_space(dataset);
- VRFY((mem_dataspace >= 0), "");
- }
- else {
- current_dims = num_points;
- mem_dataspace = H5Screate_simple(1, &current_dims, NULL);
- VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
- }
-
- switch (file_selection) {
- case HYPER:
- status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((status >= 0), "hyperslab selection succeeded");
- break;
-
- case POINT:
- if (num_points) {
- status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((status >= 0), "Element selection succeeded");
- }
- else {
- status = H5Sselect_none(file_dataspace);
- VRFY((status >= 0), "none selection succeeded");
- }
- break;
-
- case ALL:
- status = H5Sselect_all(file_dataspace);
- VRFY((status >= 0), "H5Sselect_all succeeded");
- break;
- }
-
- switch (mem_selection) {
- case HYPER:
- status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((status >= 0), "hyperslab selection succeeded");
- break;
-
- case POINT:
- if (num_points) {
- status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((status >= 0), "Element selection succeeded");
- }
- else {
- status = H5Sselect_none(mem_dataspace);
- VRFY((status >= 0), "none selection succeeded");
- }
- break;
-
- case ALL:
- status = H5Sselect_all(mem_dataspace);
- VRFY((status >= 0), "H5Sselect_all succeeded");
- break;
- }
-
- /* fill dataset with test data */
- ccdataset_fill(start, stride, count, block, data_origin1, mem_selection);
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
-
- status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((status >= 0), "MPIO collective transfer property succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((status >= 0), "set independent IO collectively succeeded");
- }
-
- status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((status >= 0), "dataset read succeeded");
-
- /* verify the read data with original expected data */
- status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1, mem_selection);
- if (status)
- nerrors++;
-
- status = H5Pclose(xfer_plist);
- VRFY((status >= 0), "property list closed");
-
- /* close dataset collectively */
- status = H5Dclose(dataset);
- VRFY((status >= 0), "H5Dclose");
-
- /* release all IDs created */
- status = H5Sclose(file_dataspace);
- VRFY((status >= 0), "H5Sclose");
-
- status = H5Sclose(mem_dataspace);
- VRFY((status >= 0), "H5Sclose");
-
- /* close the file collectively */
- status = H5Fclose(file);
- VRFY((status >= 0), "H5Fclose");
-
- /* release data buffers */
- if (coords)
- free(coords);
- if (data_array1)
- free(data_array1);
- if (data_origin1)
- free(data_origin1);
-}
-
-/* Set up the selection */
-static void
-ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
- int mode)
-{
-
- switch (mode) {
-
- case BYROW_CONT:
-            /* Each process takes a slab of rows. */
- block[0] = 1;
- block[1] = 1;
- stride[0] = 1;
- stride[1] = 1;
- count[0] = SPACE_DIM1;
- count[1] = SPACE_DIM2;
- start[0] = (hsize_t)mpi_rank * count[0];
- start[1] = 0;
-
- break;
-
- case BYROW_DISCONT:
- /* Each process takes several disjoint blocks. */
- block[0] = 1;
- block[1] = 1;
- stride[0] = 3;
- stride[1] = 3;
- count[0] = SPACE_DIM1 / (stride[0] * block[0]);
- count[1] = (SPACE_DIM2) / (stride[1] * block[1]);
- start[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_rank;
- start[1] = 0;
-
- break;
-
- case BYROW_SELECTNONE:
-            /* Each process takes a slab of rows; the last process(es)
-               have no selection. */
- block[0] = 1;
- block[1] = 1;
- stride[0] = 1;
- stride[1] = 1;
- count[0] = ((mpi_rank >= MAX(1, (mpi_size - 2))) ? 0 : SPACE_DIM1);
- count[1] = SPACE_DIM2;
- start[0] = (hsize_t)mpi_rank * count[0];
- start[1] = 0;
-
- break;
-
- case BYROW_SELECTUNBALANCE:
-            /* The first two-thirds of the processes select only the top
-               half of the domain; the rest select the bottom half of the
-               domain. */
-
- block[0] = 1;
- count[0] = 2;
- stride[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_size / 4 + 1;
- block[1] = SPACE_DIM2;
- count[1] = 1;
- start[1] = 0;
- stride[1] = 1;
- if ((mpi_rank * 3) < (mpi_size * 2))
- start[0] = (hsize_t)mpi_rank;
- else
- start[0] = (hsize_t)(1 + SPACE_DIM1 * mpi_size / 2 + (mpi_rank - 2 * mpi_size / 3));
- break;
-
- case BYROW_SELECTINCHUNK:
- /* Each process will only select one chunk */
-
- block[0] = 1;
- count[0] = 1;
- start[0] = (hsize_t)(mpi_rank * SPACE_DIM1);
- stride[0] = 1;
- block[1] = SPACE_DIM2;
- count[1] = 1;
- stride[1] = 1;
- start[1] = 0;
-
- break;
-
- default:
- /* Unknown mode. Set it to cover the whole dataset. */
- block[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_size;
- block[1] = SPACE_DIM2;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = 0;
-
- break;
- }
- if (VERBOSE_MED) {
- printf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total "
- "datapoints=%lu\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
- (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
- (unsigned long)block[0], (unsigned long)block[1],
- (unsigned long)(block[0] * block[1] * count[0] * count[1]));
- }
-}
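-
-/*
- * Example (BYROW_CONT): with block = stride = {1,1} and
- * count = {SPACE_DIM1, SPACE_DIM2}, rank r selects the contiguous
- * SPACE_DIM1 x SPACE_DIM2 block of rows starting at row r * SPACE_DIM1,
- * so together the ranks cover the whole dims[0] x dims[1] dataset.
- */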
-
-/*
- * Fill the dataset with trivial data for testing.
- * Assume dimension rank is 2.
- */
-static void
-ccdataset_fill(hsize_t start[], hsize_t stride[], hsize_t count[], hsize_t block[], DATATYPE *dataset,
- int mem_selection)
-{
- DATATYPE *dataptr = dataset;
- DATATYPE *tmptr;
- hsize_t i, j, k1, k2, k = 0;
- /* put some trivial data in the data_array */
- tmptr = dataptr;
-
-    /* assign values to the disjoint (two-dimensional) block of the data
-       array through the pointer */
-
- for (k1 = 0; k1 < count[0]; k1++) {
- for (i = 0; i < block[0]; i++) {
- for (k2 = 0; k2 < count[1]; k2++) {
- for (j = 0; j < block[1]; j++) {
-
- if (ALL != mem_selection) {
- dataptr = tmptr + ((start[0] + k1 * stride[0] + i) * SPACE_DIM2 + start[1] +
- k2 * stride[1] + j);
- }
- else {
- dataptr = tmptr + k;
- k++;
- }
-
- *dataptr = (DATATYPE)(k1 + k2 + i + j);
- }
- }
- }
- }
-}
-
-/*
- * Print the first block of the content of the dataset.
- */
-static void
-ccdataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset)
-
-{
- DATATYPE *dataptr = dataset;
- hsize_t i, j;
-
- /* print the column heading */
- printf("Print only the first block of the dataset\n");
- printf("%-8s", "Cols:");
- for (j = 0; j < block[1]; j++) {
- printf("%3lu ", (unsigned long)(start[1] + j));
- }
- printf("\n");
-
- /* print the slab data */
- for (i = 0; i < block[0]; i++) {
- printf("Row %2lu: ", (unsigned long)(i + start[0]));
- for (j = 0; j < block[1]; j++) {
- printf("%03d ", *dataptr++);
- }
- printf("\n");
- }
-}
-
-/*
- * Verify the content of the dataset against the original data.
- */
-static int
-ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset,
- DATATYPE *original, int mem_selection)
-{
- hsize_t i, j, k1, k2, k = 0;
- int vrfyerrs;
- DATATYPE *dataptr, *oriptr;
-
- /* print it if VERBOSE_MED */
- if (VERBOSE_MED) {
- printf("dataset_vrfy dumping:::\n");
- printf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
- (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
- (unsigned long)block[0], (unsigned long)block[1]);
- printf("original values:\n");
- ccdataset_print(start, block, original);
- printf("compared values:\n");
- ccdataset_print(start, block, dataset);
- }
-
- vrfyerrs = 0;
-
- for (k1 = 0; k1 < count[0]; k1++) {
- for (i = 0; i < block[0]; i++) {
- for (k2 = 0; k2 < count[1]; k2++) {
- for (j = 0; j < block[1]; j++) {
- if (ALL != mem_selection) {
- dataptr = dataset + ((start[0] + k1 * stride[0] + i) * SPACE_DIM2 + start[1] +
- k2 * stride[1] + j);
- oriptr = original + ((start[0] + k1 * stride[0] + i) * SPACE_DIM2 + start[1] +
- k2 * stride[1] + j);
- }
- else {
- dataptr = dataset + k;
- oriptr = original + k;
- k++;
- }
- if (*dataptr != *oriptr) {
- if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
- printf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n",
- (unsigned long)i, (unsigned long)j, *(oriptr), *(dataptr));
- }
- }
- }
- }
- }
- }
- if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("[more errors ...]\n");
- if (vrfyerrs)
- printf("%d errors found in ccdataset_vrfy\n", vrfyerrs);
- return (vrfyerrs);
-}
diff --git a/testpar/API/t_coll_md_read.c b/testpar/API/t_coll_md_read.c
deleted file mode 100644
index 353d5f6..0000000
--- a/testpar/API/t_coll_md_read.c
+++ /dev/null
@@ -1,624 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
- * A test suite to test HDF5's collective metadata read and write capabilities,
- * as enabled by making a call to H5Pset_all_coll_metadata_ops() and/or
- * H5Pset_coll_metadata_write().
- */
-
-#include "hdf5.h"
-#include "testphdf5.h"
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-/*
- * Define the non-participating process as the "last"
- * rank to avoid any weirdness potentially caused by
- * an if (mpi_rank == 0) check.
- */
-#define PARTIAL_NO_SELECTION_NO_SEL_PROCESS (mpi_rank == mpi_size - 1)
-#define PARTIAL_NO_SELECTION_DATASET_NAME "partial_no_selection_dset"
-#define PARTIAL_NO_SELECTION_DATASET_NDIMS 2
-#define PARTIAL_NO_SELECTION_Y_DIM_SCALE 5
-#define PARTIAL_NO_SELECTION_X_DIM_SCALE 5
-
-#define MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS 2
-
-#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM 10000
-#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME "linked_chunk_io_sort_chunk_issue"
-#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS 1
-
-#define COLL_GHEAP_WRITE_ATTR_NELEMS 10
-#define COLL_GHEAP_WRITE_ATTR_NAME "coll_gheap_write_attr"
-#define COLL_GHEAP_WRITE_ATTR_DIMS 1
-
-/*
- * A test for issue HDFFV-10501. A parallel hang was reported which occurred
- * in linked-chunk I/O when collective metadata reads are enabled and some ranks
- * do not have any selection in a dataset's dataspace, while others do. The ranks
- * which have no selection during the read/write operation called H5D__chunk_addrmap()
- * to retrieve the lowest chunk address, since we require that the read/write be done
- * in strictly non-decreasing order of chunk address. For version 1 and 2 B-trees,
- * this caused the non-participating ranks to issue a collective MPI_Bcast() call
- * which the other ranks did not issue, thus causing a hang.
- *
- * However, since these ranks are not actually reading/writing anything, this call
- * can simply be removed and the address used for the read/write can be set to an
- * arbitrary number (0 was chosen).
- */
-void
-test_partial_no_selection_coll_md_read(void)
-{
- const char *filename;
- hsize_t *dataset_dims = NULL;
- hsize_t max_dataset_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS];
- hsize_t sel_dims[1];
- hsize_t chunk_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS] = {PARTIAL_NO_SELECTION_Y_DIM_SCALE,
- PARTIAL_NO_SELECTION_X_DIM_SCALE};
- hsize_t start[PARTIAL_NO_SELECTION_DATASET_NDIMS];
- hsize_t stride[PARTIAL_NO_SELECTION_DATASET_NDIMS];
- hsize_t count[PARTIAL_NO_SELECTION_DATASET_NDIMS];
- hsize_t block[PARTIAL_NO_SELECTION_DATASET_NDIMS];
- hid_t file_id = H5I_INVALID_HID;
- hid_t fapl_id = H5I_INVALID_HID;
- hid_t dset_id = H5I_INVALID_HID;
- hid_t dcpl_id = H5I_INVALID_HID;
- hid_t dxpl_id = H5I_INVALID_HID;
- hid_t fspace_id = H5I_INVALID_HID;
- hid_t mspace_id = H5I_INVALID_HID;
- int mpi_rank, mpi_size;
- void *data = NULL;
- void *read_buf = NULL;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, dataset or file flush aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- filename = PARATESTFILE /* GetTestParameters() */;
-
- fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
-
- /*
- * Even though the testphdf5 framework currently sets collective metadata reads
- * on the FAPL, we call it here just to be sure this is futureproof, since
- * demonstrating this issue relies upon it.
- */
- VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded");
-
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
- VRFY((file_id >= 0), "H5Fcreate succeeded");
-
- dataset_dims = malloc(PARTIAL_NO_SELECTION_DATASET_NDIMS * sizeof(*dataset_dims));
- VRFY((dataset_dims != NULL), "malloc succeeded");
-
- dataset_dims[0] = (hsize_t)PARTIAL_NO_SELECTION_Y_DIM_SCALE * (hsize_t)mpi_size;
- dataset_dims[1] = (hsize_t)PARTIAL_NO_SELECTION_X_DIM_SCALE * (hsize_t)mpi_size;
- max_dataset_dims[0] = H5S_UNLIMITED;
- max_dataset_dims[1] = H5S_UNLIMITED;
-
- fspace_id = H5Screate_simple(PARTIAL_NO_SELECTION_DATASET_NDIMS, dataset_dims, max_dataset_dims);
- VRFY((fspace_id >= 0), "H5Screate_simple succeeded");
-
- /*
- * Set up chunking on the dataset in order to reproduce the problem.
- */
- dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
-
- VRFY((H5Pset_chunk(dcpl_id, PARTIAL_NO_SELECTION_DATASET_NDIMS, chunk_dims) >= 0),
- "H5Pset_chunk succeeded");
-
- dset_id = H5Dcreate2(file_id, PARTIAL_NO_SELECTION_DATASET_NAME, H5T_NATIVE_INT, fspace_id, H5P_DEFAULT,
- dcpl_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
-
- /*
- * Setup hyperslab selection to split the dataset among the ranks.
- *
- * The ranks will write rows across the dataset.
- */
- start[0] = (hsize_t)PARTIAL_NO_SELECTION_Y_DIM_SCALE * (hsize_t)mpi_rank;
- start[1] = 0;
- stride[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE;
- stride[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE;
- count[0] = 1;
- count[1] = (hsize_t)mpi_size;
- block[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE;
- block[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE;
-
- VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "H5Sselect_hyperslab succeeded");
-
- sel_dims[0] = count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE);
-
- mspace_id = H5Screate_simple(1, sel_dims, NULL);
- VRFY((mspace_id >= 0), "H5Screate_simple succeeded");
-
- data = calloc(1, count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) *
- sizeof(int));
- VRFY((data != NULL), "calloc succeeded");
-
- dxpl_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxpl_id >= 0), "H5Pcreate succeeded");
-
- /*
- * Enable collective access for the data transfer.
- */
- VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded");
-
- VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, data) >= 0), "H5Dwrite succeeded");
-
- VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded");
-
- /*
- * Ensure that linked-chunk I/O is performed since this is
- * the particular code path where the issue lies and we don't
- * want the library doing multi-chunk I/O behind our backs.
- */
- VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0),
- "H5Pset_dxpl_mpio_chunk_opt succeeded");
-
- read_buf = malloc(count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) *
- sizeof(int));
- VRFY((read_buf != NULL), "malloc succeeded");
-
- /*
- * Make sure to call H5Sselect_none() on the non-participating process.
- */
- if (PARTIAL_NO_SELECTION_NO_SEL_PROCESS) {
- VRFY((H5Sselect_none(fspace_id) >= 0), "H5Sselect_none succeeded");
- VRFY((H5Sselect_none(mspace_id) >= 0), "H5Sselect_none succeeded");
- }
-
- /*
- * Finally have each rank read their section of data back from the dataset.
- */
- VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0),
- "H5Dread succeeded");
-
- /*
- * Check data integrity just to be sure.
- */
- if (!PARTIAL_NO_SELECTION_NO_SEL_PROCESS) {
- VRFY((!memcmp(data, read_buf,
- count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) *
- sizeof(int))),
- "memcmp succeeded");
- }
-
- if (dataset_dims) {
- free(dataset_dims);
- dataset_dims = NULL;
- }
-
- if (data) {
- free(data);
- data = NULL;
- }
-
- if (read_buf) {
- free(read_buf);
- read_buf = NULL;
- }
-
- VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded");
- VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded");
- VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded");
- VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded");
- VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
- VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
-}
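-
-/*
- * A condensed sketch of the pattern exercised above (illustration only, not
- * a separate test): a rank with nothing to transfer still participates in
- * the collective read, it just empties its selections first:
- *
- *     if (PARTIAL_NO_SELECTION_NO_SEL_PROCESS) {
- *         H5Sselect_none(fspace_id);
- *         H5Sselect_none(mspace_id);
- *     }
- *     H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf);
- */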
-
-/*
- * A test for HDFFV-10562 which attempts to verify that using multi-chunk
- * I/O with collective metadata reads enabled doesn't cause issues due to
- * collective metadata reads being made only by process 0 in H5D__chunk_addrmap().
- *
- * Failure in this test may either cause a hang, or, due to how the MPI calls
- * pertaining to this issue might mistakenly match up, may cause an MPI error
- * message similar to:
- *
- * #008: H5Dmpio.c line 2546 in H5D__obtain_mpio_mode(): MPI_BCast failed
- * major: Internal error (too specific to document in detail)
- * minor: Some MPI function failed
- * #009: H5Dmpio.c line 2546 in H5D__obtain_mpio_mode(): Message truncated, error stack:
- *PMPI_Bcast(1600)..................: MPI_Bcast(buf=0x1df98e0, count=18, MPI_BYTE, root=0, comm=0x84000006)
- *failed MPIR_Bcast_impl(1452).............: MPIR_Bcast(1476)..................:
- *MPIR_Bcast_intra(1249)............:
- *MPIR_SMP_Bcast(1088)..............:
- *MPIR_Bcast_binomial(239)..........:
- *MPIDI_CH3U_Receive_data_found(131): Message from rank 0 and tag 2 truncated; 2616 bytes received but buffer
- *size is 18 major: Internal error (too specific to document in detail) minor: MPI Error String
- *
- */
-void
-test_multi_chunk_io_addrmap_issue(void)
-{
- const char *filename;
- hsize_t start[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
- hsize_t stride[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
- hsize_t count[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
- hsize_t block[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
- hsize_t dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {10, 5};
- hsize_t chunk_dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {5, 5};
- hsize_t max_dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {H5S_UNLIMITED, H5S_UNLIMITED};
- hid_t file_id = H5I_INVALID_HID;
- hid_t fapl_id = H5I_INVALID_HID;
- hid_t dset_id = H5I_INVALID_HID;
- hid_t dcpl_id = H5I_INVALID_HID;
- hid_t dxpl_id = H5I_INVALID_HID;
- hid_t space_id = H5I_INVALID_HID;
- void *read_buf = NULL;
- int mpi_rank;
- int data[5][5] = {{0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}};
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, dataset or file flush aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- filename = PARATESTFILE /* GetTestParameters() */;
-
- fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
-
- /*
- * Even though the testphdf5 framework currently sets collective metadata reads
- * on the FAPL, we call it here just to be sure this is futureproof, since
- * demonstrating this issue relies upon it.
- */
- VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded");
-
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
- VRFY((file_id >= 0), "H5Fcreate succeeded");
-
- space_id = H5Screate_simple(MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS, dims, max_dims);
- VRFY((space_id >= 0), "H5Screate_simple succeeded");
-
- dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
-
- VRFY((H5Pset_chunk(dcpl_id, MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS, chunk_dims) >= 0),
- "H5Pset_chunk succeeded");
-
- dset_id = H5Dcreate2(file_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
-
- dxpl_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxpl_id >= 0), "H5Pcreate succeeded");
-
- VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded");
- VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_MULTI_IO) >= 0),
- "H5Pset_dxpl_mpio_chunk_opt succeeded");
-
- start[1] = 0;
- stride[0] = stride[1] = 1;
- count[0] = count[1] = 5;
- block[0] = block[1] = 1;
-
- if (mpi_rank == 0)
- start[0] = 0;
- else
- start[0] = 5;
-
- VRFY((H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "H5Sselect_hyperslab succeeded");
- if (mpi_rank != 0)
- VRFY((H5Sselect_none(space_id) >= 0), "H5Sselect_none succeeded");
-
- VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, space_id, dxpl_id, data) >= 0), "H5Dwrite succeeded");
-
- VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded");
-
- read_buf = malloc(50 * sizeof(int));
- VRFY((read_buf != NULL), "malloc succeeded");
-
- VRFY((H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "H5Dread succeeded");
-
- if (read_buf) {
- free(read_buf);
- read_buf = NULL;
- }
-
- VRFY((H5Sclose(space_id) >= 0), "H5Sclose succeeded");
- VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded");
- VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded");
- VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
- VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
-}
-
-/*
- * A test for HDFFV-10562 which attempts to verify that using linked-chunk
- * I/O with collective metadata reads enabled doesn't cause issues due to
- * collective metadata reads being made only by process 0 in H5D__sort_chunk().
- *
- * Failure in this test may either cause a hang, or, due to how the MPI calls
- * pertaining to this issue might mistakenly match up, may cause an MPI error
- * message similar to:
- *
- * #008: H5Dmpio.c line 2338 in H5D__sort_chunk(): MPI_BCast failed
- * major: Internal error (too specific to document in detail)
- * minor: Some MPI function failed
- * #009: H5Dmpio.c line 2338 in H5D__sort_chunk(): Other MPI error, error stack:
- *PMPI_Bcast(1600)........: MPI_Bcast(buf=0x7eae610, count=320000, MPI_BYTE, root=0, comm=0x84000006) failed
- *MPIR_Bcast_impl(1452)...:
- *MPIR_Bcast(1476)........:
- *MPIR_Bcast_intra(1249)..:
- *MPIR_SMP_Bcast(1088)....:
- *MPIR_Bcast_binomial(250): message sizes do not match across processes in the collective routine: Received
- *2096 but expected 320000 major: Internal error (too specific to document in detail) minor: MPI Error String
- */
-void
-test_link_chunk_io_sort_chunk_issue(void)
-{
- const char *filename;
- hsize_t dataset_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
- hsize_t sel_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
- hsize_t chunk_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
- hsize_t start[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
- hsize_t stride[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
- hsize_t count[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
- hsize_t block[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
- hid_t file_id = H5I_INVALID_HID;
- hid_t fapl_id = H5I_INVALID_HID;
- hid_t dset_id = H5I_INVALID_HID;
- hid_t dcpl_id = H5I_INVALID_HID;
- hid_t dxpl_id = H5I_INVALID_HID;
- hid_t fspace_id = H5I_INVALID_HID;
- hid_t mspace_id = H5I_INVALID_HID;
- int mpi_rank, mpi_size;
- void *data = NULL;
- void *read_buf = NULL;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, dataset or file flush aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- filename = PARATESTFILE /* GetTestParameters() */;
-
- fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
-
- /*
- * Even though the testphdf5 framework currently sets collective metadata reads
- * on the FAPL, we call it here just to be sure this is futureproof, since
- * demonstrating this issue relies upon it.
- */
- VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded");
-
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
- VRFY((file_id >= 0), "H5Fcreate succeeded");
-
- /*
- * Create a one-dimensional dataset of exactly LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM
- * chunks, where every rank writes to a piece of every single chunk to keep utilization high.
- */
- dataset_dims[0] = (hsize_t)mpi_size * (hsize_t)LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM;
-
- fspace_id = H5Screate_simple(LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, dataset_dims, NULL);
- VRFY((fspace_id >= 0), "H5Screate_simple succeeded");
-
- /*
- * Set up chunking on the dataset in order to reproduce the problem.
- */
- dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
-
- /* Chunk size is equal to MPI size since each rank writes to a piece of every chunk */
- chunk_dims[0] = (hsize_t)mpi_size;
-
- VRFY((H5Pset_chunk(dcpl_id, LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, chunk_dims) >= 0),
- "H5Pset_chunk succeeded");
-
- dset_id = H5Dcreate2(file_id, LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME, H5T_NATIVE_INT, fspace_id,
- H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
-
- /*
- * Setup hyperslab selection to split the dataset among the ranks.
- */
- start[0] = (hsize_t)mpi_rank;
- stride[0] = (hsize_t)mpi_size;
- count[0] = LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM;
- block[0] = 1;
-
- VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "H5Sselect_hyperslab succeeded");
-
- sel_dims[0] = count[0];
-
- mspace_id = H5Screate_simple(1, sel_dims, NULL);
- VRFY((mspace_id >= 0), "H5Screate_simple succeeded");
-
- data = calloc(1, count[0] * sizeof(int));
- VRFY((data != NULL), "calloc succeeded");
-
- dxpl_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxpl_id >= 0), "H5Pcreate succeeded");
-
- /*
- * Enable collective access for the data transfer.
- */
- VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded");
-
- VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, data) >= 0), "H5Dwrite succeeded");
-
- VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded");
-
- /*
- * Ensure that linked-chunk I/O is performed since this is
- * the particular code path where the issue lies and we don't
- * want the library doing multi-chunk I/O behind our backs.
- */
- VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0),
- "H5Pset_dxpl_mpio_chunk_opt succeeded");
-
- read_buf = malloc(count[0] * sizeof(int));
- VRFY((read_buf != NULL), "malloc succeeded");
-
- VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "H5Sselect_hyperslab succeeded");
-
- sel_dims[0] = count[0];
-
- VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded");
-
- mspace_id = H5Screate_simple(1, sel_dims, NULL);
- VRFY((mspace_id >= 0), "H5Screate_simple succeeded");
-
- /*
- * Finally have each rank read their section of data back from the dataset.
- */
- VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0),
- "H5Dread succeeded");
-
- if (data) {
- free(data);
- data = NULL;
- }
-
- if (read_buf) {
- free(read_buf);
- read_buf = NULL;
- }
-
- VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded");
- VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded");
- VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded");
- VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded");
- VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
- VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
-}
-
-/*
- * A test for GitHub issue #2433 which causes a collective metadata write
- * of global heap data. This test is meant to ensure that global heap data
- * gets correctly mapped as raw data during a collective metadata write
- * using vector I/O.
- *
- * An assertion exists in the library that should be triggered if global
- * heap data is not correctly mapped as raw data.
- */
-void
-test_collective_global_heap_write(void)
-{
- const char *filename;
- hsize_t attr_dims[COLL_GHEAP_WRITE_ATTR_DIMS];
- hid_t file_id = H5I_INVALID_HID;
- hid_t fapl_id = H5I_INVALID_HID;
- hid_t attr_id = H5I_INVALID_HID;
- hid_t vl_type = H5I_INVALID_HID;
- hid_t fspace_id = H5I_INVALID_HID;
- hvl_t vl_data;
- int mpi_rank, mpi_size;
- int data_buf[COLL_GHEAP_WRITE_ATTR_NELEMS];
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, dataset or file flush aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- filename = PARATESTFILE /* GetTestParameters() */;
-
- fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
-
- /*
- * Even though the testphdf5 framework currently sets collective metadata
- * writes on the FAPL, we call it here just to be sure this is futureproof,
- * since demonstrating this issue relies upon it.
- */
- VRFY((H5Pset_coll_metadata_write(fapl_id, true) >= 0), "Set collective metadata writes succeeded");
-
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
- VRFY((file_id >= 0), "H5Fcreate succeeded");
-
- attr_dims[0] = 1;
-
- fspace_id = H5Screate_simple(COLL_GHEAP_WRITE_ATTR_DIMS, attr_dims, NULL);
- VRFY((fspace_id >= 0), "H5Screate_simple succeeded");
-
- vl_type = H5Tvlen_create(H5T_NATIVE_INT);
- VRFY((vl_type >= 0), "H5Tvlen_create succeeded");
-
- vl_data.len = COLL_GHEAP_WRITE_ATTR_NELEMS;
- vl_data.p = data_buf;
-
- /*
- * Create a variable-length attribute that will get written to the global heap
- */
- attr_id = H5Acreate2(file_id, COLL_GHEAP_WRITE_ATTR_NAME, vl_type, fspace_id, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((attr_id >= 0), "H5Acreate2 succeeded");
-
- for (size_t i = 0; i < COLL_GHEAP_WRITE_ATTR_NELEMS; i++)
- data_buf[i] = (int)i;
-
- VRFY((H5Awrite(attr_id, vl_type, &vl_data) >= 0), "H5Awrite succeeded");
-
- VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded");
-    VRFY((H5Tclose(vl_type) >= 0), "H5Tclose succeeded");
- VRFY((H5Aclose(attr_id) >= 0), "H5Aclose succeeded");
- VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
- VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
-}
diff --git a/testpar/API/t_dset.c b/testpar/API/t_dset.c
deleted file mode 100644
index 0da25b0..0000000
--- a/testpar/API/t_dset.c
+++ /dev/null
@@ -1,4317 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
- * Parallel tests for datasets
- */
-
-/*
- * Example of using the parallel HDF5 library to access datasets.
- *
- * This program contains three major parts. Part 1 tests fixed dimension
- * datasets, for both independent and collective transfer modes.
- * Part 2 tests extendible datasets, for independent transfer mode
- * only.
- * Part 3 tests extendible datasets, for collective transfer mode
- * only.
- */
-
-#include "hdf5.h"
-#include "testphdf5.h"
-
-/*
- * The following are various utility routines used by the tests.
- */
-
-/*
- * Setup the dimensions of the hyperslab.
- * Two modes--by rows or by columns.
- * Assume dimension rank is 2.
- * BYROW divide into slabs of rows
- * BYCOL divide into blocks of columns
- * ZROW same as BYROW except process 0 gets 0 rows
- * ZCOL same as BYCOL except process 0 gets 0 columns
- */
-static void
-slab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
- int mode)
-{
- switch (mode) {
- case BYROW:
- /* Each process takes a slabs of rows. */
- block[0] = (hsize_t)(dim0 / mpi_size);
- block[1] = (hsize_t)dim1;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = (hsize_t)mpi_rank * block[0];
- start[1] = 0;
- if (VERBOSE_MED)
- printf("slab_set BYROW\n");
- break;
- case BYCOL:
- /* Each process takes a block of columns. */
- block[0] = (hsize_t)dim0;
- block[1] = (hsize_t)(dim1 / mpi_size);
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = (hsize_t)mpi_rank * block[1];
- if (VERBOSE_MED)
- printf("slab_set BYCOL\n");
- break;
- case ZROW:
- /* Similar to BYROW except process 0 gets 0 row */
- block[0] = (hsize_t)(mpi_rank ? dim0 / mpi_size : 0);
- block[1] = (hsize_t)dim1;
- stride[0] = (mpi_rank ? block[0] : 1); /* avoid setting stride to 0 */
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = (mpi_rank ? (hsize_t)mpi_rank * block[0] : 0);
- start[1] = 0;
- if (VERBOSE_MED)
- printf("slab_set ZROW\n");
- break;
- case ZCOL:
- /* Similar to BYCOL except process 0 gets 0 column */
- block[0] = (hsize_t)dim0;
- block[1] = (hsize_t)(mpi_rank ? dim1 / mpi_size : 0);
- stride[0] = block[0];
- stride[1] = (hsize_t)(mpi_rank ? block[1] : 1); /* avoid setting stride to 0 */
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = (mpi_rank ? (hsize_t)mpi_rank * block[1] : 0);
- if (VERBOSE_MED)
- printf("slab_set ZCOL\n");
- break;
- default:
- /* Unknown mode. Set it to cover the whole dataset. */
- printf("unknown slab_set mode (%d)\n", mode);
- block[0] = (hsize_t)dim0;
- block[1] = (hsize_t)dim1;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = 0;
- if (VERBOSE_MED)
- printf("slab_set wholeset\n");
- break;
- }
- if (VERBOSE_MED) {
- printf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total "
- "datapoints=%lu\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
- (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
- (unsigned long)block[0], (unsigned long)block[1],
- (unsigned long)(block[0] * block[1] * count[0] * count[1]));
- }
-}
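For illustration only (not part of the original test), a hypothetical call with dim0 = 8, dim1 = 6 and four MPI ranks would partition the dataset by rows like this:

    /* Sketch: assumes dim0 = 8, dim1 = 6 and mpi_size = 4 */
    hsize_t start[RANK], count[RANK], stride[RANK], block[RANK];
    slab_set(mpi_rank, 4, start, count, stride, block, BYROW);
    /* Every rank gets block = {2, 6}, stride = {2, 6}, count = {1, 1};
     * rank 0: start = {0, 0}, rank 1: start = {2, 0},
     * rank 2: start = {4, 0}, rank 3: start = {6, 0}. */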
-
-/*
- * Setup the coordinates for point selection.
- */
-void
-point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points,
- hsize_t coords[], int order)
-{
- hsize_t i, j, k = 0, m, n, s1, s2;
-
- HDcompile_assert(RANK == 2);
-
- if (OUT_OF_ORDER == order)
- k = (num_points * RANK) - 1;
- else if (IN_ORDER == order)
- k = 0;
-
- s1 = start[0];
- s2 = start[1];
-
- for (i = 0; i < count[0]; i++)
- for (j = 0; j < count[1]; j++)
- for (m = 0; m < block[0]; m++)
- for (n = 0; n < block[1]; n++)
- if (OUT_OF_ORDER == order) {
- coords[k--] = s2 + (stride[1] * j) + n;
- coords[k--] = s1 + (stride[0] * i) + m;
- }
- else if (IN_ORDER == order) {
- coords[k++] = s1 + stride[0] * i + m;
- coords[k++] = s2 + stride[1] * j + n;
- }
-
- if (VERBOSE_MED) {
- printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total "
- "datapoints=%lu\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
- (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
- (unsigned long)block[0], (unsigned long)block[1],
- (unsigned long)(block[0] * block[1] * count[0] * count[1]));
- k = 0;
- for (i = 0; i < num_points; i++) {
- printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
- k += 2;
- }
- }
-}
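As a sketch (values assumed purely for illustration), a single 1 x 3 block starting at row 2 yields these coordinate pairs in the two supported orders:

    hsize_t start[RANK] = {2, 0}, count[RANK] = {1, 1};
    hsize_t stride[RANK] = {1, 1}, block[RANK] = {1, 3};
    hsize_t coords[3 * RANK];
    point_set(start, count, stride, block, 3, coords, IN_ORDER);
    /* coords is now {2,0, 2,1, 2,2} (row, column pairs);
     * with OUT_OF_ORDER the same pairs are filled from the end: {2,2, 2,1, 2,0} */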
-
-/*
- * Fill the dataset with trivial data for testing.
- * Assume dimension rank is 2 and data is stored contiguously.
- */
-static void
-dataset_fill(hsize_t start[], hsize_t block[], DATATYPE *dataset)
-{
- DATATYPE *dataptr = dataset;
- hsize_t i, j;
-
- /* put some trivial data in the data_array */
- for (i = 0; i < block[0]; i++) {
- for (j = 0; j < block[1]; j++) {
- *dataptr = (DATATYPE)((i + start[0]) * 100 + (j + start[1] + 1));
- dataptr++;
- }
- }
-}
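For example (illustration only), a 2 x 3 slab whose file offset is start = {2, 0} is filled with values of the form row*100 + column + 1:

    DATATYPE buf[2 * 3];
    hsize_t start[RANK] = {2, 0}, block[RANK] = {2, 3};
    dataset_fill(start, block, buf); /* buf = {201, 202, 203, 301, 302, 303} */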
-
-/*
- * Print the content of the dataset.
- */
-static void
-dataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset)
-{
- DATATYPE *dataptr = dataset;
- hsize_t i, j;
-
- /* print the column heading */
- printf("%-8s", "Cols:");
- for (j = 0; j < block[1]; j++) {
- printf("%3lu ", (unsigned long)(start[1] + j));
- }
- printf("\n");
-
- /* print the slab data */
- for (i = 0; i < block[0]; i++) {
- printf("Row %2lu: ", (unsigned long)(i + start[0]));
- for (j = 0; j < block[1]; j++) {
- printf("%03d ", *dataptr++);
- }
- printf("\n");
- }
-}
-
-/*
- * Verify the content of the dataset against the expected (original) data.
- */
-int
-dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset,
- DATATYPE *original)
-{
- hsize_t i, j;
- int vrfyerrs;
-
- /* print it if VERBOSE_MED */
- if (VERBOSE_MED) {
- printf("dataset_vrfy dumping:::\n");
- printf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
- (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
- (unsigned long)block[0], (unsigned long)block[1]);
- printf("original values:\n");
- dataset_print(start, block, original);
- printf("compared values:\n");
- dataset_print(start, block, dataset);
- }
-
- vrfyerrs = 0;
- for (i = 0; i < block[0]; i++) {
- for (j = 0; j < block[1]; j++) {
- if (*dataset != *original) {
- if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
- printf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
- (unsigned long)i, (unsigned long)j, (unsigned long)(i + start[0]),
- (unsigned long)(j + start[1]), *(original), *(dataset));
- }
- }
- dataset++;
- original++;
- }
- }
- if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("[more errors ...]\n");
- if (vrfyerrs)
- printf("%d errors found in dataset_vrfy\n", vrfyerrs);
- return (vrfyerrs);
-}
-
-/*
- * Part 1.a--Independent read/write for fixed dimension datasets.
- */
-
-/*
- * Example of using the parallel HDF5 library to create two datasets
- * in one HDF5 file with parallel MPIO access support.
- * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
- * Each process controls only a slab of size dim0 x dim1 within each
- * dataset.
- */
-
-void
-dataset_writeInd(void)
-{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- const char *filename;
-
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
-
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
-
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- filename = PARATESTFILE /* GetTestParameters() */;
- if (VERBOSE_MED)
- printf("Independent write test on file %s\n", filename);
-
- /* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, basic dataset, or more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- /* allocate memory for data buffer */
- data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
- VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
-
- /* ----------------------------------------
- * CREATE AN HDF5 FILE WITH PARALLEL ACCESS
- * ---------------------------------------*/
- /* setup file access template */
- acc_tpl = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_tpl >= 0), "");
-
- /* create the file collectively */
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
- VRFY((fid >= 0), "H5Fcreate succeeded");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "");
-
- /* ---------------------------------------------
- * Define the dimensions of the overall datasets
- * and the slabs local to the MPI process.
- * ------------------------------------------- */
- /* setup dimensionality object */
- dims[0] = (hsize_t)dim0;
- dims[1] = (hsize_t)dim1;
- sid = H5Screate_simple(RANK, dims, NULL);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
-
- /* create a dataset collectively */
- dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
-
- /* create another dataset collectively */
- dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
-
- /*
- * To test the independent orders of writes between processes, all
- * even number processes write to dataset1 first, then dataset2.
- * All odd number processes write to dataset2 first, then dataset1.
- */
-
- /* set up dimensions of the slab this process accesses */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
- /* put some trivial data in the data_array */
- dataset_fill(start, block, data_array1);
- MESG("data_array initialized");
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset1);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
-
- /* write data independently */
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
- VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
- /* write data independently */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
- VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
-
- /* setup dimensions again to write with zero rows for process 0 */
- if (VERBOSE_MED)
- printf("writeInd by some with zero row\n");
- slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
- /* need to make mem_dataspace to match for process 0 */
- if (MAINPROCESS) {
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
- }
- MESG("writeInd by some with zero row");
- if ((mpi_rank / 2) * 2 != mpi_rank) {
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
- VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded");
- }
-#ifdef BARRIER_CHECKS
- MPI_Barrier(MPI_COMM_WORLD);
-#endif /* BARRIER_CHECKS */
-
- /* release dataspace ID */
- H5Sclose(file_dataspace);
-
- /* close dataset collectively */
- ret = H5Dclose(dataset1);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
- ret = H5Dclose(dataset2);
- VRFY((ret >= 0), "H5Dclose2 succeeded");
-
- /* release all IDs created */
- H5Sclose(sid);
-
- /* close the file collectively */
- H5Fclose(fid);
-
- /* release data buffers */
- if (data_array1)
- free(data_array1);
-}
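The create_faccess_plist() helper used above is defined elsewhere in the test harness; for the MPI-IO case it amounts to roughly the following sketch (an assumption about the helper, not its exact code):

    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    VRFY((fapl >= 0), "H5Pcreate succeeded");
    ret = H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL); /* select the MPI-IO file driver */
    VRFY((ret >= 0), "H5Pset_fapl_mpio succeeded");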
-
-/* Example of using the parallel HDF5 library to read a dataset */
-void
-dataset_readInd(void)
-{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
- const char *filename;
-
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
-
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
-
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- filename = PARATESTFILE /* GetTestParameters() */;
- if (VERBOSE_MED)
- printf("Independent read test on file %s\n", filename);
-
- /* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, basic dataset, or more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- /* allocate memory for data buffer */
- data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
- VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
- data_origin1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
- VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
-
- /* setup file access template */
- acc_tpl = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_tpl >= 0), "");
-
- /* open the file collectively */
- fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
- VRFY((fid >= 0), "");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "");
-
- /* open the dataset1 collectively */
- dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
- VRFY((dataset1 >= 0), "");
-
- /* open another dataset collectively */
- dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
- VRFY((dataset2 >= 0), "");
-
- /* set up dimensions of the slab this process accesses */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset1);
- VRFY((file_dataspace >= 0), "");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "");
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
-
- /* fill dataset with test data */
- dataset_fill(start, block, data_origin1);
-
- /* read data independently */
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
- VRFY((ret >= 0), "");
-
- /* verify the read data with original expected data */
- ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if (ret)
- nerrors++;
-
- /* read data independently */
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
- VRFY((ret >= 0), "");
-
- /* verify the read data with original expected data */
- ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if (ret)
- nerrors++;
-
- /* close dataset collectively */
- ret = H5Dclose(dataset1);
- VRFY((ret >= 0), "");
- ret = H5Dclose(dataset2);
- VRFY((ret >= 0), "");
-
- /* release all IDs created */
- H5Sclose(file_dataspace);
-
- /* close the file collectively */
- H5Fclose(fid);
-
- /* release data buffers */
- if (data_array1)
- free(data_array1);
- if (data_origin1)
- free(data_origin1);
-}
-
-/*
- * Part 1.b--Collective read/write for fixed dimension datasets.
- */
-
-/*
- * Example of using the parallel HDF5 library to create two datasets
- * in one HDF5 file with collective parallel access support.
- * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
- * Each process controls only a slab of size dim0 x dim1 within each
- * dataset. [Note: this is not yet the case; the datasets are currently of
- * size dim0 x dim1 and each process controls a hyperslab within each one.]
- */
-
-void
-dataset_writeAll(void)
-{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2, dataset3, dataset4; /* Dataset ID */
- hid_t dataset5, dataset6, dataset7; /* Dataset ID */
- hid_t datatype; /* Datatype ID */
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- const char *filename;
-
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
-
- size_t num_points; /* for point selection */
- hsize_t *coords = NULL; /* for point selection */
- hsize_t current_dims; /* for point selection */
-
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
-
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- filename = PARATESTFILE /* GetTestParameters() */;
- if (VERBOSE_MED)
- printf("Collective write test on file %s\n", filename);
-
- /* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, basic dataset, or more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- /* set up the coords array selection */
- num_points = (size_t)dim1;
- coords = (hsize_t *)malloc((size_t)dim1 * (size_t)RANK * sizeof(hsize_t));
- VRFY((coords != NULL), "coords malloc succeeded");
-
- /* allocate memory for data buffer */
- data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
- VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
-
- /* -------------------
- * START AN HDF5 FILE
- * -------------------*/
- /* setup file access template */
- acc_tpl = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_tpl >= 0), "");
-
- /* create the file collectively */
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
- VRFY((fid >= 0), "H5Fcreate succeeded");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "");
-
- /* --------------------------
- * Define the dimensions of the overall datasets
- * and create the dataset
- * ------------------------- */
- /* setup 2-D dimensionality object */
- dims[0] = (hsize_t)dim0;
- dims[1] = (hsize_t)dim1;
- sid = H5Screate_simple(RANK, dims, NULL);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
-
- /* create a dataset collectively */
- dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
-
- /* create another dataset collectively */
- datatype = H5Tcopy(H5T_NATIVE_INT);
- ret = H5Tset_order(datatype, H5T_ORDER_LE);
- VRFY((ret >= 0), "H5Tset_order succeeded");
-
- dataset2 = H5Dcreate2(fid, DATASETNAME2, datatype, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset2 >= 0), "H5Dcreate2 2 succeeded");
-
- /* create a third dataset collectively */
- dataset3 = H5Dcreate2(fid, DATASETNAME3, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset3 >= 0), "H5Dcreate2 succeeded");
-
- dataset5 = H5Dcreate2(fid, DATASETNAME7, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset5 >= 0), "H5Dcreate2 succeeded");
- dataset6 = H5Dcreate2(fid, DATASETNAME8, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset6 >= 0), "H5Dcreate2 succeeded");
- dataset7 = H5Dcreate2(fid, DATASETNAME9, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset7 >= 0), "H5Dcreate2 succeeded");
-
- /* release 2-D space ID created */
- H5Sclose(sid);
-
- /* setup scalar dimensionality object */
- sid = H5Screate(H5S_SCALAR);
- VRFY((sid >= 0), "H5Screate succeeded");
-
- /* create a fourth dataset collectively */
- dataset4 = H5Dcreate2(fid, DATASETNAME4, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset4 >= 0), "H5Dcreate2 succeeded");
-
- /* release scalar space ID created */
- H5Sclose(sid);
-
- /*
- * Set up dimensions of the slab this process accesses.
- */
-
- /* Dataset1: each process takes a block of rows. */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset1);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
-
- /* fill the local slab with some trivial data */
- dataset_fill(start, block, data_array1);
- MESG("data_array initialized");
- if (VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
- }
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* write data collectively */
- MESG("writeAll by Row");
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
-
- /* setup dimensions again to writeAll with zero rows for process 0 */
- if (VERBOSE_MED)
- printf("writeAll by some with zero row\n");
- slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
- /* need to make mem_dataspace to match for process 0 */
- if (MAINPROCESS) {
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
- }
- MESG("writeAll by some with zero row");
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded");
-
- /* release all temporary handles. */
- /* Could have used them for dataset2 but it is cleaner */
- /* to create them again.*/
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
-
- /* Dataset2: each process takes a block of columns. */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
-
- /* put some trivial data in the data_array */
- dataset_fill(start, block, data_array1);
- MESG("data_array initialized");
- if (VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
- }
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset1);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
-
- /* fill the local slab with some trivial data */
- dataset_fill(start, block, data_array1);
- MESG("data_array initialized");
- if (VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
- }
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* write data collectively */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
-
- /* setup dimensions again to writeAll with zero columns for process 0 */
- if (VERBOSE_MED)
- printf("writeAll by some with zero col\n");
- slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL);
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
- /* need to make mem_dataspace to match for process 0 */
- if (MAINPROCESS) {
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
- }
- MESG("writeAll by some with zero col");
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((ret >= 0), "H5Dwrite dataset1 by ZCOL succeeded");
-
- /* release all temporary handles. */
- /* Could have used them for dataset3 but it is cleaner */
- /* to create them again.*/
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
-
- /* Dataset3: each process takes a block of rows, except process zero uses "none" selection. */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset3);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- if (MAINPROCESS) {
- ret = H5Sselect_none(file_dataspace);
- VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded");
- } /* end if */
- else {
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
- } /* end else */
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
- if (MAINPROCESS) {
- ret = H5Sselect_none(mem_dataspace);
- VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded");
- } /* end if */
-
- /* fill the local slab with some trivial data */
- dataset_fill(start, block, data_array1);
- MESG("data_array initialized");
- if (VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
- } /* end if */
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* write data collectively */
- MESG("writeAll with none");
- ret = H5Dwrite(dataset3, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((ret >= 0), "H5Dwrite dataset3 succeeded");
-
- /* write data collectively (with datatype conversion) */
- MESG("writeAll with none");
- ret = H5Dwrite(dataset3, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((ret >= 0), "H5Dwrite dataset3 succeeded");
-
- /* release all temporary handles. */
- /* Could have used them for dataset4 but it is cleaner */
- /* to create them again.*/
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
-
- /* Dataset4: each process uses an "all" selection, except process zero, which writes no data ("none" selection). */
- /* Additionally, these are in a scalar dataspace */
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset4);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- if (MAINPROCESS) {
- ret = H5Sselect_none(file_dataspace);
- VRFY((ret >= 0), "H5Sselect_all file_dataspace succeeded");
- } /* end if */
- else {
- ret = H5Sselect_all(file_dataspace);
- VRFY((ret >= 0), "H5Sselect_none succeeded");
- } /* end else */
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate(H5S_SCALAR);
- VRFY((mem_dataspace >= 0), "");
- if (MAINPROCESS) {
- ret = H5Sselect_none(mem_dataspace);
- VRFY((ret >= 0), "H5Sselect_all mem_dataspace succeeded");
- } /* end if */
- else {
- ret = H5Sselect_all(mem_dataspace);
- VRFY((ret >= 0), "H5Sselect_none succeeded");
- } /* end else */
-
- /* fill the local slab with some trivial data */
- dataset_fill(start, block, data_array1);
- MESG("data_array initialized");
- if (VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
- } /* end if */
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* write data collectively */
- MESG("writeAll with scalar dataspace");
- ret = H5Dwrite(dataset4, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((ret >= 0), "H5Dwrite dataset4 succeeded");
-
- /* write data collectively (with datatype conversion) */
- MESG("writeAll with scalar dataspace");
- ret = H5Dwrite(dataset4, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((ret >= 0), "H5Dwrite dataset4 succeeded");
-
- /* release all temporary handles. */
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
-
- if (data_array1)
- free(data_array1);
- data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
- VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
-
- block[0] = 1;
- block[1] = (hsize_t)dim1;
- stride[0] = 1;
- stride[1] = (hsize_t)dim1;
- count[0] = 1;
- count[1] = 1;
- start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
- start[1] = 0;
-
- dataset_fill(start, block, data_array1);
- MESG("data_array initialized");
- if (VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
- }
-
- /* Dataset5: point selection in File - Hyperslab selection in Memory*/
- /* create a file dataspace independently */
- point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
- file_dataspace = H5Dget_space(dataset5);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((ret >= 0), "H5Sselect_elements succeeded");
-
- start[0] = 0;
- start[1] = 0;
- mem_dataspace = H5Dget_space(dataset5);
- VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* write data collectively */
- ret = H5Dwrite(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((ret >= 0), "H5Dwrite dataset5 succeeded");
-
- /* release all temporary handles. */
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
-
- /* Dataset6: point selection in File - Point selection in Memory*/
- /* create a file dataspace independently */
- start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
- start[1] = 0;
- point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
- file_dataspace = H5Dget_space(dataset6);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((ret >= 0), "H5Sselect_elements succeeded");
-
- start[0] = 0;
- start[1] = 0;
- point_set(start, count, stride, block, num_points, coords, IN_ORDER);
- mem_dataspace = H5Dget_space(dataset6);
- VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((ret >= 0), "H5Sselect_elements succeeded");
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* write data collectively */
- ret = H5Dwrite(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((ret >= 0), "H5Dwrite dataset6 succeeded");
-
- /* release all temporary handles. */
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
-
- /* Dataset7: point selection in File - All selection in Memory*/
- /* create a file dataspace independently */
- start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
- start[1] = 0;
- point_set(start, count, stride, block, num_points, coords, IN_ORDER);
- file_dataspace = H5Dget_space(dataset7);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((ret >= 0), "H5Sselect_elements succeeded");
-
- current_dims = num_points;
- mem_dataspace = H5Screate_simple(1, &current_dims, NULL);
- VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
-
- ret = H5Sselect_all(mem_dataspace);
- VRFY((ret >= 0), "H5Sselect_all succeeded");
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* write data collectively */
- ret = H5Dwrite(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((ret >= 0), "H5Dwrite dataset7 succeeded");
-
- /* release all temporary handles. */
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
-
- /*
- * All writes completed. Close datasets collectively
- */
- ret = H5Dclose(dataset1);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
- ret = H5Dclose(dataset2);
- VRFY((ret >= 0), "H5Dclose2 succeeded");
- ret = H5Dclose(dataset3);
- VRFY((ret >= 0), "H5Dclose3 succeeded");
- ret = H5Dclose(dataset4);
- VRFY((ret >= 0), "H5Dclose4 succeeded");
- ret = H5Dclose(dataset5);
- VRFY((ret >= 0), "H5Dclose5 succeeded");
- ret = H5Dclose(dataset6);
- VRFY((ret >= 0), "H5Dclose6 succeeded");
- ret = H5Dclose(dataset7);
- VRFY((ret >= 0), "H5Dclose7 succeeded");
-
- /* close the file collectively */
- H5Fclose(fid);
-
- /* release data buffers */
- if (coords)
- free(coords);
- if (data_array1)
- free(data_array1);
-}
-
-/*
- * Example of using the parallel HDF5 library to read two datasets
- * in one HDF5 file with collective parallel access support.
- * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
- * Each process controls only a slab of size dim0 x dim1 within each
- * dataset. [Note: this is not yet the case; the datasets are currently of
- * size dim0 x dim1 and each process controls a hyperslab within each one.]
- */
-
-void
-dataset_readAll(void)
-{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2, dataset5, dataset6, dataset7; /* Dataset ID */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
- const char *filename;
-
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
-
- size_t num_points; /* for point selection */
- hsize_t *coords = NULL; /* for point selection */
- int i, j, k;
-
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
-
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- filename = PARATESTFILE /* GetTestParameters() */;
- if (VERBOSE_MED)
- printf("Collective read test on file %s\n", filename);
-
- /* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, basic dataset, or more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- /* set up the coords array selection */
- num_points = (size_t)dim1;
- coords = (hsize_t *)malloc((size_t)dim0 * (size_t)dim1 * RANK * sizeof(hsize_t));
- VRFY((coords != NULL), "coords malloc succeeded");
-
- /* allocate memory for data buffer */
- data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
- VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
- data_origin1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
- VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
-
- /* -------------------
- * OPEN AN HDF5 FILE
- * -------------------*/
- /* setup file access template */
- acc_tpl = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_tpl >= 0), "");
-
- /* open the file collectively */
- fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
- VRFY((fid >= 0), "H5Fopen succeeded");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "");
-
- /* --------------------------
- * Open the datasets in it
- * ------------------------- */
- /* open the dataset1 collectively */
- dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
- VRFY((dataset1 >= 0), "H5Dopen2 succeeded");
-
- /* open another dataset collectively */
- dataset2 = H5Dopen2(fid, DATASETNAME2, H5P_DEFAULT);
- VRFY((dataset2 >= 0), "H5Dopen2 2 succeeded");
-
- /* open another dataset collectively */
- dataset5 = H5Dopen2(fid, DATASETNAME7, H5P_DEFAULT);
- VRFY((dataset5 >= 0), "H5Dopen2 5 succeeded");
- dataset6 = H5Dopen2(fid, DATASETNAME8, H5P_DEFAULT);
- VRFY((dataset6 >= 0), "H5Dopen2 6 succeeded");
- dataset7 = H5Dopen2(fid, DATASETNAME9, H5P_DEFAULT);
- VRFY((dataset7 >= 0), "H5Dopen2 7 succeeded");
-
- /*
- * Set up dimensions of the slab this process accesses.
- */
-
- /* Dataset1: each process takes a block of columns. */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset1);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
-
- /* fill dataset with test data */
- dataset_fill(start, block, data_origin1);
- MESG("data_array initialized");
- if (VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_origin1);
- }
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* read data collectively */
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((ret >= 0), "H5Dread dataset1 succeeded");
-
- /* verify the read data with original expected data */
- ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if (ret)
- nerrors++;
-
- /* setup dimensions again to readAll with zero columns for process 0 */
- if (VERBOSE_MED)
- printf("readAll by some with zero col\n");
- slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL);
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
- /* need to make mem_dataspace to match for process 0 */
- if (MAINPROCESS) {
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
- }
- MESG("readAll by some with zero col");
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((ret >= 0), "H5Dread dataset1 by ZCOL succeeded");
-
- /* verify the read data with original expected data */
- ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if (ret)
- nerrors++;
-
- /* release all temporary handles. */
- /* Could have used them for dataset2 but it is cleaner */
- /* to create them again.*/
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
-
- /* Dataset2: each process takes a block of rows. */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset1);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
-
- /* fill dataset with test data */
- dataset_fill(start, block, data_origin1);
- MESG("data_array initialized");
- if (VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_origin1);
- }
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* read data collectively */
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((ret >= 0), "H5Dread dataset2 succeeded");
-
- /* verify the read data with original expected data */
- ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if (ret)
- nerrors++;
-
- /* setup dimensions again to readAll with zero rows for process 0 */
- if (VERBOSE_MED)
- printf("readAll by some with zero row\n");
- slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
- /* need to make mem_dataspace to match for process 0 */
- if (MAINPROCESS) {
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
- }
- MESG("readAll by some with zero row");
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((ret >= 0), "H5Dread dataset1 by ZROW succeeded");
-
- /* verify the read data with original expected data */
- ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if (ret)
- nerrors++;
-
- /* release all temporary handles. */
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
-
- if (data_array1)
- free(data_array1);
- if (data_origin1)
- free(data_origin1);
- data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
- VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
- data_origin1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
- VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
-
- block[0] = 1;
- block[1] = (hsize_t)dim1;
- stride[0] = 1;
- stride[1] = (hsize_t)dim1;
- count[0] = 1;
- count[1] = 1;
- start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
- start[1] = 0;
-
- dataset_fill(start, block, data_origin1);
- MESG("data_array initialized");
- if (VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_origin1);
- }
-
- /* Dataset5: point selection in memory - Hyperslab selection in file*/
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset5);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- start[0] = 0;
- start[1] = 0;
- point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
- mem_dataspace = H5Dget_space(dataset5);
- VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((ret >= 0), "H5Sselect_elements succeeded");
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* read data collectively */
- ret = H5Dread(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((ret >= 0), "H5Dread dataset5 succeeded");
-
- ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if (ret)
- nerrors++;
-
- /* release all temporary handles. */
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
-
- if (data_array1)
- free(data_array1);
- data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
- VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
-
- /* Dataset6: point selection in File - Point selection in Memory*/
- /* create a file dataspace independently */
- start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
- start[1] = 0;
- point_set(start, count, stride, block, num_points, coords, IN_ORDER);
- file_dataspace = H5Dget_space(dataset6);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((ret >= 0), "H5Sselect_elements succeeded");
-
- start[0] = 0;
- start[1] = 0;
- point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
- mem_dataspace = H5Dget_space(dataset6);
- VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((ret >= 0), "H5Sselect_elements succeeded");
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* read data collectively */
- ret = H5Dread(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((ret >= 0), "H5Dread dataset6 succeeded");
-
- ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if (ret)
- nerrors++;
-
- /* release all temporary handles. */
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
-
- if (data_array1)
- free(data_array1);
- data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
- VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
-
- /* Dataset7: point selection in memory - All selection in file*/
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset7);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_all(file_dataspace);
- VRFY((ret >= 0), "H5Sselect_all succeeded");
-
- num_points = (size_t)(dim0 * dim1);
- k = 0;
- for (i = 0; i < dim0; i++) {
- for (j = 0; j < dim1; j++) {
- coords[k++] = (hsize_t)i;
- coords[k++] = (hsize_t)j;
- }
- }
- mem_dataspace = H5Dget_space(dataset7);
- VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((ret >= 0), "H5Sselect_elements succeeded");
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* read data collectively */
- ret = H5Dread(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((ret >= 0), "H5Dread dataset7 succeeded");
-
- start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
- start[1] = 0;
- ret = dataset_vrfy(start, count, stride, block, data_array1 + (dim0 / mpi_size * dim1 * mpi_rank),
- data_origin1);
- if (ret)
- nerrors++;
-
- /* release all temporary handles. */
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
-
- /*
- * All reads completed. Close datasets collectively
- */
- ret = H5Dclose(dataset1);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
- ret = H5Dclose(dataset2);
- VRFY((ret >= 0), "H5Dclose2 succeeded");
- ret = H5Dclose(dataset5);
- VRFY((ret >= 0), "H5Dclose5 succeeded");
- ret = H5Dclose(dataset6);
- VRFY((ret >= 0), "H5Dclose6 succeeded");
- ret = H5Dclose(dataset7);
- VRFY((ret >= 0), "H5Dclose7 succeeded");
-
- /* close the file collectively */
- H5Fclose(fid);
-
- /* release data buffers */
- if (coords)
- free(coords);
- if (data_array1)
- free(data_array1);
- if (data_origin1)
- free(data_origin1);
-}
-
-/*
- * Part 2--Independent read/write for extendible datasets.
- */
-
-/*
- * Example of using the parallel HDF5 library to create two extendible
- * datasets in one HDF5 file with independent parallel MPIO access support.
- * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
- * Each process controls only a slab of size dim0 x dim1 within each
- * dataset.
- */
-
-void
-extend_writeInd(void)
-{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- const char *filename;
- hsize_t dims[RANK]; /* dataset dim sizes */
- hsize_t max_dims[RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- hsize_t chunk_dims[RANK]; /* chunk sizes */
- hid_t dataset_pl; /* dataset create prop. list */
-
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK]; /* for hyperslab setting */
- hsize_t stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
-
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
-
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- filename = PARATESTFILE /* GetTestParameters() */;
- if (VERBOSE_MED)
- printf("Extend independent write test on file %s\n", filename);
-
- /* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, basic dataset, or more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- /* setup chunk-size. Make sure sizes are > 0 */
- chunk_dims[0] = (hsize_t)chunkdim0;
- chunk_dims[1] = (hsize_t)chunkdim1;
-
- /* allocate memory for data buffer */
- data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
- VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
-
- /* -------------------
- * START AN HDF5 FILE
- * -------------------*/
- /* setup file access template */
- acc_tpl = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_tpl >= 0), "");
-
- /* Reduce the number of metadata cache slots, so that there are cache
- * collisions during the raw data I/O on the chunked dataset. This stresses
- * the metadata cache and tests for cache bugs. -QAK
- */
- {
- int mdc_nelmts;
- size_t rdcc_nelmts;
- size_t rdcc_nbytes;
- double rdcc_w0;
-
- ret = H5Pget_cache(acc_tpl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0);
- VRFY((ret >= 0), "H5Pget_cache succeeded");
- mdc_nelmts = 4;
- ret = H5Pset_cache(acc_tpl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);
- VRFY((ret >= 0), "H5Pset_cache succeeded");
- }
-
- /* create the file collectively */
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
- VRFY((fid >= 0), "H5Fcreate succeeded");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "");
-
- /* --------------------------------------------------------------
- * Define the dimensions of the overall datasets and create them.
- * ------------------------------------------------------------- */
-
- /* set up dataset storage chunk sizes and creation property list */
- if (VERBOSE_MED)
- printf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
- dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
- ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
- VRFY((ret >= 0), "H5Pset_chunk succeeded");
-
- /* setup dimensionality object */
- /* start out with no rows, extend it later. */
- dims[0] = dims[1] = 0;
- sid = H5Screate_simple(RANK, dims, max_dims);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
-
- /* create an extendible dataset collectively */
- dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
- VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
-
- /* create another extendible dataset collectively */
- dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
- VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
-
- /* release resource */
- H5Sclose(sid);
- H5Pclose(dataset_pl);
-
- /* -------------------------
- * Test writing to dataset1
- * -------------------------*/
- /* set up dimensions of the slab this process accesses */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
- /* put some trivial data in the data_array */
- dataset_fill(start, block, data_array1);
- MESG("data_array initialized");
- if (VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
- }
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
-
- /* Extend its current dim sizes before writing */
- dims[0] = (hsize_t)dim0;
- dims[1] = (hsize_t)dim1;
- ret = H5Dset_extent(dataset1, dims);
- VRFY((ret >= 0), "H5Dset_extent succeeded");
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset1);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* write data independently */
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
- VRFY((ret >= 0), "H5Dwrite succeeded");
-
- /* release resource */
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
-
- /* -------------------------
- * Test writing to dataset2
- * -------------------------*/
- /* set up dimensions of the slab this process accesses */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
-
- /* put some trivial data in the data_array */
- dataset_fill(start, block, data_array1);
- MESG("data_array initialized");
- if (VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
- }
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
-
- /* Try writing to dataset2 beyond its current dim sizes. Should fail. */
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset2);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* write data independently. Should fail. */
- H5E_BEGIN_TRY
- {
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
- }
- H5E_END_TRY
- VRFY((ret < 0), "H5Dwrite failed as expected");
-
- H5Sclose(file_dataspace);
-
- /* Extend dataset2 and try again. Should succeed. */
- dims[0] = (hsize_t)dim0;
- dims[1] = (hsize_t)dim1;
- ret = H5Dset_extent(dataset2, dims);
- VRFY((ret >= 0), "H5Dset_extent succeeded");
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset2);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* write data independently */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
- VRFY((ret >= 0), "H5Dwrite succeeded");
-
- /* release resource */
- ret = H5Sclose(file_dataspace);
- VRFY((ret >= 0), "H5Sclose succeeded");
- ret = H5Sclose(mem_dataspace);
- VRFY((ret >= 0), "H5Sclose succeeded");
-
- /* close dataset collectively */
- ret = H5Dclose(dataset1);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
- ret = H5Dclose(dataset2);
- VRFY((ret >= 0), "H5Dclose2 succeeded");
-
- /* close the file collectively */
- H5Fclose(fid);
-
- /* release data buffers */
- if (data_array1)
- free(data_array1);
-}
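For readers following the setup steps above: create_faccess_plist() is a helper from the shared parallel test library whose body is not part of this diff. A minimal sketch of what such a helper typically does, assuming FACC_MPIO is the flag (from the test header) that selects the MPI-IO driver, might look like the fragment below; it is illustrative only, not the library's actual implementation.

    #include <mpi.h>
    #include "hdf5.h"

    /* Hedged sketch only -- the real helper also handles split-file and
     * other virtual file driver variants selected by facc_type. */
    static hid_t
    create_faccess_plist_sketch(MPI_Comm comm, MPI_Info info, int l_facc_type)
    {
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

        if (fapl < 0)
            return H5I_INVALID_HID;

        /* FACC_MPIO (assumed from testphdf5.h) requests the MPI-IO VFD */
        if (l_facc_type == FACC_MPIO && H5Pset_fapl_mpio(fapl, comm, info) < 0) {
            H5Pclose(fapl);
            return H5I_INVALID_HID;
        }

        return fapl;
    }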
-
-/*
- * Example of using the parallel HDF5 library to create an extendable dataset
- * and perform I/O on it in a way that verifies that the chunk cache is
- * bypassed for parallel I/O.
- */
-
-void
-extend_writeInd2(void)
-{
- const char *filename;
- hid_t fid; /* HDF5 file ID */
- hid_t fapl; /* File access templates */
- hid_t fs; /* File dataspace ID */
- hid_t ms; /* Memory dataspace ID */
- hid_t dataset; /* Dataset ID */
- hsize_t orig_size = 10; /* Original dataset dim size */
- hsize_t new_size = 20; /* Extended dataset dim size */
- hsize_t one = 1;
- hsize_t max_size = H5S_UNLIMITED; /* dataset maximum dim size */
- hsize_t chunk_size = 16384; /* chunk size */
- hid_t dcpl; /* dataset create prop. list */
- int written[10], /* Data to write */
- retrieved[10]; /* Data read in */
- int mpi_size, mpi_rank; /* MPI settings */
- int i; /* Local index variable */
- herr_t ret; /* Generic return value */
-
- filename = PARATESTFILE /* GetTestParameters() */;
- if (VERBOSE_MED)
- printf("Extend independent write test #2 on file %s\n", filename);
-
- /* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, basic dataset, or more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- /* -------------------
- * START AN HDF5 FILE
- * -------------------*/
- /* setup file access template */
- fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- VRFY((fapl >= 0), "create_faccess_plist succeeded");
-
- /* create the file collectively */
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
- VRFY((fid >= 0), "H5Fcreate succeeded");
-
- /* Release file-access template */
- ret = H5Pclose(fapl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /* --------------------------------------------------------------
- * Define the dimensions of the overall datasets and create them.
- * ------------------------------------------------------------- */
-
- /* set up dataset storage chunk sizes and creation property list */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcpl >= 0), "H5Pcreate succeeded");
- ret = H5Pset_chunk(dcpl, 1, &chunk_size);
- VRFY((ret >= 0), "H5Pset_chunk succeeded");
-
- /* setup dimensionality object */
- fs = H5Screate_simple(1, &orig_size, &max_size);
- VRFY((fs >= 0), "H5Screate_simple succeeded");
-
- /* create an extendible dataset collectively */
- dataset = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, fs, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreat2e succeeded");
-
- /* release resource */
- ret = H5Pclose(dcpl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /* -------------------------
- * Test writing to dataset
- * -------------------------*/
- /* create a memory dataspace independently */
- ms = H5Screate_simple(1, &orig_size, &max_size);
- VRFY((ms >= 0), "H5Screate_simple succeeded");
-
- /* put some trivial data in the data_array */
- for (i = 0; i < (int)orig_size; i++)
- written[i] = i;
- MESG("data array initialized");
- if (VERBOSE_MED) {
- MESG("writing at offset zero: ");
- for (i = 0; i < (int)orig_size; i++)
- printf("%s%d", i ? ", " : "", written[i]);
- printf("\n");
- }
- ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written);
- VRFY((ret >= 0), "H5Dwrite succeeded");
-
- /* -------------------------
- * Read initial data from dataset.
- * -------------------------*/
- ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved);
- VRFY((ret >= 0), "H5Dread succeeded");
- for (i = 0; i < (int)orig_size; i++)
- if (written[i] != retrieved[i]) {
- printf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n", __LINE__, i,
- written[i], i, retrieved[i]);
- nerrors++;
- }
- if (VERBOSE_MED) {
- MESG("read at offset zero: ");
- for (i = 0; i < (int)orig_size; i++)
- printf("%s%d", i ? ", " : "", retrieved[i]);
- printf("\n");
- }
-
- /* -------------------------
- * Extend the dataset & retrieve new dataspace
- * -------------------------*/
- ret = H5Dset_extent(dataset, &new_size);
- VRFY((ret >= 0), "H5Dset_extent succeeded");
- ret = H5Sclose(fs);
- VRFY((ret >= 0), "H5Sclose succeeded");
- fs = H5Dget_space(dataset);
- VRFY((fs >= 0), "H5Dget_space succeeded");
-
- /* -------------------------
- * Write to the second half of the dataset
- * -------------------------*/
- for (i = 0; i < (int)orig_size; i++)
- written[i] = (int)orig_size + i;
- MESG("data array re-initialized");
- if (VERBOSE_MED) {
- MESG("writing at offset 10: ");
- for (i = 0; i < (int)orig_size; i++)
- printf("%s%d", i ? ", " : "", written[i]);
- printf("\n");
- }
- ret = H5Sselect_hyperslab(fs, H5S_SELECT_SET, &orig_size, NULL, &one, &orig_size);
- VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
- ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written);
- VRFY((ret >= 0), "H5Dwrite succeeded");
-
- /* -------------------------
- * Read the new data
- * -------------------------*/
- ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved);
- VRFY((ret >= 0), "H5Dread succeeded");
- for (i = 0; i < (int)orig_size; i++)
- if (written[i] != retrieved[i]) {
- printf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n", __LINE__, i,
- written[i], i, retrieved[i]);
- nerrors++;
- }
- if (VERBOSE_MED) {
- MESG("read at offset 10: ");
- for (i = 0; i < (int)orig_size; i++)
- printf("%s%d", i ? ", " : "", retrieved[i]);
- printf("\n");
- }
-
- /* Close dataset collectively */
- ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose succeeded");
-
- /* Close the file collectively */
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
-}
-
-/* Example of using the parallel HDF5 library to read an extendible dataset */
-void
-extend_readInd(void)
-{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_array2 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
- const char *filename;
-
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
-
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
-
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- filename = PARATESTFILE /* GetTestParameters() */;
- if (VERBOSE_MED)
- printf("Extend independent read test on file %s\n", filename);
-
- /* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, basic dataset, or more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- /* allocate memory for data buffer */
- data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
- VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
- data_array2 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
- VRFY((data_array2 != NULL), "data_array2 malloc succeeded");
- data_origin1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
- VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
-
- /* -------------------
- * OPEN AN HDF5 FILE
- * -------------------*/
- /* setup file access template */
- acc_tpl = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_tpl >= 0), "");
-
- /* open the file collectively */
- fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
- VRFY((fid >= 0), "");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "");
-
- /* open the dataset1 collectively */
- dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
- VRFY((dataset1 >= 0), "");
-
- /* open another dataset collectively */
- dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
- VRFY((dataset2 >= 0), "");
-
- /* Try to extend dataset1, which is open RDONLY. Should fail. */
-
- file_dataspace = H5Dget_space(dataset1);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL);
- VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded");
- dims[0]++;
- H5E_BEGIN_TRY
- {
- ret = H5Dset_extent(dataset1, dims);
- }
- H5E_END_TRY
- VRFY((ret < 0), "H5Dset_extent failed as expected");
-
- H5Sclose(file_dataspace);
-
- /* Read dataset1 using BYROW pattern */
- /* set up dimensions of the slab this process accesses */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset1);
- VRFY((file_dataspace >= 0), "");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "");
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
-
- /* fill dataset with test data */
- dataset_fill(start, block, data_origin1);
- if (VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
- }
-
- /* read data independently */
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
- VRFY((ret >= 0), "H5Dread succeeded");
-
- /* verify the read data with original expected data */
- ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- VRFY((ret == 0), "dataset1 read verified correct");
- if (ret)
- nerrors++;
-
- H5Sclose(mem_dataspace);
- H5Sclose(file_dataspace);
-
- /* Read dataset2 using BYCOL pattern */
- /* set up dimensions of the slab this process accesses */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset2);
- VRFY((file_dataspace >= 0), "");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "");
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
-
- /* fill dataset with test data */
- dataset_fill(start, block, data_origin1);
- if (VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
- }
-
- /* read data independently */
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
- VRFY((ret >= 0), "H5Dread succeeded");
-
- /* verify the read data with original expected data */
- ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- VRFY((ret == 0), "dataset2 read verified correct");
- if (ret)
- nerrors++;
-
- H5Sclose(mem_dataspace);
- H5Sclose(file_dataspace);
-
- /* close dataset collectively */
- ret = H5Dclose(dataset1);
- VRFY((ret >= 0), "");
- ret = H5Dclose(dataset2);
- VRFY((ret >= 0), "");
-
- /* close the file collectively */
- H5Fclose(fid);
-
- /* release data buffers */
- if (data_array1)
- free(data_array1);
- if (data_array2)
- free(data_array2);
- if (data_origin1)
- free(data_origin1);
-}
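slab_set() partitions the dim0 x dim1 dataset into one hyperslab per rank but is defined elsewhere in this file. Judging from how start/count/stride/block are consumed above, a hedged sketch of the BYROW/BYCOL conventions it appears to follow is shown below; dim0, dim1, BYROW and BYCOL are the test file's own globals and constants, and the real helper supports additional modes not sketched here.

    /* Sketch of the assumed row/column decomposition (illustrative only) */
    static void
    slab_set_sketch(int rank, int size, hsize_t start[], hsize_t count[],
                    hsize_t stride[], hsize_t block[], int mode)
    {
        if (mode == BYROW) {
            /* each rank owns a contiguous band of rows */
            block[0] = (hsize_t)dim0 / (hsize_t)size;
            block[1] = (hsize_t)dim1;
            start[0] = (hsize_t)rank * block[0];
            start[1] = 0;
        }
        else {
            /* BYCOL: each rank owns a contiguous band of columns */
            block[0] = (hsize_t)dim0;
            block[1] = (hsize_t)dim1 / (hsize_t)size;
            start[0] = 0;
            start[1] = (hsize_t)rank * block[1];
        }
        stride[0] = block[0];
        stride[1] = block[1];
        count[0] = count[1] = 1;
    }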
-
-/*
- * Part 3--Collective read/write for extendible datasets.
- */
-
-/*
- * Example of using the parallel HDF5 library to create two extendible
- * datasets in one HDF5 file with collective parallel MPIO access support.
- * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
- * Each process controls only a slab of size dim0 x dim1 within each
- * dataset.
- */
-
-void
-extend_writeAll(void)
-{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- const char *filename;
- hsize_t dims[RANK]; /* dataset dim sizes */
- hsize_t max_dims[RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- hsize_t chunk_dims[RANK]; /* chunk sizes */
- hid_t dataset_pl; /* dataset create prop. list */
-
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK]; /* for hyperslab setting */
- hsize_t stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
-
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
-
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- filename = PARATESTFILE /* GetTestParameters() */;
- if (VERBOSE_MED)
- printf("Extend independent write test on file %s\n", filename);
-
- /* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, basic dataset, or more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- /* setup chunk-size. Make sure sizes are > 0 */
- chunk_dims[0] = (hsize_t)chunkdim0;
- chunk_dims[1] = (hsize_t)chunkdim1;
-
- /* allocate memory for data buffer */
- data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
- VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
-
- /* -------------------
- * START AN HDF5 FILE
- * -------------------*/
- /* setup file access template */
- acc_tpl = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_tpl >= 0), "");
-
- /* Reduce the number of metadata cache slots, so that there are cache
- * collisions during the raw data I/O on the chunked dataset. This stresses
- * the metadata cache and tests for cache bugs. -QAK
- */
- {
- int mdc_nelmts;
- size_t rdcc_nelmts;
- size_t rdcc_nbytes;
- double rdcc_w0;
-
- ret = H5Pget_cache(acc_tpl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0);
- VRFY((ret >= 0), "H5Pget_cache succeeded");
- mdc_nelmts = 4;
- ret = H5Pset_cache(acc_tpl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);
- VRFY((ret >= 0), "H5Pset_cache succeeded");
- }
-
- /* create the file collectively */
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
- VRFY((fid >= 0), "H5Fcreate succeeded");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "");
-
- /* --------------------------------------------------------------
- * Define the dimensions of the overall datasets and create them.
- * ------------------------------------------------------------- */
-
- /* set up dataset storage chunk sizes and creation property list */
- if (VERBOSE_MED)
- printf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
- dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
- ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
- VRFY((ret >= 0), "H5Pset_chunk succeeded");
-
- /* setup dimensionality object */
- /* start out with no rows, extend it later. */
- dims[0] = dims[1] = 0;
- sid = H5Screate_simple(RANK, dims, max_dims);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
-
- /* create an extendible dataset collectively */
- dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
- VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
-
- /* create another extendible dataset collectively */
- dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
- VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
-
- /* release resource */
- H5Sclose(sid);
- H5Pclose(dataset_pl);
-
- /* -------------------------
- * Test writing to dataset1
- * -------------------------*/
- /* set up dimensions of the slab this process accesses */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
- /* put some trivial data in the data_array */
- dataset_fill(start, block, data_array1);
- MESG("data_array initialized");
- if (VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
- }
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
-
- /* Extend its current dim sizes before writing */
- dims[0] = (hsize_t)dim0;
- dims[1] = (hsize_t)dim1;
- ret = H5Dset_extent(dataset1, dims);
- VRFY((ret >= 0), "H5Dset_extent succeeded");
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset1);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* write data collectively */
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((ret >= 0), "H5Dwrite succeeded");
-
- /* release resource */
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
-
- /* -------------------------
- * Test writing to dataset2
- * -------------------------*/
- /* set up dimensions of the slab this process accesses */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
-
- /* put some trivial data in the data_array */
- dataset_fill(start, block, data_array1);
- MESG("data_array initialized");
- if (VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
- }
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* Try writing to dataset2 beyond its current dim sizes. Should fail. */
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset2);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* write data collectively. Should fail. */
- H5E_BEGIN_TRY
- {
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- }
- H5E_END_TRY
- VRFY((ret < 0), "H5Dwrite failed as expected");
-
- H5Sclose(file_dataspace);
-
- /* Extend dataset2 and try again. Should succeed. */
- dims[0] = (hsize_t)dim0;
- dims[1] = (hsize_t)dim1;
- ret = H5Dset_extent(dataset2, dims);
- VRFY((ret >= 0), "H5Dset_extent succeeded");
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset2);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* write data collectively */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((ret >= 0), "H5Dwrite succeeded");
-
- /* release resource */
- ret = H5Sclose(file_dataspace);
- VRFY((ret >= 0), "H5Sclose succeeded");
- ret = H5Sclose(mem_dataspace);
- VRFY((ret >= 0), "H5Sclose succeeded");
- ret = H5Pclose(xfer_plist);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /* close dataset collectively */
- ret = H5Dclose(dataset1);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
- ret = H5Dclose(dataset2);
- VRFY((ret >= 0), "H5Dclose2 succeeded");
-
- /* close the file collectively */
- H5Fclose(fid);
-
- /* release data buffers */
- if (data_array1)
- free(data_array1);
-}
-
-/* Example of using the parallel HDF5 library to read an extendible dataset */
-void
-extend_readAll(void)
-{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- const char *filename;
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_array2 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
-
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
-
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
-
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- filename = PARATESTFILE /* GetTestParameters() */;
- if (VERBOSE_MED)
- printf("Extend independent read test on file %s\n", filename);
-
- /* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, basic dataset, or more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- /* allocate memory for data buffer */
- data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
- VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
- data_array2 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
- VRFY((data_array2 != NULL), "data_array2 malloc succeeded");
- data_origin1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
- VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
-
- /* -------------------
- * OPEN AN HDF5 FILE
- * -------------------*/
- /* setup file access template */
- acc_tpl = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_tpl >= 0), "");
-
- /* open the file collectively */
- fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
- VRFY((fid >= 0), "");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "");
-
- /* open the dataset1 collectively */
- dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
- VRFY((dataset1 >= 0), "");
-
- /* open another dataset collectively */
- dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
- VRFY((dataset2 >= 0), "");
-
- /* Try to extend dataset1, which is open RDONLY. Should fail. */
-
- file_dataspace = H5Dget_space(dataset1);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL);
- VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded");
- dims[0]++;
- H5E_BEGIN_TRY
- {
- ret = H5Dset_extent(dataset1, dims);
- }
- H5E_END_TRY
- VRFY((ret < 0), "H5Dset_extent failed as expected");
-
- H5Sclose(file_dataspace);
-
- /* Read dataset1 using BYROW pattern */
- /* set up dimensions of the slab this process accesses */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset1);
- VRFY((file_dataspace >= 0), "");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "");
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
-
- /* fill dataset with test data */
- dataset_fill(start, block, data_origin1);
- if (VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
- }
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* read data collectively */
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((ret >= 0), "H5Dread succeeded");
-
- /* verify the read data with original expected data */
- ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- VRFY((ret == 0), "dataset1 read verified correct");
- if (ret)
- nerrors++;
-
- H5Sclose(mem_dataspace);
- H5Sclose(file_dataspace);
- H5Pclose(xfer_plist);
-
- /* Read dataset2 using BYCOL pattern */
- /* set up dimensions of the slab this process accesses */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset2);
- VRFY((file_dataspace >= 0), "");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "");
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
-
- /* fill dataset with test data */
- dataset_fill(start, block, data_origin1);
- if (VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
- }
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* read data collectively */
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- VRFY((ret >= 0), "H5Dread succeeded");
-
- /* verify the read data with original expected data */
- ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- VRFY((ret == 0), "dataset2 read verified correct");
- if (ret)
- nerrors++;
-
- H5Sclose(mem_dataspace);
- H5Sclose(file_dataspace);
- H5Pclose(xfer_plist);
-
- /* close dataset collectively */
- ret = H5Dclose(dataset1);
- VRFY((ret >= 0), "");
- ret = H5Dclose(dataset2);
- VRFY((ret >= 0), "");
-
- /* close the file collectively */
- H5Fclose(fid);
-
- /* release data buffers */
- if (data_array1)
- free(data_array1);
- if (data_array2)
- free(data_array2);
- if (data_origin1)
- free(data_origin1);
-}
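Both read tests above funnel their checking through dataset_vrfy(), another helper defined elsewhere in this file; conceptually it compares the slab just read against the expected fill pattern element by element. A simplified sketch follows; the argument list is condensed for illustration (the real helper also takes start/count/stride and limits how many mismatches it prints), and DATATYPE is the test file's int typedef.

    /* Simplified sketch: count and report mismatches in one rank's slab */
    static int
    dataset_vrfy_sketch(const hsize_t block[], const DATATYPE *actual, const DATATYPE *expected)
    {
        int vrfyerrs = 0;

        for (hsize_t i = 0; i < block[0]; i++)
            for (hsize_t j = 0; j < block[1]; j++) {
                size_t idx = (size_t)(i * block[1] + j);

                if (actual[idx] != expected[idx]) {
                    printf("slab mismatch at [%lu][%lu]: expected %d, got %d\n",
                           (unsigned long)i, (unsigned long)j, expected[idx], actual[idx]);
                    vrfyerrs++;
                }
            }

        return vrfyerrs; /* 0 means the slab verified clean */
    }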
-
-#ifdef H5_HAVE_FILTER_DEFLATE
-/*
- * Example of using the parallel HDF5 library to read a compressed
- * dataset in an HDF5 file with collective parallel access support.
- */
-void
-compress_readAll(void)
-{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t dcpl; /* Dataset creation property list */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t dataspace; /* Dataspace ID */
- hid_t dataset; /* Dataset ID */
- int rank = 1; /* Dataspace rank */
- hsize_t dim = (hsize_t)dim0; /* Dataspace dimensions */
- unsigned u; /* Local index variable */
- unsigned chunk_opts; /* Chunk options */
- unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
- DATATYPE *data_read = NULL; /* data buffer */
- DATATYPE *data_orig = NULL; /* expected data buffer */
- const char *filename;
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
- int mpi_size, mpi_rank;
- herr_t ret; /* Generic return value */
-
- filename = PARATESTFILE /* GetTestParameters() */;
- if (VERBOSE_MED)
- printf("Collective chunked dataset read test on file %s\n", filename);
-
- /* Retrieve MPI parameters */
- MPI_Comm_size(comm, &mpi_size);
- MPI_Comm_rank(comm, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file or dataset aren't supported with this connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- /* Allocate data buffer */
- data_orig = (DATATYPE *)malloc((size_t)dim * sizeof(DATATYPE));
- VRFY((data_orig != NULL), "data_origin1 malloc succeeded");
- data_read = (DATATYPE *)malloc((size_t)dim * sizeof(DATATYPE));
- VRFY((data_read != NULL), "data_array1 malloc succeeded");
-
- /* Initialize data buffers */
- for (u = 0; u < dim; u++)
- data_orig[u] = (DATATYPE)u;
-
- /* Run test both with and without filters disabled on partial chunks */
- for (disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1;
- disable_partial_chunk_filters++) {
- /* Process zero creates the file with a compressed, chunked dataset */
- if (mpi_rank == 0) {
- hsize_t chunk_dim; /* Chunk dimensions */
-
- /* Create the file */
- fid = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((fid > 0), "H5Fcreate succeeded");
-
- /* Create property list for chunking and compression */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcpl > 0), "H5Pcreate succeeded");
-
- ret = H5Pset_layout(dcpl, H5D_CHUNKED);
- VRFY((ret >= 0), "H5Pset_layout succeeded");
-
- /* Use eight chunks */
- chunk_dim = dim / 8;
- ret = H5Pset_chunk(dcpl, rank, &chunk_dim);
- VRFY((ret >= 0), "H5Pset_chunk succeeded");
-
- /* Set chunk options appropriately */
- if (disable_partial_chunk_filters) {
- ret = H5Pget_chunk_opts(dcpl, &chunk_opts);
- VRFY((ret >= 0), "H5Pget_chunk_opts succeeded");
-
- chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
-
- ret = H5Pset_chunk_opts(dcpl, chunk_opts);
- VRFY((ret >= 0), "H5Pset_chunk_opts succeeded");
- } /* end if */
-
- ret = H5Pset_deflate(dcpl, 9);
- VRFY((ret >= 0), "H5Pset_deflate succeeded");
-
- /* Create dataspace */
- dataspace = H5Screate_simple(rank, &dim, NULL);
- VRFY((dataspace > 0), "H5Screate_simple succeeded");
-
- /* Create dataset */
- dataset =
- H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- VRFY((dataset > 0), "H5Dcreate2 succeeded");
-
- /* Write compressed data */
- ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_orig);
- VRFY((ret >= 0), "H5Dwrite succeeded");
-
- /* Close objects */
- ret = H5Pclose(dcpl);
- VRFY((ret >= 0), "H5Pclose succeeded");
- ret = H5Sclose(dataspace);
- VRFY((ret >= 0), "H5Sclose succeeded");
- ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose succeeded");
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
- }
-
- /* Wait for file to be created */
- MPI_Barrier(comm);
-
- /* -------------------
- * OPEN AN HDF5 FILE
- * -------------------*/
-
- /* setup file access template */
- acc_tpl = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_tpl >= 0), "");
-
- /* open the file collectively */
- fid = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl);
- VRFY((fid > 0), "H5Fopen succeeded");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /* Open dataset with compressed chunks */
- dataset = H5Dopen2(fid, "compressed_data", H5P_DEFAULT);
- VRFY((dataset > 0), "H5Dopen2 succeeded");
-
- /* Try reading & writing data */
- if (dataset > 0) {
- /* Create dataset transfer property list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist > 0), "H5Pcreate succeeded");
-
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* Try reading the data */
- ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
- VRFY((ret >= 0), "H5Dread succeeded");
-
- /* Verify data read */
- for (u = 0; u < dim; u++)
- if (data_orig[u] != data_read[u]) {
- printf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n", __LINE__,
- (unsigned)u, data_orig[u], (unsigned)u, data_read[u]);
- nerrors++;
- }
-
-#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
- ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
- VRFY((ret >= 0), "H5Dwrite succeeded");
-#endif
-
- ret = H5Pclose(xfer_plist);
- VRFY((ret >= 0), "H5Pclose succeeded");
- ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose succeeded");
- } /* end if */
-
- /* Close file */
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
- } /* end for */
-
- /* release data buffers */
- if (data_read)
- free(data_read);
- if (data_orig)
- free(data_orig);
-}
-#endif /* H5_HAVE_FILTER_DEFLATE */
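compress_readAll() above is guarded only by the compile-time H5_HAVE_FILTER_DEFLATE macro. If a run-time guard were also wanted (for example when filters ship as plugins), HDF5's H5Zfilter_avail() is the standard probe; the fragment below is a hypothetical skip check in the style of the other tests, with MAINPROCESS taken from the test header, and is not part of the test as written.

    /* Hypothetical run-time availability check (illustration only) */
    htri_t deflate_avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE);

    if (deflate_avail <= 0) {
        if (MAINPROCESS)
            puts("SKIPPED - deflate filter not available at run time");
        return;
    }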
-
-/*
- * Part 4--Non-selection for chunked dataset
- */
-
-/*
- * Example of using the parallel HDF5 library to create a chunked
- * dataset in one HDF5 file with collective and independent parallel
- * MPIO access support. The datasets are of size dim0 x dim1.
- * Each process controls only a slab of size dim0 x dim1 within the
- * dataset, with the exception that one process selects no elements.
- */
-
-void
-none_selection_chunk(void)
-{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- const char *filename;
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_origin = NULL; /* data buffer */
- DATATYPE *data_array = NULL; /* data buffer */
- hsize_t chunk_dims[RANK]; /* chunk sizes */
- hid_t dataset_pl; /* dataset create prop. list */
-
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK]; /* for hyperslab setting */
- hsize_t stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
- hsize_t mstart[RANK]; /* for data buffer in memory */
-
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
-
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- filename = PARATESTFILE /* GetTestParameters() */;
- if (VERBOSE_MED)
- printf("Extend independent write test on file %s\n", filename);
-
- /* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file or dataset aren't supported with this connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- /* setup chunk-size. Make sure sizes are > 0 */
- chunk_dims[0] = (hsize_t)chunkdim0;
- chunk_dims[1] = (hsize_t)chunkdim1;
-
- /* -------------------
- * START AN HDF5 FILE
- * -------------------*/
- /* setup file access template */
- acc_tpl = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_tpl >= 0), "");
-
- /* create the file collectively */
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
- VRFY((fid >= 0), "H5Fcreate succeeded");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "");
-
- /* --------------------------------------------------------------
- * Define the dimensions of the overall datasets and create them.
- * ------------------------------------------------------------- */
-
- /* set up dataset storage chunk sizes and creation property list */
- if (VERBOSE_MED)
- printf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
- dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
- ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
- VRFY((ret >= 0), "H5Pset_chunk succeeded");
-
- /* setup dimensionality object */
- dims[0] = (hsize_t)dim0;
- dims[1] = (hsize_t)dim1;
- sid = H5Screate_simple(RANK, dims, NULL);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
-
- /* create an extendible dataset collectively */
- dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
- VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
-
- /* create another extendible dataset collectively */
- dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
- VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
-
- /* release resource */
- H5Sclose(sid);
- H5Pclose(dataset_pl);
-
- /* -------------------------
- * Test collective writing to dataset1
- * -------------------------*/
- /* set up dimensions of the slab this process accesses */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
- /* allocate memory for data buffer. Only allocate enough buffer for
- * each processor's data. */
- if (mpi_rank) {
- data_origin = (DATATYPE *)malloc(block[0] * block[1] * sizeof(DATATYPE));
- VRFY((data_origin != NULL), "data_origin malloc succeeded");
-
- data_array = (DATATYPE *)malloc(block[0] * block[1] * sizeof(DATATYPE));
- VRFY((data_array != NULL), "data_array malloc succeeded");
-
- /* put some trivial data in the data_array */
- mstart[0] = mstart[1] = 0;
- dataset_fill(mstart, block, data_origin);
- MESG("data_array initialized");
- if (VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(mstart, block, data_origin);
- }
- }
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
-
- /* Process 0 has no selection */
- if (!mpi_rank) {
- ret = H5Sselect_none(mem_dataspace);
- VRFY((ret >= 0), "H5Sselect_none succeeded");
- }
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset1);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* Process 0 has no selection */
- if (!mpi_rank) {
- ret = H5Sselect_none(file_dataspace);
- VRFY((ret >= 0), "H5Sselect_none succeeded");
- }
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
- /* write data collectively */
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_origin);
- VRFY((ret >= 0), "H5Dwrite succeeded");
-
- /* read data independently */
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array);
- VRFY((ret >= 0), "");
-
- /* verify the read data with original expected data */
- if (mpi_rank) {
- ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin);
- if (ret)
- nerrors++;
- }
-
- /* -------------------------
- * Test independent writing to dataset2
- * -------------------------*/
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_INDEPENDENT);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
- /* write data independently */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_origin);
- VRFY((ret >= 0), "H5Dwrite succeeded");
-
- /* read data independently */
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array);
- VRFY((ret >= 0), "");
-
- /* verify the read data with original expected data */
- if (mpi_rank) {
- ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin);
- if (ret)
- nerrors++;
- }
-
- /* release resource */
- ret = H5Sclose(file_dataspace);
- VRFY((ret >= 0), "H5Sclose succeeded");
- ret = H5Sclose(mem_dataspace);
- VRFY((ret >= 0), "H5Sclose succeeded");
- ret = H5Pclose(xfer_plist);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /* close dataset collectively */
- ret = H5Dclose(dataset1);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
- ret = H5Dclose(dataset2);
- VRFY((ret >= 0), "H5Dclose2 succeeded");
-
- /* close the file collectively */
- H5Fclose(fid);
-
- /* release data buffers */
- if (data_origin)
- free(data_origin);
- if (data_array)
- free(data_array);
-}
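The next routine, test_actual_io_mode() below, revolves around two introspection properties that the library records on the transfer property list after each parallel read or write. The query pattern it repeats is sketched here for orientation; dxpl_id stands for a transfer plist that was just used for I/O, and nerrors is the test file's global error counter.

    /* Sketch of the introspection queries exercised by test_actual_io_mode() */
    H5D_mpio_actual_io_mode_t        actual_io_mode;
    H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode;

    if (H5Pget_mpio_actual_io_mode(dxpl_id, &actual_io_mode) < 0)
        nerrors++;
    if (H5Pget_mpio_actual_chunk_opt_mode(dxpl_id, &actual_chunk_opt_mode) < 0)
        nerrors++;

    /* e.g. H5D_MPIO_CHUNK_COLLECTIVE with H5D_MPIO_MULTI_CHUNK is expected when
     * every rank performed collective I/O through the multi-chunk path */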
-
-/* Function: test_actual_io_mode
- *
- * Purpose: tests one specific case of collective I/O and checks that the
- * actual_chunk_opt_mode property and the actual_io_mode
- * properties in the DXPL have the correct values.
- *
- * Input: selection_mode: changes the way processes select data from the space, as well
- * as some dxpl flags to get collective I/O to break in different ways.
- *
- * The relevant I/O function and expected response for each mode:
- * TEST_ACTUAL_IO_MULTI_CHUNK_IND:
- * H5D_mpi_chunk_collective_io, each process reports independent I/O
- *
- * TEST_ACTUAL_IO_MULTI_CHUNK_COL:
- * H5D_mpi_chunk_collective_io, each process reports collective I/O
- *
- * TEST_ACTUAL_IO_MULTI_CHUNK_MIX:
- * H5D_mpi_chunk_collective_io, each process reports mixed I/O
- *
- * TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE:
- * H5D_mpi_chunk_collective_io, processes disagree. The root reports
- * collective, the rest report independent I/O
- *
- * TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND:
- *         Same as TEST_ACTUAL_IO_MULTI_CHUNK_IND, but goes directly to
- *         multi-chunk I/O without the num-threshold calculation.
- * TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL:
- *         Same as TEST_ACTUAL_IO_MULTI_CHUNK_COL, but goes directly to
- *         multi-chunk I/O without the num-threshold calculation.
- *
- * TEST_ACTUAL_IO_LINK_CHUNK:
- * H5D_link_chunk_collective_io, processes report linked chunk I/O
- *
- * TEST_ACTUAL_IO_CONTIGUOUS:
- * H5D__contig_collective_write or H5D__contig_collective_read
- * each process reports contiguous collective I/O
- *
- * TEST_ACTUAL_IO_NO_COLLECTIVE:
- * Simple independent I/O. This tests that the defaults are properly set.
- *
- * TEST_ACTUAL_IO_RESET:
- *          Performs collective and then independent I/O with the same dxpl to
- * make sure the property is correctly reset to the default on each use.
- *          Specifically, this test runs TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE
- * (The most complex case that works on all builds) and then performs
- * an independent read and write with the same dxpls.
- *
- * Note: DIRECT_MULTI_CHUNK_MIX and DIRECT_MULTI_CHUNK_MIX_DISAGREE
- * are not needed, as they are covered by the MULTI_CHUNK_MIX and
- * MULTI_CHUNK_MIX_DISAGREE cases. The _DIRECT_ cases only exercise the
- * pathway to multi-chunk I/O via H5FD_MPIO_CHUNK_MULTI_IO instead of the num-threshold.
- */
-static void
-test_actual_io_mode(int selection_mode)
-{
- H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
- H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
- H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
- H5D_mpio_actual_io_mode_t actual_io_mode_write = H5D_MPIO_NO_COLLECTIVE;
- H5D_mpio_actual_io_mode_t actual_io_mode_read = H5D_MPIO_NO_COLLECTIVE;
- H5D_mpio_actual_io_mode_t actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
- const char *filename;
- const char *test_name;
- bool direct_multi_chunk_io;
- bool multi_chunk_io;
- bool is_chunked;
- bool is_collective;
- int mpi_size = -1;
- int mpi_rank = -1;
- int length;
- int *buffer;
- int i;
- MPI_Comm mpi_comm = MPI_COMM_NULL;
- MPI_Info mpi_info = MPI_INFO_NULL;
- hid_t fid = -1;
- hid_t sid = -1;
- hid_t dataset = -1;
- hid_t data_type = H5T_NATIVE_INT;
- hid_t fapl = -1;
- hid_t mem_space = -1;
- hid_t file_space = -1;
- hid_t dcpl = -1;
- hid_t dxpl_write = -1;
- hid_t dxpl_read = -1;
- hsize_t dims[RANK];
- hsize_t chunk_dims[RANK];
- hsize_t start[RANK];
- hsize_t stride[RANK];
- hsize_t count[RANK];
- hsize_t block[RANK];
- char message[256];
- herr_t ret;
-
- /* Set up some flags to make some future if statements slightly more readable */
- direct_multi_chunk_io = (selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND ||
- selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL);
-
- /* Note: RESET performs the same tests as MULTI_CHUNK_MIX_DISAGREE and then
- * tests independent I/O
- */
- multi_chunk_io =
- (selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_IND ||
- selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_COL ||
- selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX ||
- selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE || selection_mode == TEST_ACTUAL_IO_RESET);
-
- is_chunked =
- (selection_mode != TEST_ACTUAL_IO_CONTIGUOUS && selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE);
-
- is_collective = selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE;
-
- /* Set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file or dataset aren't supported with this connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- MPI_Barrier(MPI_COMM_WORLD);
-
- assert(mpi_size >= 1);
-
- mpi_comm = MPI_COMM_WORLD;
- mpi_info = MPI_INFO_NULL;
-
- filename = (const char *)PARATESTFILE /* GetTestParameters() */;
- assert(filename != NULL);
-
- /* Setup the file access template */
- fapl = create_faccess_plist(mpi_comm, mpi_info, facc_type);
- VRFY((fapl >= 0), "create_faccess_plist() succeeded");
-
- /* Create the file */
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
- VRFY((fid >= 0), "H5Fcreate succeeded");
-
- /* Create the basic Space */
- dims[0] = (hsize_t)dim0;
- dims[1] = (hsize_t)dim1;
- sid = H5Screate_simple(RANK, dims, NULL);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
-
- /* Create the dataset creation plist */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcpl >= 0), "dataset creation plist created successfully");
-
- /* If we are not testing contiguous datasets */
- if (is_chunked) {
- /* Set up chunk information. */
- chunk_dims[0] = dims[0] / (hsize_t)mpi_size;
- chunk_dims[1] = dims[1];
- ret = H5Pset_chunk(dcpl, 2, chunk_dims);
- VRFY((ret >= 0), "chunk creation property list succeeded");
- }
-
- /* Create the dataset */
- dataset = H5Dcreate2(fid, "actual_io", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
-
- /* Create the file dataspace */
- file_space = H5Dget_space(dataset);
- VRFY((file_space >= 0), "H5Dget_space succeeded");
-
- /* Choose a selection method based on the type of I/O we want to occur,
- *  and also set up some selection-dependent test info. */
- switch (selection_mode) {
-
- /* Independent I/O with optimization */
- case TEST_ACTUAL_IO_MULTI_CHUNK_IND:
- case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND:
- /* Since the dataset is chunked by row and each process selects a row,
- * each process writes to a different chunk. This forces all I/O to be
- * independent.
- */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
- test_name = "Multi Chunk - Independent";
- actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
- actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
- break;
-
- /* Collective I/O with optimization */
- case TEST_ACTUAL_IO_MULTI_CHUNK_COL:
- case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL:
- /* The dataset is chunked by rows, so each process takes a column which
- * spans all chunks. Since the processes write non-overlapping regular
- * selections to each chunk, the operation is purely collective.
- */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
-
- test_name = "Multi Chunk - Collective";
- actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
- if (mpi_size > 1)
- actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
- else
- actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
- break;
-
- /* Mixed I/O with optimization */
- case TEST_ACTUAL_IO_MULTI_CHUNK_MIX:
- /* A chunk will be assigned collective I/O only if it is selected by each
- * process. To get mixed I/O, have the root select all chunks and each
- * subsequent process select the first and nth chunk. The first chunk,
- * accessed by all, will be assigned collective I/O while each other chunk
- * will be accessed only by the root and the nth process and will be
- * assigned independent I/O. Each process will access one chunk collectively
- * and at least one chunk independently, reporting mixed I/O.
- */
-
- if (mpi_rank == 0) {
- /* Select the first column */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
- }
- else {
- /* Select the first and the nth chunk in the nth column */
- block[0] = (hsize_t)(dim0 / mpi_size);
- block[1] = (hsize_t)(dim1 / mpi_size);
- count[0] = 2;
- count[1] = 1;
- stride[0] = (hsize_t)mpi_rank * block[0];
- stride[1] = 1;
- start[0] = 0;
- start[1] = (hsize_t)mpi_rank * block[1];
- }
-
- test_name = "Multi Chunk - Mixed";
- actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
- actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED;
- break;
-
- /* RESET tests that the properties are properly reset to defaults each time I/O is
- * performed. To achieve this, we have RESET perform collective I/O (which would change
- * the values from the defaults) followed by independent I/O (which should report the
- * default values). RESET doesn't need to have a unique selection, so we reuse
- * MULTI_CHUNK_MIX_DISAGREE, which was chosen because it is a complex case that works
- * on all builds. The independent section of RESET can be found at the end of this function.
- */
- case TEST_ACTUAL_IO_RESET:
-
- /* Mixed I/O with optimization and internal disagreement */
- case TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE:
- /* A chunk will be assigned collective I/O only if it is selected by each
- * process. To get mixed I/O with disagreement, assign process n to the
- * first chunk and the nth chunk. The first chunk, selected by all, is
- * assigned collective I/O, while each other process gets independent I/O.
- * Since the root process will only access the first chunk, it will report
- * collective I/O. The subsequent processes will access the first chunk
- * collectively, and their other chunk independently, reporting mixed I/O.
- */
-
- if (mpi_rank == 0) {
- /* Select the first chunk in the first column */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
- block[0] = block[0] / (hsize_t)mpi_size;
- }
- else {
- /* Select the first and the nth chunk in the nth column */
- block[0] = (hsize_t)(dim0 / mpi_size);
- block[1] = (hsize_t)(dim1 / mpi_size);
- count[0] = 2;
- count[1] = 1;
- stride[0] = (hsize_t)mpi_rank * block[0];
- stride[1] = 1;
- start[0] = 0;
- start[1] = (hsize_t)mpi_rank * block[1];
- }
-
- /* If the testname was not already set by the RESET case */
- if (selection_mode == TEST_ACTUAL_IO_RESET)
- test_name = "RESET";
- else
- test_name = "Multi Chunk - Mixed (Disagreement)";
-
- actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
- if (mpi_size > 1) {
- if (mpi_rank == 0)
- actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
- else
- actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED;
- }
- else
- actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
-
- break;
-
- /* Linked Chunk I/O */
- case TEST_ACTUAL_IO_LINK_CHUNK:
- /* Nothing special; link chunk I/O is forced in the dxpl settings. */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
- test_name = "Link Chunk";
- actual_chunk_opt_mode_expected = H5D_MPIO_LINK_CHUNK;
- actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
- break;
-
- /* Contiguous Dataset */
- case TEST_ACTUAL_IO_CONTIGUOUS:
- /* A non overlapping, regular selection in a contiguous dataset leads to
- * collective I/O */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
- test_name = "Contiguous";
- actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
- actual_io_mode_expected = H5D_MPIO_CONTIGUOUS_COLLECTIVE;
- break;
-
- case TEST_ACTUAL_IO_NO_COLLECTIVE:
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
- test_name = "Independent";
- actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
- actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
- break;
-
- default:
- test_name = "Undefined Selection Mode";
- actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
- actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
- break;
- }
-
- ret = H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* Create a memory dataspace mirroring the dataset and select the same hyperslab
- * as in the file space.
- */
- mem_space = H5Screate_simple(RANK, dims, NULL);
- VRFY((mem_space >= 0), "mem_space created");
-
- ret = H5Sselect_hyperslab(mem_space, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* Get the number of elements in the selection */
- length = dim0 * dim1;
-
- /* Allocate and initialize the buffer */
- buffer = (int *)malloc(sizeof(int) * (size_t)length);
- VRFY((buffer != NULL), "malloc of buffer succeeded");
- for (i = 0; i < length; i++)
- buffer[i] = i;
-
- /* Set up the dxpl for the write */
- dxpl_write = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
-
- /* Set collective I/O properties in the dxpl. */
- if (is_collective) {
- /* Request collective I/O */
- ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
-        /* Set the threshold number of processes per chunk to twice mpi_size.
-         * The threshold can then never be met, which forces multi-chunk I/O
-         * instead of link-chunk I/O through the default decision path.
-         */
- if (multi_chunk_io) {
- /* force multi-chunk-io by threshold */
- ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl_write, (unsigned)mpi_size * 2);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded");
-
-            /* set the percentage threshold used to decide whether a chunk is
-             * assigned collective or independent I/O in this test scenario */
- ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl_write, (unsigned)99);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded");
- }
-
-        /* Go directly to multi-chunk I/O without the threshold calculation. */
- if (direct_multi_chunk_io) {
- /* set for multi chunk io by property*/
- ret = H5Pset_dxpl_mpio_chunk_opt(dxpl_write, H5FD_MPIO_CHUNK_MULTI_IO);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- }
- }
-
- /* Make a copy of the dxpl to test the read operation */
- dxpl_read = H5Pcopy(dxpl_write);
- VRFY((dxpl_read >= 0), "H5Pcopy succeeded");
-
- /* Write */
- ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer);
- if (ret < 0)
- H5Eprint2(H5E_DEFAULT, stdout);
- VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
-
- /* Retrieve Actual io values */
- ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write);
- VRFY((ret >= 0), "retrieving actual io mode succeeded");
-
- ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write);
- VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded");
-
- /* Read */
- ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer);
- if (ret < 0)
- H5Eprint2(H5E_DEFAULT, stdout);
- VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
-
- /* Retrieve Actual io values */
- ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read);
- VRFY((ret >= 0), "retrieving actual io mode succeeded");
-
- ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read);
- VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded");
-
- /* Check write vs read */
- VRFY((actual_io_mode_read == actual_io_mode_write),
- "reading and writing are the same for actual_io_mode");
- VRFY((actual_chunk_opt_mode_read == actual_chunk_opt_mode_write),
- "reading and writing are the same for actual_chunk_opt_mode");
-
- /* Test values */
- if (actual_chunk_opt_mode_expected != (H5D_mpio_actual_chunk_opt_mode_t)-1 &&
- actual_io_mode_expected != (H5D_mpio_actual_io_mode_t)-1) {
- snprintf(message, sizeof(message), "Actual Chunk Opt Mode has the correct value for %s.\n",
- test_name);
- VRFY((actual_chunk_opt_mode_write == actual_chunk_opt_mode_expected), message);
- snprintf(message, sizeof(message), "Actual IO Mode has the correct value for %s.\n", test_name);
- VRFY((actual_io_mode_write == actual_io_mode_expected), message);
- }
- else {
- fprintf(stderr, "%s %d -> (%d,%d)\n", test_name, mpi_rank, actual_chunk_opt_mode_write,
- actual_io_mode_write);
- }
-
- /* To test that the property is successfully reset to the default, we perform some
- * independent I/O after the collective I/O
- */
- if (selection_mode == TEST_ACTUAL_IO_RESET) {
- if (mpi_rank == 0) {
- /* Switch to independent io */
- ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- ret = H5Pset_dxpl_mpio(dxpl_read, H5FD_MPIO_INDEPENDENT);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
- /* Write */
- ret = H5Dwrite(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_write, buffer);
- VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
-
- /* Check Properties */
- ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write);
- VRFY((ret >= 0), "retrieving actual io mode succeeded");
- ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write);
- VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded");
-
- VRFY(actual_chunk_opt_mode_write == H5D_MPIO_NO_CHUNK_OPTIMIZATION,
- "actual_chunk_opt_mode has correct value for reset write (independent)");
- VRFY(actual_io_mode_write == H5D_MPIO_NO_COLLECTIVE,
- "actual_io_mode has correct value for reset write (independent)");
-
- /* Read */
- ret = H5Dread(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_read, buffer);
-            VRFY((ret >= 0), "H5Dread() dataset read succeeded");
-
- /* Check Properties */
- ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read);
- VRFY((ret >= 0), "retrieving actual io mode succeeded");
- ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read);
- VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded");
-
- VRFY(actual_chunk_opt_mode_read == H5D_MPIO_NO_CHUNK_OPTIMIZATION,
- "actual_chunk_opt_mode has correct value for reset read (independent)");
- VRFY(actual_io_mode_read == H5D_MPIO_NO_COLLECTIVE,
- "actual_io_mode has correct value for reset read (independent)");
- }
- }
-
- /* Release some resources */
- ret = H5Sclose(sid);
- VRFY((ret >= 0), "H5Sclose succeeded");
- ret = H5Pclose(fapl);
- VRFY((ret >= 0), "H5Pclose succeeded");
- ret = H5Pclose(dcpl);
- VRFY((ret >= 0), "H5Pclose succeeded");
- ret = H5Pclose(dxpl_write);
- VRFY((ret >= 0), "H5Pclose succeeded");
- ret = H5Pclose(dxpl_read);
- VRFY((ret >= 0), "H5Pclose succeeded");
- ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose succeeded");
- ret = H5Sclose(mem_space);
- VRFY((ret >= 0), "H5Sclose succeeded");
- ret = H5Sclose(file_space);
- VRFY((ret >= 0), "H5Sclose succeeded");
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
- free(buffer);
- return;
-}
-
-/* Function: actual_io_mode_tests
- *
- * Purpose: Tests all possible cases of the actual_io_mode property.
- *
- */
-void
-actual_io_mode_tests(void)
-{
- int mpi_size = -1;
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-
- /* Only run these tests if selection I/O is not being used - selection I/O
- * bypasses this IO mode decision - it's effectively always multi chunk
- * currently */
- if (/* !H5_use_selection_io_g */ true) {
- test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE);
-
- /*
- * Test multi-chunk-io via proc_num threshold
- */
- test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND);
- test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL);
-
- /* The Multi Chunk Mixed test requires at least three processes. */
- if (mpi_size > 2)
- test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX);
- else
- fprintf(stdout, "Multi Chunk Mixed test requires 3 processes minimum\n");
-
- test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE);
-
- /*
- * Test multi-chunk-io via setting direct property
- */
- test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND);
- test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL);
-
- test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK);
- test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS);
-
- test_actual_io_mode(TEST_ACTUAL_IO_RESET);
- }
-
- return;
-}
-
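-/*
- * A minimal, illustrative sketch (kept out of the build with "#if 0"; not part
- * of the tests above) of how an application can query which I/O path a
- * collective write actually took.  All arguments are assumed to be valid IDs
- * supplied by the caller, with the dxpl set up for collective I/O.
- */
-#if 0
-static herr_t
-report_actual_io_mode(hid_t dset_id, hid_t mem_space, hid_t file_space, hid_t dxpl_id, const int *buf)
-{
-    H5D_mpio_actual_io_mode_t        io_mode;
-    H5D_mpio_actual_chunk_opt_mode_t opt_mode;
-
-    /* The dxpl is expected to request collective I/O via H5Pset_dxpl_mpio() */
-    if (H5Dwrite(dset_id, H5T_NATIVE_INT, mem_space, file_space, dxpl_id, buf) < 0)
-        return -1;
-
-    /* Both properties are read back from the same dxpl after the transfer */
-    if (H5Pget_mpio_actual_io_mode(dxpl_id, &io_mode) < 0)
-        return -1;
-    if (H5Pget_mpio_actual_chunk_opt_mode(dxpl_id, &opt_mode) < 0)
-        return -1;
-
-    if (io_mode == H5D_MPIO_NO_COLLECTIVE)
-        printf("collective request fell back to independent I/O\n");
-    if (opt_mode == H5D_MPIO_MULTI_CHUNK)
-        printf("multi-chunk optimization was used\n");
-
-    return 0;
-}
-#endif
-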
-/*
- * Function: test_no_collective_cause_mode
- *
- * Purpose:
- *        Tests cases of broken collective I/O and checks that the
- *        H5Pget_mpio_no_collective_cause properties in the DXPL have the correct values.
- *
- * Input:
- *        selection_mode: mode(s) used to cause broken collective I/O
- *        Note: Originally, each TEST case was meant to be used alone.
- *              This was later updated to accept multiple TEST cases combined
- *              with '|'. However, there is no error checking for combined
- *              cases, so the tester is responsible for supplying a sensible
- *              combination of TESTs when needed.
- *
- *
- * TEST_COLLECTIVE:
- * Test for regular collective I/O without cause of breaking.
- * Just to test normal behavior.
- *
- * TEST_SET_INDEPENDENT:
- * Test for Independent I/O as the cause of breaking collective I/O.
- *
- * TEST_DATATYPE_CONVERSION:
- * Test for Data Type Conversion as the cause of breaking collective I/O.
- *
- * TEST_DATA_TRANSFORMS:
- * Test for Data Transform feature as the cause of breaking collective I/O.
- *
- * TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES:
- * Test for NULL dataspace as the cause of breaking collective I/O.
- *
- * TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT:
- * Test for Compact layout as the cause of breaking collective I/O.
- *
- * TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL:
- *   Test for External-File storage as the cause of breaking collective I/O.
- *
- */
-#ifdef LATER
-#define DSET_NOCOLCAUSE "nocolcause"
-#endif
-#define FILE_EXTERNAL "nocolcause_extern.data"
-static void
-test_no_collective_cause_mode(int selection_mode)
-{
- uint32_t no_collective_cause_local_write = 0;
- uint32_t no_collective_cause_local_read = 0;
- uint32_t no_collective_cause_local_expected = 0;
- uint32_t no_collective_cause_global_write = 0;
- uint32_t no_collective_cause_global_read = 0;
- uint32_t no_collective_cause_global_expected = 0;
-
- const char *filename;
- const char *test_name;
- bool is_chunked = 1;
- bool is_independent = 0;
- int mpi_size = -1;
- int mpi_rank = -1;
- int length;
- int *buffer;
- int i;
- MPI_Comm mpi_comm;
- MPI_Info mpi_info;
- hid_t fid = -1;
- hid_t sid = -1;
- hid_t dataset = -1;
- hid_t data_type = H5T_NATIVE_INT;
- hid_t fapl = -1;
- hid_t dcpl = -1;
- hid_t dxpl_write = -1;
- hid_t dxpl_read = -1;
- hsize_t dims[RANK];
- hid_t mem_space = -1;
- hid_t file_space = -1;
- hsize_t chunk_dims[RANK];
- herr_t ret;
- /* set to global value as default */
- int l_facc_type = facc_type;
- char message[256];
-
- /* Set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, dataset, or dataset more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- MPI_Barrier(MPI_COMM_WORLD);
-
- assert(mpi_size >= 1);
-
- mpi_comm = MPI_COMM_WORLD;
- mpi_info = MPI_INFO_NULL;
-
- /* Create the dataset creation plist */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcpl >= 0), "dataset creation plist created successfully");
-
- if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT) {
- ret = H5Pset_layout(dcpl, H5D_COMPACT);
- VRFY((ret >= 0), "set COMPACT layout succeeded");
- is_chunked = 0;
- }
-
- if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) {
- ret = H5Pset_external(dcpl, FILE_EXTERNAL, (off_t)0, H5F_UNLIMITED);
- VRFY((ret >= 0), "set EXTERNAL file layout succeeded");
- is_chunked = 0;
- }
-
- if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) {
- sid = H5Screate(H5S_NULL);
-        VRFY((sid >= 0), "H5Screate succeeded");
- is_chunked = 0;
- }
- else {
- /* Create the basic Space */
- /* if this is a compact dataset, create a small dataspace that does not exceed 64K */
- if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT) {
- dims[0] = ROW_FACTOR * 6;
- dims[1] = COL_FACTOR * 6;
- }
- else {
- dims[0] = (hsize_t)dim0;
- dims[1] = (hsize_t)dim1;
- }
- sid = H5Screate_simple(RANK, dims, NULL);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
- }
-
- filename = (const char *)PARATESTFILE /* GetTestParameters() */;
- assert(filename != NULL);
-
- /* Setup the file access template */
- fapl = create_faccess_plist(mpi_comm, mpi_info, l_facc_type);
- VRFY((fapl >= 0), "create_faccess_plist() succeeded");
-
- /* Create the file */
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
-
- VRFY((fid >= 0), "H5Fcreate succeeded");
-
- /* If we are not testing contiguous datasets */
- if (is_chunked) {
- /* Set up chunk information. */
- chunk_dims[0] = dims[0] / (hsize_t)mpi_size;
- chunk_dims[1] = dims[1];
- ret = H5Pset_chunk(dcpl, 2, chunk_dims);
- VRFY((ret >= 0), "chunk creation property list succeeded");
- }
-
- /* Create the dataset */
- dataset = H5Dcreate2(fid, "nocolcause", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
-
- /*
- * Set expected causes and some tweaks based on the type of test
- */
- if (selection_mode & TEST_DATATYPE_CONVERSION) {
- test_name = "Broken Collective I/O - Datatype Conversion";
- no_collective_cause_local_expected |= H5D_MPIO_DATATYPE_CONVERSION;
- no_collective_cause_global_expected |= H5D_MPIO_DATATYPE_CONVERSION;
- /* set different sign to trigger type conversion */
- data_type = H5T_NATIVE_UINT;
- }
-
- if (selection_mode & TEST_DATA_TRANSFORMS) {
- test_name = "Broken Collective I/O - DATA Transforms";
- no_collective_cause_local_expected |= H5D_MPIO_DATA_TRANSFORMS;
- no_collective_cause_global_expected |= H5D_MPIO_DATA_TRANSFORMS;
- }
-
- if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) {
- test_name = "Broken Collective I/O - No Simple or Scalar DataSpace";
- no_collective_cause_local_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES;
- no_collective_cause_global_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES;
- }
-
- if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT ||
- selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) {
- test_name = "Broken Collective I/O - No CONTI or CHUNKED Dataset";
- no_collective_cause_local_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET;
- no_collective_cause_global_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET;
- }
-
- if (selection_mode & TEST_COLLECTIVE) {
- test_name = "Broken Collective I/O - Not Broken";
- no_collective_cause_local_expected = H5D_MPIO_COLLECTIVE;
- no_collective_cause_global_expected = H5D_MPIO_COLLECTIVE;
- }
-
- if (selection_mode & TEST_SET_INDEPENDENT) {
- test_name = "Broken Collective I/O - Independent";
- no_collective_cause_local_expected = H5D_MPIO_SET_INDEPENDENT;
- no_collective_cause_global_expected = H5D_MPIO_SET_INDEPENDENT;
- /* switch to independent io */
- is_independent = 1;
- }
-
- /* use all spaces for certain tests */
- if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES ||
- selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) {
- file_space = H5S_ALL;
- mem_space = H5S_ALL;
- }
- else {
- /* Get the file dataspace */
- file_space = H5Dget_space(dataset);
- VRFY((file_space >= 0), "H5Dget_space succeeded");
-
- /* Create the memory dataspace */
- mem_space = H5Screate_simple(RANK, dims, NULL);
- VRFY((mem_space >= 0), "mem_space created");
- }
-
- /* Get the number of elements in the selection */
- length = (int)(dims[0] * dims[1]);
-
- /* Allocate and initialize the buffer */
- buffer = (int *)malloc(sizeof(int) * (size_t)length);
- VRFY((buffer != NULL), "malloc of buffer succeeded");
- for (i = 0; i < length; i++)
- buffer[i] = i;
-
- /* Set up the dxpl for the write */
- dxpl_write = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
-
- if (is_independent) {
- /* Set Independent I/O */
- ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- }
- else {
- /* Set Collective I/O */
- ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- }
-
- if (selection_mode & TEST_DATA_TRANSFORMS) {
- ret = H5Pset_data_transform(dxpl_write, "x+1");
- VRFY((ret >= 0), "H5Pset_data_transform succeeded");
- }
-
- /*---------------------
- * Test Write access
- *---------------------*/
-
- /* Write */
- ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer);
- if (ret < 0)
- H5Eprint2(H5E_DEFAULT, stdout);
- VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
-
- /* Get the cause of broken collective I/O */
- ret = H5Pget_mpio_no_collective_cause(dxpl_write, &no_collective_cause_local_write,
- &no_collective_cause_global_write);
- VRFY((ret >= 0), "retrieving no collective cause succeeded");
-
- /*---------------------
- * Test Read access
- *---------------------*/
-
- /* Make a copy of the dxpl to test the read operation */
- dxpl_read = H5Pcopy(dxpl_write);
- VRFY((dxpl_read >= 0), "H5Pcopy succeeded");
-
- /* Read */
- ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer);
-
- if (ret < 0)
- H5Eprint2(H5E_DEFAULT, stdout);
- VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
-
- /* Get the cause of broken collective I/O */
- ret = H5Pget_mpio_no_collective_cause(dxpl_read, &no_collective_cause_local_read,
- &no_collective_cause_global_read);
- VRFY((ret >= 0), "retrieving no collective cause succeeded");
-
- /* Check write vs read */
- VRFY((no_collective_cause_local_read == no_collective_cause_local_write),
- "reading and writing are the same for local cause of Broken Collective I/O");
- VRFY((no_collective_cause_global_read == no_collective_cause_global_write),
- "reading and writing are the same for global cause of Broken Collective I/O");
-
- /* Test values */
- memset(message, 0, sizeof(message));
- snprintf(message, sizeof(message), "Local cause of Broken Collective I/O has the correct value for %s.\n",
- test_name);
- VRFY((no_collective_cause_local_write == no_collective_cause_local_expected), message);
- memset(message, 0, sizeof(message));
- snprintf(message, sizeof(message),
- "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name);
- VRFY((no_collective_cause_global_write == no_collective_cause_global_expected), message);
-
- /* Release some resources */
- if (sid)
- H5Sclose(sid);
- if (dcpl)
- H5Pclose(dcpl);
- if (dxpl_write)
- H5Pclose(dxpl_write);
- if (dxpl_read)
- H5Pclose(dxpl_read);
- if (dataset)
- H5Dclose(dataset);
- if (mem_space)
- H5Sclose(mem_space);
- if (file_space)
- H5Sclose(file_space);
- if (fid)
- H5Fclose(fid);
- free(buffer);
-
- /* clean up external file */
- if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL)
- H5Fdelete(FILE_EXTERNAL, fapl);
-
- if (fapl)
- H5Pclose(fapl);
-
- return;
-}
-
-/* Function: no_collective_cause_tests
- *
- * Purpose: Tests cases for broken collective IO.
- *
- */
-void
-no_collective_cause_tests(void)
-{
- /*
- * Test individual cause
- */
- test_no_collective_cause_mode(TEST_COLLECTIVE);
- test_no_collective_cause_mode(TEST_SET_INDEPENDENT);
- test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION);
- test_no_collective_cause_mode(TEST_DATA_TRANSFORMS);
- test_no_collective_cause_mode(TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES);
- test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT);
- test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL);
-
- /*
- * Test combined causes
- */
- test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION);
- test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS);
- test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION |
- TEST_DATA_TRANSFORMS);
-
- return;
-}
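-
-/*
- * A minimal, illustrative sketch (kept out of the build with "#if 0"; not part
- * of the tests above): decode the local cause bitmask returned by
- * H5Pget_mpio_no_collective_cause() after a transfer.  dxpl_id is assumed to
- * be the dataset transfer property list used for the H5Dread()/H5Dwrite().
- */
-#if 0
-static void
-print_no_collective_cause(hid_t dxpl_id)
-{
-    uint32_t local_cause  = 0;
-    uint32_t global_cause = 0;
-
-    if (H5Pget_mpio_no_collective_cause(dxpl_id, &local_cause, &global_cause) < 0)
-        return;
-
-    if (local_cause == H5D_MPIO_COLLECTIVE)
-        printf("collective I/O was not broken on this process\n");
-
-    /* The remaining causes are bit flags and may appear combined */
-    if (local_cause & H5D_MPIO_SET_INDEPENDENT)
-        printf("independent I/O was requested in the dxpl\n");
-    if (local_cause & H5D_MPIO_DATATYPE_CONVERSION)
-        printf("a datatype conversion broke collective I/O\n");
-    if (local_cause & H5D_MPIO_DATA_TRANSFORMS)
-        printf("a data transform broke collective I/O\n");
-    if (local_cause & H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET)
-        printf("the dataset layout is neither contiguous nor chunked\n");
-}
-#endif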
-
-/*
- * Test consistency semantics of atomic mode
- */
-
-/*
- * Example of using the parallel HDF5 library to create a dataset,
- * where process 0 writes and the other processes read at the same
- * time. If atomic mode is set correctly, the other processes should
- * read either all the old values in the dataset or all the new ones,
- * never a mixture of the two.
- */
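-
-/*
- * Note: H5Fset_mpi_atomicity() is only meaningful when the file was opened
- * with the MPI-IO file driver (it maps onto MPI_File_set_atomicity()), which
- * is why the test below skips itself when facc_type is not FACC_MPIO.
- */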
-
-void
-dataset_atomicity(void)
-{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t sid; /* Dataspace ID */
- hid_t dataset1; /* Dataset IDs */
- hsize_t dims[RANK]; /* dataset dim sizes */
- int *write_buf = NULL; /* data buffer */
- int *read_buf = NULL; /* data buffer */
- int buf_size;
- hid_t dataset2;
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* Memory dataspace ID */
- hsize_t start[RANK];
- hsize_t stride[RANK];
- hsize_t count[RANK];
- hsize_t block[RANK];
- const char *filename;
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
- int i, j, k;
- bool atomicity = false;
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- dim0 = 64;
- dim1 = 32;
- filename = PARATESTFILE /* GetTestParameters() */;
- if (facc_type != FACC_MPIO) {
- printf("Atomicity tests will not work without the MPIO VFD\n");
- return;
- }
- if (VERBOSE_MED)
- printf("atomic writes to file %s\n", filename);
-
- /* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, basic dataset, or more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- buf_size = dim0 * dim1;
- /* allocate memory for data buffer */
- write_buf = (int *)calloc((size_t)buf_size, sizeof(int));
- VRFY((write_buf != NULL), "write_buf calloc succeeded");
- /* allocate memory for data buffer */
- read_buf = (int *)calloc((size_t)buf_size, sizeof(int));
- VRFY((read_buf != NULL), "read_buf calloc succeeded");
-
- /* setup file access template */
- acc_tpl = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_tpl >= 0), "");
-
- /* create the file collectively */
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
- VRFY((fid >= 0), "H5Fcreate succeeded");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /* setup dimensionality object */
- dims[0] = (hsize_t)dim0;
- dims[1] = (hsize_t)dim1;
- sid = H5Screate_simple(RANK, dims, NULL);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
-
- /* create datasets */
- dataset1 = H5Dcreate2(fid, DATASETNAME5, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
-
- dataset2 = H5Dcreate2(fid, DATASETNAME6, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
-
- /* initialize datasets to 0s */
- if (0 == mpi_rank) {
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf);
- VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
-
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf);
- VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
- }
-
- ret = H5Dclose(dataset1);
- VRFY((ret >= 0), "H5Dclose succeeded");
- ret = H5Dclose(dataset2);
- VRFY((ret >= 0), "H5Dclose succeeded");
- ret = H5Sclose(sid);
- VRFY((ret >= 0), "H5Sclose succeeded");
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
-
- MPI_Barrier(comm);
-
-    /* make sure setting atomicity fails on a file ID opened without the MPI-IO VFD */
-    /* only one process opens the file here (serially); file locking allows a single writer */
- if (MAINPROCESS) {
- fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
- VRFY((fid >= 0), "H5Fopen succeeded");
-
- /* should fail */
- H5E_BEGIN_TRY
- {
- ret = H5Fset_mpi_atomicity(fid, true);
- }
- H5E_END_TRY
- VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed");
-
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
- }
-
- MPI_Barrier(comm);
-
- /* setup file access template */
- acc_tpl = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_tpl >= 0), "");
-
- /* open the file collectively */
- fid = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl);
- VRFY((fid >= 0), "H5Fopen succeeded");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- ret = H5Fset_mpi_atomicity(fid, true);
- VRFY((ret >= 0), "H5Fset_mpi_atomicity succeeded");
-
- /* open dataset1 (contiguous case) */
- dataset1 = H5Dopen2(fid, DATASETNAME5, H5P_DEFAULT);
- VRFY((dataset1 >= 0), "H5Dopen2 succeeded");
-
- if (0 == mpi_rank) {
- for (i = 0; i < buf_size; i++) {
- write_buf[i] = 5;
- }
- }
- else {
- for (i = 0; i < buf_size; i++) {
- read_buf[i] = 8;
- }
- }
-
- /* check that the atomicity flag is set */
- ret = H5Fget_mpi_atomicity(fid, &atomicity);
- VRFY((ret >= 0), "atomcity get failed");
- VRFY((atomicity == true), "atomcity set failed");
-
- MPI_Barrier(comm);
-
- /* Process 0 writes contiguously to the entire dataset */
- if (0 == mpi_rank) {
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf);
- VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
- }
- /* The other processes read the entire dataset */
- else {
- ret = H5Dread(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf);
-        VRFY((ret >= 0), "H5Dread() dataset1 read succeeded");
- }
-
- if (VERBOSE_MED) {
- i = 0;
- j = 0;
- k = 0;
- for (i = 0; i < dim0; i++) {
- printf("\n");
- for (j = 0; j < dim1; j++)
- printf("%d ", read_buf[k++]);
- }
- }
-
- /* The processes that read the dataset must either read all values
- as 0 (read happened before process 0 wrote to dataset 1), or 5
- (read happened after process 0 wrote to dataset 1) */
- if (0 != mpi_rank) {
- int compare = read_buf[0];
-
- VRFY((compare == 0 || compare == 5),
- "Atomicity Test Failed Process %d: Value read should be 0 or 5\n");
- for (i = 1; i < buf_size; i++) {
- if (read_buf[i] != compare) {
- printf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, i,
- read_buf[i], compare);
- nerrors++;
- }
- }
- }
-
- ret = H5Dclose(dataset1);
- VRFY((ret >= 0), "H5D close succeeded");
-
- /* release data buffers */
- if (write_buf)
- free(write_buf);
- if (read_buf)
- free(read_buf);
-
- /* open dataset2 (non-contiguous case) */
- dataset2 = H5Dopen2(fid, DATASETNAME6, H5P_DEFAULT);
- VRFY((dataset2 >= 0), "H5Dopen2 succeeded");
-
- /* allocate memory for data buffer */
- write_buf = (int *)calloc((size_t)buf_size, sizeof(int));
- VRFY((write_buf != NULL), "write_buf calloc succeeded");
- /* allocate memory for data buffer */
- read_buf = (int *)calloc((size_t)buf_size, sizeof(int));
- VRFY((read_buf != NULL), "read_buf calloc succeeded");
-
- for (i = 0; i < buf_size; i++) {
- write_buf[i] = 5;
- }
- for (i = 0; i < buf_size; i++) {
- read_buf[i] = 8;
- }
-
- atomicity = false;
- /* check that the atomicity flag is set */
- ret = H5Fget_mpi_atomicity(fid, &atomicity);
- VRFY((ret >= 0), "atomcity get failed");
- VRFY((atomicity == true), "atomcity set failed");
-
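-    /* Each process selects the same regular pattern of mpi_size x mpi_size
-     * blocks, with a one-element gap between neighboring blocks in each
-     * dimension (stride = block + 1), so the selection is non-contiguous
-     * in the file.
-     */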
- block[0] = (hsize_t)(dim0 / mpi_size - 1);
- block[1] = (hsize_t)(dim1 / mpi_size - 1);
- stride[0] = block[0] + 1;
- stride[1] = block[1] + 1;
- count[0] = (hsize_t)mpi_size;
- count[1] = (hsize_t)mpi_size;
- start[0] = 0;
- start[1] = 0;
-
- /* create a file dataspace */
- file_dataspace = H5Dget_space(dataset2);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* create a memory dataspace */
- mem_dataspace = H5Screate_simple(RANK, dims, NULL);
- VRFY((mem_dataspace >= 0), "");
-
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- MPI_Barrier(comm);
-
- /* Process 0 writes to the dataset */
- if (0 == mpi_rank) {
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, write_buf);
- VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
- }
- /* All processes wait for the write to finish. This works because
- atomicity is set to true */
- MPI_Barrier(comm);
- /* The other processes read the entire dataset */
- if (0 != mpi_rank) {
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, read_buf);
- VRFY((ret >= 0), "H5Dread dataset2 succeeded");
- }
-
- if (VERBOSE_MED) {
- if (mpi_rank == 1) {
- i = 0;
- j = 0;
- k = 0;
- for (i = 0; i < dim0; i++) {
- printf("\n");
- for (j = 0; j < dim1; j++)
- printf("%d ", read_buf[k++]);
- }
- printf("\n");
- }
- }
-
-    /* The processes that read the dataset must read all values as 5, since
-       the barrier guarantees the read happened after process 0 wrote to dataset2 */
- if (0 != mpi_rank) {
- int compare;
- i = 0;
- j = 0;
- k = 0;
-
- compare = 5;
-
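-        /* Walk read_buf in row-major order, verifying only elements that fall
-         * inside the hyperslab selection; rows and columns that land in the
-         * one-element gaps between blocks are skipped, and the scan stops
-         * after the first mpi_rank blocks in each dimension.
-         */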
- for (i = 0; i < dim0; i++) {
- if (i >= mpi_rank * ((int)block[0] + 1)) {
- break;
- }
- if ((i + 1) % ((int)block[0] + 1) == 0) {
- k += dim1;
- continue;
- }
- for (j = 0; j < dim1; j++) {
- if (j >= mpi_rank * ((int)block[1] + 1)) {
- k += dim1 - mpi_rank * ((int)block[1] + 1);
- break;
- }
- if ((j + 1) % ((int)block[1] + 1) == 0) {
- k++;
- continue;
- }
- else if (compare != read_buf[k]) {
- printf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank,
- k, read_buf[k], compare);
- nerrors++;
- }
- k++;
- }
- }
- }
-
- ret = H5Dclose(dataset2);
- VRFY((ret >= 0), "H5Dclose succeeded");
- ret = H5Sclose(file_dataspace);
- VRFY((ret >= 0), "H5Sclose succeeded");
- ret = H5Sclose(mem_dataspace);
- VRFY((ret >= 0), "H5Sclose succeeded");
-
- /* release data buffers */
- if (write_buf)
- free(write_buf);
- if (read_buf)
- free(read_buf);
-
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
-}
-
-/* Function: dense_attr_test
- *
- * Purpose: Test cases for writing dense attributes in parallel
- *
- */
-void
-test_dense_attr(void)
-{
- int mpi_size, mpi_rank;
- hid_t fpid, fid;
- hid_t gid, gpid;
- hid_t atFileSpace, atid;
- hsize_t atDims[1] = {10000};
- herr_t status;
- const char *filename;
-
- /* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, group, dataset, or attribute aren't supported with "
- "this connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- /* get filename */
- filename = (const char *)PARATESTFILE /* GetTestParameters() */;
- assert(filename != NULL);
-
- fpid = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((fpid > 0), "H5Pcreate succeeded");
- status = H5Pset_libver_bounds(fpid, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
- VRFY((status >= 0), "H5Pset_libver_bounds succeeded");
- status = H5Pset_fapl_mpio(fpid, MPI_COMM_WORLD, MPI_INFO_NULL);
- VRFY((status >= 0), "H5Pset_fapl_mpio succeeded");
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fpid);
- VRFY((fid > 0), "H5Fcreate succeeded");
- status = H5Pclose(fpid);
- VRFY((status >= 0), "H5Pclose succeeded");
-
- gpid = H5Pcreate(H5P_GROUP_CREATE);
- VRFY((gpid > 0), "H5Pcreate succeeded");
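-    /* Setting both attribute phase-change thresholds (max_compact and
-     * min_dense) to zero forces attributes on this group to use dense
-     * storage from the start, which is what this test exercises in parallel.
-     */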
- status = H5Pset_attr_phase_change(gpid, 0, 0);
- VRFY((status >= 0), "H5Pset_attr_phase_change succeeded");
- gid = H5Gcreate2(fid, "foo", H5P_DEFAULT, gpid, H5P_DEFAULT);
- VRFY((gid > 0), "H5Gcreate2 succeeded");
- status = H5Pclose(gpid);
- VRFY((status >= 0), "H5Pclose succeeded");
-
- atFileSpace = H5Screate_simple(1, atDims, NULL);
- VRFY((atFileSpace > 0), "H5Screate_simple succeeded");
- atid = H5Acreate2(gid, "bar", H5T_STD_U64LE, atFileSpace, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((atid > 0), "H5Acreate succeeded");
- status = H5Sclose(atFileSpace);
- VRFY((status >= 0), "H5Sclose succeeded");
-
- status = H5Aclose(atid);
- VRFY((status >= 0), "H5Aclose succeeded");
-
- status = H5Gclose(gid);
- VRFY((status >= 0), "H5Gclose succeeded");
- status = H5Fclose(fid);
- VRFY((status >= 0), "H5Fclose succeeded");
-
- return;
-}
diff --git a/testpar/API/t_file.c b/testpar/API/t_file.c
deleted file mode 100644
index 61d009c..0000000
--- a/testpar/API/t_file.c
+++ /dev/null
@@ -1,1044 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
- * Parallel tests for file operations
- */
-
-#include "hdf5.h"
-#include "testphdf5.h"
-
-#if 0
-#include "H5CXprivate.h" /* API Contexts */
-#include "H5Iprivate.h"
-#include "H5PBprivate.h"
-
-/*
- * This file needs to access private information from the H5F package.
- */
-#define H5AC_FRIEND /*suppress error about including H5ACpkg */
-#include "H5ACpkg.h"
-#define H5C_FRIEND /*suppress error about including H5Cpkg */
-#include "H5Cpkg.h"
-#define H5F_FRIEND /*suppress error about including H5Fpkg */
-#define H5F_TESTING
-#include "H5Fpkg.h"
-#define H5MF_FRIEND /*suppress error about including H5MFpkg */
-#include "H5MFpkg.h"
-#endif
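-
-/* The private-header accesses above are compiled out; the page-buffer test
- * and the create_file()/open_file() helpers that depend on them are likewise
- * wrapped in "#if 0" below.
- */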
-
-#define NUM_DSETS 5
-
-int mpi_size, mpi_rank;
-
-#if 0
-static int create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_strategy);
-static int open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t page_size,
- size_t page_buffer_size);
-#endif
-
-/*
- * Test file access with a communicator other than COMM_WORLD.
- * Split COMM_WORLD into two: one communicator (even_comm) contains the
- * original processes of even rank; the other (odd_comm) contains the
- * original processes of odd rank. Processes in even_comm create a file and
- * then close it, using even_comm. Processes in odd_comm just do a barrier
- * using odd_comm. Then all processes do a barrier using COMM_WORLD.
- * If the file creation and close do not perform the correct collective
- * actions according to the communicator argument, the processes will
- * eventually freeze up due to mismatched barriers.
- */
-void
-test_split_comm_access(void)
-{
- MPI_Comm comm;
- MPI_Info info = MPI_INFO_NULL;
- int is_old, mrc;
- int newrank, newprocs;
- hid_t fid; /* file IDs */
- hid_t acc_tpl; /* File access properties */
- herr_t ret; /* generic return value */
- const char *filename;
-
- filename = (const char *)PARATESTFILE /* GetTestParameters()*/;
- if (VERBOSE_MED)
- printf("Split Communicator access test on file %s\n", filename);
-
- /* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file aren't supported with this connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- is_old = mpi_rank % 2;
- mrc = MPI_Comm_split(MPI_COMM_WORLD, is_old, mpi_rank, &comm);
- VRFY((mrc == MPI_SUCCESS), "");
- MPI_Comm_size(comm, &newprocs);
- MPI_Comm_rank(comm, &newrank);
-
- if (is_old) {
- /* odd-rank processes */
- mrc = MPI_Barrier(comm);
- VRFY((mrc == MPI_SUCCESS), "");
- }
- else {
- /* even-rank processes */
- int sub_mpi_rank; /* rank in the sub-comm */
- MPI_Comm_rank(comm, &sub_mpi_rank);
-
- /* setup file access template */
- acc_tpl = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_tpl >= 0), "");
-
- /* create the file collectively */
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
- VRFY((fid >= 0), "H5Fcreate succeeded");
-
- /* close the file */
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "");
-
- /* delete the test file */
- ret = H5Fdelete(filename, acc_tpl);
- VRFY((ret >= 0), "H5Fdelete succeeded");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "");
- }
- mrc = MPI_Comm_free(&comm);
- VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free succeeded");
- mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc == MPI_SUCCESS), "final MPI_Barrier succeeded");
-}
-
-#if 0
-void
-test_page_buffer_access(void)
-{
- hid_t file_id = -1; /* File ID */
- hid_t fcpl, fapl;
- size_t page_count = 0;
- int i, num_elements = 200;
- haddr_t raw_addr, meta_addr;
- int *data;
- H5F_t *f = NULL;
- herr_t ret; /* generic return value */
- const char *filename;
- bool api_ctx_pushed = false; /* Whether API context pushed */
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-
- filename = (const char *)GetTestParameters();
-
- if (VERBOSE_MED)
- printf("Page Buffer Usage in Parallel %s\n", filename);
-
- fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- VRFY((fapl >= 0), "create_faccess_plist succeeded");
- fcpl = H5Pcreate(H5P_FILE_CREATE);
- VRFY((fcpl >= 0), "");
-
- ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)0);
- VRFY((ret == 0), "");
- ret = H5Pset_file_space_page_size(fcpl, sizeof(int) * 128);
- VRFY((ret == 0), "");
- ret = H5Pset_page_buffer_size(fapl, sizeof(int) * 100000, 0, 0);
- VRFY((ret == 0), "");
-
- /* This should fail because collective metadata writes are not supported with page buffering */
- H5E_BEGIN_TRY
- {
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
- }
- H5E_END_TRY
- VRFY((file_id < 0), "H5Fcreate failed");
-
- /* disable collective metadata writes for page buffering to work */
- ret = H5Pset_coll_metadata_write(fapl, false);
- VRFY((ret >= 0), "");
-
- ret = create_file(filename, fcpl, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
- VRFY((ret == 0), "");
- ret = open_file(filename, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, sizeof(int) * 100,
- sizeof(int) * 100000);
- VRFY((ret == 0), "");
-
- ret = create_file(filename, fcpl, fapl, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
- VRFY((ret == 0), "");
- ret = open_file(filename, fapl, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, sizeof(int) * 100,
- sizeof(int) * 100000);
- VRFY((ret == 0), "");
-
- ret = H5Pset_file_space_page_size(fcpl, sizeof(int) * 100);
- VRFY((ret == 0), "");
-
- data = (int *)malloc(sizeof(int) * (size_t)num_elements);
-
- /* initialize all the elements to have a value of -1 */
- for (i = 0; i < num_elements; i++)
- data[i] = -1;
- if (MAINPROCESS) {
- hid_t fapl_self = H5I_INVALID_HID;
- fapl_self = create_faccess_plist(MPI_COMM_SELF, MPI_INFO_NULL, facc_type);
-
- ret = H5Pset_page_buffer_size(fapl_self, sizeof(int) * 1000, 0, 0);
- VRFY((ret == 0), "");
- /* collective metadata writes do not work with page buffering */
- ret = H5Pset_coll_metadata_write(fapl_self, false);
- VRFY((ret >= 0), "");
-
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl_self);
- VRFY((file_id >= 0), "");
-
- /* Push API context */
- ret = H5CX_push();
- VRFY((ret == 0), "H5CX_push()");
- api_ctx_pushed = true;
-
- /* Get a pointer to the internal file object */
- f = (H5F_t *)H5I_object(file_id);
-
- VRFY((f->shared->page_buf != NULL), "Page Buffer created with 1 process");
-
- /* allocate space for 200 raw elements */
- raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int) * (size_t)num_elements);
- VRFY((raw_addr != HADDR_UNDEF), "");
-
- /* allocate space for 200 metadata elements */
- meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, sizeof(int) * (size_t)num_elements);
- VRFY((meta_addr != HADDR_UNDEF), "");
-
- page_count = 0;
-
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data);
- VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data);
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * (size_t)num_elements, data);
- VRFY((ret == 0), "");
-
- VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
-
- /* update the first 50 elements */
- for (i = 0; i < 50; i++)
- data[i] = i;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
- H5Eprint2(H5E_DEFAULT, stderr);
- VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
- VRFY((ret == 0), "");
- page_count += 2;
- VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
-
- /* update the second 50 elements */
- for (i = 0; i < 50; i++)
- data[i] = i + 50;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 50), sizeof(int) * 50, data);
- VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 50), sizeof(int) * 50, data);
- VRFY((ret == 0), "");
- VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
-
- /* update 100 - 200 */
- for (i = 0; i < 100; i++)
- data[i] = i + 100;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 100), sizeof(int) * 100, data);
- VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 100), sizeof(int) * 100, data);
- VRFY((ret == 0), "");
- VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
-
- ret = H5PB_flush(f->shared);
- VRFY((ret == 0), "");
-
- /* read elements 0 - 200 */
- ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 200, data);
- VRFY((ret == 0), "");
- VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i = 0; i < 200; i++)
- VRFY((data[i] == i), "Read different values than written");
- ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 200, data);
- VRFY((ret == 0), "");
- VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i = 0; i < 200; i++)
- VRFY((data[i] == i), "Read different values than written");
-
- /* read elements 0 - 50 */
- ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
- VRFY((ret == 0), "");
- VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i = 0; i < 50; i++)
- VRFY((data[i] == i), "Read different values than written");
- ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
- VRFY((ret == 0), "");
- VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i = 0; i < 50; i++)
- VRFY((data[i] == i), "Read different values than written");
-
- /* close the file */
- ret = H5Fclose(file_id);
- VRFY((ret >= 0), "H5Fclose succeeded");
- ret = H5Pclose(fapl_self);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /* Pop API context */
- if (api_ctx_pushed) {
- ret = H5CX_pop(false);
- VRFY((ret == 0), "H5CX_pop()");
- api_ctx_pushed = false;
- }
- }
-
- MPI_Barrier(MPI_COMM_WORLD);
-
- if (mpi_size > 1) {
- ret = H5Pset_page_buffer_size(fapl, sizeof(int) * 1000, 0, 0);
- VRFY((ret == 0), "");
- /* collective metadata writes do not work with page buffering */
- ret = H5Pset_coll_metadata_write(fapl, false);
- VRFY((ret >= 0), "");
-
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
- VRFY((file_id >= 0), "");
-
- /* Push API context */
- ret = H5CX_push();
- VRFY((ret == 0), "H5CX_push()");
- api_ctx_pushed = true;
-
- /* Get a pointer to the internal file object */
- f = (H5F_t *)H5I_object(file_id);
-
-        VRFY((f->shared->page_buf != NULL), "Page Buffer created with more than 1 process");
-
- /* allocate space for 200 raw elements */
- raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int) * (size_t)num_elements);
- VRFY((raw_addr != HADDR_UNDEF), "");
- /* allocate space for 200 metadata elements */
- meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, sizeof(int) * (size_t)num_elements);
- VRFY((meta_addr != HADDR_UNDEF), "");
-
- page_count = 0;
-
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data);
- VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * (size_t)num_elements, data);
- VRFY((ret == 0), "");
-
- VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
-
- /* update the first 50 elements */
- for (i = 0; i < 50; i++)
- data[i] = i;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
- VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
- VRFY((ret == 0), "");
- VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
-
- /* update the second 50 elements */
- for (i = 0; i < 50; i++)
- data[i] = i + 50;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 50), sizeof(int) * 50, data);
- VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 50), sizeof(int) * 50, data);
- VRFY((ret == 0), "");
- VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
-
- /* update 100 - 200 */
- for (i = 0; i < 100; i++)
- data[i] = i + 100;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 100), sizeof(int) * 100, data);
- VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 100), sizeof(int) * 100, data);
- VRFY((ret == 0), "");
- VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
-
- ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
- VRFY((ret == 0), "");
-
- /* read elements 0 - 200 */
- ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 200, data);
- VRFY((ret == 0), "");
- VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i = 0; i < 200; i++)
- VRFY((data[i] == i), "Read different values than written");
- ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 200, data);
- VRFY((ret == 0), "");
- VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i = 0; i < 200; i++)
- VRFY((data[i] == i), "Read different values than written");
-
- /* read elements 0 - 50 */
- ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
- VRFY((ret == 0), "");
- VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i = 0; i < 50; i++)
- VRFY((data[i] == i), "Read different values than written");
- ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
- VRFY((ret == 0), "");
- page_count += 1;
- VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i = 0; i < 50; i++)
- VRFY((data[i] == i), "Read different values than written");
-
- MPI_Barrier(MPI_COMM_WORLD);
- /* reset the first 50 elements to -1*/
- for (i = 0; i < 50; i++)
- data[i] = -1;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
- VRFY((ret == 0), "");
- VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
- VRFY((ret == 0), "");
- VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
-
- /* read elements 0 - 50 */
- ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
- VRFY((ret == 0), "");
- VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i = 0; i < 50; i++)
- VRFY((data[i] == -1), "Read different values than written");
- ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
- VRFY((ret == 0), "");
- VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i = 0; i < 50; i++)
- VRFY((data[i] == -1), "Read different values than written");
-
- /* close the file */
- ret = H5Fclose(file_id);
- VRFY((ret >= 0), "H5Fclose succeeded");
- }
-
- ret = H5Pclose(fapl);
- VRFY((ret >= 0), "H5Pclose succeeded");
- ret = H5Pclose(fcpl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /* Pop API context */
- if (api_ctx_pushed) {
- ret = H5CX_pop(false);
- VRFY((ret == 0), "H5CX_pop()");
- api_ctx_pushed = false;
- }
-
- free(data);
- data = NULL;
- MPI_Barrier(MPI_COMM_WORLD);
-}
-
-static int
-create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_strategy)
-{
- hid_t file_id, dset_id, grp_id;
- hid_t sid, mem_dataspace;
- hsize_t start[RANK];
- hsize_t count[RANK];
- hsize_t stride[RANK];
- hsize_t block[RANK];
- DATATYPE *data_array = NULL;
- hsize_t dims[RANK], i;
- hsize_t num_elements;
- int k;
- char dset_name[20];
- H5F_t *f = NULL;
- H5C_t *cache_ptr = NULL;
- H5AC_cache_config_t config;
- bool api_ctx_pushed = false; /* Whether API context pushed */
- herr_t ret;
-
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
- VRFY((file_id >= 0), "");
-
- ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
- VRFY((ret == 0), "");
-
- /* Push API context */
- ret = H5CX_push();
- VRFY((ret == 0), "H5CX_push()");
- api_ctx_pushed = true;
-
- f = (H5F_t *)H5I_object(file_id);
- VRFY((f != NULL), "");
-
- cache_ptr = f->shared->cache;
- VRFY((cache_ptr->magic == H5C__H5C_T_MAGIC), "");
-
- cache_ptr->ignore_tags = true;
- H5C_stats__reset(cache_ptr);
- config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
-
- ret = H5AC_get_cache_auto_resize_config(cache_ptr, &config);
- VRFY((ret == 0), "");
-
- config.metadata_write_strategy = metadata_write_strategy;
-
- ret = H5AC_set_cache_auto_resize_config(cache_ptr, &config);
- VRFY((ret == 0), "");
-
- grp_id = H5Gcreate2(file_id, "GROUP", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((grp_id >= 0), "");
-
- dims[0] = (hsize_t)(ROW_FACTOR * mpi_size);
- dims[1] = (hsize_t)(COL_FACTOR * mpi_size);
- sid = H5Screate_simple(RANK, dims, NULL);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
-
-    /* Each process takes a slab of rows. */
- block[0] = dims[0] / (hsize_t)mpi_size;
- block[1] = dims[1];
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = (hsize_t)mpi_rank * block[0];
- start[1] = 0;
-
- num_elements = block[0] * block[1];
- /* allocate memory for data buffer */
- data_array = (DATATYPE *)malloc(num_elements * sizeof(DATATYPE));
- VRFY((data_array != NULL), "data_array malloc succeeded");
- /* put some trivial data in the data_array */
- for (i = 0; i < num_elements; i++)
- data_array[i] = mpi_rank + 1;
-
- ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(1, &num_elements, NULL);
- VRFY((mem_dataspace >= 0), "");
-
- for (k = 0; k < NUM_DSETS; k++) {
- snprintf(dset_name, sizeof(dset_name), "D1dset%d", k);
- dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dset_id >= 0), "");
- ret = H5Dclose(dset_id);
- VRFY((ret == 0), "");
-
- snprintf(dset_name, sizeof(dset_name), "D2dset%d", k);
- dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dset_id >= 0), "");
- ret = H5Dclose(dset_id);
- VRFY((ret == 0), "");
-
- snprintf(dset_name, sizeof(dset_name), "D3dset%d", k);
- dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dset_id >= 0), "");
- ret = H5Dclose(dset_id);
- VRFY((ret == 0), "");
-
- snprintf(dset_name, sizeof(dset_name), "dset%d", k);
- dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dset_id >= 0), "");
-
- ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
- VRFY((ret == 0), "");
-
- ret = H5Dclose(dset_id);
- VRFY((ret == 0), "");
-
- memset(data_array, 0, num_elements * sizeof(DATATYPE));
- dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT);
- VRFY((dset_id >= 0), "");
-
- ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
- VRFY((ret == 0), "");
-
- ret = H5Dclose(dset_id);
- VRFY((ret == 0), "");
-
- for (i = 0; i < num_elements; i++)
- VRFY((data_array[i] == mpi_rank + 1), "Dataset Verify failed");
-
- snprintf(dset_name, sizeof(dset_name), "D1dset%d", k);
- ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
- VRFY((ret == 0), "");
- snprintf(dset_name, sizeof(dset_name), "D2dset%d", k);
- ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
- VRFY((ret == 0), "");
- snprintf(dset_name, sizeof(dset_name), "D3dset%d", k);
- ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
- VRFY((ret == 0), "");
- }
-
- ret = H5Gclose(grp_id);
- VRFY((ret == 0), "");
- ret = H5Fclose(file_id);
- VRFY((ret == 0), "");
- ret = H5Sclose(sid);
- VRFY((ret == 0), "");
- ret = H5Sclose(mem_dataspace);
- VRFY((ret == 0), "");
-
- /* Pop API context */
- if (api_ctx_pushed) {
- ret = H5CX_pop(false);
- VRFY((ret == 0), "H5CX_pop()");
- api_ctx_pushed = false;
- }
-
- MPI_Barrier(MPI_COMM_WORLD);
- free(data_array);
- return 0;
-} /* create_file */
-
-static int
-open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t page_size,
- size_t page_buffer_size)
-{
- hid_t file_id, dset_id, grp_id, grp_id2;
- hid_t sid, mem_dataspace;
- DATATYPE *data_array = NULL;
- hsize_t dims[RANK];
- hsize_t start[RANK];
- hsize_t count[RANK];
- hsize_t stride[RANK];
- hsize_t block[RANK];
- int i, k, ndims;
- hsize_t num_elements;
- char dset_name[20];
- H5F_t *f = NULL;
- H5C_t *cache_ptr = NULL;
- H5AC_cache_config_t config;
- bool api_ctx_pushed = false; /* Whether API context pushed */
- herr_t ret;
-
- config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
- ret = H5Pget_mdc_config(fapl, &config);
- VRFY((ret == 0), "");
-
- config.metadata_write_strategy = metadata_write_strategy;
-
-    ret = H5Pset_mdc_config(fapl, &config);
- VRFY((ret == 0), "");
-
- file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl);
- H5Eprint2(H5E_DEFAULT, stderr);
- VRFY((file_id >= 0), "");
-
- /* Push API context */
- ret = H5CX_push();
- VRFY((ret == 0), "H5CX_push()");
- api_ctx_pushed = true;
-
- ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
- VRFY((ret == 0), "");
-
- f = (H5F_t *)H5I_object(file_id);
- VRFY((f != NULL), "");
-
- cache_ptr = f->shared->cache;
- VRFY((cache_ptr->magic == H5C__H5C_T_MAGIC), "");
-
- MPI_Barrier(MPI_COMM_WORLD);
-
- VRFY((f->shared->page_buf != NULL), "");
- VRFY((f->shared->page_buf->page_size == page_size), "");
- VRFY((f->shared->page_buf->max_size == page_buffer_size), "");
-
- grp_id = H5Gopen2(file_id, "GROUP", H5P_DEFAULT);
- VRFY((grp_id >= 0), "");
-
- dims[0] = (hsize_t)(ROW_FACTOR * mpi_size);
- dims[1] = (hsize_t)(COL_FACTOR * mpi_size);
-
-    /* Each process takes a slab of rows. */
- block[0] = dims[0] / (hsize_t)mpi_size;
- block[1] = dims[1];
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = (hsize_t)mpi_rank * block[0];
- start[1] = 0;
-
- num_elements = block[0] * block[1];
- /* allocate memory for data buffer */
- data_array = (DATATYPE *)malloc(num_elements * sizeof(DATATYPE));
- VRFY((data_array != NULL), "data_array malloc succeeded");
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(1, &num_elements, NULL);
- VRFY((mem_dataspace >= 0), "");
-
- for (k = 0; k < NUM_DSETS; k++) {
- snprintf(dset_name, sizeof(dset_name), "dset%d", k);
- dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT);
- VRFY((dset_id >= 0), "");
-
- sid = H5Dget_space(dset_id);
-        VRFY((sid >= 0), "H5Dget_space succeeded");
-
- ndims = H5Sget_simple_extent_dims(sid, dims, NULL);
- VRFY((ndims == 2), "H5Sget_simple_extent_dims succeeded");
- VRFY(dims[0] == (hsize_t)(ROW_FACTOR * mpi_size), "Wrong dataset dimensions");
- VRFY(dims[1] == (hsize_t)(COL_FACTOR * mpi_size), "Wrong dataset dimensions");
-
- ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
- VRFY((ret >= 0), "");
-
- ret = H5Dclose(dset_id);
- VRFY((ret >= 0), "");
- ret = H5Sclose(sid);
- VRFY((ret == 0), "");
-
- for (i = 0; i < (int)num_elements; i++)
- VRFY((data_array[i] == mpi_rank + 1), "Dataset Verify failed");
- }
-
- grp_id2 = H5Gcreate2(file_id, "GROUP/GROUP2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((grp_id2 >= 0), "");
- ret = H5Gclose(grp_id2);
- VRFY((ret == 0), "");
-
- ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
- VRFY((ret == 0), "");
-
- MPI_Barrier(MPI_COMM_WORLD);
- /* Walk the metadata cache's hash table and expunge each entry that is not
- * pinned or protected (all entries should be clean after the flush above).
- */
- for (i = 0; i < H5C__HASH_TABLE_LEN; i++) {
- H5C_cache_entry_t *entry_ptr = NULL;
-
- entry_ptr = cache_ptr->index[i];
-
- while (entry_ptr != NULL) {
- assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- assert(entry_ptr->is_dirty == false);
-
- if (!entry_ptr->is_pinned && !entry_ptr->is_protected) {
- ret = H5AC_expunge_entry(f, entry_ptr->type, entry_ptr->addr, 0);
- VRFY((ret == 0), "");
- }
-
- entry_ptr = entry_ptr->ht_next;
- }
- }
- MPI_Barrier(MPI_COMM_WORLD);
-
- grp_id2 = H5Gopen2(file_id, "GROUP/GROUP2", H5P_DEFAULT);
- H5Eprint2(H5E_DEFAULT, stderr);
- VRFY((grp_id2 >= 0), "");
- ret = H5Gclose(grp_id2);
- H5Eprint2(H5E_DEFAULT, stderr);
- VRFY((ret == 0), "");
-
- ret = H5Gclose(grp_id);
- VRFY((ret == 0), "");
- ret = H5Fclose(file_id);
- VRFY((ret == 0), "");
- ret = H5Sclose(mem_dataspace);
- VRFY((ret == 0), "");
-
- /* Pop API context */
- if (api_ctx_pushed) {
- ret = H5CX_pop(false);
- VRFY((ret == 0), "H5CX_pop()");
- api_ctx_pushed = false;
- }
-
- free(data_array);
-
- return nerrors;
-}
-#endif
-
-/*
- * NOTE: See HDFFV-10894 and add tests later to verify MPI-specific properties in the
- * incoming fapl that could conflict with the existing values in H5F_shared_t on
- * multiple opens of the same file.
- */
-void
-test_file_properties(void)
-{
- hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */
- hid_t fapl_id = H5I_INVALID_HID; /* File access plist */
- hid_t fapl_copy_id = H5I_INVALID_HID; /* File access plist */
- bool is_coll;
- htri_t are_equal;
- const char *filename;
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
- MPI_Comm comm_out = MPI_COMM_NULL;
- MPI_Info info_out = MPI_INFO_NULL;
- herr_t ret; /* Generic return value */
- int mpi_ret; /* MPI return value */
- int cmp; /* Compare value */
-
- /* set up MPI parameters */
- mpi_ret = MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- VRFY((mpi_ret >= 0), "MPI_Comm_size succeeded");
- mpi_ret = MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- VRFY((mpi_ret >= 0), "MPI_Comm_rank succeeded");
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file aren't supported with this connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- filename = (const char *)PARATESTFILE /* GetTestParameters() */;
-
- mpi_ret = MPI_Info_create(&info);
- VRFY((mpi_ret >= 0), "MPI_Info_create succeeded");
- mpi_ret = MPI_Info_set(info, "hdf_info_prop1", "xyz");
- VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set");
-
- /* setup file access plist */
- fapl_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((fapl_id != H5I_INVALID_HID), "H5Pcreate");
- ret = H5Pset_fapl_mpio(fapl_id, comm, info);
- VRFY((ret >= 0), "H5Pset_fapl_mpio");
-
- /* Check getting and setting MPI properties
- * (for use in VOL connectors, not the MPI-I/O VFD)
- */
- ret = H5Pset_mpi_params(fapl_id, comm, info);
- VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
- ret = H5Pget_mpi_params(fapl_id, &comm_out, &info_out);
- VRFY((ret >= 0), "H5Pget_mpi_params succeeded");
-
- /* Check the communicator */
- VRFY((comm != comm_out), "Communicators should not be bitwise identical");
- cmp = MPI_UNEQUAL;
- mpi_ret = MPI_Comm_compare(comm, comm_out, &cmp);
- VRFY((ret >= 0), "MPI_Comm_compare succeeded");
- VRFY((cmp == MPI_CONGRUENT), "Communicators should be congruent via MPI_Comm_compare");
-
- /* Check the info object */
- VRFY((info != info_out), "Info objects should not be bitwise identical");
-
- /* Free the obtained comm and info object */
- mpi_ret = MPI_Comm_free(&comm_out);
- VRFY((mpi_ret >= 0), "MPI_Comm_free succeeded");
- mpi_ret = MPI_Info_free(&info_out);
- VRFY((mpi_ret >= 0), "MPI_Info_free succeeded");
-
- /* Copy the fapl and ensure it's equal to the original */
- fapl_copy_id = H5Pcopy(fapl_id);
- VRFY((fapl_copy_id != H5I_INVALID_HID), "H5Pcopy");
- are_equal = H5Pequal(fapl_id, fapl_copy_id);
- VRFY((true == are_equal), "H5Pequal");
-
- /* Add a property to the copy and ensure it's different now */
- mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "abc");
- VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set");
- ret = H5Pset_mpi_params(fapl_copy_id, comm, info);
- VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
- are_equal = H5Pequal(fapl_id, fapl_copy_id);
- VRFY((false == are_equal), "H5Pequal");
-
- /* Add a property with the same key but a different value to the original
- * and ensure they are still different.
- */
- mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "ijk");
- VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set");
- ret = H5Pset_mpi_params(fapl_id, comm, info);
- VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
- are_equal = H5Pequal(fapl_id, fapl_copy_id);
- VRFY((false == are_equal), "H5Pequal");
-
- /* Set the second property in the original to the same
- * value as the copy and ensure they are the same now.
- */
- mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "abc");
- VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set");
- ret = H5Pset_mpi_params(fapl_id, comm, info);
- VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
- are_equal = H5Pequal(fapl_id, fapl_copy_id);
- VRFY((true == are_equal), "H5Pequal");
-
- /* create the file */
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
- VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded");
-
- /* verify settings for file access properties */
-
- /* Collective metadata writes */
- ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
- VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
- VRFY((is_coll == false), "Incorrect property setting for coll metadata writes");
-
- /* Collective metadata read API calling requirement */
- ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
- VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
- VRFY((is_coll == false), "Incorrect property setting for coll metadata API calls requirement");
-
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
-
- /* Open the file with the MPI-IO driver */
- ret = H5Pset_fapl_mpio(fapl_id, comm, info);
- VRFY((ret >= 0), "H5Pset_fapl_mpio failed");
- fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
- VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded");
-
- /* verify settings for file access properties */
-
- /* Collective metadata writes */
- ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
- VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
- VRFY((is_coll == false), "Incorrect property setting for coll metadata writes");
-
- /* Collective metadata read API calling requirement */
- ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
- VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
- VRFY((is_coll == false), "Incorrect property setting for coll metadata API calls requirement");
-
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
-
- /* Open the file with the MPI-IO driver w/ collective settings */
- ret = H5Pset_fapl_mpio(fapl_id, comm, info);
- VRFY((ret >= 0), "H5Pset_fapl_mpio failed");
- /* Collective metadata writes */
- ret = H5Pset_coll_metadata_write(fapl_id, true);
- VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
- /* Collective metadata read API calling requirement */
- ret = H5Pset_all_coll_metadata_ops(fapl_id, true);
- VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
- fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
- VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded");
-
- /* verify settings for file access properties */
-
- /* Collective metadata writes */
- ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
- VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
- VRFY((is_coll == true), "Incorrect property setting for coll metadata writes");
-
- /* Collective metadata read API calling requirement */
- ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
- VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
- VRFY((is_coll == true), "Incorrect property setting for coll metadata API calls requirement");
-
- /* close fapl and retrieve it from file */
- ret = H5Pclose(fapl_id);
- VRFY((ret >= 0), "H5Pclose succeeded");
- fapl_id = H5I_INVALID_HID;
-
- fapl_id = H5Fget_access_plist(fid);
- VRFY((fapl_id != H5I_INVALID_HID), "H5P_FILE_ACCESS");
-
- /* verify settings for file access properties */
-
- /* Collective metadata writes */
- ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
- VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
- VRFY((is_coll == true), "Incorrect property setting for coll metadata writes");
-
- /* Collective metadata read API calling requirement */
- ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
- VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
- VRFY((is_coll == true), "Incorrect property setting for coll metadata API calls requirement");
-
- /* close file */
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
-
- /* Release file-access plist */
- ret = H5Pclose(fapl_id);
- VRFY((ret >= 0), "H5Pclose succeeded");
- ret = H5Pclose(fapl_copy_id);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /* Free the MPI info object */
- mpi_ret = MPI_Info_free(&info);
- VRFY((mpi_ret >= 0), "MPI_Info_free succeeded");
-
-} /* end test_file_properties() */
-
-void
-test_delete(void)
-{
- hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */
- hid_t fapl_id = H5I_INVALID_HID; /* File access plist */
- const char *filename = NULL;
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
- htri_t is_hdf5 = FAIL; /* Whether a file is an HDF5 file */
- herr_t ret; /* Generic return value */
-
- filename = (const char *)PARATESTFILE /* GetTestParameters() */;
-
- /* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file or file more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- /* setup file access plist */
- fapl_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((fapl_id != H5I_INVALID_HID), "H5Pcreate");
- ret = H5Pset_fapl_mpio(fapl_id, comm, info);
- VRFY((SUCCEED == ret), "H5Pset_fapl_mpio");
-
- /* create the file */
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
- VRFY((fid != H5I_INVALID_HID), "H5Fcreate");
-
- /* close the file */
- ret = H5Fclose(fid);
- VRFY((SUCCEED == ret), "H5Fclose");
-
- /* Verify that the file is an HDF5 file */
- is_hdf5 = H5Fis_accessible(filename, fapl_id);
- VRFY((true == is_hdf5), "H5Fis_accessible");
-
- /* Delete the file */
- ret = H5Fdelete(filename, fapl_id);
- VRFY((SUCCEED == ret), "H5Fdelete");
-
- /* Verify that the file is NO LONGER an HDF5 file */
- /* This should fail since there is no file */
- H5E_BEGIN_TRY
- {
- is_hdf5 = H5Fis_accessible(filename, fapl_id);
- }
- H5E_END_TRY
- VRFY((is_hdf5 != SUCCEED), "H5Fis_accessible");
-
- /* Release file-access plist */
- ret = H5Pclose(fapl_id);
- VRFY((SUCCEED == ret), "H5Pclose");
-
-} /* end test_delete() */
diff --git a/testpar/API/t_file_image.c b/testpar/API/t_file_image.c
deleted file mode 100644
index 3b582ad..0000000
--- a/testpar/API/t_file_image.c
+++ /dev/null
@@ -1,385 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
- * Parallel tests for file image operations
- */
-
-#include "hdf5.h"
-#include "testphdf5.h"
-
-/* file_image_daisy_chain_test
- *
- * Process zero:
- *
- * 1) Creates a core file with an integer vector data set of
- * length n (= mpi_size),
- *
- * 2) Initializes the vector to zero in location 0, and to -1
- * everywhere else.
- *
- * 3) Flushes the core file, and gets an image of it. Closes
- * the core file.
- *
- * 4) Sends the image to process 1.
- *
- * 5) Awaits receipt on a file image from process n-1.
- *
- * 6) opens the image received from process n-1, verifies that
- * it contains a vector of length equal to mpi_size, and
- * that the vector contains (0, 1, 2, ... n-1)
- *
- * 7) closes the core file and exits.
- *
- * Process i (0 < i < n)
- *
- * 1) Await receipt of file image from process (i - 1).
- *
- * 2) Open the image with the core file driver, verify that it
- * contains a vector v of length n, and that v[j] = j for
- * 0 <= j < i, and that v[j] == -1 for i <= j < n
- *
- * 3) Set v[i] = i in the core file.
- *
- * 4) Flush the core file and send it to process (i + 1) % n.
- *
- * 5) close the core file and exit.
- *
- * Test fails on a hang (if an image is not received), or on invalid data.
- *
- * JRM -- 11/28/11
- */
-void
-file_image_daisy_chain_test(void)
-{
- char file_name[1024] = "\0";
- int mpi_size, mpi_rank;
- int mpi_result;
- int i;
- int space_ndims;
- MPI_Status rcvstat;
- int *vector_ptr = NULL;
- hid_t fapl_id = -1;
- hid_t file_id; /* file IDs */
- hid_t dset_id = -1;
- hid_t dset_type_id = -1;
- hid_t space_id = -1;
- herr_t err;
- hsize_t dims[1];
- void *image_ptr = NULL;
- ssize_t bytes_read;
- ssize_t image_len;
- bool vector_ok = true;
- htri_t tri_result;
-
- /* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, dataset, or dataset more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- /* setup file name */
- snprintf(file_name, 1024, "file_image_daisy_chain_test_%05d.h5", (int)mpi_rank);
-
- if (mpi_rank == 0) {
-
- /* 1) Creates a core file with an integer vector data set
- * of length mpi_size,
- */
- fapl_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((fapl_id >= 0), "creating fapl");
-
- err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), false);
- VRFY((err >= 0), "setting core file driver in fapl.");
-
- file_id = H5Fcreate(file_name, 0, H5P_DEFAULT, fapl_id);
- VRFY((file_id >= 0), "created core file");
-
- dims[0] = (hsize_t)mpi_size;
- space_id = H5Screate_simple(1, dims, dims);
- VRFY((space_id >= 0), "created data space");
-
- dset_id = H5Dcreate2(file_id, "v", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dset_id >= 0), "created data set");
-
- /* 2) Initialize the vector to zero in location 0, and
- * to -1 everywhere else.
- */
-
- vector_ptr = (int *)malloc((size_t)(mpi_size) * sizeof(int));
- VRFY((vector_ptr != NULL), "allocated in memory representation of vector");
-
- vector_ptr[0] = 0;
- for (i = 1; i < mpi_size; i++)
- vector_ptr[i] = -1;
-
- err = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr);
- VRFY((err >= 0), "wrote initial data to vector.");
-
- free(vector_ptr);
- vector_ptr = NULL;
-
- /* 3) Flush the core file, and get an image of it. Close
- * the core file.
- */
- err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
- VRFY((err >= 0), "flushed core file.");
-
- image_len = H5Fget_file_image(file_id, NULL, (size_t)0);
- VRFY((image_len > 0), "got image file size");
-
- image_ptr = (void *)malloc((size_t)image_len);
- VRFY(image_ptr != NULL, "allocated file image buffer.");
-
- bytes_read = H5Fget_file_image(file_id, image_ptr, (size_t)image_len);
- VRFY(bytes_read == image_len, "wrote file into image buffer");
-
- err = H5Sclose(space_id);
- VRFY((err >= 0), "closed data space.");
-
- err = H5Dclose(dset_id);
- VRFY((err >= 0), "closed data set.");
-
- err = H5Fclose(file_id);
- VRFY((err >= 0), "closed core file(1).");
-
- err = H5Pclose(fapl_id);
- VRFY((err >= 0), "closed fapl(1).");
-
- /* 4) Send the image to process 1. */
-
- mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, 1, 0, MPI_COMM_WORLD);
- VRFY((mpi_result == MPI_SUCCESS), "sent image size to process 1");
-
- mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len, MPI_BYTE, 1, 0, MPI_COMM_WORLD);
- VRFY((mpi_result == MPI_SUCCESS), "sent image to process 1");
-
- free(image_ptr);
- image_ptr = NULL;
- image_len = 0;
-
- /* 5) Await receipt on a file image from process n-1. */
-
- mpi_result = MPI_Recv((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, mpi_size - 1, 0,
- MPI_COMM_WORLD, &rcvstat);
- VRFY((mpi_result == MPI_SUCCESS), "received image len from process n-1");
-
- image_ptr = (void *)malloc((size_t)image_len);
- VRFY(image_ptr != NULL, "allocated file image receive buffer.");
-
- mpi_result =
- MPI_Recv((void *)image_ptr, (int)image_len, MPI_BYTE, mpi_size - 1, 0, MPI_COMM_WORLD, &rcvstat);
- VRFY((mpi_result == MPI_SUCCESS), "received file image from process n-1");
-
- /* 6) open the image received from process n-1, verify that
- * it contains a vector of length equal to mpi_size, and
- * that the vector contains (0, 1, 2, ... n-1).
- */
- fapl_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((fapl_id >= 0), "creating fapl");
-
- err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), false);
- VRFY((err >= 0), "setting core file driver in fapl.");
-
- err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len);
- VRFY((err >= 0), "set file image in fapl.");
-
- file_id = H5Fopen(file_name, H5F_ACC_RDWR, fapl_id);
- VRFY((file_id >= 0), "opened received file image file");
-
- dset_id = H5Dopen2(file_id, "v", H5P_DEFAULT);
- VRFY((dset_id >= 0), "opened data set");
-
- dset_type_id = H5Dget_type(dset_id);
- VRFY((dset_type_id >= 0), "obtained data set type");
-
- tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT);
- VRFY((tri_result == true), "verified data set type");
-
- space_id = H5Dget_space(dset_id);
- VRFY((space_id >= 0), "opened data space");
-
- space_ndims = H5Sget_simple_extent_ndims(space_id);
- VRFY((space_ndims == 1), "verified data space num dims(1)");
-
- space_ndims = H5Sget_simple_extent_dims(space_id, dims, NULL);
- VRFY((space_ndims == 1), "verified data space num dims(2)");
- VRFY((dims[0] == (hsize_t)mpi_size), "verified data space dims");
-
- vector_ptr = (int *)malloc((size_t)(mpi_size) * sizeof(int));
- VRFY((vector_ptr != NULL), "allocated in memory rep of vector");
-
- err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr);
- VRFY((err >= 0), "read received vector.");
-
- vector_ok = true;
- for (i = 0; i < mpi_size; i++)
- if (vector_ptr[i] != i)
- vector_ok = false;
- VRFY((vector_ok), "verified received vector.");
-
- free(vector_ptr);
- vector_ptr = NULL;
-
- /* 7) closes the core file and exit. */
-
- err = H5Sclose(space_id);
- VRFY((err >= 0), "closed data space.");
-
- err = H5Dclose(dset_id);
- VRFY((err >= 0), "closed data set.");
-
- err = H5Fclose(file_id);
- VRFY((err >= 0), "closed core file(1).");
-
- err = H5Pclose(fapl_id);
- VRFY((err >= 0), "closed fapl(1).");
-
- free(image_ptr);
- image_ptr = NULL;
- image_len = 0;
- }
- else {
- /* 1) Await receipt of file image from process (i - 1). */
-
- mpi_result = MPI_Recv((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, mpi_rank - 1, 0,
- MPI_COMM_WORLD, &rcvstat);
- VRFY((mpi_result == MPI_SUCCESS), "received image size from process mpi_rank-1");
-
- image_ptr = (void *)malloc((size_t)image_len);
- VRFY(image_ptr != NULL, "allocated file image receive buffer.");
-
- mpi_result =
- MPI_Recv((void *)image_ptr, (int)image_len, MPI_BYTE, mpi_rank - 1, 0, MPI_COMM_WORLD, &rcvstat);
- VRFY((mpi_result == MPI_SUCCESS), "received file image from process mpi_rank-1");
-
- /* 2) Open the image with the core file driver, verify that it
- * contains a vector v of length n, and that v[j] = j for
- * 0 <= j < i, and that v[j] == -1 for i <= j < n
- */
- fapl_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((fapl_id >= 0), "creating fapl");
-
- err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), false);
- VRFY((err >= 0), "setting core file driver in fapl.");
-
- err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len);
- VRFY((err >= 0), "set file image in fapl.");
-
- file_id = H5Fopen(file_name, H5F_ACC_RDWR, fapl_id);
- H5Eprint2(H5E_DEFAULT, stderr);
- VRFY((file_id >= 0), "opened received file image file");
-
- dset_id = H5Dopen2(file_id, "v", H5P_DEFAULT);
- VRFY((dset_id >= 0), "opened data set");
-
- dset_type_id = H5Dget_type(dset_id);
- VRFY((dset_type_id >= 0), "obtained data set type");
-
- tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT);
- VRFY((tri_result == true), "verified data set type");
-
- space_id = H5Dget_space(dset_id);
- VRFY((space_id >= 0), "opened data space");
-
- space_ndims = H5Sget_simple_extent_ndims(space_id);
- VRFY((space_ndims == 1), "verified data space num dims(1)");
-
- space_ndims = H5Sget_simple_extent_dims(space_id, dims, NULL);
- VRFY((space_ndims == 1), "verified data space num dims(2)");
- VRFY((dims[0] == (hsize_t)mpi_size), "verified data space dims");
-
- vector_ptr = (int *)malloc((size_t)(mpi_size) * sizeof(int));
- VRFY((vector_ptr != NULL), "allocated in memory rep of vector");
-
- err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr);
- VRFY((err >= 0), "read received vector.");
-
- vector_ok = true;
- for (i = 0; i < mpi_size; i++) {
- if (i < mpi_rank) {
- if (vector_ptr[i] != i)
- vector_ok = false;
- }
- else {
- if (vector_ptr[i] != -1)
- vector_ok = false;
- }
- }
- VRFY((vector_ok), "verified received vector.");
-
- /* 3) Set v[i] = i in the core file. */
-
- vector_ptr[mpi_rank] = mpi_rank;
-
- err = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr);
- VRFY((err >= 0), "wrote modified data to vector.");
-
- free(vector_ptr);
- vector_ptr = NULL;
-
- /* 4) Flush the core file and send it to process (mpi_rank + 1) % n. */
-
- err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
- VRFY((err >= 0), "flushed core file.");
-
- image_len = H5Fget_file_image(file_id, NULL, (size_t)0);
- VRFY((image_len > 0), "got (possibly modified) image file len");
-
- image_ptr = (void *)realloc((void *)image_ptr, (size_t)image_len);
- VRFY(image_ptr != NULL, "re-allocated file image buffer.");
-
- bytes_read = H5Fget_file_image(file_id, image_ptr, (size_t)image_len);
- VRFY(bytes_read == image_len, "wrote file into image buffer");
-
- mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE,
- (mpi_rank + 1) % mpi_size, 0, MPI_COMM_WORLD);
- VRFY((mpi_result == MPI_SUCCESS), "sent image size to process (mpi_rank + 1) % mpi_size");
-
- mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len, MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
- MPI_COMM_WORLD);
- VRFY((mpi_result == MPI_SUCCESS), "sent image to process (mpi_rank + 1) % mpi_size");
-
- free(image_ptr);
- image_ptr = NULL;
- image_len = 0;
-
- /* 5) close the core file and exit. */
-
- err = H5Sclose(space_id);
- VRFY((err >= 0), "closed data space.");
-
- err = H5Dclose(dset_id);
- VRFY((err >= 0), "closed data set.");
-
- err = H5Fclose(file_id);
- VRFY((err >= 0), "closed core file(1).");
-
- err = H5Pclose(fapl_id);
- VRFY((err >= 0), "closed fapl(1).");
- }
-
- return;
-
-} /* file_image_daisy_chain_test() */
diff --git a/testpar/API/t_filter_read.c b/testpar/API/t_filter_read.c
deleted file mode 100644
index 7275dd9..0000000
--- a/testpar/API/t_filter_read.c
+++ /dev/null
@@ -1,532 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
- * This verifies the correctness of parallel reading of a dataset that has been
- * written serially using filters.
- */
-
-#include "hdf5.h"
-#include "testphdf5.h"
-
-#ifdef H5_HAVE_SZLIB_H
-#include "szlib.h"
-#endif
-
-static int mpi_size, mpi_rank;
-
-/* Chunk sizes */
-#define CHUNK_DIM1 7
-#define CHUNK_DIM2 27
-
-/* Sizes of the vertical hyperslabs. Total dataset size is
- {HS_DIM1, HS_DIM2 * mpi_size } */
-#define HS_DIM1 200
-#define HS_DIM2 100
-
-#ifdef H5_HAVE_FILTER_SZIP
-
-/*-------------------------------------------------------------------------
- * Function: h5_szip_can_encode
- *
- * Purpose: Retrieve the filter config flags for szip, tell if
- * encoder is available.
- *
- * Return: 1: decode+encode is enabled
- * 0: only decode is enabled
- * -1: other
- *-------------------------------------------------------------------------
- */
-int
-h5_szip_can_encode(void)
-{
- unsigned int filter_config_flags;
-
- H5Zget_filter_info(H5Z_FILTER_SZIP, &filter_config_flags);
- if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) == 0) {
- /* filter present but neither encode nor decode is supported (???) */
- return -1;
- }
- else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) ==
- H5Z_FILTER_CONFIG_DECODE_ENABLED) {
- /* decoder only: read but not write */
- return 0;
- }
- else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) ==
- H5Z_FILTER_CONFIG_ENCODE_ENABLED) {
- /* encoder only: write but not read (???) */
- return -1;
- }
- else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) ==
- (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) {
- return 1;
- }
- return (-1);
-}
-#endif /* H5_HAVE_FILTER_SZIP */
-
-/*-------------------------------------------------------------------------
- * Function: filter_read_internal
- *
- * Purpose: Tests parallel reading of a 2D dataset written serially using
- * filters. During the parallel reading phase, the dataset is
- * divided evenly among the processors in vertical hyperslabs.
- *-------------------------------------------------------------------------
- */
-static void
-filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size)
-{
- hid_t file, dataset; /* HDF5 IDs */
- hid_t access_plist; /* Access property list ID */
- hid_t sid, memspace; /* Dataspace IDs */
- hsize_t size[2]; /* Dataspace dimensions */
- hsize_t hs_offset[2]; /* Hyperslab offset */
- hsize_t hs_size[2]; /* Hyperslab size */
- size_t i, j; /* Local index variables */
- char name[32] = "dataset";
- herr_t hrc; /* Error status */
- int *points = NULL; /* Writing buffer for entire dataset */
- int *check = NULL; /* Reading buffer for selected hyperslab */
-
- (void)dset_size; /* silence compiler */
-
- /* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* set sizes for dataset and hyperslabs */
- hs_size[0] = size[0] = HS_DIM1;
- hs_size[1] = HS_DIM2;
-
- size[1] = hs_size[1] * (hsize_t)mpi_size;
-
- hs_offset[0] = 0;
- hs_offset[1] = hs_size[1] * (hsize_t)mpi_rank;
-
- /* Create the data space */
- sid = H5Screate_simple(2, size, NULL);
- VRFY(sid >= 0, "H5Screate_simple");
-
- /* Create buffers */
- points = (int *)malloc(size[0] * size[1] * sizeof(int));
- VRFY(points != NULL, "malloc");
-
- check = (int *)malloc(hs_size[0] * hs_size[1] * sizeof(int));
- VRFY(check != NULL, "malloc");
-
- /* Initialize writing buffer with known values */
- for (i = 0; i < size[0]; i++)
- for (j = 0; j < size[1]; j++)
- points[i * size[1] + j] = (int)(i + j + 7);
-
- VRFY(H5Pall_filters_avail(dcpl), "Incorrect filter availability");
-
- /* Serial write phase */
- if (MAINPROCESS) {
-
- file = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
- VRFY(file >= 0, "H5Fcreate");
-
- /* Create the dataset */
- dataset = H5Dcreate2(file, name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- VRFY(dataset >= 0, "H5Dcreate2");
-
- hrc = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, points);
- VRFY(hrc >= 0, "H5Dwrite");
-#if 0
- *dset_size = H5Dget_storage_size(dataset);
- VRFY(*dset_size > 0, "H5Dget_storage_size");
-#endif
-
- hrc = H5Dclose(dataset);
- VRFY(hrc >= 0, "H5Dclose");
-
- hrc = H5Fclose(file);
- VRFY(hrc >= 0, "H5Fclose");
- }
-
- MPI_Barrier(MPI_COMM_WORLD);
-
- /* Parallel read phase */
- /* Set up MPIO file access property lists */
- access_plist = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((access_plist >= 0), "H5Pcreate");
-
- hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL);
- VRFY((hrc >= 0), "H5Pset_fapl_mpio");
-
- /* Open the file */
- file = H5Fopen(filename, H5F_ACC_RDWR, access_plist);
- VRFY((file >= 0), "H5Fopen");
-
- dataset = H5Dopen2(file, name, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dopen2");
-
- hrc = H5Sselect_hyperslab(sid, H5S_SELECT_SET, hs_offset, NULL, hs_size, NULL);
- VRFY(hrc >= 0, "H5Sselect_hyperslab");
-
- memspace = H5Screate_simple(2, hs_size, NULL);
- VRFY(memspace >= 0, "H5Screate_simple");
-
- hrc = H5Dread(dataset, H5T_NATIVE_INT, memspace, sid, H5P_DEFAULT, check);
- VRFY(hrc >= 0, "H5Dread");
-
- /* Check that the values read are the same as the values written */
- for (i = 0; i < hs_size[0]; i++) {
- for (j = 0; j < hs_size[1]; j++) {
- if (points[i * size[1] + (size_t)hs_offset[1] + j] != check[i * hs_size[1] + j]) {
- fprintf(stderr, " Read different values than written.\n");
- fprintf(stderr, " At index %lu,%lu\n", (unsigned long)(i),
- (unsigned long)(hs_offset[1] + j));
- fprintf(stderr, " At original: %d\n", (int)points[i * size[1] + (size_t)hs_offset[1] + j]);
- fprintf(stderr, " At returned: %d\n", (int)check[i * hs_size[1] + j]);
- VRFY(false, "");
- }
- }
- }
-#if 0
- /* Get the storage size of the dataset */
- *dset_size = H5Dget_storage_size(dataset);
- VRFY(*dset_size != 0, "H5Dget_storage_size");
-#endif
-
- /* Clean up objects used for this test */
- hrc = H5Dclose(dataset);
- VRFY(hrc >= 0, "H5Dclose");
-
- hrc = H5Sclose(sid);
- VRFY(hrc >= 0, "H5Sclose");
-
- hrc = H5Sclose(memspace);
- VRFY(hrc >= 0, "H5Sclose");
-
- hrc = H5Pclose(access_plist);
- VRFY(hrc >= 0, "H5Pclose");
-
- hrc = H5Fclose(file);
- VRFY(hrc >= 0, "H5Fclose");
-
- free(points);
- free(check);
-
- MPI_Barrier(MPI_COMM_WORLD);
-}
-
-/*-------------------------------------------------------------------------
- * Function: test_filter_read
- *
- * Purpose: Tests parallel reading of datasets written serially using
- * several (combinations of) filters.
- *-------------------------------------------------------------------------
- */
-
-void
-test_filter_read(void)
-{
- hid_t dc; /* HDF5 IDs */
- const hsize_t chunk_size[2] = {CHUNK_DIM1, CHUNK_DIM2}; /* Chunk dimensions */
-#if 0
- hsize_t null_size; /* Size of dataset without filters */
-#endif
- unsigned chunk_opts; /* Chunk options */
- unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
- herr_t hrc;
- const char *filename;
-#ifdef H5_HAVE_FILTER_FLETCHER32
- hsize_t fletcher32_size; /* Size of dataset with Fletcher32 checksum */
-#endif
-
-#ifdef H5_HAVE_FILTER_DEFLATE
- hsize_t deflate_size; /* Size of dataset with deflate filter */
-#endif /* H5_HAVE_FILTER_DEFLATE */
-
-#ifdef H5_HAVE_FILTER_SZIP
- hsize_t szip_size; /* Size of dataset with szip filter */
- unsigned szip_options_mask = H5_SZIP_NN_OPTION_MASK;
- unsigned szip_pixels_per_block = 4;
-#endif /* H5_HAVE_FILTER_SZIP */
-
-#if 0
- hsize_t shuffle_size; /* Size of dataset with shuffle filter */
-#endif
-
-#if (defined H5_HAVE_FILTER_DEFLATE || defined H5_HAVE_FILTER_SZIP)
- hsize_t combo_size; /* Size of dataset with multiple filters */
-#endif /* H5_HAVE_FILTER_DEFLATE || H5_HAVE_FILTER_SZIP */
-
- filename = PARATESTFILE /* GetTestParameters() */;
-
- if (VERBOSE_MED)
- printf("Parallel reading of dataset written with filters %s\n", filename);
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_FILTERS)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(
- " API functions for basic file, dataset or filter aren't supported with this connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- /*----------------------------------------------------------
- * STEP 0: Test without filters.
- *----------------------------------------------------------
- */
- dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc >= 0, "H5Pcreate");
-
- hrc = H5Pset_chunk(dc, 2, chunk_size);
- VRFY(hrc >= 0, "H5Pset_chunk");
-
- filter_read_internal(filename, dc, /* &null_size */ NULL);
-
- /* Clean up objects used for this test */
- hrc = H5Pclose(dc);
- VRFY(hrc >= 0, "H5Pclose");
-
- /* Run steps 1-3 both with and without filters disabled on partial chunks */
- for (disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1;
- disable_partial_chunk_filters++) {
- /* Set chunk options appropriately */
- dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc >= 0, "H5Pcreate");
-
- hrc = H5Pset_chunk(dc, 2, chunk_size);
- VRFY(hrc >= 0, "H5Pset_filter");
-
- hrc = H5Pget_chunk_opts(dc, &chunk_opts);
- VRFY(hrc >= 0, "H5Pget_chunk_opts");
-
- if (disable_partial_chunk_filters)
- chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
-
- hrc = H5Pclose(dc);
- VRFY(hrc >= 0, "H5Pclose");
-
- /*----------------------------------------------------------
- * STEP 1: Test Fletcher32 Checksum by itself.
- *----------------------------------------------------------
- */
-#ifdef H5_HAVE_FILTER_FLETCHER32
-
- dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc >= 0, "H5Pset_filter");
-
- hrc = H5Pset_chunk(dc, 2, chunk_size);
- VRFY(hrc >= 0, "H5Pset_filter");
-
- hrc = H5Pset_chunk_opts(dc, chunk_opts);
- VRFY(hrc >= 0, "H5Pset_chunk_opts");
-
- hrc = H5Pset_filter(dc, H5Z_FILTER_FLETCHER32, 0, 0, NULL);
- VRFY(hrc >= 0, "H5Pset_filter");
-
- filter_read_internal(filename, dc, &fletcher32_size);
-#if 0
- VRFY(fletcher32_size > null_size, "Size after checksumming is incorrect.");
-#endif
-
- /* Clean up objects used for this test */
- hrc = H5Pclose(dc);
- VRFY(hrc >= 0, "H5Pclose");
-
-#endif /* H5_HAVE_FILTER_FLETCHER32 */
-
- /*----------------------------------------------------------
- * STEP 2: Test deflation by itself.
- *----------------------------------------------------------
- */
-#ifdef H5_HAVE_FILTER_DEFLATE
-
- dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc >= 0, "H5Pcreate");
-
- hrc = H5Pset_chunk(dc, 2, chunk_size);
- VRFY(hrc >= 0, "H5Pset_chunk");
-
- hrc = H5Pset_chunk_opts(dc, chunk_opts);
- VRFY(hrc >= 0, "H5Pset_chunk_opts");
-
- hrc = H5Pset_deflate(dc, 6);
- VRFY(hrc >= 0, "H5Pset_deflate");
-
- filter_read_internal(filename, dc, &deflate_size);
-
- /* Clean up objects used for this test */
- hrc = H5Pclose(dc);
- VRFY(hrc >= 0, "H5Pclose");
-
-#endif /* H5_HAVE_FILTER_DEFLATE */
-
- /*----------------------------------------------------------
- * STEP 3: Test szip compression by itself.
- *----------------------------------------------------------
- */
-#ifdef H5_HAVE_FILTER_SZIP
- if (h5_szip_can_encode() == 1) {
- dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc >= 0, "H5Pcreate");
-
- hrc = H5Pset_chunk(dc, 2, chunk_size);
- VRFY(hrc >= 0, "H5Pset_chunk");
-
- hrc = H5Pset_chunk_opts(dc, chunk_opts);
- VRFY(hrc >= 0, "H5Pset_chunk_opts");
-
- hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
- VRFY(hrc >= 0, "H5Pset_szip");
-
- filter_read_internal(filename, dc, &szip_size);
-
- /* Clean up objects used for this test */
- hrc = H5Pclose(dc);
- VRFY(hrc >= 0, "H5Pclose");
- }
-#endif /* H5_HAVE_FILTER_SZIP */
- } /* end for */
-
- /*----------------------------------------------------------
- * STEP 4: Test shuffling by itself.
- *----------------------------------------------------------
- */
-
- dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc >= 0, "H5Pcreate");
-
- hrc = H5Pset_chunk(dc, 2, chunk_size);
- VRFY(hrc >= 0, "H5Pset_chunk");
-
- hrc = H5Pset_shuffle(dc);
- VRFY(hrc >= 0, "H5Pset_shuffle");
-
- filter_read_internal(filename, dc, /* &shuffle_size */ NULL);
-#if 0
- VRFY(shuffle_size == null_size, "Shuffled size not the same as uncompressed size.");
-#endif
-
- /* Clean up objects used for this test */
- hrc = H5Pclose(dc);
- VRFY(hrc >= 0, "H5Pclose");
-
- /*----------------------------------------------------------
- * STEP 5: Test shuffle + deflate + checksum in any order.
- *----------------------------------------------------------
- */
-#ifdef H5_HAVE_FILTER_DEFLATE
- /* Testing shuffle+deflate+checksum filters (checksum first) */
- dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc >= 0, "H5Pcreate");
-
- hrc = H5Pset_chunk(dc, 2, chunk_size);
- VRFY(hrc >= 0, "H5Pset_chunk");
-
- hrc = H5Pset_fletcher32(dc);
- VRFY(hrc >= 0, "H5Pset_fletcher32");
-
- hrc = H5Pset_shuffle(dc);
- VRFY(hrc >= 0, "H5Pset_shuffle");
-
- hrc = H5Pset_deflate(dc, 6);
- VRFY(hrc >= 0, "H5Pset_deflate");
-
- filter_read_internal(filename, dc, &combo_size);
-
- /* Clean up objects used for this test */
- hrc = H5Pclose(dc);
- VRFY(hrc >= 0, "H5Pclose");
-
- /* Testing shuffle+deflate+checksum filters (checksum last) */
- dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc >= 0, "H5Pcreate");
-
- hrc = H5Pset_chunk(dc, 2, chunk_size);
- VRFY(hrc >= 0, "H5Pset_chunk");
-
- hrc = H5Pset_shuffle(dc);
- VRFY(hrc >= 0, "H5Pset_shuffle");
-
- hrc = H5Pset_deflate(dc, 6);
- VRFY(hrc >= 0, "H5Pset_deflate");
-
- hrc = H5Pset_fletcher32(dc);
- VRFY(hrc >= 0, "H5Pset_fletcher32");
-
- filter_read_internal(filename, dc, &combo_size);
-
- /* Clean up objects used for this test */
- hrc = H5Pclose(dc);
- VRFY(hrc >= 0, "H5Pclose");
-
-#endif /* H5_HAVE_FILTER_DEFLATE */
-
- /*----------------------------------------------------------
- * STEP 6: Test shuffle + szip + checksum in any order.
- *----------------------------------------------------------
- */
-#ifdef H5_HAVE_FILTER_SZIP
-
- /* Testing shuffle+szip(with encoder)+checksum filters(checksum first) */
- dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc >= 0, "H5Pcreate");
-
- hrc = H5Pset_chunk(dc, 2, chunk_size);
- VRFY(hrc >= 0, "H5Pset_chunk");
-
- hrc = H5Pset_fletcher32(dc);
- VRFY(hrc >= 0, "H5Pset_fletcher32");
-
- hrc = H5Pset_shuffle(dc);
- VRFY(hrc >= 0, "H5Pset_shuffle");
-
- /* Make sure encoding is enabled */
- if (h5_szip_can_encode() == 1) {
- hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
- VRFY(hrc >= 0, "H5Pset_szip");
-
- filter_read_internal(filename, dc, &combo_size);
- }
-
- /* Clean up objects used for this test */
- hrc = H5Pclose(dc);
- VRFY(hrc >= 0, "H5Pclose");
-
- /* Testing shuffle+szip(with encoder)+checksum filters(checksum last) */
- /* Make sure encoding is enabled */
- if (h5_szip_can_encode() == 1) {
- dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc >= 0, "H5Pcreate");
-
- hrc = H5Pset_chunk(dc, 2, chunk_size);
- VRFY(hrc >= 0, "H5Pset_chunk");
-
- hrc = H5Pset_shuffle(dc);
- VRFY(hrc >= 0, "H5Pset_shuffle");
-
- hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
- VRFY(hrc >= 0, "H5Pset_szip");
-
- hrc = H5Pset_fletcher32(dc);
- VRFY(hrc >= 0, "H5Pset_fletcher32");
-
- filter_read_internal(filename, dc, &combo_size);
-
- /* Clean up objects used for this test */
- hrc = H5Pclose(dc);
- VRFY(hrc >= 0, "H5Pclose");
- }
-
-#endif /* H5_HAVE_FILTER_SZIP */
-}
diff --git a/testpar/API/t_mdset.c b/testpar/API/t_mdset.c
deleted file mode 100644
index 7c97898..0000000
--- a/testpar/API/t_mdset.c
+++ /dev/null
@@ -1,2827 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-#include "hdf5.h"
-#include "testphdf5.h"
-
-#if 0
-#include "H5Dprivate.h"
-#include "H5private.h"
-#endif
-
-#define DIM 2
-#define SIZE 32
-#define NDATASET 4
-#define GROUP_DEPTH 32
-enum obj_type { is_group, is_dset };
-
-static int get_size(void);
-static void write_dataset(hid_t, hid_t, hid_t);
-static int read_dataset(hid_t, hid_t, hid_t);
-static void create_group_recursive(hid_t, hid_t, hid_t, int);
-static void recursive_read_group(hid_t, hid_t, hid_t, int);
-static void group_dataset_read(hid_t fid, int mpi_rank, int m);
-static void write_attribute(hid_t, int, int);
-static int read_attribute(hid_t, int, int);
-static int check_value(DATATYPE *, DATATYPE *, int);
-static void get_slab(hsize_t[], hsize_t[], hsize_t[], hsize_t[], int);
-
-/*
- * The size value computed by this function is used extensively in
- * configuring tests for the current number of processes.
- *
- * This function was created as part of an effort to allow the
- * test functions in this file to run on an arbitrary number of
- * processors.
- * JRM - 8/11/04
- */
-
-static int
-get_size(void)
-{
- int mpi_rank;
- int mpi_size;
- int size = SIZE;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); /* needed for VRFY */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-
- if (mpi_size > size) {
- if ((mpi_size % 2) == 0) {
- size = mpi_size;
- }
- else {
- size = mpi_size + 1;
- }
- }
-
- VRFY((mpi_size <= size), "mpi_size <= size");
- VRFY(((size % 2) == 0), "size isn't even");
-
- return (size);
-
-} /* get_size() */
-
-/*
- * Example of using PHDF5 to create a zero sized dataset.
- *
- */
-void
-zero_dim_dset(void)
-{
- int mpi_size, mpi_rank;
- const char *filename;
- hid_t fid, plist, dcpl, dsid, sid;
- hsize_t dim, chunk_dim;
- herr_t ret;
- int data[1];
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file or dataset aren't supported with this connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- filename = PARATESTFILE /* GetTestParameters() */;
-
- plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- VRFY((plist >= 0), "create_faccess_plist succeeded");
-
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
- VRFY((fid >= 0), "H5Fcreate succeeded");
- ret = H5Pclose(plist);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcpl >= 0), "failed H5Pcreate");
-
- /* Set 1 chunk size */
- chunk_dim = 1;
- ret = H5Pset_chunk(dcpl, 1, &chunk_dim);
- VRFY((ret >= 0), "failed H5Pset_chunk");
-
- /* Create 1D dataspace with 0 dim size */
- dim = 0;
- sid = H5Screate_simple(1, &dim, NULL);
- VRFY((sid >= 0), "failed H5Screate_simple");
-
- /* Create chunked dataset */
- dsid = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- VRFY((dsid >= 0), "failed H5Dcreate2");
-
- /* Write 0 elements to the dataset */
- ret = H5Dwrite(dsid, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data);
- VRFY((ret >= 0), "failed H5Dwrite");
-
- /* Read 0 elements from dataset */
- ret = H5Dread(dsid, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data);
- VRFY((ret >= 0), "failed H5Dread");
-
- H5Pclose(dcpl);
- H5Dclose(dsid);
- H5Sclose(sid);
- H5Fclose(fid);
-}
-
-/*
- * Example of using PHDF5 to create ndatasets datasets. Each process write
- * a slab of array to the file.
- */
-void
-multiple_dset_write(void)
-{
- int i, j, n, mpi_size, mpi_rank, size;
- hid_t iof, plist, dataset, memspace, filespace;
- hid_t dcpl; /* Dataset creation property list */
- hsize_t chunk_origin[DIM];
- hsize_t chunk_dims[DIM], file_dims[DIM];
- hsize_t count[DIM] = {1, 1};
- double *outme = NULL;
- double fill = 1.0; /* Fill value */
- char dname[100];
- herr_t ret;
-#if 0
- const H5Ptest_param_t *pt;
-#endif
- char *filename;
- int ndatasets;
-
-#if 0
- pt = GetTestParameters();
-#endif
- /* filename = pt->name; */ filename = PARATESTFILE;
- /* ndatasets = pt->count; */ ndatasets = NDATASET;
-
- size = get_size();
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file or dataset aren't supported with this connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- outme = malloc((size_t)size * (size_t)size * sizeof(double));
- VRFY((outme != NULL), "malloc succeeded for outme");
-
- plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- VRFY((plist >= 0), "create_faccess_plist succeeded");
- iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
- VRFY((iof >= 0), "H5Fcreate succeeded");
- ret = H5Pclose(plist);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /* decide the hyperslab according to process number. */
- get_slab(chunk_origin, chunk_dims, count, file_dims, size);
-
- memspace = H5Screate_simple(DIM, chunk_dims, NULL);
- filespace = H5Screate_simple(DIM, file_dims, NULL);
- ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
- VRFY((ret >= 0), "mdata hyperslab selection");
-
- /* Create a dataset creation property list */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcpl >= 0), "dataset creation property list succeeded");
-
- ret = H5Pset_fill_value(dcpl, H5T_NATIVE_DOUBLE, &fill);
- VRFY((ret >= 0), "set fill-value succeeded");
-
- for (n = 0; n < ndatasets; n++) {
- snprintf(dname, sizeof(dname), "dataset %d", n);
- dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- VRFY((dataset > 0), dname);
-
- /* calculate data to write */
- for (i = 0; i < size; i++)
- for (j = 0; j < size; j++)
- outme[(i * size) + j] = n * 1000 + mpi_rank;
-
- H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, filespace, H5P_DEFAULT, outme);
-
- H5Dclose(dataset);
-#ifdef BARRIER_CHECKS
- if (!((n + 1) % 10)) {
- printf("created %d datasets\n", n + 1);
- MPI_Barrier(MPI_COMM_WORLD);
- }
-#endif /* BARRIER_CHECKS */
- }
-
- H5Sclose(filespace);
- H5Sclose(memspace);
- H5Pclose(dcpl);
- H5Fclose(iof);
-
- free(outme);
-}
-
-/* Example of using PHDF5 to create, write, and read compact dataset.
- */
-void
-compact_dataset(void)
-{
- int i, j, mpi_size, mpi_rank, size, err_num = 0;
- hid_t iof, plist, dcpl, dxpl, dataset, filespace;
- hsize_t file_dims[DIM];
- double *outme;
- double *inme;
- char dname[] = "dataset";
- herr_t ret;
- const char *filename;
-#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- bool prop_value;
-#endif
-
- size = get_size();
-
- for (i = 0; i < DIM; i++)
- file_dims[i] = (hsize_t)size;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file or dataset aren't supported with this connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- outme = malloc((size_t)((size_t)size * (size_t)size * sizeof(double)));
- VRFY((outme != NULL), "malloc succeeded for outme");
-
- inme = malloc((size_t)size * (size_t)size * sizeof(double));
- VRFY((outme != NULL), "malloc succeeded for inme");
-
- filename = PARATESTFILE /* GetTestParameters() */;
- VRFY((mpi_size <= size), "mpi_size <= size");
-
- plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
-
- /* Define data space */
- filespace = H5Screate_simple(DIM, file_dims, NULL);
-
- /* Create a compact dataset */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcpl >= 0), "dataset creation property list succeeded");
- ret = H5Pset_layout(dcpl, H5D_COMPACT);
- VRFY((dcpl >= 0), "set property list for compact dataset");
- ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);
- VRFY((ret >= 0), "set space allocation time for compact dataset");
-
- dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate2 succeeded");
-
- /* set up the collective transfer properties list */
- dxpl = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxpl >= 0), "");
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* Recalculate data to write. Each process writes the same data. */
- for (i = 0; i < size; i++)
- for (j = 0; j < size; j++)
- outme[(i * size) + j] = (i + j) * 1000;
-
- ret = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, outme);
- VRFY((ret >= 0), "H5Dwrite succeeded");
-
- H5Pclose(dcpl);
- H5Pclose(plist);
- H5Dclose(dataset);
- H5Sclose(filespace);
- H5Fclose(iof);
-
- /* Open the file and dataset, read and compare the data. */
- plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- iof = H5Fopen(filename, H5F_ACC_RDONLY, plist);
- VRFY((iof >= 0), "H5Fopen succeeded");
-
- /* set up the collective transfer properties list */
- dxpl = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxpl >= 0), "");
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- dataset = H5Dopen2(iof, dname, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dopen2 succeeded");
-
-#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
- ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value, NULL,
- NULL, NULL, NULL, NULL, NULL);
- VRFY((ret >= 0), "H5Pinsert2() succeeded");
-#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
-
- ret = H5Dread(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, inme);
- VRFY((ret >= 0), "H5Dread succeeded");
-
-#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- prop_value = false;
- ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
- VRFY((ret >= 0), "H5Pget succeeded");
- VRFY((prop_value == false && dxfer_coll_type == DXFER_COLLECTIVE_IO),
- "rank 0 Bcast optimization was performed for a compact dataset");
-#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
-
- /* Verify data value */
- for (i = 0; i < size; i++)
- for (j = 0; j < size; j++)
- if (!H5_DBL_ABS_EQUAL(inme[(i * size) + j], outme[(i * size) + j]))
- if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- printf("Dataset Verify failed at [%d][%d]: expect %f, got %f\n", i, j,
- outme[(i * size) + j], inme[(i * size) + j]);
-
- H5Pclose(plist);
- H5Pclose(dxpl);
- H5Dclose(dataset);
- H5Fclose(iof);
- free(inme);
- free(outme);
-}
-
-/*
- * Example of using PHDF5 to create, write, and read dataset and attribute
- * of Null dataspace.
- */
-void
-null_dataset(void)
-{
- int mpi_size, mpi_rank;
- hid_t iof, plist, dxpl, dataset, attr, sid;
- unsigned uval = 2; /* Buffer for writing to dataset */
- int val = 1; /* Buffer for writing to attribute */
- hssize_t nelem;
- char dname[] = "dataset";
- char attr_name[] = "attribute";
- herr_t ret;
- const char *filename;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, dataset, or attribute aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- filename = PARATESTFILE /* GetTestParameters() */;
-
- plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
-
- /* Define data space */
- sid = H5Screate(H5S_NULL);
-
- /* Check that the null dataspace actually has 0 elements */
- nelem = H5Sget_simple_extent_npoints(sid);
- VRFY((nelem == 0), "H5Sget_simple_extent_npoints");
-
- /* Create a compact dataset */
- dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate2 succeeded");
-
- /* set up the collective transfer properties list */
- dxpl = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxpl >= 0), "");
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* Write "nothing" to the dataset(with type conversion) */
- ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, &uval);
- VRFY((ret >= 0), "H5Dwrite succeeded");
-
- /* Create an attribute for the group */
- attr = H5Acreate2(dataset, attr_name, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((attr >= 0), "H5Acreate2");
-
- /* Write "nothing" to the attribute(with type conversion) */
- ret = H5Awrite(attr, H5T_NATIVE_INT, &val);
- VRFY((ret >= 0), "H5Awrite");
-
- H5Aclose(attr);
- H5Dclose(dataset);
- H5Pclose(plist);
- H5Sclose(sid);
- H5Fclose(iof);
-
- /* Open the file and dataset, read and compare the data. */
- plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- iof = H5Fopen(filename, H5F_ACC_RDONLY, plist);
- VRFY((iof >= 0), "H5Fopen succeeded");
-
- /* set up the collective transfer properties list */
- dxpl = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxpl >= 0), "");
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- dataset = H5Dopen2(iof, dname, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dopen2 succeeded");
-
- /* Try reading from the dataset(make certain our buffer is unmodified) */
- ret = H5Dread(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, dxpl, &uval);
- VRFY((ret >= 0), "H5Dread");
- VRFY((uval == 2), "H5Dread");
-
- /* Open the attribute for the dataset */
- attr = H5Aopen(dataset, attr_name, H5P_DEFAULT);
- VRFY((attr >= 0), "H5Aopen");
-
- /* Try reading from the attribute (make certain our buffer is unmodified) */
- ret = H5Aread(attr, H5T_NATIVE_INT, &val);
- VRFY((ret >= 0), "H5Aread");
- VRFY((val == 1), "H5Aread");
-
- H5Pclose(plist);
- H5Pclose(dxpl);
- H5Aclose(attr);
- H5Dclose(dataset);
- H5Fclose(iof);
-}
-
-/* Example of using PHDF5 to create "large" datasets. (>2GB, >4GB, >8GB)
- * Actual data is _not_ written to these datasets. Dataspaces are exact
- * sizes(2GB, 4GB, etc.), but the metadata for the file pushes the file over
- * the boundary of interest.
- */
-void
-big_dataset(void)
-{
- int mpi_size, mpi_rank; /* MPI info */
- hid_t iof, /* File ID */
- fapl, /* File access property list ID */
- dataset, /* Dataset ID */
- filespace; /* Dataset's dataspace ID */
- hsize_t file_dims[4]; /* Dimensions of dataspace */
- char dname[] = "dataset"; /* Name of dataset */
-#if 0
- MPI_Offset file_size; /* Size of file on disk */
-#endif
- herr_t ret; /* Generic return value */
- const char *filename;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file or dataset aren't supported with this connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- /* Verify MPI_Offset can handle larger than 2GB sizes */
- VRFY((sizeof(MPI_Offset) > 4), "sizeof(MPI_Offset)>4");
-
- filename = PARATESTFILE /* GetTestParameters() */;
-
- fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- VRFY((fapl >= 0), "create_faccess_plist succeeded");
-
- /*
- * Create >2GB HDF5 file
- */
- iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
- VRFY((iof >= 0), "H5Fcreate succeeded");
-
-    /* Define dataspace for the 2GB dataset */
- file_dims[0] = 2;
- file_dims[1] = 1024;
- file_dims[2] = 1024;
- file_dims[3] = 1024;
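-    /* 2 * 1024 * 1024 * 1024 one-byte elements = exactly 2 GiB of raw data;
-     * the file metadata pushes the total size past the 2GB boundary of interest */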
- filespace = H5Screate_simple(4, file_dims, NULL);
- VRFY((filespace >= 0), "H5Screate_simple succeeded");
-
- dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate2 succeeded");
-
- /* Close all file objects */
- ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose succeeded");
- ret = H5Sclose(filespace);
- VRFY((ret >= 0), "H5Sclose succeeded");
- ret = H5Fclose(iof);
- VRFY((ret >= 0), "H5Fclose succeeded");
-
-#if 0
- /* Check that file of the correct size was created */
- file_size = h5_get_file_size(filename, fapl);
- VRFY((file_size == 2147485696ULL), "File is correct size(~2GB)");
-#endif
-
- /*
- * Create >4GB HDF5 file
- */
- iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
- VRFY((iof >= 0), "H5Fcreate succeeded");
-
-    /* Define dataspace for the 4GB dataset */
- file_dims[0] = 4;
- file_dims[1] = 1024;
- file_dims[2] = 1024;
- file_dims[3] = 1024;
- filespace = H5Screate_simple(4, file_dims, NULL);
- VRFY((filespace >= 0), "H5Screate_simple succeeded");
-
- dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate2 succeeded");
-
- /* Close all file objects */
- ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose succeeded");
- ret = H5Sclose(filespace);
- VRFY((ret >= 0), "H5Sclose succeeded");
- ret = H5Fclose(iof);
- VRFY((ret >= 0), "H5Fclose succeeded");
-#if 0
- /* Check that file of the correct size was created */
- file_size = h5_get_file_size(filename, fapl);
- VRFY((file_size == 4294969344ULL), "File is correct size(~4GB)");
-#endif
-
- /*
- * Create >8GB HDF5 file
- */
- iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
- VRFY((iof >= 0), "H5Fcreate succeeded");
-
-    /* Define dataspace for the 8GB dataset */
- file_dims[0] = 8;
- file_dims[1] = 1024;
- file_dims[2] = 1024;
- file_dims[3] = 1024;
- filespace = H5Screate_simple(4, file_dims, NULL);
- VRFY((filespace >= 0), "H5Screate_simple succeeded");
-
- dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate2 succeeded");
-
- /* Close all file objects */
- ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose succeeded");
- ret = H5Sclose(filespace);
- VRFY((ret >= 0), "H5Sclose succeeded");
- ret = H5Fclose(iof);
- VRFY((ret >= 0), "H5Fclose succeeded");
-#if 0
- /* Check that file of the correct size was created */
- file_size = h5_get_file_size(filename, fapl);
- VRFY((file_size == 8589936640ULL), "File is correct size(~8GB)");
-#endif
-
- /* Close fapl */
- ret = H5Pclose(fapl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-}
-
-/* Example of using PHDF5 to read a partially written dataset. The dataset does
- * not have actual data written to the entire raw data area and relies on the
- * default fill value of zeros to work correctly.
- */
-void
-dataset_fillvalue(void)
-{
- int mpi_size, mpi_rank; /* MPI info */
- int err_num; /* Number of errors */
- hid_t iof, /* File ID */
- fapl, /* File access property list ID */
- dxpl, /* Data transfer property list ID */
- dataset, /* Dataset ID */
- memspace, /* Memory dataspace ID */
- filespace; /* Dataset's dataspace ID */
- char dname[] = "dataset"; /* Name of dataset */
- hsize_t dset_dims[4] = {0, 6, 7, 8};
- hsize_t req_start[4] = {0, 0, 0, 0};
- hsize_t req_count[4] = {1, 6, 7, 8};
- hsize_t dset_size; /* Dataset size */
- int *rdata, *wdata; /* Buffers for data to read and write */
- int *twdata, *trdata; /* Temporary pointer into buffer */
- int acc, i, ii, j, k, l; /* Local index variables */
- herr_t ret; /* Generic return value */
- const char *filename;
-#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- bool prop_value;
-#endif
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file or dataset aren't supported with this connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- filename = PARATESTFILE /* GetTestParameters() */;
-
- /* Set the dataset dimension to be one row more than number of processes */
- /* and calculate the actual dataset size. */
- dset_dims[0] = (hsize_t)(mpi_size + 1);
- dset_size = dset_dims[0] * dset_dims[1] * dset_dims[2] * dset_dims[3];
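-    /* e.g. with 4 MPI ranks: dset_dims = {5, 6, 7, 8}, so dset_size = 5 * 6 * 7 * 8 = 1680 elements */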
-
- /* Allocate space for the buffers */
- rdata = malloc((size_t)(dset_size * sizeof(int)));
-    VRFY((rdata != NULL), "malloc succeeded for read buffer");
- wdata = malloc((size_t)(dset_size * sizeof(int)));
- VRFY((wdata != NULL), "malloc succeeded for write buffer");
-
- fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- VRFY((fapl >= 0), "create_faccess_plist succeeded");
-
- /*
- * Create HDF5 file
- */
- iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
- VRFY((iof >= 0), "H5Fcreate succeeded");
-
- filespace = H5Screate_simple(4, dset_dims, NULL);
- VRFY((filespace >= 0), "File H5Screate_simple succeeded");
-
- dataset = H5Dcreate2(iof, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate2 succeeded");
-
- memspace = H5Screate_simple(4, dset_dims, NULL);
- VRFY((memspace >= 0), "Memory H5Screate_simple succeeded");
-
- /*
- * Read dataset before any data is written.
- */
-
- /* Create DXPL for I/O */
- dxpl = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxpl >= 0), "H5Pcreate succeeded");
-
-#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
- ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value, NULL,
- NULL, NULL, NULL, NULL, NULL);
- VRFY((ret >= 0), "testing property list inserted succeeded");
-#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
-
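-    /* Read the entire (still unwritten) dataset twice: pass 0 uses independent I/O
-     * and pass 1 uses collective I/O, so both transfer modes are checked against
-     * the default fill value of 0 */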
- for (ii = 0; ii < 2; ii++) {
-
- if (ii == 0)
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
- else
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
- /* set entire read buffer with the constant 2 */
- memset(rdata, 2, (size_t)(dset_size * sizeof(int)));
-
- /* Read the entire dataset back */
- ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rdata);
- VRFY((ret >= 0), "H5Dread succeeded");
-
-#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- prop_value = false;
- ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
- VRFY((ret >= 0), "testing property list get succeeded");
- if (ii == 0)
- VRFY((prop_value == false), "correctly handled rank 0 Bcast");
- else
- VRFY((prop_value == true), "correctly handled rank 0 Bcast");
-#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
-
- /* Verify all data read are the fill value 0 */
- trdata = rdata;
- err_num = 0;
- for (i = 0; i < (int)dset_dims[0]; i++)
- for (j = 0; j < (int)dset_dims[1]; j++)
- for (k = 0; k < (int)dset_dims[2]; k++)
- for (l = 0; l < (int)dset_dims[3]; l++, trdata++)
- if (*trdata != 0)
- if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- printf(
- "Rank %d: Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n",
- mpi_rank, i, j, k, l, *trdata);
- if (err_num > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("Rank %d: [more errors ...]\n", mpi_rank);
- if (err_num) {
- printf("Rank %d: %d errors found in check_value\n", mpi_rank, err_num);
- nerrors++;
- }
- }
-
- /* Barrier to ensure all processes have completed the above test. */
- MPI_Barrier(MPI_COMM_WORLD);
-
- /*
- * Each process writes 1 row of data. Thus last row is not written.
- */
- /* Create hyperslabs in memory and file dataspaces */
- req_start[0] = (hsize_t)mpi_rank;
- ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, req_start, NULL, req_count, NULL);
- VRFY((ret >= 0), "H5Sselect_hyperslab succeeded on memory dataspace");
- ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, req_start, NULL, req_count, NULL);
- VRFY((ret >= 0), "H5Sselect_hyperslab succeeded on memory dataspace");
-
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* Fill write buffer with some values */
- twdata = wdata;
- for (i = 0, acc = 0; i < (int)dset_dims[0]; i++)
- for (j = 0; j < (int)dset_dims[1]; j++)
- for (k = 0; k < (int)dset_dims[2]; k++)
- for (l = 0; l < (int)dset_dims[3]; l++)
- *twdata++ = acc++;
-
- /* Collectively write a hyperslab of data to the dataset */
- ret = H5Dwrite(dataset, H5T_NATIVE_INT, memspace, filespace, dxpl, wdata);
- VRFY((ret >= 0), "H5Dwrite succeeded");
-
- /* Barrier here, to allow processes to sync */
- MPI_Barrier(MPI_COMM_WORLD);
-
- /*
- * Read dataset after partial write.
- */
-
-#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
- ret = H5Pset(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
- VRFY((ret >= 0), " H5Pset succeeded");
-#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
-
- for (ii = 0; ii < 2; ii++) {
-
- if (ii == 0)
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
- else
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
- /* set entire read buffer with the constant 2 */
- memset(rdata, 2, (size_t)(dset_size * sizeof(int)));
-
- /* Read the entire dataset back */
- ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rdata);
- VRFY((ret >= 0), "H5Dread succeeded");
-
-#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- prop_value = false;
- ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
- VRFY((ret >= 0), "testing property list get succeeded");
- if (ii == 0)
- VRFY((prop_value == false), "correctly handled rank 0 Bcast");
- else
- VRFY((prop_value == true), "correctly handled rank 0 Bcast");
-#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
-
- /* Verify correct data read */
- twdata = wdata;
- trdata = rdata;
- err_num = 0;
- for (i = 0; i < (int)dset_dims[0]; i++)
- for (j = 0; j < (int)dset_dims[1]; j++)
- for (k = 0; k < (int)dset_dims[2]; k++)
- for (l = 0; l < (int)dset_dims[3]; l++, twdata++, trdata++)
- if (i < mpi_size) {
- if (*twdata != *trdata)
- if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- printf("Dataset Verify failed at [%d][%d][%d][%d]: expect %d, got %d\n",
- i, j, k, l, *twdata, *trdata);
- } /* end if */
- else {
- if (*trdata != 0)
- if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- printf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i,
- j, k, l, *trdata);
- } /* end else */
- if (err_num > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("[more errors ...]\n");
- if (err_num) {
- printf("%d errors found in check_value\n", err_num);
- nerrors++;
- }
- }
-
- /* Close all file objects */
- ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose succeeded");
- ret = H5Sclose(filespace);
- VRFY((ret >= 0), "H5Sclose succeeded");
- ret = H5Fclose(iof);
- VRFY((ret >= 0), "H5Fclose succeeded");
-
- /* Close memory dataspace */
- ret = H5Sclose(memspace);
- VRFY((ret >= 0), "H5Sclose succeeded");
-
- /* Close dxpl */
- ret = H5Pclose(dxpl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /* Close fapl */
- ret = H5Pclose(fapl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /* free the buffers */
- free(rdata);
- free(wdata);
-}
-
-/* combined cngrpw and ingrpr tests because ingrpr reads file created by cngrpw. */
-void
-collective_group_write_independent_group_read(void)
-{
- collective_group_write();
- independent_group_read();
-}
-
-/* Write multiple groups with a chunked dataset in each group collectively.
- * These groups and datasets are for testing independent read later.
- */
-void
-collective_group_write(void)
-{
- int mpi_rank, mpi_size, size;
- int i, j, m;
- char gname[64], dname[32];
- hid_t fid, gid, did, plist, dcpl, memspace, filespace;
- DATATYPE *outme = NULL;
- hsize_t chunk_origin[DIM];
- hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
- hsize_t chunk_size[2]; /* Chunk dimensions - computed shortly */
- herr_t ret1, ret2;
-#if 0
- const H5Ptest_param_t *pt;
-#endif
- char *filename;
- int ngroups;
-
-#if 0
- pt = GetTestParameters();
-#endif
- /* filename = pt->name; */ filename = PARATESTFILE;
- /* ngroups = pt->count; */ ngroups = NGROUPS;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(
- " API functions for basic file, group, or dataset aren't supported with this connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- size = get_size();
-
- chunk_size[0] = (hsize_t)(size / 2);
- chunk_size[1] = (hsize_t)(size / 2);
-
- outme = malloc((size_t)size * (size_t)size * sizeof(DATATYPE));
- VRFY((outme != NULL), "malloc succeeded for outme");
-
- plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
- VRFY((fid >= 0), "H5Fcreate");
- H5Pclose(plist);
-
- /* decide the hyperslab according to process number. */
- get_slab(chunk_origin, chunk_dims, count, file_dims, size);
-
- /* select hyperslab in memory and file spaces. These two operations are
- * identical since the datasets are the same. */
- memspace = H5Screate_simple(DIM, file_dims, NULL);
- ret1 = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
- filespace = H5Screate_simple(DIM, file_dims, NULL);
- ret2 = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
- VRFY((memspace >= 0), "memspace");
- VRFY((filespace >= 0), "filespace");
- VRFY((ret1 == 0), "mgroup memspace selection");
- VRFY((ret2 == 0), "mgroup filespace selection");
-
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
- ret1 = H5Pset_chunk(dcpl, 2, chunk_size);
- VRFY((dcpl >= 0), "dataset creation property");
- VRFY((ret1 == 0), "set chunk for dataset creation property");
-
- /* creates ngroups groups under the root group, writes chunked
- * datasets in parallel. */
- for (m = 0; m < ngroups; m++) {
- snprintf(gname, sizeof(gname), "group%d", m);
- gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((gid > 0), gname);
-
- snprintf(dname, sizeof(dname), "dataset%d", m);
- did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- VRFY((did > 0), dname);
-
- for (i = 0; i < size; i++)
- for (j = 0; j < size; j++)
- outme[(i * size) + j] = (i + j) * 1000 + mpi_rank;
-
- ret1 = H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme);
- VRFY((ret1 == 0), "H5Dwrite");
-
- ret1 = H5Dclose(did);
- VRFY((ret1 == 0), "H5Dclose");
-
- ret1 = H5Gclose(gid);
- VRFY((ret1 == 0), "H5Gclose");
-
-#ifdef BARRIER_CHECKS
- if (!((m + 1) % 10)) {
- printf("created %d groups\n", m + 1);
- MPI_Barrier(MPI_COMM_WORLD);
- }
-#endif /* BARRIER_CHECKS */
- }
-
- H5Pclose(dcpl);
- H5Sclose(filespace);
- H5Sclose(memspace);
-
- ret1 = H5Fclose(fid);
- VRFY((ret1 == 0), "H5Fclose");
-
- free(outme);
-}
-
-/* Let two sets of processes open and read different groups and chunked
- * datasets independently.
- */
-void
-independent_group_read(void)
-{
- int mpi_rank, m;
- hid_t plist, fid;
-#if 0
- const H5Ptest_param_t *pt;
-#endif
- char *filename;
- int ngroups;
- herr_t ret;
-
-#if 0
- pt = GetTestParameters();
-#endif
- /* filename = pt->name; */ filename = PARATESTFILE;
- /* ngroups = pt->count; */ ngroups = NGROUPS;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(
- " API functions for basic file, group, or dataset aren't supported with this connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
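-    /* Disable the requirement that metadata operations be collective, since each
-     * process opens and reads different groups independently below */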
- H5Pset_all_coll_metadata_ops(plist, false);
-
- fid = H5Fopen(filename, H5F_ACC_RDONLY, plist);
- VRFY((fid > 0), "H5Fopen");
- H5Pclose(plist);
-
-    /* open groups and read datasets. Even-rank processes read groups starting
-     * from the last group and going backwards; odd-rank processes read
-     * even-numbered groups from the beginning. */
- if (mpi_rank % 2 == 0) {
-        for (m = ngroups - 1; m >= 0; m -= 2)
- group_dataset_read(fid, mpi_rank, m);
- }
- else {
- for (m = 0; m < ngroups; m += 2)
- group_dataset_read(fid, mpi_rank, m);
- }
-
- ret = H5Fclose(fid);
- VRFY((ret == 0), "H5Fclose");
-}
-
-/* Open and read datasets and compare data
- */
-static void
-group_dataset_read(hid_t fid, int mpi_rank, int m)
-{
- int ret, i, j, size;
- char gname[64], dname[32];
- hid_t gid, did;
- DATATYPE *outdata = NULL;
- DATATYPE *indata = NULL;
-
- size = get_size();
-
- indata = (DATATYPE *)malloc((size_t)size * (size_t)size * sizeof(DATATYPE));
- VRFY((indata != NULL), "malloc succeeded for indata");
-
- outdata = (DATATYPE *)malloc((size_t)size * (size_t)size * sizeof(DATATYPE));
- VRFY((outdata != NULL), "malloc succeeded for outdata");
-
- /* open every group under root group. */
- snprintf(gname, sizeof(gname), "group%d", m);
- gid = H5Gopen2(fid, gname, H5P_DEFAULT);
- VRFY((gid > 0), gname);
-
- /* check the data. */
- snprintf(dname, sizeof(dname), "dataset%d", m);
- did = H5Dopen2(gid, dname, H5P_DEFAULT);
- VRFY((did > 0), dname);
-
- H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, indata);
-
- /* this is the original value */
- for (i = 0; i < size; i++)
- for (j = 0; j < size; j++)
- outdata[(i * size) + j] = (i + j) * 1000 + mpi_rank;
-
- /* compare the original value(outdata) to the value in file(indata).*/
- ret = check_value(indata, outdata, size);
- VRFY((ret == 0), "check the data");
-
- ret = H5Dclose(did);
- VRFY((ret == 0), "H5Dclose");
- ret = H5Gclose(gid);
- VRFY((ret == 0), "H5Gclose");
-
- free(indata);
- free(outdata);
-}
-
-/*
- * Example of using PHDF5 to create multiple groups. Under the root group,
- * it creates ngroups groups. Under the first group just created, it creates
- * recursive subgroups of depth GROUP_DEPTH. In each created group, it
- * generates NDATASET datasets. Each process writes a hyperslab of an array
- * into the file. The structure is like
- *
- * root group
- * |
- * ---------------------------- ... ... ------------------------
- * | | | ... ... | |
- * group0*+' group1*+' group2*+' ... ... group ngroups*+'
- * |
- * 1st_child_group*'
- * |
- * 2nd_child_group*'
- * |
- * :
- * :
- * |
- * GROUP_DEPTHth_child_group*'
- *
- * * means the group has dataset(s).
- * + means the group has attribute(s).
- * ' means the datasets in the groups have attribute(s).
- *
- */
-void
-multiple_group_write(void)
-{
- int mpi_rank, mpi_size, size;
- int m;
- char gname[64];
- hid_t fid, gid, plist, memspace, filespace;
- hsize_t chunk_origin[DIM];
- hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
- herr_t ret;
-#if 0
- const H5Ptest_param_t *pt;
-#endif
- char *filename;
- int ngroups;
-
-#if 0
- pt = GetTestParameters();
-#endif
- /* filename = pt->name; */ filename = PARATESTFILE;
- /* ngroups = pt->count; */ ngroups = NGROUPS;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, group, dataset, or attribute aren't supported with "
- "this connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- size = get_size();
-
- plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
- H5Pclose(plist);
-
- /* decide the hyperslab according to process number. */
- get_slab(chunk_origin, chunk_dims, count, file_dims, size);
-
- /* select hyperslab in memory and file spaces. These two operations are
- * identical since the datasets are the same. */
- memspace = H5Screate_simple(DIM, file_dims, NULL);
- VRFY((memspace >= 0), "memspace");
- ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
- VRFY((ret >= 0), "mgroup memspace selection");
-
- filespace = H5Screate_simple(DIM, file_dims, NULL);
- VRFY((filespace >= 0), "filespace");
- ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
- VRFY((ret >= 0), "mgroup filespace selection");
-
- /* creates ngroups groups under the root group, writes datasets in
- * parallel. */
- for (m = 0; m < ngroups; m++) {
- snprintf(gname, sizeof(gname), "group%d", m);
- gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((gid > 0), gname);
-
- /* create attribute for these groups. */
- write_attribute(gid, is_group, m);
-
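-        /* Skip writing datasets into group0 here; its datasets are written below
-         * when create_group_recursive() starts from group0 */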
- if (m != 0)
- write_dataset(memspace, filespace, gid);
-
- H5Gclose(gid);
-
-#ifdef BARRIER_CHECKS
- if (!((m + 1) % 10)) {
- printf("created %d groups\n", m + 1);
- MPI_Barrier(MPI_COMM_WORLD);
- }
-#endif /* BARRIER_CHECKS */
- }
-
- /* recursively creates subgroups under the first group. */
- gid = H5Gopen2(fid, "group0", H5P_DEFAULT);
- create_group_recursive(memspace, filespace, gid, 0);
- ret = H5Gclose(gid);
- VRFY((ret >= 0), "H5Gclose");
-
- ret = H5Sclose(filespace);
- VRFY((ret >= 0), "H5Sclose");
- ret = H5Sclose(memspace);
- VRFY((ret >= 0), "H5Sclose");
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose");
-}
-
-/*
- * In a group, creates NDATASET datasets. Each process writes a hyperslab
- * of a data array to the file.
- */
-static void
-write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
-{
- int i, j, n, size;
- int mpi_rank, mpi_size;
- char dname[32];
- DATATYPE *outme = NULL;
- hid_t did;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-
- size = get_size();
-
-    outme = malloc((size_t)size * (size_t)size * sizeof(DATATYPE));
- VRFY((outme != NULL), "malloc succeeded for outme");
-
- for (n = 0; n < NDATASET; n++) {
- snprintf(dname, sizeof(dname), "dataset%d", n);
- did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((did > 0), dname);
-
- for (i = 0; i < size; i++)
- for (j = 0; j < size; j++)
- outme[(i * size) + j] = n * 1000 + mpi_rank;
-
- H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme);
-
- /* create attribute for these datasets.*/
- write_attribute(did, is_dset, n);
-
- H5Dclose(did);
- }
- free(outme);
-}
-
-/*
- * Creates subgroups of depth GROUP_DEPTH recursively. Also writes datasets
- * in parallel in each group.
- */
-static void
-create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid, int counter)
-{
- hid_t child_gid;
- int mpi_rank;
- char gname[64];
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
-#ifdef BARRIER_CHECKS
- if (!((counter + 1) % 10)) {
- printf("created %dth child groups\n", counter + 1);
- MPI_Barrier(MPI_COMM_WORLD);
- }
-#endif /* BARRIER_CHECKS */
-
- snprintf(gname, sizeof(gname), "%dth_child_group", counter + 1);
- child_gid = H5Gcreate2(gid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((child_gid > 0), gname);
-
- /* write datasets in parallel. */
- write_dataset(memspace, filespace, gid);
-
- if (counter < GROUP_DEPTH)
- create_group_recursive(memspace, filespace, child_gid, counter + 1);
-
- H5Gclose(child_gid);
-}
-
-/*
- * This function verifies the data from the multiple group testing. It opens
- * every dataset in every group and checks their correctness.
- */
-void
-multiple_group_read(void)
-{
- int mpi_rank, mpi_size, error_num, size;
- int m;
- char gname[64];
- hid_t plist, fid, gid, memspace, filespace;
- hsize_t chunk_origin[DIM];
- hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
-#if 0
- const H5Ptest_param_t *pt;
-#endif
- char *filename;
- int ngroups;
-
-#if 0
- pt = GetTestParameters();
-#endif
- /* filename = pt->name; */ filename = PARATESTFILE;
- /* ngroups = pt->count; */ ngroups = NGROUPS;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, group, dataset, or attribute aren't supported with "
- "this connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- size = get_size();
-
- plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- fid = H5Fopen(filename, H5F_ACC_RDONLY, plist);
- H5Pclose(plist);
-
- /* decide hyperslab for each process */
- get_slab(chunk_origin, chunk_dims, count, file_dims, size);
-
- /* select hyperslab for memory and file space */
- memspace = H5Screate_simple(DIM, file_dims, NULL);
- H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
- filespace = H5Screate_simple(DIM, file_dims, NULL);
- H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
-
- /* open every group under root group. */
- for (m = 0; m < ngroups; m++) {
- snprintf(gname, sizeof(gname), "group%d", m);
- gid = H5Gopen2(fid, gname, H5P_DEFAULT);
- VRFY((gid > 0), gname);
-
- /* check the data. */
- if (m != 0)
- if ((error_num = read_dataset(memspace, filespace, gid)) > 0)
- nerrors += error_num;
-
- /* check attribute.*/
- error_num = 0;
- if ((error_num = read_attribute(gid, is_group, m)) > 0)
- nerrors += error_num;
-
- H5Gclose(gid);
-
-#ifdef BARRIER_CHECKS
- if (!((m + 1) % 10))
- MPI_Barrier(MPI_COMM_WORLD);
-#endif /* BARRIER_CHECKS */
- }
-
- /* open all the groups in vertical direction. */
- gid = H5Gopen2(fid, "group0", H5P_DEFAULT);
- VRFY((gid > 0), "group0");
- recursive_read_group(memspace, filespace, gid, 0);
- H5Gclose(gid);
-
- H5Sclose(filespace);
- H5Sclose(memspace);
- H5Fclose(fid);
-}
-
-/*
- * This function opens all the datasets in a given group and checks the data
- * using the check_value function; dataset attributes are verified with read_attribute.
- */
-static int
-read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
-{
- int i, j, n, mpi_rank, mpi_size, size, attr_errors = 0, vrfy_errors = 0;
- char dname[32];
- DATATYPE *outdata = NULL, *indata = NULL;
- hid_t did;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-
- size = get_size();
-
- indata = (DATATYPE *)malloc((size_t)size * (size_t)size * sizeof(DATATYPE));
- VRFY((indata != NULL), "malloc succeeded for indata");
-
- outdata = (DATATYPE *)malloc((size_t)size * (size_t)size * sizeof(DATATYPE));
- VRFY((outdata != NULL), "malloc succeeded for outdata");
-
- for (n = 0; n < NDATASET; n++) {
- snprintf(dname, sizeof(dname), "dataset%d", n);
- did = H5Dopen2(gid, dname, H5P_DEFAULT);
- VRFY((did > 0), dname);
-
- H5Dread(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, indata);
-
- /* this is the original value */
- for (i = 0; i < size; i++)
- for (j = 0; j < size; j++) {
- *outdata = n * 1000 + mpi_rank;
- outdata++;
- }
- outdata -= size * size;
-
- /* compare the original value(outdata) to the value in file(indata).*/
- vrfy_errors = check_value(indata, outdata, size);
-
- /* check attribute.*/
- if ((attr_errors = read_attribute(did, is_dset, n)) > 0)
- vrfy_errors += attr_errors;
-
- H5Dclose(did);
- }
-
- free(indata);
- free(outdata);
-
- return vrfy_errors;
-}
-
-/*
- * This recursive function opens all the groups in vertical direction and
- * checks the data.
- */
-static void
-recursive_read_group(hid_t memspace, hid_t filespace, hid_t gid, int counter)
-{
- hid_t child_gid;
- int mpi_rank, err_num = 0;
- char gname[64];
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-#ifdef BARRIER_CHECKS
- if ((counter + 1) % 10)
- MPI_Barrier(MPI_COMM_WORLD);
-#endif /* BARRIER_CHECKS */
-
- if ((err_num = read_dataset(memspace, filespace, gid)))
- nerrors += err_num;
-
- if (counter < GROUP_DEPTH) {
- snprintf(gname, sizeof(gname), "%dth_child_group", counter + 1);
- child_gid = H5Gopen2(gid, gname, H5P_DEFAULT);
- VRFY((child_gid > 0), gname);
- recursive_read_group(memspace, filespace, child_gid, counter + 1);
- H5Gclose(child_gid);
- }
-}
-
-/* Create and write attribute for a group or a dataset. For groups, attribute
- * is a scalar datum; for dataset, it is a one-dimensional array.
- */
-static void
-write_attribute(hid_t obj_id, int this_type, int num)
-{
- hid_t sid, aid;
- hsize_t dspace_dims[1] = {8};
- int i, mpi_rank, attr_data[8], dspace_rank = 1;
- char attr_name[32];
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- if (this_type == is_group) {
- snprintf(attr_name, sizeof(attr_name), "Group Attribute %d", num);
- sid = H5Screate(H5S_SCALAR);
- aid = H5Acreate2(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
- H5Awrite(aid, H5T_NATIVE_INT, &num);
- H5Aclose(aid);
- H5Sclose(sid);
- } /* end if */
- else if (this_type == is_dset) {
- snprintf(attr_name, sizeof(attr_name), "Dataset Attribute %d", num);
- for (i = 0; i < 8; i++)
- attr_data[i] = i;
- sid = H5Screate_simple(dspace_rank, dspace_dims, NULL);
- aid = H5Acreate2(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
- H5Awrite(aid, H5T_NATIVE_INT, attr_data);
- H5Aclose(aid);
- H5Sclose(sid);
- } /* end else-if */
-}
-
-/* Read and verify attribute for group or dataset. */
-static int
-read_attribute(hid_t obj_id, int this_type, int num)
-{
- hid_t aid;
- hsize_t group_block[2] = {1, 1}, dset_block[2] = {1, 8};
- int i, mpi_rank, in_num, in_data[8], out_data[8], vrfy_errors = 0;
- char attr_name[32];
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- if (this_type == is_group) {
- snprintf(attr_name, sizeof(attr_name), "Group Attribute %d", num);
- aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT);
- H5Aread(aid, H5T_NATIVE_INT, &in_num);
- vrfy_errors = dataset_vrfy(NULL, NULL, NULL, group_block, &in_num, &num);
- H5Aclose(aid);
- }
- else if (this_type == is_dset) {
- snprintf(attr_name, sizeof(attr_name), "Dataset Attribute %d", num);
- for (i = 0; i < 8; i++)
- out_data[i] = i;
- aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT);
- H5Aread(aid, H5T_NATIVE_INT, in_data);
- vrfy_errors = dataset_vrfy(NULL, NULL, NULL, dset_block, in_data, out_data);
- H5Aclose(aid);
- }
-
- return vrfy_errors;
-}
-
-/* This functions compares the original data with the read-in data for its
- * hyperslab part only by process ID.
- */
-static int
-check_value(DATATYPE *indata, DATATYPE *outdata, int size)
-{
- int mpi_rank, mpi_size, err_num = 0;
- hsize_t i, j;
- hsize_t chunk_origin[DIM];
- hsize_t chunk_dims[DIM], count[DIM];
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-
- get_slab(chunk_origin, chunk_dims, count, NULL, size);
-
- indata += chunk_origin[0] * (hsize_t)size;
- outdata += chunk_origin[0] * (hsize_t)size;
-    for (i = chunk_origin[0]; i < (chunk_origin[0] + chunk_dims[0]); i++)
-        for (j = chunk_origin[1]; j < (chunk_origin[1] + chunk_dims[1]); j++, indata++, outdata++) {
- if (*indata != *outdata)
- if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- printf("Dataset Verify failed at [%lu][%lu](row %lu, col%lu): expect %d, got %d\n",
- (unsigned long)i, (unsigned long)j, (unsigned long)i, (unsigned long)j, *outdata,
- *indata);
- }
- if (err_num > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("[more errors ...]\n");
- if (err_num)
- printf("%d errors found in check_value\n", err_num);
- return err_num;
-}
-
-/* Decide each process's portion (hyperslab) of the dataset according to its
- * MPI rank.
- */
-static void
-get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[], hsize_t file_dims[], int size)
-{
- int mpi_rank, mpi_size;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-
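-    /* Each process is assigned a contiguous band of size/mpi_size full rows,
-     * starting at row mpi_rank * (size/mpi_size) */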
- if (chunk_origin != NULL) {
- chunk_origin[0] = (hsize_t)mpi_rank * (hsize_t)(size / mpi_size);
- chunk_origin[1] = 0;
- }
- if (chunk_dims != NULL) {
- chunk_dims[0] = (hsize_t)(size / mpi_size);
- chunk_dims[1] = (hsize_t)size;
- }
- if (file_dims != NULL)
- file_dims[0] = file_dims[1] = (hsize_t)size;
- if (count != NULL)
- count[0] = count[1] = 1;
-}
-
-/*
- * This function is based on bug demonstration code provided by Thomas
- * Guignon(thomas.guignon@ifp.fr), and is intended to verify the
- * correctness of my fix for that bug.
- *
- * In essence, the bug appeared when at least one process attempted to
- * write a point selection -- for which collective I/O is not supported,
- * and at least one other attempted to write some other type of selection
- * for which collective I/O is supported.
- *
- * Since the processes did not compare notes before performing the I/O,
- * some would attempt collective I/O while others performed independent
- * I/O. A hang resulted.
- *
- * This function reproduces this situation. At present the test hangs
- * on failure.
- * JRM - 9/13/04
- */
-
-#define N 4
-
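-/* To reproduce the situation: rank 0 writes N elements through a point (element)
- * selection in the file, while all other ranks make empty selections; every rank
- * then takes part in the same collective H5Dwrite() call. */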
-void
-io_mode_confusion(void)
-{
- /*
- * HDF5 APIs definitions
- */
-
- const int rank = 1;
- const char *dataset_name = "IntArray";
-
- hid_t file_id, dset_id; /* file and dataset identifiers */
- hid_t filespace, memspace; /* file and memory dataspace */
- /* identifiers */
- hsize_t dimsf[1]; /* dataset dimensions */
-    int data[N] = {1}; /* data buffer to write */
- hsize_t coord[N] = {0L, 1L, 2L, 3L};
- hid_t plist_id; /* property list identifier */
- herr_t status;
-
- /*
- * MPI variables
- */
-
- int mpi_size, mpi_rank;
-
- /*
- * test bed related variables
- */
-
- const char *fcn_name = "io_mode_confusion";
- const bool verbose = false;
-#if 0
- const H5Ptest_param_t *pt;
-#endif
- char *filename;
-
-#if 0
- pt = GetTestParameters();
-#endif
- /* filename = pt->name; */ filename = PARATESTFILE;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, dataset, or dataset more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- /*
- * Set up file access property list with parallel I/O access
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Setting up property list.\n", mpi_rank, fcn_name);
-
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id != -1), "H5Pcreate() failed");
-
- status = H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL);
- VRFY((status >= 0), "H5Pset_fapl_mpio() failed");
-
- /*
- * Create a new file collectively and release property list identifier.
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Creating new file.\n", mpi_rank, fcn_name);
-
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
- VRFY((file_id >= 0), "H5Fcreate() failed");
-
- status = H5Pclose(plist_id);
- VRFY((status >= 0), "H5Pclose() failed");
-
- /*
- * Create the dataspace for the dataset.
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Creating the dataspace for the dataset.\n", mpi_rank, fcn_name);
-
- dimsf[0] = N;
- filespace = H5Screate_simple(rank, dimsf, NULL);
- VRFY((filespace >= 0), "H5Screate_simple() failed.");
-
- /*
- * Create the dataset with default properties and close filespace.
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Creating the dataset, and closing filespace.\n", mpi_rank, fcn_name);
-
- dset_id =
- H5Dcreate2(file_id, dataset_name, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dset_id >= 0), "H5Dcreate2() failed");
-
- status = H5Sclose(filespace);
- VRFY((status >= 0), "H5Sclose() failed");
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Calling H5Screate_simple().\n", mpi_rank, fcn_name);
-
- memspace = H5Screate_simple(rank, dimsf, NULL);
- VRFY((memspace >= 0), "H5Screate_simple() failed.");
-
- if (mpi_rank == 0) {
- if (verbose)
- fprintf(stdout, "%0d:%s: Calling H5Sselect_all(memspace).\n", mpi_rank, fcn_name);
-
- status = H5Sselect_all(memspace);
- VRFY((status >= 0), "H5Sselect_all() failed");
- }
- else {
- if (verbose)
- fprintf(stdout, "%0d:%s: Calling H5Sselect_none(memspace).\n", mpi_rank, fcn_name);
-
- status = H5Sselect_none(memspace);
- VRFY((status >= 0), "H5Sselect_none() failed");
- }
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n", mpi_rank, fcn_name);
-
- MPI_Barrier(MPI_COMM_WORLD);
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Calling H5Dget_space().\n", mpi_rank, fcn_name);
-
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "H5Dget_space() failed");
-
- /* select all */
- if (mpi_rank == 0) {
- if (verbose)
- fprintf(stdout, "%0d:%s: Calling H5Sselect_elements() -- set up hang?\n", mpi_rank, fcn_name);
-
- status = H5Sselect_elements(filespace, H5S_SELECT_SET, N, (const hsize_t *)&coord);
- VRFY((status >= 0), "H5Sselect_elements() failed");
- }
- else { /* select nothing */
- if (verbose)
- fprintf(stdout, "%0d:%s: Calling H5Sselect_none().\n", mpi_rank, fcn_name);
-
- status = H5Sselect_none(filespace);
- VRFY((status >= 0), "H5Sselect_none() failed");
- }
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n", mpi_rank, fcn_name);
-
- MPI_Barrier(MPI_COMM_WORLD);
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Calling H5Pcreate().\n", mpi_rank, fcn_name);
-
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id != -1), "H5Pcreate() failed");
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Calling H5Pset_dxpl_mpio().\n", mpi_rank, fcn_name);
-
- status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
- VRFY((status >= 0), "H5Pset_dxpl_mpio() failed");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- status = H5Pset_dxpl_mpio_collective_opt(plist_id, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((status >= 0), "set independent IO collectively succeeded");
- }
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Calling H5Dwrite() -- hang here?.\n", mpi_rank, fcn_name);
-
- status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace, plist_id, data);
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Returned from H5Dwrite(), status=%d.\n", mpi_rank, fcn_name, status);
- VRFY((status >= 0), "H5Dwrite() failed");
-
- /*
- * Close/release resources.
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Cleaning up from test.\n", mpi_rank, fcn_name);
-
- status = H5Dclose(dset_id);
- VRFY((status >= 0), "H5Dclose() failed");
-
- status = H5Sclose(filespace);
- VRFY((status >= 0), "H5Dclose() failed");
-
- status = H5Sclose(memspace);
- VRFY((status >= 0), "H5Sclose() failed");
-
- status = H5Pclose(plist_id);
- VRFY((status >= 0), "H5Pclose() failed");
-
- status = H5Fclose(file_id);
- VRFY((status >= 0), "H5Fclose() failed");
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name);
-
- return;
-
-} /* io_mode_confusion() */
-
-#undef N
-
-/*
- * At present, the object header code maintains an image of its on disk
- * representation, which is updated as necessary instead of generated on
- * request.
- *
- * Prior to the fix that this test is designed to verify, the image of the
- * on disk representation was only updated on flush -- not when the object
- * header was marked clean.
- *
- * This worked perfectly well as long as all writes of a given object
- * header were written from a single process. However, with the implementation
- * of round robin metadata writes in parallel HDF5, this is no longer
- * the case -- it is possible for a given object header to be flushed from
- * several different processes, with the object header simply being marked
- * clean in all other processes on each flush. This resulted in NULL or
- * out of date object header information being written to disk.
- *
- * To repair this, I modified the object header code to update its
- * on disk image both on flush and when marked clean.
- *
- * This test is directed at verifying that the fix performs as expected.
- *
- * The test functions by creating a HDF5 file with several small datasets,
- * and then flushing the file. This should result in at least one of
- * the associated object headers being flushed by a process other than
- * process 0.
- *
- * Then for each data set, add an attribute and flush the file again.
- *
- * Close the file and re-open it.
- *
- * Open each of the data sets in turn. If all opens are successful,
- * the test passes. Otherwise the test fails.
- *
- * Note that this test will probably become irrelevant shortly, when we
- * land the journaling modifications on the trunk -- at which point all
- * cache clients will have to construct on disk images on demand.
- *
- * JRM -- 10/13/10
- */
-
-#define NUM_DATA_SETS 4
-#define LOCAL_DATA_SIZE 4
-#define LARGE_ATTR_SIZE 256
-/* Since all even and odd processes are split into writer and reader comm
- * respectively, process 0 and 1 in COMM_WORLD become the root process of
- * the writer and reader comm respectively.
- */
-#define Writer_Root 0
-#define Reader_Root 1
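-/* Handshake between writer and reader processes over MPI_COMM_WORLD:
- *   Reader_wait:   broadcast the current step count from the writer root to all processes
- *   Reader_result: broadcast the number of verified steps from the reader root to all processes
- *   Reader_check:  used by the writers -- publish the step count, then collect the readers' result
- */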
-#define Reader_wait(mpi_err, xsteps) mpi_err = MPI_Bcast(&xsteps, 1, MPI_INT, Writer_Root, MPI_COMM_WORLD)
-#define Reader_result(mpi_err, xsteps_done) \
- mpi_err = MPI_Bcast(&xsteps_done, 1, MPI_INT, Reader_Root, MPI_COMM_WORLD)
-#define Reader_check(mpi_err, xsteps, xsteps_done) \
- { \
- Reader_wait(mpi_err, xsteps); \
- Reader_result(mpi_err, xsteps_done); \
- }
-
-/* object names used by both rr_obj_hdr_flush_confusion and
- * rr_obj_hdr_flush_confusion_reader.
- */
-const char *dataset_name[NUM_DATA_SETS] = {"dataset_0", "dataset_1", "dataset_2", "dataset_3"};
-const char *att_name[NUM_DATA_SETS] = {"attribute_0", "attribute_1", "attribute_2", "attribute_3"};
-const char *lg_att_name[NUM_DATA_SETS] = {"large_attribute_0", "large_attribute_1", "large_attribute_2",
- "large_attribute_3"};
-
-void
-rr_obj_hdr_flush_confusion(void)
-{
- /* MPI variables */
- /* private communicator size and rank */
- int mpi_size;
- int mpi_rank;
- int mrc; /* mpi error code */
- int is_reader; /* 1 for reader process; 0 for writer process. */
- MPI_Comm comm;
-
- /* test bed related variables */
- const char *fcn_name = "rr_obj_hdr_flush_confusion";
- const bool verbose = false;
-
- /* Create two new private communicators from MPI_COMM_WORLD.
- * Even and odd ranked processes go to comm_writers and comm_readers
- * respectively.
- */
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file, dataset, attribute, dataset more, attribute more, or "
- "file flush aren't supported with this connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- assert(mpi_size > 2);
-
- is_reader = mpi_rank % 2;
- mrc = MPI_Comm_split(MPI_COMM_WORLD, is_reader, mpi_rank, &comm);
- VRFY((mrc == MPI_SUCCESS), "MPI_Comm_split");
-
-    /* The reader processes branch off to do reading
-     * while the writer processes continue to do writing.
-     * Whenever the writers finish one writing step, including an H5Fflush,
- * they inform the readers, via MPI_COMM_WORLD, to verify.
- * They will wait for the result from the readers before doing the next
- * step. When all steps are done, they inform readers to end.
- */
- if (is_reader)
- rr_obj_hdr_flush_confusion_reader(comm);
- else
- rr_obj_hdr_flush_confusion_writer(comm);
-
- MPI_Comm_free(&comm);
- if (verbose)
- fprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name);
-
- return;
-
-} /* rr_obj_hdr_flush_confusion() */
-
-void
-rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
-{
- int i;
- int j;
- hid_t file_id = -1;
- hid_t fapl_id = -1;
- hid_t dxpl_id = -1;
- hid_t att_id[NUM_DATA_SETS];
- hid_t att_space[NUM_DATA_SETS];
- hid_t lg_att_id[NUM_DATA_SETS];
- hid_t lg_att_space[NUM_DATA_SETS];
- hid_t disk_space[NUM_DATA_SETS];
- hid_t mem_space[NUM_DATA_SETS];
- hid_t dataset[NUM_DATA_SETS];
- hsize_t att_size[1];
- hsize_t lg_att_size[1];
- hsize_t disk_count[1];
- hsize_t disk_size[1];
- hsize_t disk_start[1];
- hsize_t mem_count[1];
- hsize_t mem_size[1];
- hsize_t mem_start[1];
- herr_t err;
- double data[LOCAL_DATA_SIZE];
- double att[LOCAL_DATA_SIZE];
- double lg_att[LARGE_ATTR_SIZE];
-
- /* MPI variables */
- /* world communication size and rank */
- int mpi_world_size;
- int mpi_world_rank;
- /* private communicator size and rank */
- int mpi_size;
- int mpi_rank;
- int mrc; /* mpi error code */
- /* steps to verify and have been verified */
- int steps = 0;
- int steps_done = 0;
-
- /* test bed related variables */
- const char *fcn_name = "rr_obj_hdr_flush_confusion_writer";
- const bool verbose = false;
-#if 0
- const H5Ptest_param_t *pt;
-#endif
- char *filename;
-
- /*
- * setup test bed related variables:
- */
-
-#if 0
- pt = (const H5Ptest_param_t *)GetTestParameters();
-#endif
- /* filename = pt->name; */ filename = PARATESTFILE;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_world_rank);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size);
- MPI_Comm_rank(comm, &mpi_rank);
- MPI_Comm_size(comm, &mpi_size);
-
- /*
- * Set up file access property list with parallel I/O access
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Setting up property list.\n", mpi_rank, fcn_name);
-
- fapl_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed");
-
- err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL);
- VRFY((err >= 0), "H5Pset_fapl_mpio() failed");
-
- /*
- * Create a new file collectively and release property list identifier.
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Creating new file \"%s\".\n", mpi_rank, fcn_name, filename);
-
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
- VRFY((file_id >= 0), "H5Fcreate() failed");
-
- err = H5Pclose(fapl_id);
- VRFY((err >= 0), "H5Pclose(fapl_id) failed");
-
- /*
- * Step 1: create the data sets and write data.
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Creating the datasets.\n", mpi_rank, fcn_name);
-
- disk_size[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_size);
- mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE);
-
- for (i = 0; i < NUM_DATA_SETS; i++) {
-
- disk_space[i] = H5Screate_simple(1, disk_size, NULL);
- VRFY((disk_space[i] >= 0), "H5Screate_simple(1) failed.\n");
-
- dataset[i] = H5Dcreate2(file_id, dataset_name[i], H5T_NATIVE_DOUBLE, disk_space[i], H5P_DEFAULT,
- H5P_DEFAULT, H5P_DEFAULT);
-
- VRFY((dataset[i] >= 0), "H5Dcreate(1) failed.\n");
- }
-
- /*
- * setup data transfer property list
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name);
-
- dxpl_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n");
-
- err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
- VRFY((err >= 0), "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n");
-
- /*
- * write data to the data sets
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Writing datasets.\n", mpi_rank, fcn_name);
-
- disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
- disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank);
- mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
- mem_start[0] = (hsize_t)(0);
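-    /* Each writer rank writes LOCAL_DATA_SIZE contiguous doubles into each dataset
-     * at offset mpi_rank * LOCAL_DATA_SIZE (rank within the writer communicator) */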
-
- for (j = 0; j < LOCAL_DATA_SIZE; j++) {
- data[j] = (double)(mpi_rank + 1);
- }
-
- for (i = 0; i < NUM_DATA_SETS; i++) {
- err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start, NULL, disk_count, NULL);
- VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n");
- mem_space[i] = H5Screate_simple(1, mem_size, NULL);
- VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n");
- err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET, mem_start, NULL, mem_count, NULL);
- VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n");
- err = H5Dwrite(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i], disk_space[i], dxpl_id, data);
- VRFY((err >= 0), "H5Dwrite(1) failed.\n");
- for (j = 0; j < LOCAL_DATA_SIZE; j++)
- data[j] *= 10.0;
- }
-
- /*
- * close the data spaces
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name);
-
- for (i = 0; i < NUM_DATA_SETS; i++) {
- err = H5Sclose(disk_space[i]);
- VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n");
- err = H5Sclose(mem_space[i]);
- VRFY((err >= 0), "H5Sclose(mem_space[i]) failed.\n");
- }
-
- /* End of Step 1: create the data sets and write data. */
-
- /*
- * flush the metadata cache
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name);
- err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
- VRFY((err >= 0), "H5Fflush(1) failed.\n");
-
- /* Tell the reader to check the file up to steps. */
- steps++;
- Reader_check(mrc, steps, steps_done);
- VRFY((MPI_SUCCESS == mrc), "Reader_check failed");
-
- /*
- * Step 2: write attributes to each dataset
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: writing attributes.\n", mpi_rank, fcn_name);
-
- att_size[0] = (hsize_t)(LOCAL_DATA_SIZE);
- for (j = 0; j < LOCAL_DATA_SIZE; j++) {
- att[j] = (double)(j + 1);
- }
-
- for (i = 0; i < NUM_DATA_SETS; i++) {
- att_space[i] = H5Screate_simple(1, att_size, NULL);
- VRFY((att_space[i] >= 0), "H5Screate_simple(3) failed.\n");
- att_id[i] =
- H5Acreate2(dataset[i], att_name[i], H5T_NATIVE_DOUBLE, att_space[i], H5P_DEFAULT, H5P_DEFAULT);
- VRFY((att_id[i] >= 0), "H5Acreate(1) failed.\n");
- err = H5Awrite(att_id[i], H5T_NATIVE_DOUBLE, att);
- VRFY((err >= 0), "H5Awrite(1) failed.\n");
- for (j = 0; j < LOCAL_DATA_SIZE; j++) {
- att[j] /= 10.0;
- }
- }
-
- /*
- * close attribute IDs and spaces
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: closing attr ids and spaces .\n", mpi_rank, fcn_name);
-
- for (i = 0; i < NUM_DATA_SETS; i++) {
- err = H5Sclose(att_space[i]);
- VRFY((err >= 0), "H5Sclose(att_space[i]) failed.\n");
- err = H5Aclose(att_id[i]);
- VRFY((err >= 0), "H5Aclose(att_id[i]) failed.\n");
- }
-
- /* End of Step 2: write attributes to each dataset */
-
- /*
- * flush the metadata cache again
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name);
- err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
- VRFY((err >= 0), "H5Fflush(2) failed.\n");
-
- /* Tell the reader to check the file up to steps. */
- steps++;
- Reader_check(mrc, steps, steps_done);
- VRFY((MPI_SUCCESS == mrc), "Reader_check failed");
-
- /*
- * Step 3: write large attributes to each dataset
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: writing large attributes.\n", mpi_rank, fcn_name);
-
- lg_att_size[0] = (hsize_t)(LARGE_ATTR_SIZE);
-
- for (j = 0; j < LARGE_ATTR_SIZE; j++) {
- lg_att[j] = (double)(j + 1);
- }
-
- for (i = 0; i < NUM_DATA_SETS; i++) {
- lg_att_space[i] = H5Screate_simple(1, lg_att_size, NULL);
- VRFY((lg_att_space[i] >= 0), "H5Screate_simple(4) failed.\n");
- lg_att_id[i] = H5Acreate2(dataset[i], lg_att_name[i], H5T_NATIVE_DOUBLE, lg_att_space[i], H5P_DEFAULT,
- H5P_DEFAULT);
- VRFY((lg_att_id[i] >= 0), "H5Acreate(2) failed.\n");
- err = H5Awrite(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att);
- VRFY((err >= 0), "H5Awrite(2) failed.\n");
- for (j = 0; j < LARGE_ATTR_SIZE; j++) {
- lg_att[j] /= 10.0;
- }
- }
-
-    /* End of Step 3: write large attributes to each dataset */
-
- /*
- * flush the metadata cache yet again to clean the object headers.
- *
- * This is an attempt to create a situation where we have dirty
- * object header continuation chunks, but clean object headers
- * to verify a speculative bug fix -- it doesn't seem to work,
- * but I will leave the code in anyway, as the object header
- * code is going to change a lot in the near future.
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name);
- err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
- VRFY((err >= 0), "H5Fflush(3) failed.\n");
-
- /* Tell the reader to check the file up to steps. */
- steps++;
- Reader_check(mrc, steps, steps_done);
- VRFY((MPI_SUCCESS == mrc), "Reader_check failed");
-
- /*
- * Step 4: write different large attributes to each dataset
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: writing different large attributes.\n", mpi_rank, fcn_name);
-
- for (j = 0; j < LARGE_ATTR_SIZE; j++) {
- lg_att[j] = (double)(j + 2);
- }
-
- for (i = 0; i < NUM_DATA_SETS; i++) {
- err = H5Awrite(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att);
- VRFY((err >= 0), "H5Awrite(2) failed.\n");
- for (j = 0; j < LARGE_ATTR_SIZE; j++) {
- lg_att[j] /= 10.0;
- }
- }
-
- /* End of Step 4: write different large attributes to each dataset */
-
- /*
- * flush the metadata cache again
- */
- if (verbose)
- fprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name);
- err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
- VRFY((err >= 0), "H5Fflush(3) failed.\n");
-
- /* Tell the reader to check the file up to steps. */
- steps++;
- Reader_check(mrc, steps, steps_done);
- VRFY((MPI_SUCCESS == mrc), "Reader_check failed");
-
- /* Step 5: Close all objects and the file */
-
- /*
- * close large attribute IDs and spaces
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: closing large attr ids and spaces .\n", mpi_rank, fcn_name);
-
- for (i = 0; i < NUM_DATA_SETS; i++) {
-
- err = H5Sclose(lg_att_space[i]);
- VRFY((err >= 0), "H5Sclose(lg_att_space[i]) failed.\n");
- err = H5Aclose(lg_att_id[i]);
- VRFY((err >= 0), "H5Aclose(lg_att_id[i]) failed.\n");
- }
-
- /*
- * close the data sets
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: closing datasets .\n", mpi_rank, fcn_name);
-
- for (i = 0; i < NUM_DATA_SETS; i++) {
- err = H5Dclose(dataset[i]);
- VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n");
- }
-
- /*
- * close the data transfer property list.
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name);
-
- err = H5Pclose(dxpl_id);
- VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n");
-
- /*
- * Close file.
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: closing file.\n", mpi_rank, fcn_name);
-
- err = H5Fclose(file_id);
- VRFY((err >= 0), "H5Fclose(1) failed");
-
- /* End of Step 5: Close all objects and the file */
- /* Tell the reader to check the file up to steps. */
- steps++;
- Reader_check(mrc, steps, steps_done);
- VRFY((MPI_SUCCESS == mrc), "Reader_check failed");
-
- /* All done. Inform reader to end. */
- steps = 0;
- Reader_check(mrc, steps, steps_done);
- VRFY((MPI_SUCCESS == mrc), "Reader_check failed");
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name);
-
- return;
-
-} /* rr_obj_hdr_flush_confusion_writer() */
-
-void
-rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
-{
- int i;
- int j;
- hid_t file_id = -1;
- hid_t fapl_id = -1;
- hid_t dxpl_id = -1;
- hid_t lg_att_id[NUM_DATA_SETS];
- hid_t lg_att_type[NUM_DATA_SETS];
- hid_t disk_space[NUM_DATA_SETS];
- hid_t mem_space[NUM_DATA_SETS];
- hid_t dataset[NUM_DATA_SETS];
- hsize_t disk_count[1];
- hsize_t disk_start[1];
- hsize_t mem_count[1];
- hsize_t mem_size[1];
- hsize_t mem_start[1];
- herr_t err;
- htri_t tri_err;
- double data[LOCAL_DATA_SIZE];
- double data_read[LOCAL_DATA_SIZE];
- double att[LOCAL_DATA_SIZE];
- double att_read[LOCAL_DATA_SIZE];
- double lg_att[LARGE_ATTR_SIZE];
- double lg_att_read[LARGE_ATTR_SIZE];
-
- /* MPI variables */
- /* world communication size and rank */
- int mpi_world_size;
- int mpi_world_rank;
- /* private communicator size and rank */
- int mpi_size;
- int mpi_rank;
- int mrc; /* mpi error code */
- int steps = -1; /* How far (steps) to verify the file */
- int steps_done = -1; /* How far (steps) have been verified */
-
- /* test bed related variables */
- const char *fcn_name = "rr_obj_hdr_flush_confusion_reader";
- const bool verbose = false;
-#if 0
- const H5Ptest_param_t *pt;
-#endif
- char *filename;
-
- /*
- * setup test bed related variables:
- */
-
-#if 0
- pt = (const H5Ptest_param_t *)GetTestParameters();
-#endif
- /* filename = pt->name; */ filename = PARATESTFILE;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_world_rank);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size);
- MPI_Comm_rank(comm, &mpi_rank);
- MPI_Comm_size(comm, &mpi_size);
-
- /* Repeatedly re-open the file and verify its contents until it is */
- /* told to end (when steps=0). */
- while (steps_done != 0) {
- Reader_wait(mrc, steps);
- VRFY((mrc >= 0), "Reader_wait failed");
- steps_done = 0;
-
- if (steps > 0) {
- /*
- * Set up file access property list with parallel I/O access
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Setting up property list.\n", mpi_rank, fcn_name);
-
- fapl_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed");
- err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL);
- VRFY((err >= 0), "H5Pset_fapl_mpio() failed");
-
- /*
-             * Re-open the file collectively and release the property list identifier.
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Re-open file \"%s\".\n", mpi_rank, fcn_name, filename);
-
- file_id = H5Fopen(filename, H5F_ACC_RDONLY, fapl_id);
- VRFY((file_id >= 0), "H5Fopen() failed");
- err = H5Pclose(fapl_id);
- VRFY((err >= 0), "H5Pclose(fapl_id) failed");
-
-#if 1
- if (steps >= 1) {
- /*=====================================================*
- * Step 1: open the data sets and read data.
- *=====================================================*/
-
- if (verbose)
- fprintf(stdout, "%0d:%s: opening the datasets.\n", mpi_rank, fcn_name);
-
- for (i = 0; i < NUM_DATA_SETS; i++) {
- dataset[i] = -1;
- }
-
- for (i = 0; i < NUM_DATA_SETS; i++) {
- dataset[i] = H5Dopen2(file_id, dataset_name[i], H5P_DEFAULT);
- VRFY((dataset[i] >= 0), "H5Dopen(1) failed.\n");
- disk_space[i] = H5Dget_space(dataset[i]);
- VRFY((disk_space[i] >= 0), "H5Dget_space failed.\n");
- }
-
- /*
- * setup data transfer property list
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name);
-
- dxpl_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n");
- err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
- VRFY((err >= 0), "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n");
-
- /*
- * read data from the data sets
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Reading datasets.\n", mpi_rank, fcn_name);
-
- disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
- disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank);
-
- mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE);
-
- mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
- mem_start[0] = (hsize_t)(0);
-
- /* set up expected data for verification */
- for (j = 0; j < LOCAL_DATA_SIZE; j++) {
- data[j] = (double)(mpi_rank + 1);
- }
-
- for (i = 0; i < NUM_DATA_SETS; i++) {
- err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start, NULL, disk_count,
- NULL);
- VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n");
- mem_space[i] = H5Screate_simple(1, mem_size, NULL);
- VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n");
- err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET, mem_start, NULL, mem_count, NULL);
- VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n");
- err = H5Dread(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i], disk_space[i], dxpl_id,
- data_read);
- VRFY((err >= 0), "H5Dread(1) failed.\n");
-
- /* compare read data with expected data */
- for (j = 0; j < LOCAL_DATA_SIZE; j++)
- if (!H5_DBL_ABS_EQUAL(data_read[j], data[j])) {
- fprintf(stdout,
- "%0d:%s: Reading datasets value failed in "
- "Dataset %d, at position %d: expect %f, got %f.\n",
- mpi_rank, fcn_name, i, j, data[j], data_read[j]);
- nerrors++;
- }
- for (j = 0; j < LOCAL_DATA_SIZE; j++)
- data[j] *= 10.0;
- }
-
- /*
- * close the data spaces
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name);
-
- for (i = 0; i < NUM_DATA_SETS; i++) {
- err = H5Sclose(disk_space[i]);
- VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n");
- err = H5Sclose(mem_space[i]);
- VRFY((err >= 0), "H5Sclose(mem_space[i]) failed.\n");
- }
- steps_done++;
- }
- /* End of Step 1: open the data sets and read data. */
-#endif
-
-#if 1
- /*=====================================================*
- * Step 2: reading attributes from each dataset
- *=====================================================*/
-
- if (steps >= 2) {
- if (verbose)
- fprintf(stdout, "%0d:%s: reading attributes.\n", mpi_rank, fcn_name);
-
- for (j = 0; j < LOCAL_DATA_SIZE; j++) {
- att[j] = (double)(j + 1);
- }
-
- for (i = 0; i < NUM_DATA_SETS; i++) {
- hid_t att_id, att_type;
-
- att_id = H5Aopen(dataset[i], att_name[i], H5P_DEFAULT);
- VRFY((att_id >= 0), "H5Aopen failed.\n");
- att_type = H5Aget_type(att_id);
- VRFY((att_type >= 0), "H5Aget_type failed.\n");
- tri_err = H5Tequal(att_type, H5T_NATIVE_DOUBLE);
- VRFY((tri_err >= 0), "H5Tequal failed.\n");
- if (tri_err == 0) {
- fprintf(stdout, "%0d:%s: Mismatched Attribute type of Dataset %d.\n", mpi_rank,
- fcn_name, i);
- nerrors++;
- }
- else {
- /* should verify attribute size before H5Aread */
- err = H5Aread(att_id, H5T_NATIVE_DOUBLE, att_read);
- VRFY((err >= 0), "H5Aread failed.\n");
- /* compare read attribute data with expected data */
- for (j = 0; j < LOCAL_DATA_SIZE; j++)
- if (!H5_DBL_ABS_EQUAL(att_read[j], att[j])) {
- fprintf(stdout,
- "%0d:%s: Mismatched attribute data read in Dataset %d, at position "
- "%d: expect %f, got %f.\n",
- mpi_rank, fcn_name, i, j, att[j], att_read[j]);
- nerrors++;
- }
- for (j = 0; j < LOCAL_DATA_SIZE; j++) {
- att[j] /= 10.0;
- }
- }
- err = H5Aclose(att_id);
- VRFY((err >= 0), "H5Aclose failed.\n");
- }
- steps_done++;
- }
- /* End of Step 2: reading attributes from each dataset */
-#endif
-
-#if 1
- /*=====================================================*
- * Step 3 or 4: read large attributes from each dataset.
- * Step 4 has different attribute value from step 3.
- *=====================================================*/
-
- if (steps >= 3) {
- if (verbose)
- fprintf(stdout, "%0d:%s: reading large attributes.\n", mpi_rank, fcn_name);
-
- for (j = 0; j < LARGE_ATTR_SIZE; j++) {
- lg_att[j] = (steps == 3) ? (double)(j + 1) : (double)(j + 2);
- }
-
- for (i = 0; i < NUM_DATA_SETS; i++) {
- lg_att_id[i] = H5Aopen(dataset[i], lg_att_name[i], H5P_DEFAULT);
- VRFY((lg_att_id[i] >= 0), "H5Aopen(2) failed.\n");
- lg_att_type[i] = H5Aget_type(lg_att_id[i]);
- VRFY((err >= 0), "H5Aget_type failed.\n");
- tri_err = H5Tequal(lg_att_type[i], H5T_NATIVE_DOUBLE);
- VRFY((tri_err >= 0), "H5Tequal failed.\n");
- if (tri_err == 0) {
- fprintf(stdout, "%0d:%s: Mismatched Large attribute type of Dataset %d.\n", mpi_rank,
- fcn_name, i);
- nerrors++;
- }
- else {
- /* should verify large attribute size before H5Aread */
- err = H5Aread(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att_read);
- VRFY((err >= 0), "H5Aread failed.\n");
- /* compare read attribute data with expected data */
- for (j = 0; j < LARGE_ATTR_SIZE; j++)
- if (!H5_DBL_ABS_EQUAL(lg_att_read[j], lg_att[j])) {
- fprintf(stdout,
- "%0d:%s: Mismatched large attribute data read in Dataset %d, at "
- "position %d: expect %f, got %f.\n",
- mpi_rank, fcn_name, i, j, lg_att[j], lg_att_read[j]);
- nerrors++;
- }
- for (j = 0; j < LARGE_ATTR_SIZE; j++) {
-
- lg_att[j] /= 10.0;
- }
- }
- err = H5Tclose(lg_att_type[i]);
- VRFY((err >= 0), "H5Tclose failed.\n");
- err = H5Aclose(lg_att_id[i]);
- VRFY((err >= 0), "H5Aclose failed.\n");
- }
- /* Both step 3 and 4 use this same read checking code. */
- steps_done = (steps == 3) ? 3 : 4;
- }
-
- /* End of Step 3 or 4: read large attributes from each dataset */
-#endif
-
- /*=====================================================*
- * Step 5: read all objects from the file
- *=====================================================*/
- if (steps >= 5) {
- /* nothing extra to verify. The file is closed normally. */
- /* Just increment steps_done */
- steps_done++;
- }
-
- /*
- * Close the data sets
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: closing datasets again.\n", mpi_rank, fcn_name);
-
- for (i = 0; i < NUM_DATA_SETS; i++) {
- if (dataset[i] >= 0) {
- err = H5Dclose(dataset[i]);
- VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n");
- }
- }
-
- /*
- * close the data transfer property list.
- */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name);
-
- err = H5Pclose(dxpl_id);
- VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n");
-
- /*
- * Close the file
- */
- if (verbose)
- fprintf(stdout, "%0d:%s: closing file again.\n", mpi_rank, fcn_name);
- err = H5Fclose(file_id);
- VRFY((err >= 0), "H5Fclose(1) failed");
-
- } /* else if (steps_done==0) */
- Reader_result(mrc, steps_done);
- } /* end while(1) */
-
- if (verbose)
- fprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name);
-
- return;
-} /* rr_obj_hdr_flush_confusion_reader() */
-
-#undef NUM_DATA_SETS
-#undef LOCAL_DATA_SIZE
-#undef LARGE_ATTR_SIZE
-#undef Reader_check
-#undef Reader_wait
-#undef Reader_result
-#undef Writer_Root
-#undef Reader_Root
-
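The writer/reader pair above coordinates purely through the Reader_wait / Reader_check / Reader_result macros that are #undef'd here; their definitions live earlier in this file. As a rough illustration of the step handshake they implement -- not the actual macro bodies -- the same protocol can be sketched with two MPI_Bcast calls per step, assuming a fixed writer root and reader root rank in MPI_COMM_WORLD:

/* Illustrative sketch of the writer/reader step handshake; the real test
 * drives this through the Reader_* macros defined earlier in t_mdset.c.
 * WRITER_ROOT and READER_ROOT are hypothetical rank choices. */
#include <mpi.h>

#define WRITER_ROOT 0
#define READER_ROOT 1

/* Writer side: announce how many steps are ready, then collect how many
 * steps the reader actually verified. */
static int
writer_announce(int steps, int *steps_done)
{
    int mrc = MPI_Bcast(&steps, 1, MPI_INT, WRITER_ROOT, MPI_COMM_WORLD);
    if (mrc != MPI_SUCCESS)
        return mrc;
    return MPI_Bcast(steps_done, 1, MPI_INT, READER_ROOT, MPI_COMM_WORLD);
}

/* Reader side: learn the step count (0 means "all done"), verify the file,
 * then report back the number of steps that checked out. */
static int
reader_wait(int *steps)
{
    return MPI_Bcast(steps, 1, MPI_INT, WRITER_ROOT, MPI_COMM_WORLD);
}

static int
reader_report(int steps_done)
{
    return MPI_Bcast(&steps_done, 1, MPI_INT, READER_ROOT, MPI_COMM_WORLD);
}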
-/*
- * Test creating a chunked dataset in parallel in a file with an alignment set
- * and an alignment threshold large enough to avoid aligning the chunks but
- * small enough that the raw data aggregator will be aligned if it is treated as
- * an object that must be aligned by the library
- */
-#define CHUNK_SIZE 72
-#define NCHUNKS 32
-#define AGGR_SIZE 2048
-#define EXTRA_ALIGN 100
-
-void
-chunk_align_bug_1(void)
-{
- int mpi_rank;
- hid_t file_id, dset_id, fapl_id, dcpl_id, space_id;
- hsize_t dims = CHUNK_SIZE * NCHUNKS, cdims = CHUNK_SIZE;
-#if 0
- h5_stat_size_t file_size;
- hsize_t align;
-#endif
- herr_t ret;
- const char *filename;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file or dataset aren't supported with this connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- filename = (const char *)PARATESTFILE /* GetTestParameters() */;
-
- /* Create file without alignment */
- fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
- VRFY((file_id >= 0), "H5Fcreate succeeded");
-
- /* Close file */
- ret = H5Fclose(file_id);
- VRFY((ret >= 0), "H5Fclose succeeded");
-#if 0
- /* Get file size */
- file_size = h5_get_file_size(filename, fapl_id);
- VRFY((file_size >= 0), "h5_get_file_size succeeded");
-
- /* Calculate alignment value, set to allow a chunk to squeak in between the
- * original EOF and the aligned location of the aggregator. Add some space
- * for the dataset metadata */
- align = (hsize_t)file_size + CHUNK_SIZE + EXTRA_ALIGN;
-#endif
-
- /* Set aggregator size and alignment, disable metadata aggregator */
- assert(AGGR_SIZE > CHUNK_SIZE);
- ret = H5Pset_small_data_block_size(fapl_id, AGGR_SIZE);
- VRFY((ret >= 0), "H5Pset_small_data_block_size succeeded");
- ret = H5Pset_meta_block_size(fapl_id, 0);
- VRFY((ret >= 0), "H5Pset_meta_block_size succeeded");
-#if 0
- ret = H5Pset_alignment(fapl_id, CHUNK_SIZE + 1, align);
- VRFY((ret >= 0), "H5Pset_small_data_block_size succeeded");
-#endif
-
- /* Reopen file with new settings */
- file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
- VRFY((file_id >= 0), "H5Fopen succeeded");
-
- /* Create dataset */
- space_id = H5Screate_simple(1, &dims, NULL);
- VRFY((space_id >= 0), "H5Screate_simple succeeded");
- dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
- ret = H5Pset_chunk(dcpl_id, 1, &cdims);
- VRFY((ret >= 0), "H5Pset_chunk succeeded");
- dset_id = H5Dcreate2(file_id, "dset", H5T_NATIVE_CHAR, space_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
-
- /* Close ids */
-    ret = H5Dclose(dset_id);
-    VRFY((ret >= 0), "H5Dclose succeeded");
-    ret = H5Sclose(space_id);
-    VRFY((ret >= 0), "H5Sclose succeeded");
-    ret = H5Pclose(dcpl_id);
-    VRFY((ret >= 0), "H5Pclose succeeded");
-    ret = H5Pclose(fapl_id);
-    VRFY((ret >= 0), "H5Pclose succeeded");
-
- /* Close file */
- ret = H5Fclose(file_id);
- VRFY((ret >= 0), "H5Fclose succeeded");
-
- return;
-} /* end chunk_align_bug_1() */
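chunk_align_bug_1() only exercises the allocation path and relies on the library not to mis-align raw data chunks; it never inspects where the chunks actually land on disk. If one wanted to check that directly, recent HDF5 releases expose chunk addresses through H5Dget_num_chunks()/H5Dget_chunk_info(). The helper below is a hedged sketch along those lines and is not part of the original test; the alignment argument and return convention are illustrative.

/* Sketch (not part of the test): count how many chunks of an open, written
 * dataset start at a multiple of `alignment`. Requires HDF5 >= 1.10.5 for
 * the chunk query API; assumes hdf5.h is included. */
static int
count_aligned_chunks(hid_t dset_id, hsize_t alignment)
{
    hid_t   fspace   = H5Dget_space(dset_id);
    hsize_t nchunks  = 0;
    int     naligned = 0;

    if (fspace < 0 || alignment == 0)
        return -1;
    if (H5Dget_num_chunks(dset_id, fspace, &nchunks) < 0) {
        H5Sclose(fspace);
        return -1;
    }

    for (hsize_t u = 0; u < nchunks; u++) {
        hsize_t  offset[H5S_MAX_RANK];
        unsigned filter_mask = 0;
        haddr_t  addr        = HADDR_UNDEF;
        hsize_t  size        = 0;

        if (H5Dget_chunk_info(dset_id, fspace, u, offset, &filter_mask, &addr, &size) < 0) {
            H5Sclose(fspace);
            return -1;
        }
        if (addr != HADDR_UNDEF && (addr % alignment) == 0)
            naligned++;
    }

    H5Sclose(fspace);
    return naligned;
}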
-
-/*=============================================================================
- * End of t_mdset.c
- *===========================================================================*/
diff --git a/testpar/API/t_ph5basic.c b/testpar/API/t_ph5basic.c
deleted file mode 100644
index 9c980bf..0000000
--- a/testpar/API/t_ph5basic.c
+++ /dev/null
@@ -1,188 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
- * Test parallel HDF5 basic components
- */
-
-#include "hdf5.h"
-#include "testphdf5.h"
-
-/*-------------------------------------------------------------------------
- * Function: test_fapl_mpio_dup
- *
- * Purpose: Test if fapl_mpio property list keeps a duplicate of the
- * communicator and INFO objects given when set; and returns
- * duplicates of its components when H5Pget_fapl_mpio is called.
- *
- * Return: Success: None
- * Failure: Abort
- *-------------------------------------------------------------------------
- */
-void
-test_fapl_mpio_dup(void)
-{
- int mpi_size, mpi_rank;
- MPI_Comm comm, comm_tmp;
- int mpi_size_old, mpi_rank_old;
- int mpi_size_tmp, mpi_rank_tmp;
- MPI_Info info = MPI_INFO_NULL;
- MPI_Info info_tmp = MPI_INFO_NULL;
- int mrc; /* MPI return value */
- hid_t acc_pl; /* File access properties */
- herr_t ret; /* HDF5 return value */
- int nkeys, nkeys_tmp;
-
- if (VERBOSE_MED)
- printf("Verify fapl_mpio duplicates communicator and INFO objects\n");
-
- /* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- if (VERBOSE_MED)
- printf("rank/size of MPI_COMM_WORLD are %d/%d\n", mpi_rank, mpi_size);
-
- /* Create a new communicator that has the same processes as MPI_COMM_WORLD.
- * Use MPI_Comm_split because it is simpler than MPI_Comm_create
- */
- mrc = MPI_Comm_split(MPI_COMM_WORLD, 0, 0, &comm);
- VRFY((mrc == MPI_SUCCESS), "MPI_Comm_split");
- MPI_Comm_size(comm, &mpi_size_old);
- MPI_Comm_rank(comm, &mpi_rank_old);
- if (VERBOSE_MED)
- printf("rank/size of comm are %d/%d\n", mpi_rank_old, mpi_size_old);
-
- /* create a new INFO object with some trivial information. */
- mrc = MPI_Info_create(&info);
- VRFY((mrc == MPI_SUCCESS), "MPI_Info_create");
- mrc = MPI_Info_set(info, "hdf_info_name", "XYZ");
- VRFY((mrc == MPI_SUCCESS), "MPI_Info_set");
- if (MPI_INFO_NULL != info) {
- mrc = MPI_Info_get_nkeys(info, &nkeys);
- VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys");
- }
-#if 0
- if (VERBOSE_MED)
- h5_dump_info_object(info);
-#endif
-
- acc_pl = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((acc_pl >= 0), "H5P_FILE_ACCESS");
-
- ret = H5Pset_fapl_mpio(acc_pl, comm, info);
- VRFY((ret >= 0), "");
-
- /* Case 1:
- * Free the created communicator and INFO object.
- * Check if the access property list is still valid and can return
- * valid communicator and INFO object.
- */
- mrc = MPI_Comm_free(&comm);
- VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free");
- if (MPI_INFO_NULL != info) {
- mrc = MPI_Info_free(&info);
- VRFY((mrc == MPI_SUCCESS), "MPI_Info_free");
- }
-
- ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, &info_tmp);
- VRFY((ret >= 0), "H5Pget_fapl_mpio");
- MPI_Comm_size(comm_tmp, &mpi_size_tmp);
- MPI_Comm_rank(comm_tmp, &mpi_rank_tmp);
- if (VERBOSE_MED)
- printf("After H5Pget_fapl_mpio: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp);
- VRFY((mpi_size_tmp == mpi_size), "MPI_Comm_size");
- VRFY((mpi_rank_tmp == mpi_rank), "MPI_Comm_rank");
- if (MPI_INFO_NULL != info_tmp) {
- mrc = MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
- VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys");
- VRFY((nkeys_tmp == nkeys), "new and old nkeys equal");
- }
-#if 0
- if (VERBOSE_MED)
- h5_dump_info_object(info_tmp);
-#endif
-
- /* Case 2:
- * Free the retrieved communicator and INFO object.
- * Check if the access property list is still valid and can return
- * valid communicator and INFO object.
- * Also verify the NULL argument option.
- */
- mrc = MPI_Comm_free(&comm_tmp);
- VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free");
- if (MPI_INFO_NULL != info_tmp) {
- mrc = MPI_Info_free(&info_tmp);
- VRFY((mrc == MPI_SUCCESS), "MPI_Info_free");
- }
-
- /* check NULL argument options. */
- ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, NULL);
- VRFY((ret >= 0), "H5Pget_fapl_mpio Comm only");
- mrc = MPI_Comm_free(&comm_tmp);
- VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free");
-
- ret = H5Pget_fapl_mpio(acc_pl, NULL, &info_tmp);
- VRFY((ret >= 0), "H5Pget_fapl_mpio Info only");
- if (MPI_INFO_NULL != info_tmp) {
- mrc = MPI_Info_free(&info_tmp);
- VRFY((mrc == MPI_SUCCESS), "MPI_Info_free");
- }
-
- ret = H5Pget_fapl_mpio(acc_pl, NULL, NULL);
- VRFY((ret >= 0), "H5Pget_fapl_mpio neither");
-
- /* now get both and check validity too. */
- /* Do not free the returned objects which are used in the next case. */
- ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, &info_tmp);
- VRFY((ret >= 0), "H5Pget_fapl_mpio");
- MPI_Comm_size(comm_tmp, &mpi_size_tmp);
- MPI_Comm_rank(comm_tmp, &mpi_rank_tmp);
- if (VERBOSE_MED)
- printf("After second H5Pget_fapl_mpio: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp);
- VRFY((mpi_size_tmp == mpi_size), "MPI_Comm_size");
- VRFY((mpi_rank_tmp == mpi_rank), "MPI_Comm_rank");
- if (MPI_INFO_NULL != info_tmp) {
- mrc = MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
- VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys");
- VRFY((nkeys_tmp == nkeys), "new and old nkeys equal");
- }
-#if 0
- if (VERBOSE_MED)
- h5_dump_info_object(info_tmp);
-#endif
-
- /* Case 3:
- * Close the property list and verify the retrieved communicator and INFO
- * object are still valid.
- */
- H5Pclose(acc_pl);
- MPI_Comm_size(comm_tmp, &mpi_size_tmp);
- MPI_Comm_rank(comm_tmp, &mpi_rank_tmp);
- if (VERBOSE_MED)
- printf("After Property list closed: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp);
- if (MPI_INFO_NULL != info_tmp) {
- mrc = MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
- VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys");
- }
-#if 0
- if (VERBOSE_MED)
- h5_dump_info_object(info_tmp);
-#endif
-
- /* clean up */
- mrc = MPI_Comm_free(&comm_tmp);
- VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free");
- if (MPI_INFO_NULL != info_tmp) {
- mrc = MPI_Info_free(&info_tmp);
- VRFY((mrc == MPI_SUCCESS), "MPI_Info_free");
- }
-} /* end test_fapl_mpio_dup() */
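Stripped of the VRFY scaffolding, the guarantee test_fapl_mpio_dup() exercises is that H5Pset_fapl_mpio() stores duplicates of the communicator and Info object, and H5Pget_fapl_mpio() hands back fresh duplicates that the caller owns. A minimal hedged sketch of that behavior (error checking omitted for brevity):

/* Minimal illustration of the duplication guarantee verified above;
 * assumes hdf5.h and mpi.h are included, error checking omitted. */
static void
fapl_mpio_dup_sketch(void)
{
    MPI_Comm comm, comm_out;
    MPI_Info info, info_out;
    hid_t    fapl;

    MPI_Comm_dup(MPI_COMM_WORLD, &comm);
    MPI_Info_create(&info);

    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, comm, info); /* fapl keeps its own duplicates */

    MPI_Comm_free(&comm);               /* the originals may be freed ... */
    MPI_Info_free(&info);

    H5Pget_fapl_mpio(fapl, &comm_out, &info_out); /* ... duplicates still work */

    MPI_Comm_free(&comm_out);           /* caller owns the returned duplicates */
    if (MPI_INFO_NULL != info_out)
        MPI_Info_free(&info_out);
    H5Pclose(fapl);
}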
diff --git a/testpar/API/t_prop.c b/testpar/API/t_prop.c
deleted file mode 100644
index a4d90c4..0000000
--- a/testpar/API/t_prop.c
+++ /dev/null
@@ -1,646 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
- * Parallel tests for encoding/decoding plists sent between processes
- */
-
-#include "hdf5.h"
-#include "testphdf5.h"
-
-#if 0
-#include "H5ACprivate.h"
-#include "H5Pprivate.h"
-#endif
-
-static int
-test_encode_decode(hid_t orig_pl, int mpi_rank, int recv_proc)
-{
- MPI_Request req[2];
- MPI_Status status;
- hid_t pl; /* Decoded property list */
- size_t buf_size = 0;
- void *sbuf = NULL;
- herr_t ret; /* Generic return value */
-
- if (mpi_rank == 0) {
- int send_size = 0;
-
- /* first call to encode returns only the size of the buffer needed */
- ret = H5Pencode2(orig_pl, NULL, &buf_size, H5P_DEFAULT);
- VRFY((ret >= 0), "H5Pencode succeeded");
-
- sbuf = (uint8_t *)malloc(buf_size);
-
- ret = H5Pencode2(orig_pl, sbuf, &buf_size, H5P_DEFAULT);
- VRFY((ret >= 0), "H5Pencode succeeded");
-
- /* this is a temp fix to send this size_t */
- send_size = (int)buf_size;
-
- MPI_Isend(&send_size, 1, MPI_INT, recv_proc, 123, MPI_COMM_WORLD, &req[0]);
- MPI_Isend(sbuf, send_size, MPI_BYTE, recv_proc, 124, MPI_COMM_WORLD, &req[1]);
- } /* end if */
-
- if (mpi_rank == recv_proc) {
- int recv_size;
- void *rbuf;
-
- MPI_Recv(&recv_size, 1, MPI_INT, 0, 123, MPI_COMM_WORLD, &status);
- VRFY((recv_size >= 0), "MPI_Recv succeeded");
- buf_size = (size_t)recv_size;
- rbuf = (uint8_t *)malloc(buf_size);
- MPI_Recv(rbuf, recv_size, MPI_BYTE, 0, 124, MPI_COMM_WORLD, &status);
-
- pl = H5Pdecode(rbuf);
- VRFY((pl >= 0), "H5Pdecode succeeded");
-
- VRFY(H5Pequal(orig_pl, pl), "Property List Equal Succeeded");
-
- ret = H5Pclose(pl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- if (NULL != rbuf)
- free(rbuf);
- } /* end if */
-
- if (0 == mpi_rank) {
- /* gcc 11 complains about passing MPI_STATUSES_IGNORE as an MPI_Status
- * array. See the discussion here:
- *
- * https://github.com/pmodels/mpich/issues/5687
- */
- /* H5_GCC_DIAG_OFF("stringop-overflow") */
- MPI_Waitall(2, req, MPI_STATUSES_IGNORE);
- /* H5_GCC_DIAG_ON("stringop-overflow") */
- }
-
- if (NULL != sbuf)
- free(sbuf);
-
- MPI_Barrier(MPI_COMM_WORLD);
- return 0;
-}
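The core of test_encode_decode() is the two-call H5Pencode2() pattern: the first call with a NULL buffer only reports the required size, the second call fills the buffer, and H5Pdecode() reconstructs an equivalent property list on the other side. A single-process sketch of the same round trip, with illustrative names and without the MPI transfer:

/* Round-trip any open property list through the encode/decode API.
 * Returns 0 if the decoded copy compares equal, -1 otherwise.
 * Assumes hdf5.h and stdlib.h are included. */
static int
roundtrip_plist(hid_t plist)
{
    size_t nalloc = 0;
    void  *buf    = NULL;
    hid_t  copy   = H5I_INVALID_HID;
    int    ret    = -1;

    /* First call: NULL buffer, only the required size is returned. */
    if (H5Pencode2(plist, NULL, &nalloc, H5P_DEFAULT) < 0)
        return -1;

    if (NULL == (buf = malloc(nalloc)))
        return -1;

    /* Second call: serialize the property list into the buffer. */
    if (H5Pencode2(plist, buf, &nalloc, H5P_DEFAULT) >= 0) {
        /* The buffer could be shipped over MPI; here we decode it locally. */
        if ((copy = H5Pdecode(buf)) >= 0) {
            ret = (H5Pequal(plist, copy) > 0) ? 0 : -1;
            H5Pclose(copy);
        }
    }

    free(buf);
    return ret;
}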
-
-void
-test_plist_ed(void)
-{
- hid_t dcpl; /* dataset create prop. list */
- hid_t dapl; /* dataset access prop. list */
- hid_t dxpl; /* dataset transfer prop. list */
- hid_t gcpl; /* group create prop. list */
- hid_t lcpl; /* link create prop. list */
- hid_t lapl; /* link access prop. list */
- hid_t ocpypl; /* object copy prop. list */
- hid_t ocpl; /* object create prop. list */
- hid_t fapl; /* file access prop. list */
- hid_t fcpl; /* file create prop. list */
- hid_t strcpl; /* string create prop. list */
- hid_t acpl; /* attribute create prop. list */
-
- int mpi_size, mpi_rank, recv_proc;
-
- hsize_t chunk_size = 16384; /* chunk size */
- double fill = 2.7; /* Fill value */
- size_t nslots = 521 * 2;
- size_t nbytes = 1048576 * 10;
- double w0 = 0.5;
- unsigned max_compact;
- unsigned min_dense;
- hsize_t max_size[1]; /*data space maximum size */
- const char *c_to_f = "x+32";
- H5AC_cache_config_t my_cache_config = {H5AC__CURR_CACHE_CONFIG_VERSION,
- true,
- false,
- false,
- "temp",
- true,
- false,
- (2 * 2048 * 1024),
- 0.3,
- (64 * 1024 * 1024),
- (4 * 1024 * 1024),
- 60000,
- H5C_incr__threshold,
- 0.8,
- 3.0,
- true,
- (8 * 1024 * 1024),
- H5C_flash_incr__add_space,
- 2.0,
- 0.25,
- H5C_decr__age_out_with_threshold,
- 0.997,
- 0.8,
- true,
- (3 * 1024 * 1024),
- 3,
- false,
- 0.2,
- (256 * 2048),
- 1 /* H5AC__DEFAULT_METADATA_WRITE_STRATEGY */};
-
- herr_t ret; /* Generic return value */
-
- if (VERBOSE_MED)
- printf("Encode/Decode DCPLs\n");
-
- /* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- if (mpi_size == 1)
- recv_proc = 0;
- else
- recv_proc = 1;
-
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcpl >= 0), "H5Pcreate succeeded");
-
- ret = H5Pset_chunk(dcpl, 1, &chunk_size);
- VRFY((ret >= 0), "H5Pset_chunk succeeded");
-
- ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE);
- VRFY((ret >= 0), "H5Pset_alloc_time succeeded");
-
- ret = H5Pset_fill_value(dcpl, H5T_NATIVE_DOUBLE, &fill);
- VRFY((ret >= 0), "set fill-value succeeded");
-
- max_size[0] = 100;
- ret = H5Pset_external(dcpl, "ext1.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4));
- VRFY((ret >= 0), "set external succeeded");
- ret = H5Pset_external(dcpl, "ext2.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4));
- VRFY((ret >= 0), "set external succeeded");
- ret = H5Pset_external(dcpl, "ext3.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4));
- VRFY((ret >= 0), "set external succeeded");
- ret = H5Pset_external(dcpl, "ext4.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4));
- VRFY((ret >= 0), "set external succeeded");
-
- ret = test_encode_decode(dcpl, mpi_rank, recv_proc);
- VRFY((ret >= 0), "test_encode_decode succeeded");
-
- ret = H5Pclose(dcpl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /******* ENCODE/DECODE DAPLS *****/
- dapl = H5Pcreate(H5P_DATASET_ACCESS);
- VRFY((dapl >= 0), "H5Pcreate succeeded");
-
- ret = H5Pset_chunk_cache(dapl, nslots, nbytes, w0);
- VRFY((ret >= 0), "H5Pset_chunk_cache succeeded");
-
- ret = test_encode_decode(dapl, mpi_rank, recv_proc);
- VRFY((ret >= 0), "test_encode_decode succeeded");
-
- ret = H5Pclose(dapl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /******* ENCODE/DECODE OCPLS *****/
- ocpl = H5Pcreate(H5P_OBJECT_CREATE);
- VRFY((ocpl >= 0), "H5Pcreate succeeded");
-
- ret = H5Pset_attr_creation_order(ocpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED));
- VRFY((ret >= 0), "H5Pset_attr_creation_order succeeded");
-
- ret = H5Pset_attr_phase_change(ocpl, 110, 105);
- VRFY((ret >= 0), "H5Pset_attr_phase_change succeeded");
-
- ret = H5Pset_filter(ocpl, H5Z_FILTER_FLETCHER32, 0, (size_t)0, NULL);
- VRFY((ret >= 0), "H5Pset_filter succeeded");
-
- ret = test_encode_decode(ocpl, mpi_rank, recv_proc);
- VRFY((ret >= 0), "test_encode_decode succeeded");
-
- ret = H5Pclose(ocpl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /******* ENCODE/DECODE DXPLS *****/
- dxpl = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxpl >= 0), "H5Pcreate succeeded");
-
- ret = H5Pset_btree_ratios(dxpl, 0.2, 0.6, 0.2);
- VRFY((ret >= 0), "H5Pset_btree_ratios succeeded");
-
- ret = H5Pset_hyper_vector_size(dxpl, 5);
- VRFY((ret >= 0), "H5Pset_hyper_vector_size succeeded");
-
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
- ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio_collective_opt succeeded");
-
- ret = H5Pset_dxpl_mpio_chunk_opt(dxpl, H5FD_MPIO_CHUNK_MULTI_IO);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt succeeded");
-
- ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl, 30);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded");
-
- ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl, 40);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded");
-
- ret = H5Pset_edc_check(dxpl, H5Z_DISABLE_EDC);
- VRFY((ret >= 0), "H5Pset_edc_check succeeded");
-
- ret = H5Pset_data_transform(dxpl, c_to_f);
- VRFY((ret >= 0), "H5Pset_data_transform succeeded");
-
- ret = test_encode_decode(dxpl, mpi_rank, recv_proc);
- VRFY((ret >= 0), "test_encode_decode succeeded");
-
- ret = H5Pclose(dxpl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /******* ENCODE/DECODE GCPLS *****/
- gcpl = H5Pcreate(H5P_GROUP_CREATE);
- VRFY((gcpl >= 0), "H5Pcreate succeeded");
-
- ret = H5Pset_local_heap_size_hint(gcpl, 256);
- VRFY((ret >= 0), "H5Pset_local_heap_size_hint succeeded");
-
- ret = H5Pset_link_phase_change(gcpl, 2, 2);
- VRFY((ret >= 0), "H5Pset_link_phase_change succeeded");
-
- /* Query the group creation properties */
- ret = H5Pget_link_phase_change(gcpl, &max_compact, &min_dense);
- VRFY((ret >= 0), "H5Pget_est_link_info succeeded");
-
- ret = H5Pset_est_link_info(gcpl, 3, 9);
- VRFY((ret >= 0), "H5Pset_est_link_info succeeded");
-
- ret = H5Pset_link_creation_order(gcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED));
- VRFY((ret >= 0), "H5Pset_link_creation_order succeeded");
-
- ret = test_encode_decode(gcpl, mpi_rank, recv_proc);
- VRFY((ret >= 0), "test_encode_decode succeeded");
-
- ret = H5Pclose(gcpl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /******* ENCODE/DECODE LCPLS *****/
- lcpl = H5Pcreate(H5P_LINK_CREATE);
- VRFY((lcpl >= 0), "H5Pcreate succeeded");
-
- ret = H5Pset_create_intermediate_group(lcpl, true);
- VRFY((ret >= 0), "H5Pset_create_intermediate_group succeeded");
-
- ret = test_encode_decode(lcpl, mpi_rank, recv_proc);
- VRFY((ret >= 0), "test_encode_decode succeeded");
-
- ret = H5Pclose(lcpl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /******* ENCODE/DECODE LAPLS *****/
- lapl = H5Pcreate(H5P_LINK_ACCESS);
- VRFY((lapl >= 0), "H5Pcreate succeeded");
-
- ret = H5Pset_nlinks(lapl, (size_t)134);
- VRFY((ret >= 0), "H5Pset_nlinks succeeded");
-
- ret = H5Pset_elink_acc_flags(lapl, H5F_ACC_RDONLY);
- VRFY((ret >= 0), "H5Pset_elink_acc_flags succeeded");
-
- ret = H5Pset_elink_prefix(lapl, "/tmpasodiasod");
- VRFY((ret >= 0), "H5Pset_nlinks succeeded");
-
- /* Create FAPL for the elink FAPL */
- fapl = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((fapl >= 0), "H5Pcreate succeeded");
- ret = H5Pset_alignment(fapl, 2, 1024);
- VRFY((ret >= 0), "H5Pset_alignment succeeded");
-
- ret = H5Pset_elink_fapl(lapl, fapl);
- VRFY((ret >= 0), "H5Pset_elink_fapl succeeded");
-
- /* Close the elink's FAPL */
- ret = H5Pclose(fapl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- ret = test_encode_decode(lapl, mpi_rank, recv_proc);
- VRFY((ret >= 0), "test_encode_decode succeeded");
-
- ret = H5Pclose(lapl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /******* ENCODE/DECODE OCPYPLS *****/
- ocpypl = H5Pcreate(H5P_OBJECT_COPY);
- VRFY((ocpypl >= 0), "H5Pcreate succeeded");
-
- ret = H5Pset_copy_object(ocpypl, H5O_COPY_EXPAND_EXT_LINK_FLAG);
- VRFY((ret >= 0), "H5Pset_copy_object succeeded");
-
- ret = H5Padd_merge_committed_dtype_path(ocpypl, "foo");
- VRFY((ret >= 0), "H5Padd_merge_committed_dtype_path succeeded");
-
- ret = H5Padd_merge_committed_dtype_path(ocpypl, "bar");
- VRFY((ret >= 0), "H5Padd_merge_committed_dtype_path succeeded");
-
- ret = test_encode_decode(ocpypl, mpi_rank, recv_proc);
- VRFY((ret >= 0), "test_encode_decode succeeded");
-
- ret = H5Pclose(ocpypl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /******* ENCODE/DECODE FAPLS *****/
- fapl = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((fapl >= 0), "H5Pcreate succeeded");
-
- ret = H5Pset_family_offset(fapl, 1024);
- VRFY((ret >= 0), "H5Pset_family_offset succeeded");
-
- ret = H5Pset_meta_block_size(fapl, 2098452);
- VRFY((ret >= 0), "H5Pset_meta_block_size succeeded");
-
- ret = H5Pset_sieve_buf_size(fapl, 1048576);
- VRFY((ret >= 0), "H5Pset_sieve_buf_size succeeded");
-
- ret = H5Pset_alignment(fapl, 2, 1024);
- VRFY((ret >= 0), "H5Pset_alignment succeeded");
-
- ret = H5Pset_cache(fapl, 1024, 128, 10485760, 0.3);
- VRFY((ret >= 0), "H5Pset_cache succeeded");
-
- ret = H5Pset_elink_file_cache_size(fapl, 10485760);
- VRFY((ret >= 0), "H5Pset_elink_file_cache_size succeeded");
-
- ret = H5Pset_gc_references(fapl, 1);
- VRFY((ret >= 0), "H5Pset_gc_references succeeded");
-
- ret = H5Pset_small_data_block_size(fapl, 2048);
- VRFY((ret >= 0), "H5Pset_small_data_block_size succeeded");
-
- ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
- VRFY((ret >= 0), "H5Pset_libver_bounds succeeded");
-
- ret = H5Pset_fclose_degree(fapl, H5F_CLOSE_WEAK);
- VRFY((ret >= 0), "H5Pset_fclose_degree succeeded");
-
- ret = H5Pset_multi_type(fapl, H5FD_MEM_GHEAP);
- VRFY((ret >= 0), "H5Pset_multi_type succeeded");
-
- ret = H5Pset_mdc_config(fapl, &my_cache_config);
- VRFY((ret >= 0), "H5Pset_mdc_config succeeded");
-
- ret = test_encode_decode(fapl, mpi_rank, recv_proc);
- VRFY((ret >= 0), "test_encode_decode succeeded");
-
- ret = H5Pclose(fapl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /******* ENCODE/DECODE FCPLS *****/
- fcpl = H5Pcreate(H5P_FILE_CREATE);
- VRFY((fcpl >= 0), "H5Pcreate succeeded");
-
- ret = H5Pset_userblock(fcpl, 1024);
- VRFY((ret >= 0), "H5Pset_userblock succeeded");
-
- ret = H5Pset_istore_k(fcpl, 3);
- VRFY((ret >= 0), "H5Pset_istore_k succeeded");
-
- ret = H5Pset_sym_k(fcpl, 4, 5);
- VRFY((ret >= 0), "H5Pset_sym_k succeeded");
-
- ret = H5Pset_shared_mesg_nindexes(fcpl, 8);
- VRFY((ret >= 0), "H5Pset_shared_mesg_nindexes succeeded");
-
- ret = H5Pset_shared_mesg_index(fcpl, 1, H5O_SHMESG_SDSPACE_FLAG, 32);
- VRFY((ret >= 0), "H5Pset_shared_mesg_index succeeded");
-
- ret = H5Pset_shared_mesg_phase_change(fcpl, 60, 20);
- VRFY((ret >= 0), "H5Pset_shared_mesg_phase_change succeeded");
-
- ret = H5Pset_sizes(fcpl, 8, 4);
- VRFY((ret >= 0), "H5Pset_sizes succeeded");
-
- ret = test_encode_decode(fcpl, mpi_rank, recv_proc);
- VRFY((ret >= 0), "test_encode_decode succeeded");
-
- ret = H5Pclose(fcpl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /******* ENCODE/DECODE STRCPLS *****/
- strcpl = H5Pcreate(H5P_STRING_CREATE);
- VRFY((strcpl >= 0), "H5Pcreate succeeded");
-
- ret = H5Pset_char_encoding(strcpl, H5T_CSET_UTF8);
- VRFY((ret >= 0), "H5Pset_char_encoding succeeded");
-
- ret = test_encode_decode(strcpl, mpi_rank, recv_proc);
- VRFY((ret >= 0), "test_encode_decode succeeded");
-
- ret = H5Pclose(strcpl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /******* ENCODE/DECODE ACPLS *****/
- acpl = H5Pcreate(H5P_ATTRIBUTE_CREATE);
- VRFY((acpl >= 0), "H5Pcreate succeeded");
-
- ret = H5Pset_char_encoding(acpl, H5T_CSET_UTF8);
- VRFY((ret >= 0), "H5Pset_char_encoding succeeded");
-
- ret = test_encode_decode(acpl, mpi_rank, recv_proc);
- VRFY((ret >= 0), "test_encode_decode succeeded");
-
- ret = H5Pclose(acpl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-}
-
-#if 0
-void
-external_links(void)
-{
- hid_t lcpl = H5I_INVALID_HID; /* link create prop. list */
- hid_t lapl = H5I_INVALID_HID; /* link access prop. list */
- hid_t fapl = H5I_INVALID_HID; /* file access prop. list */
- hid_t gapl = H5I_INVALID_HID; /* group access prop. list */
- hid_t fid = H5I_INVALID_HID; /* file id */
- hid_t group = H5I_INVALID_HID; /* group id */
- int mpi_size, mpi_rank;
-
- MPI_Comm comm;
- int doIO;
- int i, mrc;
-
- herr_t ret; /* Generic return value */
- htri_t tri_status; /* tri return value */
-
- const char *filename = "HDF5test.h5";
- const char *filename_ext = "HDF5test_ext.h5";
- const char *group_path = "/Base/Block/Step";
- const char *link_name = "link"; /* external link */
- char link_path[50];
-
- if (VERBOSE_MED)
- printf("Check external links\n");
-
- /* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
-    /* Check that MPI communicator access properties are passed to
-       externally linked files */
-
- if (mpi_rank == 0) {
-
- lcpl = H5Pcreate(H5P_LINK_CREATE);
- VRFY((lcpl >= 0), "H5Pcreate succeeded");
-
- ret = H5Pset_create_intermediate_group(lcpl, 1);
- VRFY((ret >= 0), "H5Pset_create_intermediate_group succeeded");
-
- /* Create file to serve as target for external link.*/
- fid = H5Fcreate(filename_ext, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((fid >= 0), "H5Fcreate succeeded");
-
- group = H5Gcreate2(fid, group_path, lcpl, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((group >= 0), "H5Gcreate succeeded");
-
- ret = H5Gclose(group);
- VRFY((ret >= 0), "H5Gclose succeeded");
-
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
-
- fapl = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((fapl >= 0), "H5Pcreate succeeded");
-
- /* Create a new file using the file access property list. */
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
- VRFY((fid >= 0), "H5Fcreate succeeded");
-
- ret = H5Pclose(fapl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- group = H5Gcreate2(fid, group_path, lcpl, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((group >= 0), "H5Gcreate succeeded");
-
- /* Create external links to the target files. */
- ret = H5Lcreate_external(filename_ext, group_path, group, link_name, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((ret >= 0), "H5Lcreate_external succeeded");
-
- /* Close and release resources. */
- ret = H5Pclose(lcpl);
- VRFY((ret >= 0), "H5Pclose succeeded");
- ret = H5Gclose(group);
- VRFY((ret >= 0), "H5Gclose succeeded");
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
- }
-
- MPI_Barrier(MPI_COMM_WORLD);
-
- /*
- * For the first case, use all the processes. For the second case
- * use a sub-communicator to verify the correct communicator is
- * being used for the externally linked files.
- * There is no way to determine if MPI info is being used for the
- * externally linked files.
- */
-
- for (i = 0; i < 2; i++) {
-
- comm = MPI_COMM_WORLD;
-
- if (i == 0)
- doIO = 1;
- else {
- doIO = mpi_rank % 2;
- mrc = MPI_Comm_split(MPI_COMM_WORLD, doIO, mpi_rank, &comm);
- VRFY((mrc == MPI_SUCCESS), "");
- }
-
- if (doIO) {
- fapl = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((fapl >= 0), "H5Pcreate succeeded");
- ret = H5Pset_fapl_mpio(fapl, comm, MPI_INFO_NULL);
- VRFY((fapl >= 0), "H5Pset_fapl_mpio succeeded");
-
- fid = H5Fopen(filename, H5F_ACC_RDWR, fapl);
- VRFY((fid >= 0), "H5Fopen succeeded");
-
-            /* test opening a group that is the target of an external link; the
-                 externally linked file should inherit the source file's access properties */
- snprintf(link_path, sizeof(link_path), "%s%s%s", group_path, "/", link_name);
- group = H5Gopen2(fid, link_path, H5P_DEFAULT);
- VRFY((group >= 0), "H5Gopen succeeded");
- ret = H5Gclose(group);
- VRFY((ret >= 0), "H5Gclose succeeded");
-
-            /* test opening a group that is an external link by setting the group
-                 access property */
- gapl = H5Pcreate(H5P_GROUP_ACCESS);
- VRFY((gapl >= 0), "H5Pcreate succeeded");
-
- ret = H5Pset_elink_fapl(gapl, fapl);
- VRFY((ret >= 0), "H5Pset_elink_fapl succeeded");
-
- group = H5Gopen2(fid, link_path, gapl);
- VRFY((group >= 0), "H5Gopen succeeded");
-
- ret = H5Gclose(group);
- VRFY((ret >= 0), "H5Gclose succeeded");
-
- ret = H5Pclose(gapl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /* test link APIs */
- lapl = H5Pcreate(H5P_LINK_ACCESS);
- VRFY((lapl >= 0), "H5Pcreate succeeded");
-
- ret = H5Pset_elink_fapl(lapl, fapl);
- VRFY((ret >= 0), "H5Pset_elink_fapl succeeded");
-
- tri_status = H5Lexists(fid, link_path, H5P_DEFAULT);
- VRFY((tri_status == true), "H5Lexists succeeded");
-
- tri_status = H5Lexists(fid, link_path, lapl);
- VRFY((tri_status == true), "H5Lexists succeeded");
-
- group = H5Oopen(fid, link_path, H5P_DEFAULT);
- VRFY((group >= 0), "H5Oopen succeeded");
-
- ret = H5Oclose(group);
- VRFY((ret >= 0), "H5Oclose succeeded");
-
- group = H5Oopen(fid, link_path, lapl);
- VRFY((group >= 0), "H5Oopen succeeded");
-
- ret = H5Oclose(group);
- VRFY((ret >= 0), "H5Oclose succeeded");
-
- ret = H5Pclose(lapl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- /* close the remaining resources */
-
- ret = H5Pclose(fapl);
- VRFY((ret >= 0), "H5Pclose succeeded");
-
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
- }
-
- if (comm != MPI_COMM_WORLD) {
- mrc = MPI_Comm_free(&comm);
- VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free succeeded");
- }
- }
-
- MPI_Barrier(MPI_COMM_WORLD);
-
- /* delete the test files */
- if (mpi_rank == 0) {
- MPI_File_delete(filename, MPI_INFO_NULL);
- MPI_File_delete(filename_ext, MPI_INFO_NULL);
- }
-}
-#endif
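The disabled external_links() test above revolves around a single mechanism: a (parallel) file access property list can be attached to a link or group access property list with H5Pset_elink_fapl(), so that the file behind an external link is opened with the same access properties as the source file. A condensed, hedged sketch of that propagation (the source file name is illustrative; error checking omitted):

/* Sketch: open an object behind an external link so that the target file
 * is opened with the same MPI-IO access properties as the source file.
 * "source.h5" is an illustrative name; assumes hdf5.h and mpi.h. */
static void
open_through_external_link(void)
{
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);

    hid_t fid = H5Fopen("source.h5", H5F_ACC_RDWR, fapl);

    hid_t gapl = H5Pcreate(H5P_GROUP_ACCESS);
    H5Pset_elink_fapl(gapl, fapl); /* propagate the fapl across the link */

    hid_t grp = H5Gopen2(fid, "/Base/Block/Step/link", gapl);

    H5Gclose(grp);
    H5Pclose(gapl);
    H5Fclose(fid);
    H5Pclose(fapl);
}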
diff --git a/testpar/API/t_pshutdown.c b/testpar/API/t_pshutdown.c
deleted file mode 100644
index fad9ea3..0000000
--- a/testpar/API/t_pshutdown.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
- * Purpose: This test creates a file and a bunch of objects in the
- * file and then calls MPI_Finalize without closing anything. The
- *              library should exercise the attribute destroy callback attached to
- *              MPI_COMM_SELF and terminate the HDF5 library, closing all open
- * objects. The t_prestart test will read back the file and make sure
- * all created objects are there.
- */
-
-#include "hdf5.h"
-#include "testphdf5.h"
-
-int nerrors = 0; /* errors count */
-
-const char *FILENAME[] = {"shutdown.h5", NULL};
-
-int
-main(int argc, char **argv)
-{
- hid_t file_id, dset_id, grp_id;
- hid_t fapl, sid, mem_dataspace;
- hsize_t dims[RANK], i;
- herr_t ret;
-#if 0
- char filename[1024];
-#endif
- int mpi_size, mpi_rank;
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
- hsize_t start[RANK];
- hsize_t count[RANK];
- hsize_t stride[RANK];
- hsize_t block[RANK];
- DATATYPE *data_array = NULL; /* data buffer */
-
- MPI_Init(&argc, &argv);
- MPI_Comm_size(comm, &mpi_size);
- MPI_Comm_rank(comm, &mpi_rank);
-
- if (MAINPROCESS) {
- printf("Testing %-62s", "proper shutdown of HDF5 library");
- fflush(stdout);
- }
-
- /* Set up file access property list with parallel I/O access */
- fapl = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((fapl >= 0), "H5Pcreate succeeded");
-
- /* Get the capability flag of the VOL connector being used */
- ret = H5Pget_vol_cap_flags(fapl, &vol_cap_flags_g);
- VRFY((ret >= 0), "H5Pget_vol_cap_flags succeeded");
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(
- " API functions for basic file, group, or dataset aren't supported with this connector\n");
- fflush(stdout);
- }
-
- MPI_Finalize();
- return 0;
- }
-
- ret = H5Pset_fapl_mpio(fapl, comm, info);
- VRFY((ret >= 0), "");
-
-#if 0
- h5_fixname(FILENAME[0], fapl, filename, sizeof filename);
-#endif
- file_id = H5Fcreate(FILENAME[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
- VRFY((file_id >= 0), "H5Fcreate succeeded");
- grp_id = H5Gcreate2(file_id, "Group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((grp_id >= 0), "H5Gcreate succeeded");
-
- dims[0] = (hsize_t)ROW_FACTOR * (hsize_t)mpi_size;
- dims[1] = (hsize_t)COL_FACTOR * (hsize_t)mpi_size;
- sid = H5Screate_simple(RANK, dims, NULL);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
-
- dset_id = H5Dcreate2(grp_id, "Dataset", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dset_id >= 0), "H5Dcreate succeeded");
-
- /* allocate memory for data buffer */
- data_array = (DATATYPE *)malloc(dims[0] * dims[1] * sizeof(DATATYPE));
- VRFY((data_array != NULL), "data_array malloc succeeded");
-
-    /* Each process takes a slab of rows. */
- block[0] = dims[0] / (hsize_t)mpi_size;
- block[1] = dims[1];
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = (hsize_t)mpi_rank * block[0];
- start[1] = 0;
-
- /* put some trivial data in the data_array */
- for (i = 0; i < dims[0] * dims[1]; i++)
- data_array[i] = mpi_rank + 1;
-
- ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
-
- /* write data independently */
- ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
- VRFY((ret >= 0), "H5Dwrite succeeded");
-
- /* release data buffers */
- if (data_array)
- free(data_array);
-
- MPI_Finalize();
-
- /* nerrors += GetTestNumErrs(); */
-
- if (MAINPROCESS) {
- if (0 == nerrors) {
- puts(" PASSED");
- fflush(stdout);
- }
- else {
- puts("*FAILED*");
- fflush(stdout);
- }
- }
-
- return (nerrors != 0);
-}
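The shutdown behavior this test depends on (see the purpose comment at the top of the file) uses a standard MPI idiom: a keyval with a delete callback is attached to MPI_COMM_SELF, and since MPI_Finalize must free MPI_COMM_SELF's attributes before anything else, the callback gives the library a hook to close everything down while MPI is still usable. The following is a hedged sketch of that idiom, not HDF5's actual internal implementation:

/* Sketch of the MPI_COMM_SELF attribute-destroy idiom the test relies on.
 * This is not HDF5's internal code, only the general mechanism. */
#include <mpi.h>

static int
shutdown_cb(MPI_Comm comm, int keyval, void *attr_val, void *extra_state)
{
    (void)comm; (void)keyval; (void)attr_val; (void)extra_state;
    /* A real library would close all of its open objects here,
     * e.g. by calling its own "close everything" entry point. */
    return MPI_SUCCESS;
}

static void
register_shutdown_hook(void)
{
    int keyval = MPI_KEYVAL_INVALID;

    /* MPI_Finalize deletes MPI_COMM_SELF's attributes first, so the delete
     * callback runs before MPI itself shuts down. */
    MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, shutdown_cb, &keyval, NULL);
    MPI_Comm_set_attr(MPI_COMM_SELF, keyval, NULL);
}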
diff --git a/testpar/API/t_shapesame.c b/testpar/API/t_shapesame.c
deleted file mode 100644
index 004ce1e..0000000
--- a/testpar/API/t_shapesame.c
+++ /dev/null
@@ -1,4484 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
- This program will test independent and collective reads and writes between
- selections of different rank that nonetheless are deemed to have the
- same shape by H5Sselect_shape_same().
- */
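The phrase "same shape" is doing a lot of work here: two selections of different dataspace rank compare as the same shape when their selected extents match once the extra dimensions collapse to size 1. A small hedged sketch of that check using the public H5Sselect_shape_same() (the tests in this file exercise the property through full read/write cycles rather than a direct call like this):

/* Sketch: a fully-selected 1-D dataspace of 10 elements and a 1 x 10
 * hyperslab in a 2-D dataspace are deemed to have the same shape.
 * Assumes hdf5.h is included; values are illustrative. */
static void
shape_same_sketch(void)
{
    hsize_t dims1[1] = {10};
    hsize_t dims2[2] = {4, 10};
    hsize_t start[2] = {2, 0};
    hsize_t count[2] = {1, 10};
    hid_t   sid1, sid2;
    htri_t  same;

    sid1 = H5Screate_simple(1, dims1, NULL); /* "all" selection, 10 elements */
    sid2 = H5Screate_simple(2, dims2, NULL);
    H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL);

    same = H5Sselect_shape_same(sid1, sid2); /* expected to be > 0 (true) */
    (void)same;

    H5Sclose(sid1);
    H5Sclose(sid2);
}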
-
-#define H5S_FRIEND /*suppress error about including H5Spkg */
-
-/* Define this macro to indicate that the testing APIs should be available */
-#define H5S_TESTING
-
-#if 0
-#include "H5Spkg.h" /* Dataspaces */
-#endif
-
-#include "hdf5.h"
-#include "testphdf5.h"
-
-#ifndef PATH_MAX
-#define PATH_MAX 512
-#endif
-
-/* FILENAME and filenames must have the same number of names.
- * Use PARATESTFILE in general and use a separated filename only if the file
- * created in one test is accessed by a different test.
- * filenames[0] is reserved as the file name for PARATESTFILE.
- */
-#define NFILENAME 2
-const char *FILENAME[NFILENAME] = {"ShapeSameTest.h5", NULL};
-char filenames[NFILENAME][PATH_MAX];
-hid_t fapl; /* file access property list */
-
-/* On Lustre (and perhaps other parallel file systems?), we have severe
- * slow downs if two or more processes attempt to access the same file system
- * block. To minimize this problem, we set alignment in the shape same tests
- * to the default Lustre block size -- which greatly reduces contention in
- * the chunked dataset case.
- */
-
-#define SHAPE_SAME_TEST_ALIGNMENT ((hsize_t)(4 * 1024 * 1024))
-
-#define PAR_SS_DR_MAX_RANK 5 /* must update code if this changes */
-
-struct hs_dr_pio_test_vars_t {
- int mpi_size;
- int mpi_rank;
- MPI_Comm mpi_comm;
- MPI_Info mpi_info;
- int test_num;
- int edge_size;
- int checker_edge_size;
- int chunk_edge_size;
- int small_rank;
- int large_rank;
- hid_t dset_type;
- uint32_t *small_ds_buf_0;
- uint32_t *small_ds_buf_1;
- uint32_t *small_ds_buf_2;
- uint32_t *small_ds_slice_buf;
- uint32_t *large_ds_buf_0;
- uint32_t *large_ds_buf_1;
- uint32_t *large_ds_buf_2;
- uint32_t *large_ds_slice_buf;
- int small_ds_offset;
- int large_ds_offset;
- hid_t fid; /* HDF5 file ID */
- hid_t xfer_plist;
- hid_t full_mem_small_ds_sid;
- hid_t full_file_small_ds_sid;
- hid_t mem_small_ds_sid;
- hid_t file_small_ds_sid_0;
- hid_t file_small_ds_sid_1;
- hid_t small_ds_slice_sid;
- hid_t full_mem_large_ds_sid;
- hid_t full_file_large_ds_sid;
- hid_t mem_large_ds_sid;
- hid_t file_large_ds_sid_0;
- hid_t file_large_ds_sid_1;
- hid_t file_large_ds_process_slice_sid;
- hid_t mem_large_ds_process_slice_sid;
- hid_t large_ds_slice_sid;
- hid_t small_dataset; /* Dataset ID */
- hid_t large_dataset; /* Dataset ID */
- size_t small_ds_size;
- size_t small_ds_slice_size;
- size_t large_ds_size;
- size_t large_ds_slice_size;
- hsize_t dims[PAR_SS_DR_MAX_RANK];
- hsize_t chunk_dims[PAR_SS_DR_MAX_RANK];
- hsize_t start[PAR_SS_DR_MAX_RANK];
- hsize_t stride[PAR_SS_DR_MAX_RANK];
- hsize_t count[PAR_SS_DR_MAX_RANK];
- hsize_t block[PAR_SS_DR_MAX_RANK];
- hsize_t *start_ptr;
- hsize_t *stride_ptr;
- hsize_t *count_ptr;
- hsize_t *block_ptr;
- int skips;
- int max_skips;
- int64_t total_tests;
- int64_t tests_run;
- int64_t tests_skipped;
-};
-
-/*-------------------------------------------------------------------------
- * Function: hs_dr_pio_test__setup()
- *
- * Purpose: Do setup for tests of I/O to/from hyperslab selections of
- * different rank in the parallel case.
- *
- * Return: void
- *
- *-------------------------------------------------------------------------
- */
-
-#define CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG 0
-
-static void
-hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker_edge_size,
- const int chunk_edge_size, const int small_rank, const int large_rank,
- const bool use_collective_io, const hid_t dset_type, const int express_test,
- struct hs_dr_pio_test_vars_t *tv_ptr)
-{
-#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG
- const char *fcnName = "hs_dr_pio_test__setup()";
-#endif /* CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG */
- const char *filename;
- bool mis_match = false;
- int i;
- int mrc;
- int mpi_rank; /* needed by the VRFY macro */
- uint32_t expected_value;
- uint32_t *ptr_0;
- uint32_t *ptr_1;
- hid_t acc_tpl; /* File access templates */
- hid_t small_ds_dcpl_id = H5P_DEFAULT;
- hid_t large_ds_dcpl_id = H5P_DEFAULT;
- herr_t ret; /* Generic return value */
-
- assert(edge_size >= 6);
- assert(edge_size >= chunk_edge_size);
- assert((chunk_edge_size == 0) || (chunk_edge_size >= 3));
- assert(1 < small_rank);
- assert(small_rank < large_rank);
- assert(large_rank <= PAR_SS_DR_MAX_RANK);
-
- tv_ptr->test_num = test_num;
- tv_ptr->edge_size = edge_size;
- tv_ptr->checker_edge_size = checker_edge_size;
- tv_ptr->chunk_edge_size = chunk_edge_size;
- tv_ptr->small_rank = small_rank;
- tv_ptr->large_rank = large_rank;
- tv_ptr->dset_type = dset_type;
-
- MPI_Comm_size(MPI_COMM_WORLD, &(tv_ptr->mpi_size));
- MPI_Comm_rank(MPI_COMM_WORLD, &(tv_ptr->mpi_rank));
- /* the VRFY() macro needs the local variable mpi_rank -- set it up now */
- mpi_rank = tv_ptr->mpi_rank;
-
- assert(tv_ptr->mpi_size >= 1);
-
- tv_ptr->mpi_comm = MPI_COMM_WORLD;
- tv_ptr->mpi_info = MPI_INFO_NULL;
-
- for (i = 0; i < tv_ptr->small_rank - 1; i++) {
- tv_ptr->small_ds_size *= (size_t)(tv_ptr->edge_size);
- tv_ptr->small_ds_slice_size *= (size_t)(tv_ptr->edge_size);
- }
- tv_ptr->small_ds_size *= (size_t)(tv_ptr->mpi_size + 1);
-
- /* used by checker board tests only */
- tv_ptr->small_ds_offset = PAR_SS_DR_MAX_RANK - tv_ptr->small_rank;
-
- assert(0 < tv_ptr->small_ds_offset);
- assert(tv_ptr->small_ds_offset < PAR_SS_DR_MAX_RANK);
-
- for (i = 0; i < tv_ptr->large_rank - 1; i++) {
-
- tv_ptr->large_ds_size *= (size_t)(tv_ptr->edge_size);
- tv_ptr->large_ds_slice_size *= (size_t)(tv_ptr->edge_size);
- }
- tv_ptr->large_ds_size *= (size_t)(tv_ptr->mpi_size + 1);
-
- /* used by checker board tests only */
- tv_ptr->large_ds_offset = PAR_SS_DR_MAX_RANK - tv_ptr->large_rank;
-
- assert(0 <= tv_ptr->large_ds_offset);
- assert(tv_ptr->large_ds_offset < PAR_SS_DR_MAX_RANK);
-
- /* set up the start, stride, count, and block pointers */
- /* used by contiguous tests only */
- tv_ptr->start_ptr = &(tv_ptr->start[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]);
- tv_ptr->stride_ptr = &(tv_ptr->stride[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]);
- tv_ptr->count_ptr = &(tv_ptr->count[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]);
- tv_ptr->block_ptr = &(tv_ptr->block[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]);
-
- /* Allocate buffers */
- tv_ptr->small_ds_buf_0 = (uint32_t *)malloc(sizeof(uint32_t) * tv_ptr->small_ds_size);
- VRFY((tv_ptr->small_ds_buf_0 != NULL), "malloc of small_ds_buf_0 succeeded");
-
- tv_ptr->small_ds_buf_1 = (uint32_t *)malloc(sizeof(uint32_t) * tv_ptr->small_ds_size);
- VRFY((tv_ptr->small_ds_buf_1 != NULL), "malloc of small_ds_buf_1 succeeded");
-
- tv_ptr->small_ds_buf_2 = (uint32_t *)malloc(sizeof(uint32_t) * tv_ptr->small_ds_size);
- VRFY((tv_ptr->small_ds_buf_2 != NULL), "malloc of small_ds_buf_2 succeeded");
-
- tv_ptr->small_ds_slice_buf = (uint32_t *)malloc(sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
- VRFY((tv_ptr->small_ds_slice_buf != NULL), "malloc of small_ds_slice_buf succeeded");
-
- tv_ptr->large_ds_buf_0 = (uint32_t *)malloc(sizeof(uint32_t) * tv_ptr->large_ds_size);
- VRFY((tv_ptr->large_ds_buf_0 != NULL), "malloc of large_ds_buf_0 succeeded");
-
- tv_ptr->large_ds_buf_1 = (uint32_t *)malloc(sizeof(uint32_t) * tv_ptr->large_ds_size);
- VRFY((tv_ptr->large_ds_buf_1 != NULL), "malloc of large_ds_buf_1 succeeded");
-
- tv_ptr->large_ds_buf_2 = (uint32_t *)malloc(sizeof(uint32_t) * tv_ptr->large_ds_size);
- VRFY((tv_ptr->large_ds_buf_2 != NULL), "malloc of large_ds_buf_2 succeeded");
-
- tv_ptr->large_ds_slice_buf = (uint32_t *)malloc(sizeof(uint32_t) * tv_ptr->large_ds_slice_size);
- VRFY((tv_ptr->large_ds_slice_buf != NULL), "malloc of large_ds_slice_buf succeeded");
-
- /* initialize the buffers */
-
- ptr_0 = tv_ptr->small_ds_buf_0;
- for (i = 0; i < (int)(tv_ptr->small_ds_size); i++)
- *ptr_0++ = (uint32_t)i;
- memset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size);
- memset(tv_ptr->small_ds_buf_2, 0, sizeof(uint32_t) * tv_ptr->small_ds_size);
-
- memset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
-
- ptr_0 = tv_ptr->large_ds_buf_0;
- for (i = 0; i < (int)(tv_ptr->large_ds_size); i++)
- *ptr_0++ = (uint32_t)i;
- memset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
- memset(tv_ptr->large_ds_buf_2, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
-
- memset(tv_ptr->large_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->large_ds_slice_size);
-
- filename = filenames[0]; /* (const char *)GetTestParameters(); */
- assert(filename != NULL);
-#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG
- if (MAINPROCESS) {
-
- fprintf(stdout, "%d: test num = %d.\n", tv_ptr->mpi_rank, tv_ptr->test_num);
- fprintf(stdout, "%d: mpi_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->mpi_size);
- fprintf(stdout, "%d: small/large rank = %d/%d, use_collective_io = %d.\n", tv_ptr->mpi_rank,
- tv_ptr->small_rank, tv_ptr->large_rank, (int)use_collective_io);
- fprintf(stdout, "%d: edge_size = %d, chunk_edge_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->edge_size,
- tv_ptr->chunk_edge_size);
- fprintf(stdout, "%d: checker_edge_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->checker_edge_size);
- fprintf(stdout, "%d: small_ds_size = %d, large_ds_size = %d.\n", tv_ptr->mpi_rank,
- (int)(tv_ptr->small_ds_size), (int)(tv_ptr->large_ds_size));
- fprintf(stdout, "%d: filename = %s.\n", tv_ptr->mpi_rank, filename);
- }
-#endif /* CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG */
- /* ----------------------------------------
- * CREATE AN HDF5 FILE WITH PARALLEL ACCESS
- * ---------------------------------------*/
- /* setup file access template */
- acc_tpl = create_faccess_plist(tv_ptr->mpi_comm, tv_ptr->mpi_info, facc_type);
- VRFY((acc_tpl >= 0), "create_faccess_plist() succeeded");
-
- /* set the alignment -- need it large so that we aren't always hitting the
- * the same file system block. Do this only if express_test is greater
- * than zero.
- */
- if (express_test > 0) {
-
- ret = H5Pset_alignment(acc_tpl, (hsize_t)0, SHAPE_SAME_TEST_ALIGNMENT);
- VRFY((ret != FAIL), "H5Pset_alignment() succeeded");
- }
-
- /* create the file collectively */
- tv_ptr->fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
- VRFY((tv_ptr->fid >= 0), "H5Fcreate succeeded");
-
- MESG("File opened.");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded");
-
- /* setup dims: */
- tv_ptr->dims[0] = (hsize_t)(tv_ptr->mpi_size + 1);
- tv_ptr->dims[1] = tv_ptr->dims[2] = tv_ptr->dims[3] = tv_ptr->dims[4] = (hsize_t)(tv_ptr->edge_size);
-
- /* Create small ds dataspaces */
- tv_ptr->full_mem_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->full_mem_small_ds_sid != 0), "H5Screate_simple() full_mem_small_ds_sid succeeded");
-
- tv_ptr->full_file_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->full_file_small_ds_sid != 0), "H5Screate_simple() full_file_small_ds_sid succeeded");
-
- tv_ptr->mem_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
-    VRFY((tv_ptr->mem_small_ds_sid >= 0), "H5Screate_simple() mem_small_ds_sid succeeded");
-
- tv_ptr->file_small_ds_sid_0 = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
-    VRFY((tv_ptr->file_small_ds_sid_0 >= 0), "H5Screate_simple() file_small_ds_sid_0 succeeded");
-
- /* used by checker board tests only */
- tv_ptr->file_small_ds_sid_1 = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
-    VRFY((tv_ptr->file_small_ds_sid_1 >= 0), "H5Screate_simple() file_small_ds_sid_1 succeeded");
-
- tv_ptr->small_ds_slice_sid = H5Screate_simple(tv_ptr->small_rank - 1, &(tv_ptr->dims[1]), NULL);
-    VRFY((tv_ptr->small_ds_slice_sid >= 0), "H5Screate_simple() small_ds_slice_sid succeeded");
-
- /* Create large ds dataspaces */
- tv_ptr->full_mem_large_ds_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
-    VRFY((tv_ptr->full_mem_large_ds_sid >= 0), "H5Screate_simple() full_mem_large_ds_sid succeeded");
-
- tv_ptr->full_file_large_ds_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->full_file_large_ds_sid != FAIL), "H5Screate_simple() full_file_large_ds_sid succeeded");
-
- tv_ptr->mem_large_ds_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->mem_large_ds_sid != FAIL), "H5Screate_simple() mem_large_ds_sid succeeded");
-
- tv_ptr->file_large_ds_sid_0 = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->file_large_ds_sid_0 != FAIL), "H5Screate_simple() file_large_ds_sid_0 succeeded");
-
- /* used by checker board tests only */
- tv_ptr->file_large_ds_sid_1 = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->file_large_ds_sid_1 != FAIL), "H5Screate_simple() file_large_ds_sid_1 succeeded");
-
- tv_ptr->mem_large_ds_process_slice_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->mem_large_ds_process_slice_sid != FAIL),
- "H5Screate_simple() mem_large_ds_process_slice_sid succeeded");
-
- tv_ptr->file_large_ds_process_slice_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->file_large_ds_process_slice_sid != FAIL),
- "H5Screate_simple() file_large_ds_process_slice_sid succeeded");
-
- tv_ptr->large_ds_slice_sid = H5Screate_simple(tv_ptr->large_rank - 1, &(tv_ptr->dims[1]), NULL);
-    VRFY((tv_ptr->large_ds_slice_sid >= 0), "H5Screate_simple() large_ds_slice_sid succeeded");
-
- /* if chunk edge size is greater than zero, set up the small and
- * large data set creation property lists to specify chunked
- * datasets.
- */
- if (tv_ptr->chunk_edge_size > 0) {
-
- /* Under Lustre (and perhaps other parallel file systems?) we get
- * locking delays when two or more processes attempt to access the
- * same file system block.
- *
- * To minimize this problem, I have changed chunk_dims[0]
-         * from (mpi_size + 1) to just 1 when any sort of express test is
- * selected. Given the structure of the test, and assuming we
- * set the alignment large enough, this avoids the contention
- * issue by seeing to it that each chunk is only accessed by one
- * process.
- *
- * One can argue as to whether this is a good thing to do in our
- * tests, but for now it is necessary if we want the test to complete
- * in a reasonable amount of time.
- *
- * JRM -- 9/16/10
- */
-
- tv_ptr->chunk_dims[0] = 1;
-
- tv_ptr->chunk_dims[1] = tv_ptr->chunk_dims[2] = tv_ptr->chunk_dims[3] = tv_ptr->chunk_dims[4] =
- (hsize_t)(tv_ptr->chunk_edge_size);
-
- small_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
-        VRFY((small_ds_dcpl_id >= 0), "H5Pcreate() small_ds_dcpl_id succeeded");
-
- ret = H5Pset_layout(small_ds_dcpl_id, H5D_CHUNKED);
- VRFY((ret != FAIL), "H5Pset_layout() small_ds_dcpl_id succeeded");
-
- ret = H5Pset_chunk(small_ds_dcpl_id, tv_ptr->small_rank, tv_ptr->chunk_dims);
- VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded");
-
- large_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
-        VRFY((large_ds_dcpl_id >= 0), "H5Pcreate() large_ds_dcpl_id succeeded");
-
- ret = H5Pset_layout(large_ds_dcpl_id, H5D_CHUNKED);
- VRFY((ret != FAIL), "H5Pset_layout() large_ds_dcpl_id succeeded");
-
- ret = H5Pset_chunk(large_ds_dcpl_id, tv_ptr->large_rank, tv_ptr->chunk_dims);
- VRFY((ret != FAIL), "H5Pset_chunk() large_ds_dcpl_id succeeded");
- }
-
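-    /* When chunk_edge_size is zero the two dcpl ids are presumably left at
-     * their initial H5P_DEFAULT value, so the H5Dcreate2() calls below then
-     * create contiguous rather than chunked datasets.
-     */
-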
- /* create the small dataset */
- tv_ptr->small_dataset =
- H5Dcreate2(tv_ptr->fid, "small_dataset", tv_ptr->dset_type, tv_ptr->file_small_ds_sid_0, H5P_DEFAULT,
- small_ds_dcpl_id, H5P_DEFAULT);
-    VRFY((tv_ptr->small_dataset >= 0), "H5Dcreate2() small_dataset succeeded");
-
- /* create the large dataset */
- tv_ptr->large_dataset =
- H5Dcreate2(tv_ptr->fid, "large_dataset", tv_ptr->dset_type, tv_ptr->file_large_ds_sid_0, H5P_DEFAULT,
- large_ds_dcpl_id, H5P_DEFAULT);
-    VRFY((tv_ptr->large_dataset >= 0), "H5Dcreate2() large_dataset succeeded");
-
- /* setup xfer property list */
- tv_ptr->xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((tv_ptr->xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
-
- if (use_collective_io) {
- ret = H5Pset_dxpl_mpio(tv_ptr->xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- }
-
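-    /* When use_collective_io is false, no H5Pset_dxpl_mpio() call is made and
-     * data transfers default to independent MPI I/O (H5FD_MPIO_INDEPENDENT),
-     * which is the mode exercised by the IND_* variants of these tests.
-     */
-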
- /* setup selection to write initial data to the small and large data sets */
- tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
- tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
- tv_ptr->count[0] = 1;
- tv_ptr->block[0] = 1;
-
- for (i = 1; i < tv_ptr->large_rank; i++) {
-
- tv_ptr->start[i] = 0;
- tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
- }
-
- /* setup selections for writing initial data to the small data set */
- ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
- tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
-
- ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
- tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded");
-
- if (MAINPROCESS) { /* add an additional slice to the selections */
-
- tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_size);
-
- ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride,
- tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) succeeded");
-
- ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride,
- tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, or) succeeded");
- }
-
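-    /* Note: the data sets have (mpi_size + 1) slices along dimension 0, so
-     * the selections above leave the main process to initialize the last,
-     * otherwise unowned slice in addition to its own.
-     */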
- /* write the initial value of the small data set to file */
- ret = H5Dwrite(tv_ptr->small_dataset, tv_ptr->dset_type, tv_ptr->mem_small_ds_sid,
- tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_0);
-
- VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded");
-
- /* sync with the other processes before checking data */
- mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes");
-
- /* read the small data set back to verify that it contains the
- * expected data. Note that each process reads in the entire
- * data set and verifies it.
- */
- ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->full_mem_small_ds_sid,
- tv_ptr->full_file_small_ds_sid, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_1);
- VRFY((ret >= 0), "H5Dread() small_dataset initial read succeeded");
-
- /* verify that the correct data was written to the small data set */
- expected_value = 0;
- mis_match = false;
- ptr_1 = tv_ptr->small_ds_buf_1;
-
- i = 0;
- for (i = 0; i < (int)(tv_ptr->small_ds_size); i++) {
-
- if (*ptr_1 != expected_value) {
-
- mis_match = true;
- }
- ptr_1++;
- expected_value++;
- }
- VRFY((mis_match == false), "small ds init data good.");
-
- /* setup selections for writing initial data to the large data set */
-
- tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
-
- ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
- tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) succeeded");
-
- ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
- tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) succeeded");
-
- /* In passing, setup the process slice dataspaces as well */
-
- ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_process_slice_sid, H5S_SELECT_SET, tv_ptr->start,
- tv_ptr->stride, tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_process_slice_sid, set) succeeded");
-
- ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_process_slice_sid, H5S_SELECT_SET, tv_ptr->start,
- tv_ptr->stride, tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_process_slice_sid, set) succeeded");
-
- if (MAINPROCESS) { /* add an additional slice to the selections */
-
- tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_size);
-
- ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride,
- tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) succeeded");
-
- ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride,
- tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, or) succeeded");
- }
-
- /* write the initial value of the large data set to file */
- ret = H5Dwrite(tv_ptr->large_dataset, tv_ptr->dset_type, tv_ptr->mem_large_ds_sid,
- tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0);
- if (ret < 0)
- H5Eprint2(H5E_DEFAULT, stderr);
- VRFY((ret >= 0), "H5Dwrite() large_dataset initial write succeeded");
-
- /* sync with the other processes before checking data */
- mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc == MPI_SUCCESS), "Sync after large dataset writes");
-
- /* read the large data set back to verify that it contains the
- * expected data. Note that each process reads in the entire
- * data set.
- */
- ret = H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->full_mem_large_ds_sid,
- tv_ptr->full_file_large_ds_sid, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1);
- VRFY((ret >= 0), "H5Dread() large_dataset initial read succeeded");
-
- /* verify that the correct data was written to the large data set */
- expected_value = 0;
- mis_match = false;
- ptr_1 = tv_ptr->large_ds_buf_1;
-
- i = 0;
- for (i = 0; i < (int)(tv_ptr->large_ds_size); i++) {
-
- if (*ptr_1 != expected_value) {
-
- mis_match = true;
- }
- ptr_1++;
- expected_value++;
- }
- VRFY((mis_match == false), "large ds init data good.");
-
- /* sync with the other processes before changing data */
- mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc == MPI_SUCCESS), "Sync initial values check");
-
- return;
-
-} /* hs_dr_pio_test__setup() */
-
-/*-------------------------------------------------------------------------
- * Function: hs_dr_pio_test__takedown()
- *
- * Purpose: Do takedown after tests of I/O to/from hyperslab selections
- * of different rank in the parallel case.
- *
- * Return: void
- *
- *-------------------------------------------------------------------------
- */
-
-#define HS_DR_PIO_TEST__TAKEDOWN__DEBUG 0
-
-static void
-hs_dr_pio_test__takedown(struct hs_dr_pio_test_vars_t *tv_ptr)
-{
-#if HS_DR_PIO_TEST__TAKEDOWN__DEBUG
- const char *fcnName = "hs_dr_pio_test__takedown()";
-#endif /* HS_DR_PIO_TEST__TAKEDOWN__DEBUG */
- int mpi_rank; /* needed by the VRFY macro */
- herr_t ret; /* Generic return value */
-
- /* initialize the local copy of mpi_rank */
- mpi_rank = tv_ptr->mpi_rank;
-
- /* Close property lists */
- if (tv_ptr->xfer_plist != H5P_DEFAULT) {
- ret = H5Pclose(tv_ptr->xfer_plist);
- VRFY((ret != FAIL), "H5Pclose(xfer_plist) succeeded");
- }
-
- /* Close dataspaces */
- ret = H5Sclose(tv_ptr->full_mem_small_ds_sid);
- VRFY((ret != FAIL), "H5Sclose(full_mem_small_ds_sid) succeeded");
-
- ret = H5Sclose(tv_ptr->full_file_small_ds_sid);
- VRFY((ret != FAIL), "H5Sclose(full_file_small_ds_sid) succeeded");
-
- ret = H5Sclose(tv_ptr->mem_small_ds_sid);
- VRFY((ret != FAIL), "H5Sclose(mem_small_ds_sid) succeeded");
-
- ret = H5Sclose(tv_ptr->file_small_ds_sid_0);
- VRFY((ret != FAIL), "H5Sclose(file_small_ds_sid_0) succeeded");
-
- ret = H5Sclose(tv_ptr->file_small_ds_sid_1);
- VRFY((ret != FAIL), "H5Sclose(file_small_ds_sid_1) succeeded");
-
- ret = H5Sclose(tv_ptr->small_ds_slice_sid);
- VRFY((ret != FAIL), "H5Sclose(small_ds_slice_sid) succeeded");
-
- ret = H5Sclose(tv_ptr->full_mem_large_ds_sid);
- VRFY((ret != FAIL), "H5Sclose(full_mem_large_ds_sid) succeeded");
-
- ret = H5Sclose(tv_ptr->full_file_large_ds_sid);
- VRFY((ret != FAIL), "H5Sclose(full_file_large_ds_sid) succeeded");
-
- ret = H5Sclose(tv_ptr->mem_large_ds_sid);
- VRFY((ret != FAIL), "H5Sclose(mem_large_ds_sid) succeeded");
-
- ret = H5Sclose(tv_ptr->file_large_ds_sid_0);
- VRFY((ret != FAIL), "H5Sclose(file_large_ds_sid_0) succeeded");
-
- ret = H5Sclose(tv_ptr->file_large_ds_sid_1);
- VRFY((ret != FAIL), "H5Sclose(file_large_ds_sid_1) succeeded");
-
- ret = H5Sclose(tv_ptr->mem_large_ds_process_slice_sid);
- VRFY((ret != FAIL), "H5Sclose(mem_large_ds_process_slice_sid) succeeded");
-
- ret = H5Sclose(tv_ptr->file_large_ds_process_slice_sid);
- VRFY((ret != FAIL), "H5Sclose(file_large_ds_process_slice_sid) succeeded");
-
- ret = H5Sclose(tv_ptr->large_ds_slice_sid);
- VRFY((ret != FAIL), "H5Sclose(large_ds_slice_sid) succeeded");
-
- /* Close Datasets */
- ret = H5Dclose(tv_ptr->small_dataset);
- VRFY((ret != FAIL), "H5Dclose(small_dataset) succeeded");
-
- ret = H5Dclose(tv_ptr->large_dataset);
- VRFY((ret != FAIL), "H5Dclose(large_dataset) succeeded");
-
- /* close the file collectively */
- MESG("about to close file.");
- ret = H5Fclose(tv_ptr->fid);
- VRFY((ret != FAIL), "file close succeeded");
-
- /* Free memory buffers */
-
- if (tv_ptr->small_ds_buf_0 != NULL)
- free(tv_ptr->small_ds_buf_0);
- if (tv_ptr->small_ds_buf_1 != NULL)
- free(tv_ptr->small_ds_buf_1);
- if (tv_ptr->small_ds_buf_2 != NULL)
- free(tv_ptr->small_ds_buf_2);
- if (tv_ptr->small_ds_slice_buf != NULL)
- free(tv_ptr->small_ds_slice_buf);
-
- if (tv_ptr->large_ds_buf_0 != NULL)
- free(tv_ptr->large_ds_buf_0);
- if (tv_ptr->large_ds_buf_1 != NULL)
- free(tv_ptr->large_ds_buf_1);
- if (tv_ptr->large_ds_buf_2 != NULL)
- free(tv_ptr->large_ds_buf_2);
- if (tv_ptr->large_ds_slice_buf != NULL)
- free(tv_ptr->large_ds_slice_buf);
-
- return;
-
-} /* hs_dr_pio_test__takedown() */
-
-/*-------------------------------------------------------------------------
- * Function: contig_hs_dr_pio_test__d2m_l2s()
- *
- * Purpose: Part one of a series of tests of I/O to/from hyperslab
- *              selections of different rank in the parallel case.
- *
- * Verify that we can read from disk correctly using
- * selections of different rank that H5Sselect_shape_same()
- * views as being of the same shape.
- *
- *              In this function, we test this by reading small_rank - 1 dimensional
- * slices from the on disk large cube, and verifying that the
- * data read is correct. Verify that H5Sselect_shape_same()
- * returns true on the memory and file selections.
- *
- * Return: void
- *
- *-------------------------------------------------------------------------
- */
-
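-/* The H5Sselect_shape_same() property that the subtests below rely on, shown
- * in isolation.  This is an illustrative sketch only (hypothetical helper,
- * not part of the test): a fully selected 1-D dataspace of length 10 and a
- * 1 x 1 x 10 hyperslab through a 3-D dataspace are reported as having the
- * same shape even though their ranks differ.
- */
-#if 0
-static void
-example_shape_same_across_ranks(void)
-{
-    hsize_t dims1[1] = {10};
-    hsize_t dims3[3] = {4, 6, 10};
-    hsize_t start[3] = {2, 3, 0};
-    hsize_t count[3] = {1, 1, 1};
-    hsize_t block[3] = {1, 1, 10};
-    hid_t   sid1     = H5Screate_simple(1, dims1, NULL); /* selection defaults to "all" */
-    hid_t   sid3     = H5Screate_simple(3, dims3, NULL);
-    htri_t  same;
-
-    /* select a single 1 x 1 x 10 row through the 3-D dataspace */
-    H5Sselect_hyperslab(sid3, H5S_SELECT_SET, start, NULL, count, block);
-
-    /* both selections are a 10 element line, so this is expected to be true */
-    same = H5Sselect_shape_same(sid1, sid3);
-    assert(same > 0);
-
-    H5Sclose(sid1);
-    H5Sclose(sid3);
-}
-#endif
-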
-#define CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG 0
-
-static void
-contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
-{
-#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
-    const char *fcnName = "contig_hs_dr_pio_test__d2m_l2s()";
-#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
- bool mis_match = false;
- int i, j, k, l;
- size_t n;
- int mpi_rank; /* needed by the VRFY macro */
- uint32_t expected_value;
- uint32_t *ptr_1;
- htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
-
- /* initialize the local copy of mpi_rank */
- mpi_rank = tv_ptr->mpi_rank;
-
- /* We have already done a H5Sselect_all() on the dataspace
- * small_ds_slice_sid in the initialization phase, so no need to
- * call H5Sselect_all() again.
- */
-
- /* set up start, stride, count, and block -- note that we will
- * change start[] so as to read slices of the large cube.
- */
- for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
-
- tv_ptr->start[i] = 0;
- tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
-
- tv_ptr->block[i] = 1;
- }
- else {
-
- tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
- }
- }
-
- /* zero out the buffer we will be reading into */
- memset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
-
-#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
- fprintf(stdout, "%s reading slices from big cube on disk into small cube slice.\n", fcnName);
-#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
-
- /* in serial versions of this test, we loop through all the dimensions
- * of the large data set. However, in the parallel version, each
- * process only works with that slice of the large cube indicated
- * by its rank -- hence we set the most slowly changing index to
- * mpi_rank, and don't iterate over it.
- */
-
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
-
- i = tv_ptr->mpi_rank;
- }
- else {
-
- i = 0;
- }
-
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
- * loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
- * test.
- */
-
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
-
- j = tv_ptr->mpi_rank;
- }
- else {
-
- j = 0;
- }
-
- do {
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
-
- k = tv_ptr->mpi_rank;
- }
- else {
-
- k = 0;
- }
-
- do {
- /* since small rank >= 2 and large_rank > small_rank, we
- * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
-             * (barring major re-organization), this gives us:
- *
- * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
- *
- * so no need to repeat the test in the outer loops --
- * just set l = 0.
- */
-
- l = 0;
- do {
- if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
-
- (tv_ptr->tests_skipped)++;
- }
- else { /* run the test */
-
- tv_ptr->skips = 0; /* reset the skips counter */
-
- /* we know that small_rank - 1 >= 1 and that
- * large_rank > small_rank by the assertions at the head
- * of this function. Thus no need for another inner loop.
- */
- tv_ptr->start[0] = (hsize_t)i;
- tv_ptr->start[1] = (hsize_t)j;
- tv_ptr->start[2] = (hsize_t)k;
- tv_ptr->start[3] = (hsize_t)l;
- tv_ptr->start[4] = 0;
-
- ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start_ptr,
- tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr);
- VRFY((ret != FAIL), "H5Sselect_hyperslab(file_large_cube_sid) succeeded");
-
- /* verify that H5Sselect_shape_same() reports the two
- * selections as having the same shape.
- */
- check = H5Sselect_shape_same(tv_ptr->small_ds_slice_sid, tv_ptr->file_large_ds_sid_0);
- VRFY((check == true), "H5Sselect_shape_same passed");
-
- /* Read selection from disk */
-#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
- fprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank),
- (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]),
- (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4]));
- fprintf(stdout, "%s slice/file extent dims = %d/%d.\n", fcnName,
- H5Sget_simple_extent_ndims(tv_ptr->small_ds_slice_sid),
- H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0));
-#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
- ret =
- H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->small_ds_slice_sid,
- tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_slice_buf);
- VRFY((ret >= 0), "H5Dread() slice from large ds succeeded.");
-
- /* verify that expected data is retrieved */
-
- mis_match = false;
- ptr_1 = tv_ptr->small_ds_slice_buf;
- expected_value =
- (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
- tv_ptr->edge_size) +
- (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
- (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
-
- for (n = 0; n < tv_ptr->small_ds_slice_size; n++) {
-
- if (*ptr_1 != expected_value) {
-
- mis_match = true;
- }
-
- *ptr_1 = 0; /* zero data for next use */
-
- ptr_1++;
- expected_value++;
- }
-
- VRFY((mis_match == false), "small slice read from large ds data good.");
-
- (tv_ptr->tests_run)++;
- }
-
- l++;
-
- (tv_ptr->total_tests)++;
-
- } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
- k++;
- } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
- j++;
- } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
-
- return;
-
-} /* contig_hs_dr_pio_test__d2m_l2s() */
-
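-/* A note on the expected_value arithmetic used in the subtest above and in
- * those that follow: the large data set was initialized with consecutive
- * uint32_t values in row-major (C) order, so the first element of the slice
- * whose leading indices are (i, j, k, l) sits at linear offset
- *
- *     i * e^4 + j * e^3 + k * e^2 + l * e        (where e == edge_size)
- *
- * with any index that falls outside the data set's actual rank held at zero.
- * Values within the slice are then expected to increase by one per element.
- */
-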
-/*-------------------------------------------------------------------------
- * Function: contig_hs_dr_pio_test__d2m_s2l()
- *
- * Purpose: Part two of a series of tests of I/O to/from hyperslab
- *              selections of different rank in the parallel case.
- *
- * Verify that we can read from disk correctly using
- * selections of different rank that H5Sselect_shape_same()
- * views as being of the same shape.
- *
- * In this function, we test this by reading slices of the
- * on disk small data set into slices through the in memory
- *              large data set, and verifying that the correct data (and
- * only the correct data) is read.
- *
- * Return: void
- *
- *-------------------------------------------------------------------------
- */
-
-#define CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG 0
-
-static void
-contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
-{
-#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
- const char *fcnName = "contig_hs_dr_pio_test__d2m_s2l()";
-#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
- bool mis_match = false;
- int i, j, k, l;
- size_t n;
- int mpi_rank; /* needed by the VRFY macro */
- size_t start_index;
- size_t stop_index;
- uint32_t expected_value;
- uint32_t *ptr_1;
- htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
-
- /* initialize the local copy of mpi_rank */
- mpi_rank = tv_ptr->mpi_rank;
-
- /* Read slices of the on disk small data set into slices
- * through the in memory large data set, and verify that the correct
- * data (and only the correct data) is read.
- */
-
- tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
- tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
- tv_ptr->count[0] = 1;
- tv_ptr->block[0] = 1;
-
- for (i = 1; i < tv_ptr->large_rank; i++) {
-
- tv_ptr->start[i] = 0;
- tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
- }
-
- ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
- tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded");
-
-#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
- fprintf(stdout, "%s reading slices of on disk small data set into slices of big data set.\n", fcnName);
-#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
-
- /* zero out the in memory large ds */
- memset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
-
- /* set up start, stride, count, and block -- note that we will
- * change start[] so as to read slices of the large cube.
- */
- for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
-
- tv_ptr->start[i] = 0;
- tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
-
- tv_ptr->block[i] = 1;
- }
- else {
-
- tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
- }
- }
-
- /* in serial versions of this test, we loop through all the dimensions
- * of the large data set that don't appear in the small data set.
- *
- * However, in the parallel version, each process only works with that
- * slice of the large (and small) data set indicated by its rank -- hence
- * we set the most slowly changing index to mpi_rank, and don't iterate
- * over it.
- */
-
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
-
- i = tv_ptr->mpi_rank;
- }
- else {
-
- i = 0;
- }
-
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
- * loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
- * test.
- */
-
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
-
- j = tv_ptr->mpi_rank;
- }
- else {
-
- j = 0;
- }
-
- do {
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
-
- k = tv_ptr->mpi_rank;
- }
- else {
-
- k = 0;
- }
-
- do {
- /* since small rank >= 2 and large_rank > small_rank, we
- * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
-             * (barring major re-organization), this gives us:
- *
- * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
- *
- * so no need to repeat the test in the outer loops --
- * just set l = 0.
- */
-
- l = 0;
- do {
- if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
-
- (tv_ptr->tests_skipped)++;
- }
- else { /* run the test */
-
- tv_ptr->skips = 0; /* reset the skips counter */
-
- /* we know that small_rank >= 1 and that large_rank > small_rank
- * by the assertions at the head of this function. Thus no
- * need for another inner loop.
- */
- tv_ptr->start[0] = (hsize_t)i;
- tv_ptr->start[1] = (hsize_t)j;
- tv_ptr->start[2] = (hsize_t)k;
- tv_ptr->start[3] = (hsize_t)l;
- tv_ptr->start[4] = 0;
-
- ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start_ptr,
- tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr);
- VRFY((ret != FAIL), "H5Sselect_hyperslab(mem_large_ds_sid) succeeded");
-
- /* verify that H5Sselect_shape_same() reports the two
- * selections as having the same shape.
- */
- check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
- VRFY((check == true), "H5Sselect_shape_same passed");
-
- /* Read selection from disk */
-#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
- fprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank),
- (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]),
- (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4]));
- fprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
- H5Sget_simple_extent_ndims(tv_ptr->mem_large_ds_sid),
- H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_0));
-#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
- ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
- tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1);
- VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
-
- /* verify that the expected data and only the
- * expected data was read.
- */
- ptr_1 = tv_ptr->large_ds_buf_1;
- expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
- start_index =
- (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
- tv_ptr->edge_size) +
- (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
- (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
- stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
-
- assert(start_index < stop_index);
- assert(stop_index <= tv_ptr->large_ds_size);
-
- for (n = 0; n < tv_ptr->large_ds_size; n++) {
-
- if ((n >= start_index) && (n <= stop_index)) {
-
- if (*ptr_1 != expected_value) {
-
- mis_match = true;
- }
- expected_value++;
- }
- else {
-
- if (*ptr_1 != 0) {
-
- mis_match = true;
- }
- }
- /* zero out the value for the next pass */
- *ptr_1 = 0;
-
- ptr_1++;
- }
-
-                    VRFY((mis_match == false), "slice of small ds read into large ds buffer data good.");
-
- (tv_ptr->tests_run)++;
- }
-
- l++;
-
- (tv_ptr->total_tests)++;
-
- } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
- k++;
- } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
- j++;
- } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
-
- return;
-
-} /* contig_hs_dr_pio_test__d2m_s2l() */
-
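-/* In the subtest above (and in the memory-to-disk subtests that follow) the
- * buffer being checked is zeroed before each transfer, so the verification
- * loop checks two things at once: elements inside the window
- * [start_index, stop_index] must hold the expected consecutive values, and
- * every element outside that window must still be zero -- i.e. the correct
- * data, and only the correct data, was moved.
- */
-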
-/*-------------------------------------------------------------------------
- * Function: contig_hs_dr_pio_test__m2d_l2s()
- *
- * Purpose: Part three of a series of tests of I/O to/from hyperslab
- *              selections of different rank in the parallel case.
- *
- * Verify that we can write from memory to file using
- * selections of different rank that H5Sselect_shape_same()
- * views as being of the same shape.
- *
- * Do this by writing small_rank - 1 dimensional slices from
- * the in memory large data set to the on disk small cube
- * dataset. After each write, read the slice of the small
- * dataset back from disk, and verify that it contains
- * the expected data. Verify that H5Sselect_shape_same()
- * returns true on the memory and file selections.
- *
- * Return: void
- *
- *-------------------------------------------------------------------------
- */
-
-#define CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG 0
-
-static void
-contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
-{
-#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
- const char *fcnName = "contig_hs_dr_pio_test__m2d_l2s()";
-#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
- bool mis_match = false;
- int i, j, k, l;
- size_t n;
- int mpi_rank; /* needed by the VRFY macro */
- size_t start_index;
- size_t stop_index;
- uint32_t expected_value;
- uint32_t *ptr_1;
- htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
-
- /* initialize the local copy of mpi_rank */
- mpi_rank = tv_ptr->mpi_rank;
-
- /* now we go in the opposite direction, verifying that we can write
- * from memory to file using selections of different rank that
- * H5Sselect_shape_same() views as being of the same shape.
- *
- * Start by writing small_rank - 1 dimensional slices from the in memory large
- * data set to the on disk small cube dataset. After each write, read the
- * slice of the small dataset back from disk, and verify that it contains
- * the expected data. Verify that H5Sselect_shape_same() returns true on
- * the memory and file selections.
- */
-
- tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
- tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
- tv_ptr->count[0] = 1;
- tv_ptr->block[0] = 1;
-
- for (i = 1; i < tv_ptr->large_rank; i++) {
-
- tv_ptr->start[i] = 0;
- tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
- }
-
- ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
- tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded");
-
- ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
- tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
-
- /* set up start, stride, count, and block -- note that we will
- * change start[] so as to read slices of the large cube.
- */
- for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
-
- tv_ptr->start[i] = 0;
- tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
-
- tv_ptr->block[i] = 1;
- }
- else {
-
- tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
- }
- }
-
- /* zero out the in memory small ds */
- memset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size);
-
-#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
- fprintf(stdout, "%s writing slices from big ds to slices of small ds on disk.\n", fcnName);
-#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
-
- /* in serial versions of this test, we loop through all the dimensions
- * of the large data set that don't appear in the small data set.
- *
- * However, in the parallel version, each process only works with that
- * slice of the large (and small) data set indicated by its rank -- hence
- * we set the most slowly changing index to mpi_rank, and don't iterate
- * over it.
- */
-
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
-
- i = tv_ptr->mpi_rank;
- }
- else {
-
- i = 0;
- }
-
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
- * loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
- * test.
- */
-
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
-
- j = tv_ptr->mpi_rank;
- }
- else {
-
- j = 0;
- }
-
- j = 0;
- do {
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
-
- k = tv_ptr->mpi_rank;
- }
- else {
-
- k = 0;
- }
-
- do {
- /* since small rank >= 2 and large_rank > small_rank, we
- * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
-             * (barring major re-organization), this gives us:
- *
- * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
- *
- * so no need to repeat the test in the outer loops --
- * just set l = 0.
- */
-
- l = 0;
- do {
- if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
-
- (tv_ptr->tests_skipped)++;
- }
- else { /* run the test */
-
- tv_ptr->skips = 0; /* reset the skips counter */
-
- /* we know that small_rank >= 1 and that large_rank > small_rank
- * by the assertions at the head of this function. Thus no
- * need for another inner loop.
- */
-
- /* zero out this rank's slice of the on disk small data set */
- ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
- tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_2);
- VRFY((ret >= 0), "H5Dwrite() zero slice to small ds succeeded.");
-
- /* select the portion of the in memory large cube from which we
- * are going to write data.
- */
- tv_ptr->start[0] = (hsize_t)i;
- tv_ptr->start[1] = (hsize_t)j;
- tv_ptr->start[2] = (hsize_t)k;
- tv_ptr->start[3] = (hsize_t)l;
- tv_ptr->start[4] = 0;
-
- ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start_ptr,
- tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr);
- VRFY((ret >= 0), "H5Sselect_hyperslab() mem_large_ds_sid succeeded.");
-
- /* verify that H5Sselect_shape_same() reports the in
- * memory slice through the cube selection and the
- * on disk full square selections as having the same shape.
- */
- check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
- VRFY((check == true), "H5Sselect_shape_same passed.");
-
- /* write the slice from the in memory large data set to the
- * slice of the on disk small dataset. */
-#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
- fprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank),
- (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]),
- (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4]));
- fprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
- H5Sget_simple_extent_ndims(tv_ptr->mem_large_ds_sid),
- H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_0));
-#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
- ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
- tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0);
-                    VRFY((ret >= 0), "H5Dwrite() slice from large ds to small ds succeeded.");
-
- /* read the on disk square into memory */
- ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
- tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_1);
- VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
-
- /* verify that expected data is retrieved */
-
- mis_match = false;
- ptr_1 = tv_ptr->small_ds_buf_1;
-
- expected_value =
- (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
- tv_ptr->edge_size) +
- (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
- (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
-
- start_index = (size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size;
- stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
-
- assert(start_index < stop_index);
- assert(stop_index <= tv_ptr->small_ds_size);
-
- for (n = 0; n < tv_ptr->small_ds_size; n++) {
-
- if ((n >= start_index) && (n <= stop_index)) {
-
- if (*ptr_1 != expected_value) {
-
- mis_match = true;
- }
- expected_value++;
- }
- else {
-
- if (*ptr_1 != 0) {
-
- mis_match = true;
- }
- }
- /* zero out the value for the next pass */
- *ptr_1 = 0;
-
- ptr_1++;
- }
-
- VRFY((mis_match == false), "small slice write from large ds data good.");
-
- (tv_ptr->tests_run)++;
- }
-
- l++;
-
- (tv_ptr->total_tests)++;
-
- } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
- k++;
- } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
- j++;
- } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
-
- return;
-
-} /* contig_hs_dr_pio_test__m2d_l2s() */
-
-/*-------------------------------------------------------------------------
- * Function: contig_hs_dr_pio_test__m2d_s2l()
- *
- * Purpose: Part four of a series of tests of I/O to/from hyperslab
- *              selections of different rank in the parallel case.
- *
- * Verify that we can write from memory to file using
- * selections of different rank that H5Sselect_shape_same()
- * views as being of the same shape.
- *
- * Do this by writing the contents of the process's slice of
- * the in memory small data set to slices of the on disk
- * large data set. After each write, read the process's
- * slice of the large data set back into memory, and verify
- * that it contains the expected data.
- *
- * Verify that H5Sselect_shape_same() returns true on the
- * memory and file selections.
- *
- * Return: void
- *
- *-------------------------------------------------------------------------
- */
-
-#define CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG 0
-
-static void
-contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
-{
-#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
- const char *fcnName = "contig_hs_dr_pio_test__m2d_s2l()";
-#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
- bool mis_match = false;
- int i, j, k, l;
- size_t n;
- int mpi_rank; /* needed by the VRFY macro */
- size_t start_index;
- size_t stop_index;
- uint32_t expected_value;
- uint32_t *ptr_1;
- htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
-
- /* initialize the local copy of mpi_rank */
- mpi_rank = tv_ptr->mpi_rank;
-
- /* Now write the contents of the process's slice of the in memory
- * small data set to slices of the on disk large data set. After
- * each write, read the process's slice of the large data set back
- * into memory, and verify that it contains the expected data.
- * Verify that H5Sselect_shape_same() returns true on the memory
- * and file selections.
- */
-
- /* select the slice of the in memory small data set associated with
- * the process's mpi rank.
- */
- tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
- tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
- tv_ptr->count[0] = 1;
- tv_ptr->block[0] = 1;
-
- for (i = 1; i < tv_ptr->large_rank; i++) {
-
- tv_ptr->start[i] = 0;
- tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
- }
-
- ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
- tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
-
- /* set up start, stride, count, and block -- note that we will
- * change start[] so as to write slices of the small data set to
- * slices of the large data set.
- */
- for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
-
- tv_ptr->start[i] = 0;
- tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
-
- tv_ptr->block[i] = 1;
- }
- else {
-
- tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
- }
- }
-
- /* zero out the in memory large ds */
- memset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
-
-#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
- fprintf(stdout, "%s writing process slices of small ds to slices of large ds on disk.\n", fcnName);
-#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
-
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
-
- i = tv_ptr->mpi_rank;
- }
- else {
-
- i = 0;
- }
-
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
- * loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
- * test.
- */
-
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
-
- j = tv_ptr->mpi_rank;
- }
- else {
-
- j = 0;
- }
-
- do {
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
-
- k = tv_ptr->mpi_rank;
- }
- else {
-
- k = 0;
- }
-
- do {
- /* since small rank >= 2 and large_rank > small_rank, we
- * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
-             * (barring major re-organization), this gives us:
- *
- * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
- *
- * so no need to repeat the test in the outer loops --
- * just set l = 0.
- */
-
- l = 0;
- do {
- if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
-
- (tv_ptr->tests_skipped)++;
-
-#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
- tv_ptr->start[0] = (hsize_t)i;
- tv_ptr->start[1] = (hsize_t)j;
- tv_ptr->start[2] = (hsize_t)k;
- tv_ptr->start[3] = (hsize_t)l;
- tv_ptr->start[4] = 0;
-
- fprintf(stdout, "%s:%d: skipping test with start = %d %d %d %d %d.\n", fcnName,
- (int)(tv_ptr->mpi_rank), (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
- (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4]));
- fprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
- H5Sget_simple_extent_ndims(tv_ptr->mem_small_ds_sid),
- H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0));
-#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
- }
- else { /* run the test */
-
- tv_ptr->skips = 0; /* reset the skips counter */
-
- /* we know that small_rank >= 1 and that large_rank > small_rank
- * by the assertions at the head of this function. Thus no
- * need for another inner loop.
- */
-
-                    /* Zero out this process's slice of the on disk large data set.
- * Note that this will leave one slice with its original data
- * as there is one more slice than processes.
- */
- ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->large_ds_slice_sid,
- tv_ptr->file_large_ds_process_slice_sid, tv_ptr->xfer_plist,
- tv_ptr->large_ds_buf_2);
- VRFY((ret != FAIL), "H5Dwrite() to zero large ds succeeded");
-
- /* select the portion of the in memory large cube to which we
- * are going to write data.
- */
- tv_ptr->start[0] = (hsize_t)i;
- tv_ptr->start[1] = (hsize_t)j;
- tv_ptr->start[2] = (hsize_t)k;
- tv_ptr->start[3] = (hsize_t)l;
- tv_ptr->start[4] = 0;
-
- ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start_ptr,
- tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr);
- VRFY((ret != FAIL), "H5Sselect_hyperslab() target large ds slice succeeded");
-
- /* verify that H5Sselect_shape_same() reports the in
- * memory small data set slice selection and the
- * on disk slice through the large data set selection
- * as having the same shape.
- */
- check = H5Sselect_shape_same(tv_ptr->mem_small_ds_sid, tv_ptr->file_large_ds_sid_0);
- VRFY((check == true), "H5Sselect_shape_same passed");
-
- /* write the small data set slice from memory to the
- * target slice of the disk data set
- */
-#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
- fprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank),
- (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]),
- (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4]));
- fprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
- H5Sget_simple_extent_ndims(tv_ptr->mem_small_ds_sid),
- H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0));
-#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
- ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
- tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_0);
- VRFY((ret != FAIL), "H5Dwrite of small ds slice to large ds succeeded");
-
-                    /* read this process's slice of the on disk large
- * data set into memory.
- */
-
- ret = H5Dread(
- tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_process_slice_sid,
- tv_ptr->file_large_ds_process_slice_sid, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1);
- VRFY((ret != FAIL), "H5Dread() of process slice of large ds succeeded");
-
- /* verify that the expected data and only the
- * expected data was read.
- */
- ptr_1 = tv_ptr->large_ds_buf_1;
- expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
-
- start_index =
- (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
- tv_ptr->edge_size) +
- (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
- (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
- stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
-
- assert(start_index < stop_index);
- assert(stop_index < tv_ptr->large_ds_size);
-
- for (n = 0; n < tv_ptr->large_ds_size; n++) {
-
- if ((n >= start_index) && (n <= stop_index)) {
-
- if (*ptr_1 != expected_value) {
-
- mis_match = true;
- }
-
- expected_value++;
- }
- else {
-
- if (*ptr_1 != 0) {
-
- mis_match = true;
- }
- }
- /* zero out buffer for next test */
- *ptr_1 = 0;
- ptr_1++;
- }
-
- VRFY((mis_match == false), "small ds slice write to large ds slice data good.");
-
- (tv_ptr->tests_run)++;
- }
-
- l++;
-
- (tv_ptr->total_tests)++;
-
- } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
- k++;
- } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
- j++;
- } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
-
- return;
-
-} /* contig_hs_dr_pio_test__m2d_s2l() */
-
-/*-------------------------------------------------------------------------
- * Function: contig_hs_dr_pio_test__run_test()
- *
- * Purpose: Test I/O to/from hyperslab selections of different rank in
- *              the parallel case.
- *
- * Return: void
- *
- *-------------------------------------------------------------------------
- */
-
-#define CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG 0
-
-static void
-contig_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const int chunk_edge_size,
- const int small_rank, const int large_rank, const bool use_collective_io,
- const hid_t dset_type, int express_test, int *skips_ptr, int max_skips,
- int64_t *total_tests_ptr, int64_t *tests_run_ptr, int64_t *tests_skipped_ptr,
- int mpi_rank)
-{
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
- const char *fcnName = "contig_hs_dr_pio_test__run_test()";
-#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
- struct hs_dr_pio_test_vars_t test_vars = {
- /* int mpi_size = */ -1,
- /* int mpi_rank = */ -1,
- /* MPI_Comm mpi_comm = */ MPI_COMM_NULL,
-        /* MPI_Info mpi_info = */ MPI_INFO_NULL,
- /* int test_num = */ -1,
- /* int edge_size = */ -1,
- /* int checker_edge_size = */ -1,
- /* int chunk_edge_size = */ -1,
- /* int small_rank = */ -1,
- /* int large_rank = */ -1,
- /* hid_t dset_type = */ -1,
- /* uint32_t * small_ds_buf_0 = */ NULL,
- /* uint32_t * small_ds_buf_1 = */ NULL,
- /* uint32_t * small_ds_buf_2 = */ NULL,
- /* uint32_t * small_ds_slice_buf = */ NULL,
- /* uint32_t * large_ds_buf_0 = */ NULL,
- /* uint32_t * large_ds_buf_1 = */ NULL,
- /* uint32_t * large_ds_buf_2 = */ NULL,
- /* uint32_t * large_ds_slice_buf = */ NULL,
- /* int small_ds_offset = */ -1,
- /* int large_ds_offset = */ -1,
- /* hid_t fid = */ -1, /* HDF5 file ID */
- /* hid_t xfer_plist = */ H5P_DEFAULT,
- /* hid_t full_mem_small_ds_sid = */ -1,
- /* hid_t full_file_small_ds_sid = */ -1,
- /* hid_t mem_small_ds_sid = */ -1,
- /* hid_t file_small_ds_sid_0 = */ -1,
- /* hid_t file_small_ds_sid_1 = */ -1,
- /* hid_t small_ds_slice_sid = */ -1,
- /* hid_t full_mem_large_ds_sid = */ -1,
- /* hid_t full_file_large_ds_sid = */ -1,
- /* hid_t mem_large_ds_sid = */ -1,
- /* hid_t file_large_ds_sid_0 = */ -1,
- /* hid_t file_large_ds_sid_1 = */ -1,
- /* hid_t file_large_ds_process_slice_sid = */ -1,
- /* hid_t mem_large_ds_process_slice_sid = */ -1,
- /* hid_t large_ds_slice_sid = */ -1,
- /* hid_t small_dataset = */ -1, /* Dataset ID */
- /* hid_t large_dataset = */ -1, /* Dataset ID */
- /* size_t small_ds_size = */ 1,
- /* size_t small_ds_slice_size = */ 1,
- /* size_t large_ds_size = */ 1,
- /* size_t large_ds_slice_size = */ 1,
- /* hsize_t dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
- /* hsize_t chunk_dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
- /* hsize_t start[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
- /* hsize_t stride[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
- /* hsize_t count[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
- /* hsize_t block[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
- /* hsize_t * start_ptr = */ NULL,
- /* hsize_t * stride_ptr = */ NULL,
- /* hsize_t * count_ptr = */ NULL,
- /* hsize_t * block_ptr = */ NULL,
- /* int skips = */ 0,
- /* int max_skips = */ 0,
- /* int64_t total_tests = */ 0,
- /* int64_t tests_run = */ 0,
- /* int64_t tests_skipped = */ 0};
- struct hs_dr_pio_test_vars_t *tv_ptr = &test_vars;
-
- if (MAINPROCESS)
- printf("\r - running test #%lld: small rank = %d, large rank = %d", (long long)(test_num + 1),
- small_rank, large_rank);
-
- hs_dr_pio_test__setup(test_num, edge_size, -1, chunk_edge_size, small_rank, large_rank, use_collective_io,
- dset_type, express_test, tv_ptr);
-
- /* initialize skips & max_skips */
- tv_ptr->skips = *skips_ptr;
- tv_ptr->max_skips = max_skips;
-
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
- if (MAINPROCESS) {
- fprintf(stdout, "test %d: small rank = %d, large rank = %d.\n", test_num, small_rank, large_rank);
- fprintf(stdout, "test %d: Initialization complete.\n", test_num);
- }
-#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
-
- /* first, verify that we can read from disk correctly using selections
- * of different rank that H5Sselect_shape_same() views as being of the
- * same shape.
- *
-     * Start by reading small_rank - 1 dimensional slices from the on disk
- * large cube, and verifying that the data read is correct. Verify that
- * H5Sselect_shape_same() returns true on the memory and file selections.
- */
-
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
- if (MAINPROCESS) {
- fprintf(stdout, "test %d: running contig_hs_dr_pio_test__d2m_l2s.\n", test_num);
- }
-#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
- contig_hs_dr_pio_test__d2m_l2s(tv_ptr);
-
- /* Second, read slices of the on disk small data set into slices
- * through the in memory large data set, and verify that the correct
- * data (and only the correct data) is read.
- */
-
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
- if (MAINPROCESS) {
- fprintf(stdout, "test %d: running contig_hs_dr_pio_test__d2m_s2l.\n", test_num);
- }
-#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
- contig_hs_dr_pio_test__d2m_s2l(tv_ptr);
-
- /* now we go in the opposite direction, verifying that we can write
- * from memory to file using selections of different rank that
- * H5Sselect_shape_same() views as being of the same shape.
- *
- * Start by writing small_rank - 1 D slices from the in memory large data
- * set to the on disk small cube dataset. After each write, read the
- * slice of the small dataset back from disk, and verify that it contains
- * the expected data. Verify that H5Sselect_shape_same() returns true on
- * the memory and file selections.
- */
-
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
- if (MAINPROCESS) {
- fprintf(stdout, "test %d: running contig_hs_dr_pio_test__m2d_l2s.\n", test_num);
- }
-#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
- contig_hs_dr_pio_test__m2d_l2s(tv_ptr);
-
- /* Now write the contents of the process's slice of the in memory
- * small data set to slices of the on disk large data set. After
- * each write, read the process's slice of the large data set back
- * into memory, and verify that it contains the expected data.
- * Verify that H5Sselect_shape_same() returns true on the memory
- * and file selections.
- */
-
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
- if (MAINPROCESS) {
- fprintf(stdout, "test %d: running contig_hs_dr_pio_test__m2d_s2l.\n", test_num);
- }
-#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
- contig_hs_dr_pio_test__m2d_s2l(tv_ptr);
-
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
- if (MAINPROCESS) {
- fprintf(stdout, "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n", test_num,
- (long long)(tv_ptr->tests_run), (long long)(tv_ptr->tests_skipped),
- (long long)(tv_ptr->total_tests));
- }
-#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
-
- hs_dr_pio_test__takedown(tv_ptr);
-
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
- if (MAINPROCESS) {
- fprintf(stdout, "test %d: Takedown complete.\n", test_num);
- }
-#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
-
- *skips_ptr = tv_ptr->skips;
- *total_tests_ptr += tv_ptr->total_tests;
- *tests_run_ptr += tv_ptr->tests_run;
- *tests_skipped_ptr += tv_ptr->tests_skipped;
-
- return;
-
-} /* contig_hs_dr_pio_test__run_test() */
-
-/*-------------------------------------------------------------------------
- * Function: contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
- *
- * Purpose: Test I/O to/from hyperslab selections of different rank in
- * the parallel case.
- *
- * Return: void
- *
- *-------------------------------------------------------------------------
- */
-
-#define CONTIG_HS_DR_PIO_TEST__DEBUG 0
-
-static void
-contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
-{
- int express_test;
- int local_express_test;
- int mpi_rank = -1;
- int mpi_size;
- int test_num = 0;
- int edge_size;
- int chunk_edge_size = 0;
- int small_rank;
- int large_rank;
- int mpi_result;
- int skips = 0;
- int max_skips = 0;
-    /* The following table lists the number of sub-tests skipped between
- * each test that is actually executed as a function of the express
- * test level. Note that any value in excess of 4880 will cause all
- * sub tests to be skipped.
- */
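-    /* For example, at express level 1 (max_skips == 4) the skip counter in
-     * the subtest loops lets four sub-tests be skipped for every one that is
-     * actually run, i.e. roughly every 5th sub-test executes.
-     */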
- int max_skips_tbl[4] = {0, 4, 64, 1024};
- hid_t dset_type = H5T_NATIVE_UINT;
- int64_t total_tests = 0;
- int64_t tests_run = 0;
- int64_t tests_skipped = 0;
-
- HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned));
-
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- edge_size = (mpi_size > 6 ? mpi_size : 6);
-
- local_express_test = EXPRESS_MODE; /* GetTestExpress(); */
-
- mpi_result = MPI_Allreduce((void *)&local_express_test, (void *)&express_test, 1, MPI_INT, MPI_MAX,
- MPI_COMM_WORLD);
-
- VRFY((mpi_result == MPI_SUCCESS), "MPI_Allreduce(0) succeeded");
-
- if (local_express_test < 0) {
- max_skips = max_skips_tbl[0];
- }
- else if (local_express_test > 3) {
- max_skips = max_skips_tbl[3];
- }
- else {
- max_skips = max_skips_tbl[local_express_test];
- }
-
- for (large_rank = 3; large_rank <= PAR_SS_DR_MAX_RANK; large_rank++) {
-
- for (small_rank = 2; small_rank < large_rank; small_rank++) {
-
- switch (sstest_type) {
- case IND_CONTIG:
- /* contiguous data set, independent I/O */
- chunk_edge_size = 0;
-
- contig_hs_dr_pio_test__run_test(
- test_num, edge_size, chunk_edge_size, small_rank, large_rank, false, dset_type,
- express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank);
- test_num++;
- break;
- /* end of case IND_CONTIG */
-
- case COL_CONTIG:
- /* contiguous data set, collective I/O */
- chunk_edge_size = 0;
-
- contig_hs_dr_pio_test__run_test(
- test_num, edge_size, chunk_edge_size, small_rank, large_rank, true, dset_type,
- express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank);
- test_num++;
- break;
- /* end of case COL_CONTIG */
-
- case IND_CHUNKED:
- /* chunked data set, independent I/O */
- chunk_edge_size = 5;
-
- contig_hs_dr_pio_test__run_test(
- test_num, edge_size, chunk_edge_size, small_rank, large_rank, false, dset_type,
- express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank);
- test_num++;
- break;
- /* end of case IND_CHUNKED */
-
- case COL_CHUNKED:
- /* chunked data set, collective I/O */
- chunk_edge_size = 5;
-
- contig_hs_dr_pio_test__run_test(
- test_num, edge_size, chunk_edge_size, small_rank, large_rank, true, dset_type,
- express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank);
- test_num++;
- break;
- /* end of case COL_CHUNKED */
-
- default:
- VRFY((false), "unknown test type");
- break;
-
- } /* end of switch(sstest_type) */
-#if CONTIG_HS_DR_PIO_TEST__DEBUG
- if ((MAINPROCESS) && (tests_skipped > 0)) {
- fprintf(stdout, " run/skipped/total = %lld/%lld/%lld.\n", tests_run, tests_skipped,
- total_tests);
- }
-#endif /* CONTIG_HS_DR_PIO_TEST__DEBUG */
- }
- }
-
- if (MAINPROCESS) {
- if (tests_skipped > 0) {
- fprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n",
- tests_skipped, total_tests);
- }
- else
- printf("\n");
- }
-
- return;
-
-} /* contig_hs_dr_pio_test() */
-
-/****************************************************************
-**
-** ckrbrd_hs_dr_pio_test__slct_ckrbrd():
- ** Given a dataspace of rank tgt_rank with dimensions:
- **
- ** (mpi_size + 1), edge_size, ... , edge_size
- **
- ** an edge_size, and a checker_edge_size, select a checker
- ** board selection of a sel_rank (sel_rank < tgt_rank)
- ** dimensional slice through the dataspace parallel to the
- ** sel_rank fastest changing indices, with origin (in the
- ** higher indices) as indicated by the start array.
-**
-** Note that this function, like all its relatives, is
-** hard coded to presume a maximum dataspace rank of 5.
-** While this maximum is declared as a constant, increasing
-** it will require extensive coding in addition to changing
-** the value of the constant.
-**
-** JRM -- 10/8/09
-**
-****************************************************************/
-
-#define CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG 0
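
/* A minimal sketch of the checkerboard idea used by the function below,
 * reduced to one dimension.  The helper name and parameters are illustrative
 * only and error checking is omitted: even checkers are selected with
 * H5S_SELECT_SET, odd checkers are OR'd in, and the union is clipped back to
 * the extent with H5S_SELECT_AND, mirroring the clip step at the end of
 * ckrbrd_hs_dr_pio_test__slct_ckrbrd().
 */
static void
sketch_1d_checkerboard(hid_t sid, hsize_t extent, hsize_t checker)
{
    hsize_t start  = 0;
    hsize_t stride = 2 * checker;
    hsize_t count  = (extent + stride - 1) / stride; /* round up */
    hsize_t block  = checker;

    /* even checkers: a block of 'checker' elements every 2 * checker */
    H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block);

    /* odd checkers: the same pattern offset by one checker edge */
    start = checker;
    count = (extent - checker + stride - 1) / stride;
    H5Sselect_hyperslab(sid, H5S_SELECT_OR, &start, &stride, &count, &block);

    /* clip the union back to [0, extent) in case the last block overhangs */
    start  = 0;
    stride = extent;
    count  = 1;
    block  = extent;
    H5Sselect_hyperslab(sid, H5S_SELECT_AND, &start, &stride, &count, &block);
}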
-
-static void
-ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank, const hid_t tgt_sid, const int tgt_rank,
- const int edge_size, const int checker_edge_size, const int sel_rank,
- hsize_t sel_start[])
-{
-#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
- const char *fcnName = "ckrbrd_hs_dr_pio_test__slct_ckrbrd():";
-#endif
- bool first_selection = true;
- int i, j, k, l, m;
- int n_cube_offset;
- int sel_offset;
- const int test_max_rank = PAR_SS_DR_MAX_RANK; /* must update code if */
- /* this changes */
- hsize_t base_count;
- hsize_t offset_count;
- hsize_t start[PAR_SS_DR_MAX_RANK];
- hsize_t stride[PAR_SS_DR_MAX_RANK];
- hsize_t count[PAR_SS_DR_MAX_RANK];
- hsize_t block[PAR_SS_DR_MAX_RANK];
- herr_t ret; /* Generic return value */
-
- assert(edge_size >= 6);
- assert(0 < checker_edge_size);
- assert(checker_edge_size <= edge_size);
- assert(0 < sel_rank);
- assert(sel_rank <= tgt_rank);
- assert(tgt_rank <= test_max_rank);
- assert(test_max_rank <= PAR_SS_DR_MAX_RANK);
-
- sel_offset = test_max_rank - sel_rank;
- assert(sel_offset >= 0);
-
- n_cube_offset = test_max_rank - tgt_rank;
- assert(n_cube_offset >= 0);
- assert(n_cube_offset <= sel_offset);
-
-#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
- fprintf(stdout, "%s:%d: edge_size/checker_edge_size = %d/%d\n", fcnName, mpi_rank, edge_size,
- checker_edge_size);
- fprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n", fcnName, mpi_rank, sel_rank, sel_offset);
- fprintf(stdout, "%s:%d: tgt_rank/n_cube_offset = %d/%d.\n", fcnName, mpi_rank, tgt_rank, n_cube_offset);
-#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */
-
- /* First, compute the base count (which assumes start == 0
- * for the associated offset) and offset_count (which
- * assumes start == checker_edge_size for the associated
- * offset).
- *
- * Note that the following computation depends on the C99
- * requirement that integer division discard any fraction
- * (truncation towards zero) to function correctly. As we
- * now require C99, this shouldn't be a problem, but noting
- * it may save us some pain if we are ever obliged to support
- * pre-C99 compilers again.
- */
-
- base_count = (hsize_t)(edge_size / (checker_edge_size * 2));
-
- if ((edge_size % (checker_edge_size * 2)) > 0) {
-
- base_count++;
- }
-
- offset_count = (hsize_t)((edge_size - checker_edge_size) / (checker_edge_size * 2));
-
- if (((edge_size - checker_edge_size) % (checker_edge_size * 2)) > 0) {
-
- offset_count++;
- }
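
    /* Worked example with illustrative values (not the test's own): for
     * edge_size = 10 and checker_edge_size = 3 the pattern period is 6, so
     * base_count = 10 / 6 = 1, bumped to 2 by the remainder of 4, and
     * offset_count = (10 - 3) / 6 = 1, bumped to 2 by the remainder of 1.
     */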
-
- /* Now set up the stride and block arrays, and portions of the start
- * and count arrays that will not be altered during the selection of
- * the checker board.
- */
- i = 0;
- while (i < n_cube_offset) {
-
- /* these values should never be used */
- start[i] = 0;
- stride[i] = 0;
- count[i] = 0;
- block[i] = 0;
-
- i++;
- }
-
- while (i < sel_offset) {
-
- start[i] = sel_start[i];
- stride[i] = (hsize_t)(2 * edge_size);
- count[i] = 1;
- block[i] = 1;
-
- i++;
- }
-
- while (i < test_max_rank) {
-
- stride[i] = (hsize_t)(2 * checker_edge_size);
- block[i] = (hsize_t)checker_edge_size;
-
- i++;
- }
-
- i = 0;
- do {
- if (0 >= sel_offset) {
-
- if (i == 0) {
-
- start[0] = 0;
- count[0] = base_count;
- }
- else {
-
- start[0] = (hsize_t)checker_edge_size;
- count[0] = offset_count;
- }
- }
-
- j = 0;
- do {
- if (1 >= sel_offset) {
-
- if (j == 0) {
-
- start[1] = 0;
- count[1] = base_count;
- }
- else {
-
- start[1] = (hsize_t)checker_edge_size;
- count[1] = offset_count;
- }
- }
-
- k = 0;
- do {
- if (2 >= sel_offset) {
-
- if (k == 0) {
-
- start[2] = 0;
- count[2] = base_count;
- }
- else {
-
- start[2] = (hsize_t)checker_edge_size;
- count[2] = offset_count;
- }
- }
-
- l = 0;
- do {
- if (3 >= sel_offset) {
-
- if (l == 0) {
-
- start[3] = 0;
- count[3] = base_count;
- }
- else {
-
- start[3] = (hsize_t)checker_edge_size;
- count[3] = offset_count;
- }
- }
-
- m = 0;
- do {
- if (4 >= sel_offset) {
-
- if (m == 0) {
-
- start[4] = 0;
- count[4] = base_count;
- }
- else {
-
- start[4] = (hsize_t)checker_edge_size;
- count[4] = offset_count;
- }
- }
-
- if (((i + j + k + l + m) % 2) == 0) {
-
-#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
- fprintf(stdout, "%s%d: *** first_selection = %d ***\n", fcnName, mpi_rank,
- (int)first_selection);
- fprintf(stdout, "%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n", fcnName, mpi_rank, i, j, k,
- l, m);
- fprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, mpi_rank,
- (int)start[0], (int)start[1], (int)start[2], (int)start[3],
- (int)start[4]);
- fprintf(stdout, "%s:%d: stride = %d %d %d %d %d.\n", fcnName, mpi_rank,
- (int)stride[0], (int)stride[1], (int)stride[2], (int)stride[3],
- (int)stride[4]);
- fprintf(stdout, "%s:%d: count = %d %d %d %d %d.\n", fcnName, mpi_rank,
- (int)count[0], (int)count[1], (int)count[2], (int)count[3],
- (int)count[4]);
- fprintf(stdout, "%s:%d: block = %d %d %d %d %d.\n", fcnName, mpi_rank,
- (int)block[0], (int)block[1], (int)block[2], (int)block[3],
- (int)block[4]);
- fprintf(stdout, "%s:%d: n-cube extent dims = %d.\n", fcnName, mpi_rank,
- H5Sget_simple_extent_ndims(tgt_sid));
- fprintf(stdout, "%s:%d: selection rank = %d.\n", fcnName, mpi_rank, sel_rank);
-#endif
-
- if (first_selection) {
-
- first_selection = false;
-
- ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_SET, &(start[n_cube_offset]),
- &(stride[n_cube_offset]), &(count[n_cube_offset]),
- &(block[n_cube_offset]));
-
- VRFY((ret != FAIL), "H5Sselect_hyperslab(SET) succeeded");
- }
- else {
-
- ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_OR, &(start[n_cube_offset]),
- &(stride[n_cube_offset]), &(count[n_cube_offset]),
- &(block[n_cube_offset]));
-
- VRFY((ret != FAIL), "H5Sselect_hyperslab(OR) succeeded");
- }
- }
-
- m++;
-
- } while ((m <= 1) && (4 >= sel_offset));
-
- l++;
-
- } while ((l <= 1) && (3 >= sel_offset));
-
- k++;
-
- } while ((k <= 1) && (2 >= sel_offset));
-
- j++;
-
- } while ((j <= 1) && (1 >= sel_offset));
-
- i++;
-
- } while ((i <= 1) && (0 >= sel_offset));
-
-#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
- fprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank,
- (int)H5Sget_select_npoints(tgt_sid));
-#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */
-
- /* Clip the selection back to the dataspace proper. */
-
- for (i = 0; i < test_max_rank; i++) {
-
- start[i] = 0;
- stride[i] = (hsize_t)edge_size;
- count[i] = 1;
- block[i] = (hsize_t)edge_size;
- }
-
- ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_AND, start, stride, count, block);
-
- VRFY((ret != FAIL), "H5Sselect_hyperslab(AND) succeeded");
-
-#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
- fprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank,
- (int)H5Sget_select_npoints(tgt_sid));
- fprintf(stdout, "%s%d: done.\n", fcnName, mpi_rank);
-#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */
-
- return;
-
-} /* ckrbrd_hs_dr_pio_test__slct_ckrbrd() */
-
-/****************************************************************
-**
-** ckrbrd_hs_dr_pio_test__verify_data():
-**
-** Examine the supplied buffer to see if it contains the
-** expected data. Return true if it does, and false
-** otherwise.
-**
- ** The supplied buffer is presumed to be this process's slice
- ** of the target data set. Each such slice will be an
- ** n-cube of rank (rank - 1) and the supplied edge_size with
- ** origin (mpi_rank, 0, ... , 0) in the target data set.
-**
-** Further, the buffer is presumed to be the result of reading
-** or writing a checker board selection of an m (1 <= m <
- ** rank) dimensional slice through this process's slice
-** of the target data set. Also, this slice must be parallel
-** to the fastest changing indices.
-**
-** It is further presumed that the buffer was zeroed before
-** the read/write, and that the full target data set (i.e.
-** the buffer/data set for all processes) was initialized
-** with the natural numbers listed in order from the origin
-** along the fastest changing axis.
-**
-** Thus for a 20x10x10 dataset, the value stored in location
-** (x, y, z) (assuming that z is the fastest changing index
-** and x the slowest) is assumed to be:
-**
-** (10 * 10 * x) + (10 * y) + z
-**
-** Further, supposing that this is process 10, this process's
-** slice of the dataset would be a 10 x 10 2-cube with origin
- ** (10, 0, 0) in the data set, and would be initialized (prior
-** to the checkerboard selection) as follows:
-**
-** 1000, 1001, 1002, ... 1008, 1009
-** 1010, 1011, 1012, ... 1018, 1019
-** . . . . .
-** . . . . .
-** . . . . .
-** 1090, 1091, 1092, ... 1098, 1099
-**
- ** In the case of a read from the process's slice of another
-** data set of different rank, the values expected will have
-** to be adjusted accordingly. This is done via the
-** first_expected_val parameter.
-**
-** Finally, the function presumes that the first element
- ** of the buffer resides at the origin of either
-** a selected or an unselected checker. (Translation:
-** if partial checkers appear in the buffer, they will
-** intersect the edges of the n-cube opposite the origin.)
-**
-****************************************************************/
-
-#define CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG 0
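
/* The row-major seeding scheme described above, written out as a small
 * hypothetical helper (the test itself initializes its buffers elsewhere):
 * with dim2 the fastest changing index, the value stored at (x, y, z) is
 * just that element's linear offset.
 */
static uint32_t
sketch_seed_value(uint32_t x, uint32_t y, uint32_t z, uint32_t dim1, uint32_t dim2)
{
    /* e.g. for the 20 x 10 x 10 example above: (10 * 10 * x) + (10 * y) + z */
    return (dim1 * dim2 * x) + (dim2 * y) + z;
}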
-
-static bool
-ckrbrd_hs_dr_pio_test__verify_data(uint32_t *buf_ptr, const int rank, const int edge_size,
- const int checker_edge_size, uint32_t first_expected_val,
- bool buf_starts_in_checker)
-{
-#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
- const char *fcnName = "ckrbrd_hs_dr_pio_test__verify_data():";
-#endif
- bool good_data = true;
- bool in_checker;
- bool start_in_checker[5];
- uint32_t expected_value;
- uint32_t *val_ptr;
- int i, j, k, l, m; /* to track position in n-cube */
- int v, w, x, y, z; /* to track position in checker */
- const int test_max_rank = 5; /* code changes needed if this is increased */
-
- assert(buf_ptr != NULL);
- assert(0 < rank);
- assert(rank <= test_max_rank);
- assert(edge_size >= 6);
- assert(0 < checker_edge_size);
- assert(checker_edge_size <= edge_size);
- assert(test_max_rank <= PAR_SS_DR_MAX_RANK);
-
-#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
-    {
-        int mpi_rank;
-
-        MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-        fprintf(stdout, "%s mpi_rank = %d.\n", fcnName, mpi_rank);
-        fprintf(stdout, "%s rank = %d.\n", fcnName, rank);
-        fprintf(stdout, "%s edge_size = %d.\n", fcnName, edge_size);
-        fprintf(stdout, "%s checker_edge_size = %d.\n", fcnName, checker_edge_size);
-        fprintf(stdout, "%s first_expected_val = %d.\n", fcnName, (int)first_expected_val);
-        fprintf(stdout, "%s starts_in_checker = %d.\n", fcnName, (int)buf_starts_in_checker);
-    }
-#endif
-
-val_ptr = buf_ptr;
-expected_value = first_expected_val;
-
-i = 0;
-v = 0;
-start_in_checker[0] = buf_starts_in_checker;
-do {
- if (v >= checker_edge_size) {
-
- start_in_checker[0] = !start_in_checker[0];
- v = 0;
- }
-
- j = 0;
- w = 0;
- start_in_checker[1] = start_in_checker[0];
- do {
- if (w >= checker_edge_size) {
-
- start_in_checker[1] = !start_in_checker[1];
- w = 0;
- }
-
- k = 0;
- x = 0;
- start_in_checker[2] = start_in_checker[1];
- do {
- if (x >= checker_edge_size) {
-
- start_in_checker[2] = !start_in_checker[2];
- x = 0;
- }
-
- l = 0;
- y = 0;
- start_in_checker[3] = start_in_checker[2];
- do {
- if (y >= checker_edge_size) {
-
- start_in_checker[3] = !start_in_checker[3];
- y = 0;
- }
-
- m = 0;
- z = 0;
-#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
- fprintf(stdout, "%d, %d, %d, %d, %d:", i, j, k, l, m);
-#endif
- in_checker = start_in_checker[3];
- do {
-#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
- fprintf(stdout, " %d", (int)(*val_ptr));
-#endif
- if (z >= checker_edge_size) {
-
- in_checker = !in_checker;
- z = 0;
- }
-
- if (in_checker) {
-
- if (*val_ptr != expected_value) {
-
- good_data = false;
- }
-
- /* zero out buffer for reuse */
- *val_ptr = 0;
- }
- else if (*val_ptr != 0) {
-
- good_data = false;
-
- /* zero out buffer for reuse */
- *val_ptr = 0;
- }
-
- val_ptr++;
- expected_value++;
- m++;
- z++;
-
- } while ((rank >= (test_max_rank - 4)) && (m < edge_size));
-#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
- fprintf(stdout, "\n");
-#endif
- l++;
- y++;
- } while ((rank >= (test_max_rank - 3)) && (l < edge_size));
- k++;
- x++;
- } while ((rank >= (test_max_rank - 2)) && (k < edge_size));
- j++;
- w++;
- } while ((rank >= (test_max_rank - 1)) && (j < edge_size));
- i++;
- v++;
-} while ((rank >= test_max_rank) && (i < edge_size));
-
-return (good_data);
-
-} /* ckrbrd_hs_dr_pio_test__verify_data() */
-
-/*-------------------------------------------------------------------------
- * Function: ckrbrd_hs_dr_pio_test__d2m_l2s()
- *
- * Purpose: Part one of a series of tests of I/O to/from hyperslab
- * selections of different rank in the parallel case.
- *
- * Verify that we can read from disk correctly using checker
- * board selections of different rank that
- * H5Sselect_shape_same() views as being of the same shape.
- *
- * In this function, we test this by reading small_rank - 1
- * checker board slices from the on disk large cube, and
- * verifying that the data read is correct. Verify that
- * H5Sselect_shape_same() returns true on the memory and
- * file selections.
- *
- * Return: void
- *
- *-------------------------------------------------------------------------
- */
-
-#define CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG 0
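
/* The read-side pattern exercised below, reduced to a minimal sketch.  The
 * helper name and its lack of error detail are illustrative only: the test
 * checks that the memory and file selections are judged equivalent by
 * H5Sselect_shape_same() before each read, then verifies the data read.
 */
static herr_t
sketch_shape_same_read(hid_t dset, hid_t mem_sid, hid_t file_sid, hid_t dxpl, uint32_t *buf)
{
    htri_t same = H5Sselect_shape_same(mem_sid, file_sid);

    if (same != true)
        return FAIL; /* mismatched shapes would make the read invalid */

    return H5Dread(dset, H5T_NATIVE_UINT32, mem_sid, file_sid, dxpl, buf);
}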
-
-static void
-ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
-{
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
- const char *fcnName = "ckrbrd_hs_dr_pio_test__d2m_l2s()";
- uint32_t *ptr_0;
-#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
- bool data_ok = false;
- int i, j, k, l;
- uint32_t expected_value;
- int mpi_rank; /* needed by VRFY */
- hsize_t sel_start[PAR_SS_DR_MAX_RANK];
- htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
-
- /* initialize the local copy of mpi_rank */
- mpi_rank = tv_ptr->mpi_rank;
-
- /* first, verify that we can read from disk correctly using selections
- * of different rank that H5Sselect_shape_same() views as being of the
- * same shape.
- *
- * Start by reading a (small_rank - 1)-D checker board slice from this
- * process's slice of the on disk large data set, and verifying that the
- * data read is correct. Verify that H5Sselect_shape_same() returns
- * true on the memory and file selections.
- *
- * The first step is to set up the needed checker board selection in the
- * in memory small cube
- */
-
- sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
- sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank);
-
- ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->small_ds_slice_sid, tv_ptr->small_rank - 1,
- tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1,
- sel_start);
-
- /* zero out the buffer we will be reading into */
- memset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
-
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
- fprintf(stdout, "%s:%d: initial small_ds_slice_buf = ", fcnName, tv_ptr->mpi_rank);
- ptr_0 = tv_ptr->small_ds_slice_buf;
- for (i = 0; i < (int)(tv_ptr->small_ds_slice_size); i++) {
- fprintf(stdout, "%d ", (int)(*ptr_0));
- ptr_0++;
- }
- fprintf(stdout, "\n");
-#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
-
- /* set up start, stride, count, and block -- note that we will
- * change start[] so as to read slices of the large cube.
- */
- for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
-
- tv_ptr->start[i] = 0;
- tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
-
- tv_ptr->block[i] = 1;
- }
- else {
-
- tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
- }
- }
-
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
- fprintf(stdout, "%s:%d: reading slice from big ds on disk into small ds slice.\n", fcnName,
- tv_ptr->mpi_rank);
-#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
- /* in serial versions of this test, we loop through all the dimensions
- * of the large data set. However, in the parallel version, each
- * process only works with that slice of the large cube indicated
- * by its rank -- hence we set the most slowly changing index to
- * mpi_rank, and don't iterate over it.
- */
-
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
-
- i = tv_ptr->mpi_rank;
- }
- else {
-
- i = 0;
- }
-
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
- * loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
- * test.
- */
-
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
-
- j = tv_ptr->mpi_rank;
- }
- else {
-
- j = 0;
- }
-
- do {
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
-
- k = tv_ptr->mpi_rank;
- }
- else {
-
- k = 0;
- }
-
- do {
- /* since small rank >= 2 and large_rank > small_rank, we
- * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
- * (barring major re-organization), this gives us:
- *
- * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
- *
- * so no need to repeat the test in the outer loops --
- * just set l = 0.
- */
-
- l = 0;
- do {
- if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
-
- (tv_ptr->tests_skipped)++;
- }
- else { /* run the test */
-
- tv_ptr->skips = 0; /* reset the skips counter */
-
- /* we know that small_rank - 1 >= 1 and that
- * large_rank > small_rank by the assertions at the head
- * of this function. Thus no need for another inner loop.
- */
- tv_ptr->start[0] = (hsize_t)i;
- tv_ptr->start[1] = (hsize_t)j;
- tv_ptr->start[2] = (hsize_t)k;
- tv_ptr->start[3] = (hsize_t)l;
- tv_ptr->start[4] = 0;
-
- assert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1));
- assert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1));
- assert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1));
- assert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1));
- assert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1));
-
- ckrbrd_hs_dr_pio_test__slct_ckrbrd(
- tv_ptr->mpi_rank, tv_ptr->file_large_ds_sid_0, tv_ptr->large_rank, tv_ptr->edge_size,
- tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start);
-
- /* verify that H5Sselect_shape_same() reports the two
- * selections as having the same shape.
- */
- check = H5Sselect_shape_same(tv_ptr->small_ds_slice_sid, tv_ptr->file_large_ds_sid_0);
- VRFY((check == true), "H5Sselect_shape_same passed");
-
- /* Read selection from disk */
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
- fprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank,
- tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3],
- tv_ptr->start[4]);
- fprintf(stdout, "%s slice/file extent dims = %d/%d.\n", fcnName,
- H5Sget_simple_extent_ndims(tv_ptr->small_ds_slice_sid),
- H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0));
-#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
-
- ret =
- H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->small_ds_slice_sid,
- tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_slice_buf);
- VRFY((ret >= 0), "H5Dread() slice from large ds succeeded.");
-
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
- fprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, tv_ptr->mpi_rank);
-#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
-
- /* verify that expected data is retrieved */
-
- expected_value =
- (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
- tv_ptr->edge_size) +
- (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
- (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
-
- data_ok = ckrbrd_hs_dr_pio_test__verify_data(
- tv_ptr->small_ds_slice_buf, tv_ptr->small_rank - 1, tv_ptr->edge_size,
- tv_ptr->checker_edge_size, expected_value, (bool)true);
-
- VRFY((data_ok == true), "small slice read from large ds data good.");
-
- (tv_ptr->tests_run)++;
- }
-
- l++;
-
- (tv_ptr->total_tests)++;
-
- } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
- k++;
- } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
- j++;
- } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
-
- return;
-
-} /* ckrbrd_hs_dr_pio_test__d2m_l2s() */
-
-/*-------------------------------------------------------------------------
- * Function: ckrbrd_hs_dr_pio_test__d2m_s2l()
- *
- * Purpose: Part two of a series of tests of I/O to/from hyperslab
- * selections of different rank in the parallel case.
- *
- * Verify that we can read from disk correctly using
- * selections of different rank that H5Sselect_shape_same()
- * views as being of the same shape.
- *
- * In this function, we test this by reading checker board
- * slices of the on disk small data set into slices through
- * the in memory large data set, and verify that the correct
- * data (and only the correct data) is read.
- *
- * Return: void
- *
- *-------------------------------------------------------------------------
- */
-
-#define CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG 0
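
/* A reduced sketch of one part of the verification below (buffer and bounds
 * are placeholders): after reading a small slice into the large buffer,
 * everything outside [start_index, stop_index] must still be zero, and the
 * buffer is re-zeroed for the next pass.  The window itself is checked
 * separately with ckrbrd_hs_dr_pio_test__verify_data().
 */
static bool
sketch_outside_window_clean(uint32_t *buf, size_t buf_size, size_t start_index, size_t stop_index)
{
    bool clean = true;

    for (size_t u = 0; u < buf_size; u++) {
        if ((u < start_index || u > stop_index) && buf[u] != 0)
            clean = false;

        buf[u] = 0; /* reset for reuse, as the test does */
    }

    return clean;
}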
-
-static void
-ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
-{
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
- const char *fcnName = "ckrbrd_hs_dr_pio_test__d2m_s2l()";
-#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
- bool data_ok = false;
- int i, j, k, l;
- size_t u;
- size_t start_index;
- size_t stop_index;
- uint32_t expected_value;
- uint32_t *ptr_1;
- int mpi_rank; /* needed by VRFY */
- hsize_t sel_start[PAR_SS_DR_MAX_RANK];
- htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
-
- /* initialize the local copy of mpi_rank */
- mpi_rank = tv_ptr->mpi_rank;
-
- /* similarly, read slices of the on disk small data set into slices
- * through the in memory large data set, and verify that the correct
- * data (and only the correct data) is read.
- */
-
- sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
- sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank);
-
- ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->file_small_ds_sid_0, tv_ptr->small_rank,
- tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1,
- sel_start);
-
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
- fprintf(stdout, "%s reading slices of on disk small data set into slices of big data set.\n", fcnName);
-#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
-
- /* zero out the buffer we will be reading into */
- memset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
-
- /* set up start, stride, count, and block -- note that we will
- * change start[] so as to read the slice of the small data set
- * into different slices of the process slice of the large data
- * set.
- */
- for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
-
- tv_ptr->start[i] = 0;
- tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
-
- tv_ptr->block[i] = 1;
- }
- else {
-
- tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
- }
- }
-
- /* in serial versions of this test, we loop through all the dimensions
- * of the large data set that don't appear in the small data set.
- *
- * However, in the parallel version, each process only works with that
- * slice of the large (and small) data set indicated by its rank -- hence
- * we set the most slowly changing index to mpi_rank, and don't iterate
- * over it.
- */
-
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
-
- i = tv_ptr->mpi_rank;
- }
- else {
-
- i = 0;
- }
-
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
- * loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
- * test.
- */
-
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
-
- j = tv_ptr->mpi_rank;
- }
- else {
-
- j = 0;
- }
-
- do {
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
-
- k = tv_ptr->mpi_rank;
- }
- else {
-
- k = 0;
- }
-
- do {
- /* since small rank >= 2 and large_rank > small_rank, we
- * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
- * (barring major re-organization), this gives us:
- *
- * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
- *
- * so no need to repeat the test in the outer loops --
- * just set l = 0.
- */
-
- l = 0;
- do {
- if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
-
- (tv_ptr->tests_skipped)++;
- }
- else { /* run the test */
-
- tv_ptr->skips = 0; /* reset the skips counter */
-
- /* we know that small_rank >= 1 and that large_rank > small_rank
- * by the assertions at the head of this function. Thus no
- * need for another inner loop.
- */
- tv_ptr->start[0] = (hsize_t)i;
- tv_ptr->start[1] = (hsize_t)j;
- tv_ptr->start[2] = (hsize_t)k;
- tv_ptr->start[3] = (hsize_t)l;
- tv_ptr->start[4] = 0;
-
- assert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1));
- assert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1));
- assert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1));
- assert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1));
- assert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1));
-
- ckrbrd_hs_dr_pio_test__slct_ckrbrd(
- tv_ptr->mpi_rank, tv_ptr->mem_large_ds_sid, tv_ptr->large_rank, tv_ptr->edge_size,
- tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start);
-
- /* verify that H5Sselect_shape_same() reports the two
- * selections as having the same shape.
- */
- check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
- VRFY((check == true), "H5Sselect_shape_same passed");
-
- /* Read selection from disk */
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
- fprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank,
- tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3],
- tv_ptr->start[4]);
- fprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
- H5Sget_simple_extent_ndims(tv_ptr->large_ds_slice_sid),
- H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_0));
-#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
- ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
- tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1);
- VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
-
- /* verify that the expected data and only the
- * expected data was read.
- */
- data_ok = true;
- ptr_1 = tv_ptr->large_ds_buf_1;
- expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
- start_index =
- (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
- tv_ptr->edge_size) +
- (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
- (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
- stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
-
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
- {
- int m, n;
-
- fprintf(stdout, "%s:%d: expected_value = %d.\n", fcnName, tv_ptr->mpi_rank,
- expected_value);
- fprintf(stdout, "%s:%d: start/stop index = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
- start_index, stop_index);
- n = 0;
- for (m = 0; (unsigned)m < tv_ptr->large_ds_size; m++) {
- fprintf(stdout, "%d ", (int)(*ptr_1));
- ptr_1++;
- n++;
- if (n >= tv_ptr->edge_size) {
- fprintf(stdout, "\n");
- n = 0;
- }
- }
- fprintf(stdout, "\n");
- ptr_1 = tv_ptr->large_ds_buf_1;
- }
-#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
-
- assert(start_index < stop_index);
- assert(stop_index <= tv_ptr->large_ds_size);
-
- for (u = 0; u < start_index; u++) {
-
- if (*ptr_1 != 0) {
-
- data_ok = false;
- }
-
- /* zero out the value for the next pass */
- *ptr_1 = 0;
-
- ptr_1++;
- }
-
- VRFY((data_ok == true), "slice read from small to large ds data good(1).");
-
- data_ok = ckrbrd_hs_dr_pio_test__verify_data(ptr_1, tv_ptr->small_rank - 1,
- tv_ptr->edge_size, tv_ptr->checker_edge_size,
- expected_value, (bool)true);
-
- VRFY((data_ok == true), "slice read from small to large ds data good(2).");
-
- ptr_1 = tv_ptr->large_ds_buf_1 + stop_index + 1;
-
- for (u = stop_index + 1; u < tv_ptr->large_ds_size; u++) {
-
- if (*ptr_1 != 0) {
-
- data_ok = false;
- }
-
- /* zero out the value for the next pass */
- *ptr_1 = 0;
-
- ptr_1++;
- }
-
- VRFY((data_ok == true), "slice read from small to large ds data good(3).");
-
- (tv_ptr->tests_run)++;
- }
-
- l++;
-
- (tv_ptr->total_tests)++;
-
- } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
- k++;
- } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
- j++;
- } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
-
- return;
-
-} /* ckrbrd_hs_dr_pio_test__d2m_s2l() */
-
-/*-------------------------------------------------------------------------
- * Function: ckrbrd_hs_dr_pio_test__m2d_l2s()
- *
- * Purpose: Part three of a series of tests of I/O to/from checker
- * board hyperslab selections of different rank in the
- * parallel case.
- *
- * Verify that we can write from memory to file using checker
- * board selections of different rank that
- * H5Sselect_shape_same() views as being of the same shape.
- *
- * Do this by writing small_rank - 1 dimensional checker
- * board slices from the in memory large data set to the on
- * disk small cube dataset. After each write, read the
- * slice of the small dataset back from disk, and verify
- * that it contains the expected data. Verify that
- * H5Sselect_shape_same() returns true on the memory and
- * file selections.
- *
- * Return: void
- *
- *-------------------------------------------------------------------------
- */
-
-#define CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG 0
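
/* The write-side cycle driven below reduces to: write through one selection,
 * read the region back, and compare.  A minimal sketch with placeholder
 * names; it assumes mem_sid selects all nelmts elements of both buffers,
 * whereas the real test goes through checkerboard selections and a
 * collective transfer property list.
 */
static bool
sketch_write_readback(hid_t dset, hid_t mem_sid, hid_t file_sid, hid_t dxpl, const uint32_t *src,
                      uint32_t *readback, size_t nelmts)
{
    /* write the source data to the file selection */
    if (H5Dwrite(dset, H5T_NATIVE_UINT32, mem_sid, file_sid, dxpl, src) < 0)
        return false;

    /* read the same region back ... */
    if (H5Dread(dset, H5T_NATIVE_UINT32, mem_sid, file_sid, dxpl, readback) < 0)
        return false;

    /* ... and confirm it matches what was written */
    for (size_t u = 0; u < nelmts; u++)
        if (readback[u] != src[u])
            return false;

    return true;
}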
-
-static void
-ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
-{
-#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
- const char *fcnName = "ckrbrd_hs_dr_pio_test__m2d_l2s()";
-#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
- bool data_ok = false;
- int i, j, k, l;
- size_t u;
- size_t start_index;
- size_t stop_index;
- uint32_t expected_value;
- uint32_t *ptr_1;
- int mpi_rank; /* needed by VRFY */
- hsize_t sel_start[PAR_SS_DR_MAX_RANK];
- htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
-
- /* initialize the local copy of mpi_rank */
- mpi_rank = tv_ptr->mpi_rank;
-
- /* now we go in the opposite direction, verifying that we can write
- * from memory to file using selections of different rank that
- * H5Sselect_shape_same() views as being of the same shape.
- *
- * Start by writing small_rank - 1 D slices from the in memory large data
- * set to the on disk small dataset. After each write, read the slice of
- * the small dataset back from disk, and verify that it contains the
- * expected data. Verify that H5Sselect_shape_same() returns true on
- * the memory and file selections.
- */
-
- tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
- tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
- tv_ptr->count[0] = 1;
- tv_ptr->block[0] = 1;
-
- for (i = 1; i < tv_ptr->large_rank; i++) {
-
- tv_ptr->start[i] = 0;
- tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
- }
-
- ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
- tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded");
-
- ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
- tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
-
- sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
- sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank);
-
- ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->file_small_ds_sid_1, tv_ptr->small_rank,
- tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1,
- sel_start);
-
- /* set up start, stride, count, and block -- note that we will
- * change start[] so as to read slices of the large cube.
- */
- for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
-
- tv_ptr->start[i] = 0;
- tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
-
- tv_ptr->block[i] = 1;
- }
- else {
-
- tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
- }
- }
-
- /* zero out the in memory small ds */
- memset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size);
-
-#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
- fprintf(stdout,
- "%s writing checker boards selections of slices from big ds to slices of small ds on disk.\n",
- fcnName);
-#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
-
- /* in serial versions of this test, we loop through all the dimensions
- * of the large data set that don't appear in the small data set.
- *
- * However, in the parallel version, each process only works with that
- * slice of the large (and small) data set indicated by its rank -- hence
- * we set the most slowly changing index to mpi_rank, and don't iterate
- * over it.
- */
-
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
-
- i = tv_ptr->mpi_rank;
- }
- else {
-
- i = 0;
- }
-
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
- * loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
- * test.
- */
-
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
-
- j = tv_ptr->mpi_rank;
- }
- else {
-
- j = 0;
- }
-
- j = 0;
- do {
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
-
- k = tv_ptr->mpi_rank;
- }
- else {
-
- k = 0;
- }
-
- do {
- /* since small rank >= 2 and large_rank > small_rank, we
- * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
- * (barring major re-organization), this gives us:
- *
- * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
- *
- * so no need to repeat the test in the outer loops --
- * just set l = 0.
- */
-
- l = 0;
- do {
- if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
-
- (tv_ptr->tests_skipped)++;
- }
- else { /* run the test */
-
- tv_ptr->skips = 0; /* reset the skips counter */
-
- /* we know that small_rank >= 1 and that large_rank > small_rank
- * by the assertions at the head of this function. Thus no
- * need for another inner loop.
- */
-
- /* zero out this rank's slice of the on disk small data set */
- ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
- tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_2);
- VRFY((ret >= 0), "H5Dwrite() zero slice to small ds succeeded.");
-
- /* select the portion of the in memory large cube from which we
- * are going to write data.
- */
- tv_ptr->start[0] = (hsize_t)i;
- tv_ptr->start[1] = (hsize_t)j;
- tv_ptr->start[2] = (hsize_t)k;
- tv_ptr->start[3] = (hsize_t)l;
- tv_ptr->start[4] = 0;
-
- assert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1));
- assert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1));
- assert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1));
- assert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1));
- assert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1));
-
- ckrbrd_hs_dr_pio_test__slct_ckrbrd(
- tv_ptr->mpi_rank, tv_ptr->mem_large_ds_sid, tv_ptr->large_rank, tv_ptr->edge_size,
- tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start);
-
- /* verify that H5Sselect_shape_same() reports the in
- * memory checkerboard selection of the slice through the
- * large dataset and the checkerboard selection of the process
- * slice of the small data set as having the same shape.
- */
- check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_1, tv_ptr->mem_large_ds_sid);
- VRFY((check == true), "H5Sselect_shape_same passed.");
-
- /* write the checker board selection of the slice from the in
- * memory large data set to the slice of the on disk small
- * dataset.
- */
-#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
- fprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank,
- tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3],
- tv_ptr->start[4]);
- fprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
- H5Sget_simple_extent_ndims(tv_ptr->mem_large_ds_sid),
- H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_1));
-#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
- ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
- tv_ptr->file_small_ds_sid_1, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0);
- VRFY((ret >= 0), "H5Dwrite() slice to large ds succeeded.");
-
- /* read the on disk process slice of the small dataset into memory */
- ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
- tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_1);
- VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
-
- /* verify that expected data is retrieved */
-
- expected_value =
- (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
- tv_ptr->edge_size) +
- (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
- (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
-
- start_index = (size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size;
- stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
-
- assert(start_index < stop_index);
- assert(stop_index <= tv_ptr->small_ds_size);
-
- data_ok = true;
-
- ptr_1 = tv_ptr->small_ds_buf_1;
- for (u = 0; u < start_index; u++, ptr_1++) {
-
- if (*ptr_1 != 0) {
-
- data_ok = false;
- *ptr_1 = 0;
- }
- }
-
- data_ok &= ckrbrd_hs_dr_pio_test__verify_data(
- tv_ptr->small_ds_buf_1 + start_index, tv_ptr->small_rank - 1, tv_ptr->edge_size,
- tv_ptr->checker_edge_size, expected_value, (bool)true);
-
- ptr_1 = tv_ptr->small_ds_buf_1;
- for (u = stop_index; u < tv_ptr->small_ds_size; u++, ptr_1++) {
-
- if (*ptr_1 != 0) {
-
- data_ok = false;
- *ptr_1 = 0;
- }
- }
-
- VRFY((data_ok == true), "large slice write slice to small slice data good.");
-
- (tv_ptr->tests_run)++;
- }
-
- l++;
-
- (tv_ptr->total_tests)++;
-
- } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
- k++;
- } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
- j++;
- } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
-
- return;
-
-} /* ckrbrd_hs_dr_pio_test__m2d_l2s() */
-
-/*-------------------------------------------------------------------------
- * Function: ckrbrd_hs_dr_pio_test__m2d_s2l()
- *
- * Purpose: Part four of a series of tests of I/O to/from checker
- * board hyperslab selections of different rank in the parallel case.
- *
- * Verify that we can write from memory to file using
- * selections of different rank that H5Sselect_shape_same()
- * views as being of the same shape.
- *
- * Do this by writing checker board selections of the contents
- * of the process's slice of the in memory small data set to
- * slices of the on disk large data set. After each write,
- * read the process's slice of the large data set back into
- * memory, and verify that it contains the expected data.
- *
- * Verify that H5Sselect_shape_same() returns true on the
- * memory and file selections.
- *
- * Return: void
- *
- *-------------------------------------------------------------------------
- */
-
-#define CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG 0
-
-static void
-ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
-{
-#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
- const char *fcnName = "ckrbrd_hs_dr_pio_test__m2d_s2l()";
-#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
- bool data_ok = false;
- int i, j, k, l;
- size_t u;
- size_t start_index;
- size_t stop_index;
- uint32_t expected_value;
- uint32_t *ptr_1;
- int mpi_rank; /* needed by VRFY */
- hsize_t sel_start[PAR_SS_DR_MAX_RANK];
- htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
-
- /* initialize the local copy of mpi_rank */
- mpi_rank = tv_ptr->mpi_rank;
-
- /* Now write the contents of the process's slice of the in memory
- * small data set to slices of the on disk large data set. After
- * each write, read the process's slice of the large data set back
- * into memory, and verify that it contains the expected data.
- * Verify that H5Sselect_shape_same() returns true on the memory
- * and file selections.
- */
-
- tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
- tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
- tv_ptr->count[0] = 1;
- tv_ptr->block[0] = 1;
-
- for (i = 1; i < tv_ptr->large_rank; i++) {
-
- tv_ptr->start[i] = 0;
- tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
- }
-
- ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
- tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) succeeded");
-
- ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
- tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, set) succeeded");
-
- /* setup a checkerboard selection of the slice of the in memory small
- * data set associated with the process's mpi rank.
- */
-
- sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
- sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank);
-
- ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->mem_small_ds_sid, tv_ptr->small_rank,
- tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1,
- sel_start);
-
- /* set up start, stride, count, and block -- note that we will
- * change start[] so as to write checkerboard selections of slices
- * of the small data set to slices of the large data set.
- */
- for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
-
- tv_ptr->start[i] = 0;
- tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
-
- tv_ptr->block[i] = 1;
- }
- else {
-
- tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
- }
- }
-
- /* zero out the in memory large ds */
- memset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
-
-#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
- fprintf(stdout,
- "%s writing process checkerboard selections of slices of small ds to process slices of large "
- "ds on disk.\n",
- fcnName);
-#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
-
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
-
- i = tv_ptr->mpi_rank;
- }
- else {
-
- i = 0;
- }
-
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
- * loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
- * test.
- */
-
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
-
- j = tv_ptr->mpi_rank;
- }
- else {
-
- j = 0;
- }
-
- do {
- if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
-
- k = tv_ptr->mpi_rank;
- }
- else {
-
- k = 0;
- }
-
- do {
- /* since small rank >= 2 and large_rank > small_rank, we
- * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
- * (barring major re-organization), this gives us:
- *
- * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
- *
- * so no need to repeat the test in the outer loops --
- * just set l = 0.
- */
-
- l = 0;
- do {
- if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
-
- (tv_ptr->tests_skipped)++;
- }
- else { /* run the test */
-
- tv_ptr->skips = 0; /* reset the skips counter */
-
- /* we know that small_rank >= 1 and that large_rank > small_rank
- * by the assertions at the head of this function. Thus no
- * need for another inner loop.
- */
-
- /* Zero out this process's slice of the on disk large data set.
- * Note that this will leave one slice with its original data
- * as there is one more slice than processes.
- */
- ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
- tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_2);
- VRFY((ret != FAIL), "H5Dwrite() to zero large ds succeeded");
-
- /* select the portion of the in memory large cube to which we
- * are going to write data.
- */
- tv_ptr->start[0] = (hsize_t)i;
- tv_ptr->start[1] = (hsize_t)j;
- tv_ptr->start[2] = (hsize_t)k;
- tv_ptr->start[3] = (hsize_t)l;
- tv_ptr->start[4] = 0;
-
- assert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1));
- assert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1));
- assert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1));
- assert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1));
- assert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1));
-
- ckrbrd_hs_dr_pio_test__slct_ckrbrd(
- tv_ptr->mpi_rank, tv_ptr->file_large_ds_sid_1, tv_ptr->large_rank, tv_ptr->edge_size,
- tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start);
-
- /* verify that H5Sselect_shape_same() reports the in
- * memory small data set slice selection and the
- * on disk slice through the large data set selection
- * as having the same shape.
- */
- check = H5Sselect_shape_same(tv_ptr->mem_small_ds_sid, tv_ptr->file_large_ds_sid_1);
- VRFY((check == true), "H5Sselect_shape_same passed");
-
- /* write the small data set slice from memory to the
- * target slice of the disk data set
- */
-#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
- fprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank,
- tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3],
- tv_ptr->start[4]);
- fprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
- H5Sget_simple_extent_ndims(tv_ptr->mem_small_ds_sid),
- H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_1));
-#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
- ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
- tv_ptr->file_large_ds_sid_1, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_0);
- VRFY((ret != FAIL), "H5Dwrite of small ds slice to large ds succeeded");
-
- /* read this process's slice of the on disk large
- * data set into memory.
- */
-
- ret = H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
- tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1);
- VRFY((ret != FAIL), "H5Dread() of process slice of large ds succeeded");
-
- /* verify that the expected data and only the
- * expected data was read.
- */
- expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
-
- start_index =
- (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
- tv_ptr->edge_size) +
- (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
- (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
- stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
-
- assert(start_index < stop_index);
- assert(stop_index < tv_ptr->large_ds_size);
-
- data_ok = true;
-
- ptr_1 = tv_ptr->large_ds_buf_1;
- for (u = 0; u < start_index; u++, ptr_1++) {
-
- if (*ptr_1 != 0) {
-
- data_ok = false;
- *ptr_1 = 0;
- }
- }
-
- data_ok &= ckrbrd_hs_dr_pio_test__verify_data(
- tv_ptr->large_ds_buf_1 + start_index, tv_ptr->small_rank - 1, tv_ptr->edge_size,
- tv_ptr->checker_edge_size, expected_value, (bool)true);
-
- ptr_1 = tv_ptr->large_ds_buf_1;
- for (u = stop_index; u < tv_ptr->small_ds_size; u++, ptr_1++) {
-
- if (*ptr_1 != 0) {
-
- data_ok = false;
- *ptr_1 = 0;
- }
- }
-
- VRFY((data_ok == true), "small ds cb slice write to large ds slice data good.");
-
- (tv_ptr->tests_run)++;
- }
-
- l++;
-
- (tv_ptr->total_tests)++;
-
- } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
- k++;
- } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
- j++;
- } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
-
- return;
-
-} /* ckrbrd_hs_dr_pio_test__m2d_s2l() */
-
-/*-------------------------------------------------------------------------
- * Function: ckrbrd_hs_dr_pio_test__run_test()
- *
- * Purpose: Test I/O to/from checkerboard selections of hyperslabs of
- * different rank in the parallel case.
- *
- * Return: void
- *
- *-------------------------------------------------------------------------
- */
-
-#define CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG 0
-
-static void
-ckrbrd_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const int checker_edge_size,
- const int chunk_edge_size, const int small_rank, const int large_rank,
- const bool use_collective_io, const hid_t dset_type, const int express_test,
- int *skips_ptr, int max_skips, int64_t *total_tests_ptr,
- int64_t *tests_run_ptr, int64_t *tests_skipped_ptr, int mpi_rank)
-
-{
-#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
- const char *fcnName = "ckrbrd_hs_dr_pio_test__run_test()";
-#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
- struct hs_dr_pio_test_vars_t test_vars = {
- /* int mpi_size = */ -1,
- /* int mpi_rank = */ -1,
- /* MPI_Comm mpi_comm = */ MPI_COMM_NULL,
- /* MPI_Info mpi_info = */ MPI_INFO_NULL,
- /* int test_num = */ -1,
- /* int edge_size = */ -1,
- /* int checker_edge_size = */ -1,
- /* int chunk_edge_size = */ -1,
- /* int small_rank = */ -1,
- /* int large_rank = */ -1,
- /* hid_t dset_type = */ -1,
- /* uint32_t * small_ds_buf_0 = */ NULL,
- /* uint32_t * small_ds_buf_1 = */ NULL,
- /* uint32_t * small_ds_buf_2 = */ NULL,
- /* uint32_t * small_ds_slice_buf = */ NULL,
- /* uint32_t * large_ds_buf_0 = */ NULL,
- /* uint32_t * large_ds_buf_1 = */ NULL,
- /* uint32_t * large_ds_buf_2 = */ NULL,
- /* uint32_t * large_ds_slice_buf = */ NULL,
- /* int small_ds_offset = */ -1,
- /* int large_ds_offset = */ -1,
- /* hid_t fid = */ -1, /* HDF5 file ID */
- /* hid_t xfer_plist = */ H5P_DEFAULT,
- /* hid_t full_mem_small_ds_sid = */ -1,
- /* hid_t full_file_small_ds_sid = */ -1,
- /* hid_t mem_small_ds_sid = */ -1,
- /* hid_t file_small_ds_sid_0 = */ -1,
- /* hid_t file_small_ds_sid_1 = */ -1,
- /* hid_t small_ds_slice_sid = */ -1,
- /* hid_t full_mem_large_ds_sid = */ -1,
- /* hid_t full_file_large_ds_sid = */ -1,
- /* hid_t mem_large_ds_sid = */ -1,
- /* hid_t file_large_ds_sid_0 = */ -1,
- /* hid_t file_large_ds_sid_1 = */ -1,
- /* hid_t file_large_ds_process_slice_sid = */ -1,
- /* hid_t mem_large_ds_process_slice_sid = */ -1,
- /* hid_t large_ds_slice_sid = */ -1,
- /* hid_t small_dataset = */ -1, /* Dataset ID */
- /* hid_t large_dataset = */ -1, /* Dataset ID */
- /* size_t small_ds_size = */ 1,
- /* size_t small_ds_slice_size = */ 1,
- /* size_t large_ds_size = */ 1,
- /* size_t large_ds_slice_size = */ 1,
- /* hsize_t dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
- /* hsize_t chunk_dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
- /* hsize_t start[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
- /* hsize_t stride[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
- /* hsize_t count[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
- /* hsize_t block[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
- /* hsize_t * start_ptr = */ NULL,
- /* hsize_t * stride_ptr = */ NULL,
- /* hsize_t * count_ptr = */ NULL,
- /* hsize_t * block_ptr = */ NULL,
- /* int skips = */ 0,
- /* int max_skips = */ 0,
- /* int64_t total_tests = */ 0,
- /* int64_t tests_run = */ 0,
- /* int64_t tests_skipped = */ 0};
- struct hs_dr_pio_test_vars_t *tv_ptr = &test_vars;
-
- if (MAINPROCESS)
- printf("\r - running test #%lld: small rank = %d, large rank = %d", (long long)(test_num + 1),
- small_rank, large_rank);
-
- hs_dr_pio_test__setup(test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank,
- use_collective_io, dset_type, express_test, tv_ptr);
-
- /* initialize skips & max_skips */
- tv_ptr->skips = *skips_ptr;
- tv_ptr->max_skips = max_skips;
-
-#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
- if (MAINPROCESS) {
- fprintf(stdout, "test %d: small rank = %d, large rank = %d.\n", test_num, small_rank, large_rank);
- fprintf(stdout, "test %d: Initialization complete.\n", test_num);
- }
-#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
-
- /* first, verify that we can read from disk correctly using selections
- * of different rank that H5Sselect_shape_same() views as being of the
- * same shape.
- *
- * Start by reading a (small_rank - 1)-D slice from this process's slice
- * of the on disk large data set, and verifying that the data read is
- * correct. Verify that H5Sselect_shape_same() returns true on the
- * memory and file selections.
- *
- * The first step is to set up the needed checker board selection in the
- * in memory small cube
- */
-
- ckrbrd_hs_dr_pio_test__d2m_l2s(tv_ptr);
-
- /* similarly, read slices of the on disk small data set into slices
- * through the in memory large data set, and verify that the correct
- * data (and only the correct data) is read.
- */
-
- ckrbrd_hs_dr_pio_test__d2m_s2l(tv_ptr);
-
- /* now we go in the opposite direction, verifying that we can write
- * from memory to file using selections of different rank that
- * H5Sselect_shape_same() views as being of the same shape.
- *
- * Start by writing small_rank - 1 D slices from the in memory large data
- * set to the on disk small dataset. After each write, read the slice of
- * the small dataset back from disk, and verify that it contains the
- * expected data. Verify that H5Sselect_shape_same() returns true on
- * the memory and file selections.
- */
-
- ckrbrd_hs_dr_pio_test__m2d_l2s(tv_ptr);
-
- /* Now write the contents of the process's slice of the in memory
- * small data set to slices of the on disk large data set. After
- * each write, read the process's slice of the large data set back
- * into memory, and verify that it contains the expected data.
- * Verify that H5Sselect_shape_same() returns true on the memory
- * and file selections.
- */
-
- ckrbrd_hs_dr_pio_test__m2d_s2l(tv_ptr);
-
-#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
- if (MAINPROCESS) {
- fprintf(stdout, "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n", test_num,
- (long long)(tv_ptr->tests_run), (long long)(tv_ptr->tests_skipped),
- (long long)(tv_ptr->total_tests));
- }
-#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
-
- hs_dr_pio_test__takedown(tv_ptr);
-
-#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
- if (MAINPROCESS) {
- fprintf(stdout, "test %d: Takedown complete.\n", test_num);
- }
-#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
-
- *skips_ptr = tv_ptr->skips;
- *total_tests_ptr += tv_ptr->total_tests;
- *tests_run_ptr += tv_ptr->tests_run;
- *tests_skipped_ptr += tv_ptr->tests_skipped;
-
- return;
-
-} /* ckrbrd_hs_dr_pio_test__run_test() */
-
-/*-------------------------------------------------------------------------
- * Function: ckrbrd_hs_dr_pio_test()
- *
- * Purpose: Test I/O to/from hyperslab selections of different rank in
- * the parallel case.
- *
- * Return: void
- *
- *-------------------------------------------------------------------------
- */
-
-static void
-ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
-{
- int express_test;
- int local_express_test;
- int mpi_size = -1;
- int mpi_rank = -1;
- int test_num = 0;
- int edge_size;
- int checker_edge_size = 3;
- int chunk_edge_size = 0;
- int small_rank = 3;
- int large_rank = 4;
- int mpi_result;
- hid_t dset_type = H5T_NATIVE_UINT;
- int skips = 0;
- int max_skips = 0;
-    /* The following table lists the number of sub-tests skipped between
-     * each test that is actually executed, as a function of the express
-     * test level.  Note that any value in excess of 4880 will cause all
-     * sub-tests to be skipped.
-     */
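    /* (Illustrative note, not part of the original source: at express level 2,
     *  for example, max_skips is 64, so only about one in every 65 sub-tests
     *  is actually executed.) */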
- int max_skips_tbl[4] = {0, 4, 64, 1024};
- int64_t total_tests = 0;
- int64_t tests_run = 0;
- int64_t tests_skipped = 0;
-
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- edge_size = (mpi_size > 6 ? mpi_size : 6);
-
- local_express_test = EXPRESS_MODE; /* GetTestExpress(); */
-
- HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned));
-
- mpi_result = MPI_Allreduce((void *)&local_express_test, (void *)&express_test, 1, MPI_INT, MPI_MAX,
- MPI_COMM_WORLD);
-
- VRFY((mpi_result == MPI_SUCCESS), "MPI_Allreduce(0) succeeded");
-
- if (local_express_test < 0) {
- max_skips = max_skips_tbl[0];
- }
- else if (local_express_test > 3) {
- max_skips = max_skips_tbl[3];
- }
- else {
- max_skips = max_skips_tbl[local_express_test];
- }
-
-#if 0
- {
- int DebugWait = 1;
-
- while (DebugWait) ;
- }
-#endif
-
- for (large_rank = 3; large_rank <= PAR_SS_DR_MAX_RANK; large_rank++) {
-
- for (small_rank = 2; small_rank < large_rank; small_rank++) {
- switch (sstest_type) {
- case IND_CONTIG:
- /* contiguous data set, independent I/O */
- chunk_edge_size = 0;
- ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
- small_rank, large_rank, false, dset_type, express_test,
- &skips, max_skips, &total_tests, &tests_run,
- &tests_skipped, mpi_rank);
- test_num++;
- break;
- /* end of case IND_CONTIG */
-
- case COL_CONTIG:
- /* contiguous data set, collective I/O */
- chunk_edge_size = 0;
- ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
- small_rank, large_rank, true, dset_type, express_test,
- &skips, max_skips, &total_tests, &tests_run,
- &tests_skipped, mpi_rank);
- test_num++;
- break;
- /* end of case COL_CONTIG */
-
- case IND_CHUNKED:
- /* chunked data set, independent I/O */
- chunk_edge_size = 5;
- ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
- small_rank, large_rank, false, dset_type, express_test,
- &skips, max_skips, &total_tests, &tests_run,
- &tests_skipped, mpi_rank);
- test_num++;
- break;
- /* end of case IND_CHUNKED */
-
- case COL_CHUNKED:
- /* chunked data set, collective I/O */
- chunk_edge_size = 5;
- ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
- small_rank, large_rank, true, dset_type, express_test,
- &skips, max_skips, &total_tests, &tests_run,
- &tests_skipped, mpi_rank);
- test_num++;
- break;
- /* end of case COL_CHUNKED */
-
- default:
- VRFY((false), "unknown test type");
- break;
-
- } /* end of switch(sstest_type) */
-#if CONTIG_HS_DR_PIO_TEST__DEBUG
- if ((MAINPROCESS) && (tests_skipped > 0)) {
- fprintf(stdout, " run/skipped/total = %" PRId64 "/%" PRId64 "/%" PRId64 ".\n", tests_run,
- tests_skipped, total_tests);
- }
-#endif /* CONTIG_HS_DR_PIO_TEST__DEBUG */
- }
- }
-
- if (MAINPROCESS) {
- if (tests_skipped > 0) {
- fprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n",
- tests_skipped, total_tests);
- }
- else
- printf("\n");
- }
-
- return;
-
-} /* ckrbrd_hs_dr_pio_test() */
-
-/* Main Body.  Here for now; may have to move it to a separate file later. */
-
-/*
- * Main driver of the Parallel HDF5 tests
- */
-
-/* global variables */
-int dim0;
-int dim1;
-int chunkdim0;
-int chunkdim1;
-int nerrors = 0; /* errors count */
-int ndatasets = 300; /* number of datasets to create*/
-int ngroups = 512; /* number of groups to create in root
- * group. */
-int facc_type = FACC_MPIO; /*Test file access type */
-int dxfer_coll_type = DXFER_COLLECTIVE_IO;
-
-H5E_auto2_t old_func; /* previous error handler */
-void *old_client_data; /* previous error handler arg.*/
-
-/* other option flags */
-
-#ifdef USE_PAUSE
-/* pause the process for a moment to allow debugger to attach if desired. */
-/* Will pause more if greenlight file is not present but will eventually */
-/* continue. */
-#include <sys/types.h>
-#include <sys/stat.h>
-
-void
-pause_proc(void)
-{
-
- int pid;
- h5_stat_t statbuf;
- char greenlight[] = "go";
- int maxloop = 10;
- int loops = 0;
- int time_int = 10;
-
- /* mpi variables */
- int mpi_size, mpi_rank;
- int mpi_namelen;
- char mpi_name[MPI_MAX_PROCESSOR_NAME];
-
- pid = getpid();
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- MPI_Get_processor_name(mpi_name, &mpi_namelen);
-
- if (MAINPROCESS)
- while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop) {
- if (!loops++) {
- printf("Proc %d (%*s, %d): to debug, attach %d\n", mpi_rank, mpi_namelen, mpi_name, pid, pid);
- }
- printf("waiting(%ds) for file %s ...\n", time_int, greenlight);
- fflush(stdout);
- HDsleep(time_int);
- }
- MPI_Barrier(MPI_COMM_WORLD);
-}
-
-/* Use the Profile feature of MPI to call the pause_proc() */
-int
-MPI_Init(int *argc, char ***argv)
-{
- int ret_code;
- ret_code = PMPI_Init(argc, argv);
- pause_proc();
- return (ret_code);
-}
-#endif /* USE_PAUSE */
-
-/*
- * Show command usage
- */
-static void
-usage(void)
-{
- printf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] "
- "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n");
- printf("\t-m<n_datasets>"
- "\tset number of datasets for the multiple dataset test\n");
- printf("\t-n<n_groups>"
- "\tset number of groups for the multiple group test\n");
-#if 0
- printf("\t-f <prefix>\tfilename prefix\n");
-#endif
- printf("\t-2\t\tuse Split-file together with MPIO\n");
- printf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n", ROW_FACTOR,
- COL_FACTOR);
- printf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n");
- printf("\n");
-}
-
-/*
- * parse the command line options
- */
-static int
-parse_options(int argc, char **argv)
-{
- int mpi_size, mpi_rank; /* mpi variables */
-
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* setup default chunk-size. Make sure sizes are > 0 */
-
- chunkdim0 = (dim0 + 9) / 10;
- chunkdim1 = (dim1 + 9) / 10;
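    /* (Illustrative note, not part of the original source: (dim + 9) / 10 is the
     *  usual integer ceiling of dim / 10, e.g. dim0 = 36 gives chunkdim0 = 4, so
     *  the default chunk size is roughly one tenth of each dimension, rounded up.) */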
-
- while (--argc) {
- if (**(++argv) != '-') {
- break;
- }
- else {
- switch (*(*argv + 1)) {
- case 'm':
- ndatasets = atoi((*argv + 1) + 1);
- if (ndatasets < 0) {
- nerrors++;
- return (1);
- }
- break;
- case 'n':
- ngroups = atoi((*argv + 1) + 1);
- if (ngroups < 0) {
- nerrors++;
- return (1);
- }
- break;
-#if 0
- case 'f': if (--argc < 1) {
- nerrors++;
- return(1);
- }
- if (**(++argv) == '-') {
- nerrors++;
- return(1);
- }
- paraprefix = *argv;
- break;
-#endif
-                case 'i': /* Use independent I/O within collective MPI-IO access */
- dxfer_coll_type = DXFER_INDEPENDENT_IO;
- break;
- case '2': /* Use the split-file driver with MPIO access */
- /* Can use $HDF5_METAPREFIX to define the */
- /* meta-file-prefix. */
- facc_type = FACC_MPIO | FACC_SPLIT;
- break;
- case 'd': /* dimensizes */
- if (--argc < 2) {
- nerrors++;
- return (1);
- }
- dim0 = atoi(*(++argv)) * mpi_size;
- argc--;
- dim1 = atoi(*(++argv)) * mpi_size;
- /* set default chunkdim sizes too */
- chunkdim0 = (dim0 + 9) / 10;
- chunkdim1 = (dim1 + 9) / 10;
- break;
- case 'c': /* chunk dimensions */
- if (--argc < 2) {
- nerrors++;
- return (1);
- }
- chunkdim0 = atoi(*(++argv));
- argc--;
- chunkdim1 = atoi(*(++argv));
- break;
- case 'h': /* print help message--return with nerrors set */
- return (1);
- default:
- printf("Illegal option(%s)\n", *argv);
- nerrors++;
- return (1);
- }
- }
- } /*while*/
-
- /* check validity of dimension and chunk sizes */
- if (dim0 <= 0 || dim1 <= 0) {
- printf("Illegal dim sizes (%d, %d)\n", dim0, dim1);
- nerrors++;
- return (1);
- }
- if (chunkdim0 <= 0 || chunkdim1 <= 0) {
- printf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1);
- nerrors++;
- return (1);
- }
-
- /* Make sure datasets can be divided into equal portions by the processes */
- if ((dim0 % mpi_size) || (dim1 % mpi_size)) {
- if (MAINPROCESS)
- printf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", dim0, dim1, mpi_size);
- nerrors++;
- return (1);
- }
-
- /* compose the test filenames */
- {
- int i, n;
-
- n = sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; /* exclude the NULL */
-
- for (i = 0; i < n; i++)
- strncpy(filenames[i], FILENAME[i], PATH_MAX);
-#if 0 /* no support for VFDs right now */
- if (h5_fixname(FILENAME[i], fapl, filenames[i], PATH_MAX) == NULL) {
- printf("h5_fixname failed\n");
- nerrors++;
- return (1);
- }
-#endif
- if (MAINPROCESS) {
- printf("Test filenames are:\n");
- for (i = 0; i < n; i++)
- printf(" %s\n", filenames[i]);
- }
- }
-
- return (0);
-}
-
-/*
- * Create the appropriate File access property list
- */
-hid_t
-create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
-{
- hid_t ret_pl = -1;
- herr_t ret; /* generic return value */
- int mpi_rank; /* mpi variables */
-
- /* need the rank for error checking macros */
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- ret_pl = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((ret_pl >= 0), "H5P_FILE_ACCESS");
-
- if (l_facc_type == FACC_DEFAULT)
- return (ret_pl);
-
- if (l_facc_type == FACC_MPIO) {
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(ret_pl, comm, info);
- VRFY((ret >= 0), "");
- ret = H5Pset_all_coll_metadata_ops(ret_pl, true);
- VRFY((ret >= 0), "");
- ret = H5Pset_coll_metadata_write(ret_pl, true);
- VRFY((ret >= 0), "");
- return (ret_pl);
- }
-
- if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) {
- hid_t mpio_pl;
-
- mpio_pl = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((mpio_pl >= 0), "");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
- VRFY((ret >= 0), "");
-
- /* setup file access template */
- ret_pl = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((ret_pl >= 0), "");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
- VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
- H5Pclose(mpio_pl);
- return (ret_pl);
- }
-
- /* unknown file access types */
- return (ret_pl);
-}
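The helper above is used throughout this file; as a minimal illustrative sketch (the file name and the surrounding error handling here are hypothetical, not part of the original test), a caller using the default FACC_MPIO access type would do something like:

    hid_t fapl_id, file_id;

    /* Build an MPI-IO file access property list for MPI_COMM_WORLD */
    fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, FACC_MPIO);

    /* Create a file with that access list; "example.h5" is a placeholder name */
    file_id = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);

    /* ... perform parallel I/O on the file ... */

    H5Fclose(file_id);
    H5Pclose(fapl_id);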
-
-/* Shape Same test using contiguous hyperslab using independent IO on contiguous datasets */
-static void
-sscontig1(void)
-{
- contig_hs_dr_pio_test(IND_CONTIG);
-}
-
-/* Shape Same test using contiguous hyperslab using collective IO on contiguous datasets */
-static void
-sscontig2(void)
-{
- contig_hs_dr_pio_test(COL_CONTIG);
-}
-
-/* Shape Same test using contiguous hyperslab using independent IO on chunked datasets */
-static void
-sscontig3(void)
-{
- contig_hs_dr_pio_test(IND_CHUNKED);
-}
-
-/* Shape Same test using contiguous hyperslab using collective IO on chunked datasets */
-static void
-sscontig4(void)
-{
- contig_hs_dr_pio_test(COL_CHUNKED);
-}
-
-/* Shape Same test using checker hyperslab using independent IO on contiguous datasets */
-static void
-sschecker1(void)
-{
- ckrbrd_hs_dr_pio_test(IND_CONTIG);
-}
-
-/* Shape Same test using checker hyperslab using collective IO on contiguous datasets */
-static void
-sschecker2(void)
-{
- ckrbrd_hs_dr_pio_test(COL_CONTIG);
-}
-
-/* Shape Same test using checker hyperslab using independent IO on chunked datasets */
-static void
-sschecker3(void)
-{
- ckrbrd_hs_dr_pio_test(IND_CHUNKED);
-}
-
-/* Shape Same test using checker hyperslab using collective IO on chunked datasets */
-static void
-sschecker4(void)
-{
- ckrbrd_hs_dr_pio_test(COL_CHUNKED);
-}
-
-int
-main(int argc, char **argv)
-{
- int mpi_size, mpi_rank; /* mpi variables */
-
-#ifndef H5_HAVE_WIN32_API
- /* Un-buffer the stdout and stderr */
- HDsetbuf(stderr, NULL);
- HDsetbuf(stdout, NULL);
-#endif
-
- MPI_Init(&argc, &argv);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- dim0 = ROW_FACTOR * mpi_size;
- dim1 = COL_FACTOR * mpi_size;
-
- if (MAINPROCESS) {
- printf("===================================\n");
- printf("Shape Same Tests Start\n");
- printf(" express_test = %d.\n", EXPRESS_MODE /* GetTestExpress() */);
- printf("===================================\n");
- }
-
-    /* Attempt to turn off atexit post-processing so that, if errors occur
-     * during the test and the process is aborted, it does not hang in the
-     * atexit post-processing while trying to make MPI calls that may no
-     * longer work.
-     */
-    if (H5dont_atexit() < 0) {
-        if (MAINPROCESS)
-            printf("%d: Failed to turn off atexit processing. Continue.\n", mpi_rank);
-    }
- H5open();
- /* h5_show_hostname(); */
-
- fapl = H5Pcreate(H5P_FILE_ACCESS);
-
- /* Get the capability flag of the VOL connector being used */
- if (H5Pget_vol_cap_flags(fapl, &vol_cap_flags_g) < 0) {
- if (MAINPROCESS)
- printf("Failed to get the capability flag of the VOL connector being used\n");
-
- MPI_Finalize();
- return 0;
- }
-
-    /* Make sure the connector supports the API functions being tested. This test
-     * only uses a few API functions, such as H5Fcreate/close/delete and
-     * H5Dcreate/write/read/close.
-     */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
- if (MAINPROCESS)
- printf("API functions for basic file and dataset aren't supported with this connector\n");
-
- MPI_Finalize();
- return 0;
- }
-
-#if 0
- memset(filenames, 0, sizeof(filenames));
- for (int i = 0; i < NFILENAME; i++) {
- if (NULL == (filenames[i] = malloc(PATH_MAX))) {
- printf("couldn't allocate filename array\n");
- MPI_Abort(MPI_COMM_WORLD, -1);
- }
- }
-#endif
-
- /* Initialize testing framework */
- /* TestInit(argv[0], usage, parse_options); */
-
- if (parse_options(argc, argv)) {
- usage();
- return 1;
- }
-
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS) {
- printf("===================================\n"
- " Using Independent I/O with file set view to replace collective I/O \n"
- "===================================\n");
- }
-
- /* Shape Same tests using contiguous hyperslab */
-#if 0
- AddTest("sscontig1", sscontig1, NULL,
- "Cntg hslab, ind IO, cntg dsets", filenames[0]);
- AddTest("sscontig2", sscontig2, NULL,
- "Cntg hslab, col IO, cntg dsets", filenames[0]);
- AddTest("sscontig3", sscontig3, NULL,
- "Cntg hslab, ind IO, chnk dsets", filenames[0]);
- AddTest("sscontig4", sscontig4, NULL,
- "Cntg hslab, col IO, chnk dsets", filenames[0]);
-#endif
- if (MAINPROCESS) {
- printf("Cntg hslab, ind IO, cntg dsets\n");
- fflush(stdout);
- }
- sscontig1();
- if (MAINPROCESS) {
- printf("Cntg hslab, col IO, cntg dsets\n");
- fflush(stdout);
- }
- sscontig2();
- if (MAINPROCESS) {
- printf("Cntg hslab, ind IO, chnk dsets\n");
- fflush(stdout);
- }
- sscontig3();
- if (MAINPROCESS) {
- printf("Cntg hslab, col IO, chnk dsets\n");
- fflush(stdout);
- }
- sscontig4();
-
- /* Shape Same tests using checker board hyperslab */
-#if 0
- AddTest("sschecker1", sschecker1, NULL,
- "Check hslab, ind IO, cntg dsets", filenames[0]);
- AddTest("sschecker2", sschecker2, NULL,
- "Check hslab, col IO, cntg dsets", filenames[0]);
- AddTest("sschecker3", sschecker3, NULL,
- "Check hslab, ind IO, chnk dsets", filenames[0]);
- AddTest("sschecker4", sschecker4, NULL,
- "Check hslab, col IO, chnk dsets", filenames[0]);
-#endif
- if (MAINPROCESS) {
- printf("Check hslab, ind IO, cntg dsets\n");
- fflush(stdout);
- }
- sschecker1();
- if (MAINPROCESS) {
- printf("Check hslab, col IO, cntg dsets\n");
- fflush(stdout);
- }
- sschecker2();
- if (MAINPROCESS) {
- printf("Check hslab, ind IO, chnk dsets\n");
- fflush(stdout);
- }
- sschecker3();
- if (MAINPROCESS) {
- printf("Check hslab, col IO, chnk dsets\n");
- fflush(stdout);
- }
- sschecker4();
-
- /* Display testing information */
- /* TestInfo(argv[0]); */
-
- /* setup file access property list */
- H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
-
- /* Parse command line arguments */
- /* TestParseCmdLine(argc, argv); */
-
- /* Perform requested testing */
- /* PerformTests(); */
-
- /* make sure all processes are finished before final report, cleanup
- * and exit.
- */
- MPI_Barrier(MPI_COMM_WORLD);
-
- /* Display test summary, if requested */
- /* if (MAINPROCESS && GetTestSummary())
- TestSummary(); */
-
- /* Clean up test files */
- /* h5_clean_files(FILENAME, fapl); */
- H5Fdelete(FILENAME[0], fapl);
- H5Pclose(fapl);
-
- /* nerrors += GetTestNumErrs(); */
-
- /* Gather errors from all processes */
- {
- int temp;
- MPI_Allreduce(&nerrors, &temp, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
- nerrors = temp;
- }
-
- if (MAINPROCESS) { /* only process 0 reports */
- printf("===================================\n");
- if (nerrors)
- printf("***Shape Same tests detected %d errors***\n", nerrors);
- else
- printf("Shape Same tests finished successfully\n");
- printf("===================================\n");
- }
-
-#if 0
- for (int i = 0; i < NFILENAME; i++) {
- free(filenames[i]);
- filenames[i] = NULL;
- }
-#endif
-
- /* close HDF5 library */
- H5close();
-
- /* Release test infrastructure */
- /* TestShutdown(); */
-
- MPI_Finalize();
-
-    /* cannot just return (nerrors) because the exit code is limited to one byte */
- return (nerrors != 0);
-}
diff --git a/testpar/API/t_span_tree.c b/testpar/API/t_span_tree.c
deleted file mode 100644
index e2f148c..0000000
--- a/testpar/API/t_span_tree.c
+++ /dev/null
@@ -1,2588 +0,0 @@
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
-  This program tests irregular hyperslab selections with collective write and read.
-  The way to test whether collective write and read work is to use independent I/O
-  output to verify the collective output.
-
-  1) We write two datasets with the same hyperslab selection settings:
-     one in independent mode,
-     one in collective mode.
-  2) We read the two datasets back with the same hyperslab selection settings:
-     1. independent read of the independent output,
-        independent read of the collective output;
-        compare the results:
-        if they are the same, then collective write succeeds.
-     2. collective read of the independent output,
-        independent read of the independent output;
-        compare the results:
-        if they are the same, then collective read succeeds.
-
- */
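As a rough sketch of the verification scheme described above (dset_ind, dset_col, mem_space, file_space, wbuf, rbuf_ind, and rbuf_col are hypothetical handles and buffers assumed to already carry identical selections; this is not the test's literal code, which follows below in coll_write_test()/coll_read_test()):

    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

    /* Same data and selections: one independent write, one collective write */
    H5Dwrite(dset_ind, H5T_NATIVE_INT, mem_space, file_space, H5P_DEFAULT, wbuf);
    H5Dwrite(dset_col, H5T_NATIVE_INT, mem_space, file_space, dxpl, wbuf);

    /* Read both datasets back independently and compare element by element */
    H5Dread(dset_ind, H5T_NATIVE_INT, mem_space, file_space, H5P_DEFAULT, rbuf_ind);
    H5Dread(dset_col, H5T_NATIVE_INT, mem_space, file_space, H5P_DEFAULT, rbuf_col);
    /* equal buffers ==> the collective write produced the same data as the
       independent write */

    H5Pclose(dxpl);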
-
-#include "hdf5.h"
-#if 0
-#include "H5private.h"
-#endif
-#include "testphdf5.h"
-
-#define LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG 0
-
-static void coll_write_test(int chunk_factor);
-static void coll_read_test(void);
-
-/*-------------------------------------------------------------------------
- * Function: coll_irregular_cont_write
- *
- * Purpose: Wrapper to test the collectively irregular hyperslab write in
- * contiguous storage
- *
- * Return: Success: 0
- *
- * Failure: -1
- *
- *-------------------------------------------------------------------------
- */
-void
-coll_irregular_cont_write(void)
-{
- int mpi_rank;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
-            printf("    API functions for basic file, dataset, or dataset more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- coll_write_test(0);
-}
-
-/*-------------------------------------------------------------------------
- * Function: coll_irregular_cont_read
- *
- * Purpose: Wrapper to test the collectively irregular hyperslab read in
- * contiguous storage
- *
- * Return: Success: 0
- *
- * Failure: -1
- *
- *-------------------------------------------------------------------------
- */
-void
-coll_irregular_cont_read(void)
-{
- int mpi_rank;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
-            printf("    API functions for basic file, dataset, or dataset more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- coll_read_test();
-}
-
-/*-------------------------------------------------------------------------
- * Function: coll_irregular_simple_chunk_write
- *
- * Purpose: Wrapper to test the collectively irregular hyperslab write in
- * chunk storage(1 chunk)
- *
- * Return: Success: 0
- *
- * Failure: -1
- *
- *-------------------------------------------------------------------------
- */
-void
-coll_irregular_simple_chunk_write(void)
-{
- int mpi_rank;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
-            printf("    API functions for basic file, dataset, or dataset more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- coll_write_test(1);
-}
-
-/*-------------------------------------------------------------------------
- * Function: coll_irregular_simple_chunk_read
- *
- * Purpose: Wrapper to test the collectively irregular hyperslab read in chunk
- * storage(1 chunk)
- *
- * Return: Success: 0
- *
- * Failure: -1
- *
- *-------------------------------------------------------------------------
- */
-void
-coll_irregular_simple_chunk_read(void)
-{
- int mpi_rank;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
-            printf("    API functions for basic file, dataset, or dataset more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- coll_read_test();
-}
-
-/*-------------------------------------------------------------------------
- * Function: coll_irregular_complex_chunk_write
- *
- * Purpose: Wrapper to test the collectively irregular hyperslab write in chunk
- * storage(4 chunks)
- *
- * Return: Success: 0
- *
- * Failure: -1
- *
- *-------------------------------------------------------------------------
- */
-void
-coll_irregular_complex_chunk_write(void)
-{
- int mpi_rank;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
-            printf("    API functions for basic file, dataset, or dataset more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- coll_write_test(4);
-}
-
-/*-------------------------------------------------------------------------
- * Function: coll_irregular_complex_chunk_read
- *
- * Purpose: Wrapper to test the collectively irregular hyperslab read in chunk
- *              storage(4 chunks)
- *
- * Return: Success: 0
- *
- * Failure: -1
- *
- *-------------------------------------------------------------------------
- */
-void
-coll_irregular_complex_chunk_read(void)
-{
- int mpi_rank;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
- !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
-            printf("    API functions for basic file, dataset, or dataset more aren't supported with this "
- "connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- coll_read_test();
-}
-
-/*-------------------------------------------------------------------------
- * Function: coll_write_test
- *
- * Purpose: To test the collectively irregular hyperslab write in chunk
- * storage
- * Input: number of chunks on each dimension
- * if number is equal to 0, contiguous storage
- * Return: Success: 0
- *
- * Failure: -1
- *
- *-------------------------------------------------------------------------
- */
-void
-coll_write_test(int chunk_factor)
-{
-
- const char *filename;
- hid_t facc_plist, dxfer_plist, dcrt_plist;
- hid_t file, datasetc, dataseti; /* File and dataset identifiers */
- hid_t mspaceid1, mspaceid, fspaceid, fspaceid1; /* Dataspace identifiers */
-
- hsize_t mdim1[1]; /* Dimension size of the first dataset (in memory) */
- hsize_t fsdim[2]; /* Dimension sizes of the dataset (on disk) */
- hsize_t mdim[2]; /* Dimension sizes of the dataset in memory when we
- * read selection from the dataset on the disk
- */
-
- hsize_t start[2]; /* Start of hyperslab */
- hsize_t stride[2]; /* Stride of hyperslab */
- hsize_t count[2]; /* Block count */
- hsize_t block[2]; /* Block sizes */
- hsize_t chunk_dims[2];
-
- herr_t ret;
- int i;
- int fillvalue = 0; /* Fill value for the dataset */
-
- int *matrix_out = NULL;
- int *matrix_out1 = NULL; /* Buffer to read from the dataset */
- int *vector = NULL;
-
- int mpi_size, mpi_rank;
-
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- /*set up MPI parameters */
- MPI_Comm_size(comm, &mpi_size);
- MPI_Comm_rank(comm, &mpi_rank);
-
- /* Obtain file name */
- filename = PARATESTFILE /* GetTestParameters() */;
-
- /*
- * Buffers' initialization.
- */
-
- mdim1[0] = (hsize_t)(MSPACE1_DIM * mpi_size);
- mdim[0] = MSPACE_DIM1;
- mdim[1] = (hsize_t)(MSPACE_DIM2 * mpi_size);
- fsdim[0] = FSPACE_DIM1;
- fsdim[1] = (hsize_t)(FSPACE_DIM2 * mpi_size);
-
- vector = (int *)malloc(sizeof(int) * (size_t)mdim1[0] * (size_t)mpi_size);
- matrix_out = (int *)malloc(sizeof(int) * (size_t)mdim[0] * (size_t)mdim[1] * (size_t)mpi_size);
- matrix_out1 = (int *)malloc(sizeof(int) * (size_t)mdim[0] * (size_t)mdim[1] * (size_t)mpi_size);
-
- memset(vector, 0, sizeof(int) * (size_t)mdim1[0] * (size_t)mpi_size);
- vector[0] = vector[MSPACE1_DIM * mpi_size - 1] = -1;
- for (i = 1; i < MSPACE1_DIM * mpi_size - 1; i++)
- vector[i] = (int)i;
-
- /* Grab file access property list */
- facc_plist = create_faccess_plist(comm, info, facc_type);
- VRFY((facc_plist >= 0), "");
-
- /*
- * Create a file.
- */
- file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, facc_plist);
- VRFY((file >= 0), "H5Fcreate succeeded");
-
- /*
- * Create property list for a dataset and set up fill values.
- */
- dcrt_plist = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcrt_plist >= 0), "");
-
- ret = H5Pset_fill_value(dcrt_plist, H5T_NATIVE_INT, &fillvalue);
- VRFY((ret >= 0), "Fill value creation property list succeeded");
-
- if (chunk_factor != 0) {
- chunk_dims[0] = fsdim[0] / (hsize_t)chunk_factor;
- chunk_dims[1] = fsdim[1] / (hsize_t)chunk_factor;
- ret = H5Pset_chunk(dcrt_plist, 2, chunk_dims);
- VRFY((ret >= 0), "chunk creation property list succeeded");
- }
-
- /*
- *
-     * Create dataspace for the first dataset on disk.
- * dim1 = 9
- * dim2 = 3600
- *
- *
- */
- fspaceid = H5Screate_simple(FSPACE_RANK, fsdim, NULL);
- VRFY((fspaceid >= 0), "file dataspace created succeeded");
-
- /*
- * Create dataset in the file. Notice that creation
- * property list dcrt_plist is used.
- */
- datasetc =
- H5Dcreate2(file, "collect_write", H5T_NATIVE_INT, fspaceid, H5P_DEFAULT, dcrt_plist, H5P_DEFAULT);
- VRFY((datasetc >= 0), "dataset created succeeded");
-
- dataseti =
- H5Dcreate2(file, "independ_write", H5T_NATIVE_INT, fspaceid, H5P_DEFAULT, dcrt_plist, H5P_DEFAULT);
- VRFY((dataseti >= 0), "dataset created succeeded");
-
- /* The First selection for FILE
- *
- * block (3,2)
- * stride(4,3)
- * count (1,768/mpi_size)
- * start (0,1+768*3*mpi_rank/mpi_size)
- *
- */
-
- start[0] = FHSTART0;
- start[1] = (hsize_t)(FHSTART1 + mpi_rank * FHSTRIDE1 * FHCOUNT1);
- stride[0] = FHSTRIDE0;
- stride[1] = FHSTRIDE1;
- count[0] = FHCOUNT0;
- count[1] = FHCOUNT1;
- block[0] = FHBLOCK0;
- block[1] = FHBLOCK1;
-
- ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "hyperslab selection succeeded");
-
- /* The Second selection for FILE
- *
- * block (3,768)
- * stride (1,1)
- * count (1,1)
- * start (4,768*mpi_rank/mpi_size)
- *
- */
-
- start[0] = SHSTART0;
- start[1] = (hsize_t)(SHSTART1 + SHCOUNT1 * SHBLOCK1 * mpi_rank);
- stride[0] = SHSTRIDE0;
- stride[1] = SHSTRIDE1;
- count[0] = SHCOUNT0;
- count[1] = SHCOUNT1;
- block[0] = SHBLOCK0;
- block[1] = SHBLOCK1;
-
- ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0), "hyperslab selection succeeded");
-
- /*
- * Create dataspace for the first dataset in the memory
- * dim1 = 27000
- *
- */
- mspaceid1 = H5Screate_simple(MSPACE1_RANK, mdim1, NULL);
- VRFY((mspaceid1 >= 0), "memory dataspace created succeeded");
-
- /*
-     * Memory space is 1-D; this is a good test to check
- * whether a span-tree derived datatype needs to be built.
- * block 1
- * stride 1
- * count 6912/mpi_size
- * start 1
- *
- */
- start[0] = MHSTART0;
- stride[0] = MHSTRIDE0;
- count[0] = MHCOUNT0;
- block[0] = MHBLOCK0;
-
- ret = H5Sselect_hyperslab(mspaceid1, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "hyperslab selection succeeded");
-
- /* independent write */
- ret = H5Dwrite(dataseti, H5T_NATIVE_INT, mspaceid1, fspaceid, H5P_DEFAULT, vector);
- VRFY((ret >= 0), "dataset independent write succeed");
-
- dxfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxfer_plist >= 0), "");
-
- ret = H5Pset_dxpl_mpio(dxfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "MPIO data transfer property list succeed");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(dxfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* collective write */
- ret = H5Dwrite(datasetc, H5T_NATIVE_INT, mspaceid1, fspaceid, dxfer_plist, vector);
- VRFY((ret >= 0), "dataset collective write succeed");
-
- ret = H5Sclose(mspaceid1);
- VRFY((ret >= 0), "");
-
- ret = H5Sclose(fspaceid);
- VRFY((ret >= 0), "");
-
- /*
- * Close dataset.
- */
- ret = H5Dclose(datasetc);
- VRFY((ret >= 0), "");
-
- ret = H5Dclose(dataseti);
- VRFY((ret >= 0), "");
-
- /*
- * Close the file.
- */
- ret = H5Fclose(file);
- VRFY((ret >= 0), "");
- /*
- * Close property list
- */
-
- ret = H5Pclose(facc_plist);
- VRFY((ret >= 0), "");
- ret = H5Pclose(dxfer_plist);
- VRFY((ret >= 0), "");
- ret = H5Pclose(dcrt_plist);
- VRFY((ret >= 0), "");
-
- /*
- * Open the file.
- */
-
- /***
-
-     For testing the collective hyperslab selection write:
-     in this test we use independent reads to check the
-     correctness of the collective write against the
-     independent write.
-
-     In order to test this feature thoroughly, we choose
-     a different selection set for reading the data back out.
-
-
- ***/
-
- /* Obtain file access property list with MPI-IO driver */
- facc_plist = create_faccess_plist(comm, info, facc_type);
- VRFY((facc_plist >= 0), "");
-
- file = H5Fopen(filename, H5F_ACC_RDONLY, facc_plist);
- VRFY((file >= 0), "H5Fopen succeeded");
-
- /*
- * Open the dataset.
- */
- datasetc = H5Dopen2(file, "collect_write", H5P_DEFAULT);
- VRFY((datasetc >= 0), "H5Dopen2 succeeded");
-
- dataseti = H5Dopen2(file, "independ_write", H5P_DEFAULT);
- VRFY((dataseti >= 0), "H5Dopen2 succeeded");
-
- /*
- * Get dataspace of the open dataset.
- */
- fspaceid = H5Dget_space(datasetc);
- VRFY((fspaceid >= 0), "file dataspace obtained succeeded");
-
- fspaceid1 = H5Dget_space(dataseti);
- VRFY((fspaceid1 >= 0), "file dataspace obtained succeeded");
-
- /* The First selection for FILE to read
- *
- * block (1,1)
-     * stride(1,1)
- * count (3,768/mpi_size)
- * start (1,2+768*mpi_rank/mpi_size)
- *
- */
- start[0] = RFFHSTART0;
- start[1] = (hsize_t)(RFFHSTART1 + mpi_rank * RFFHCOUNT1);
- block[0] = RFFHBLOCK0;
- block[1] = RFFHBLOCK1;
- stride[0] = RFFHSTRIDE0;
- stride[1] = RFFHSTRIDE1;
- count[0] = RFFHCOUNT0;
- count[1] = RFFHCOUNT1;
-
- /* The first selection of the dataset generated by collective write */
- ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "hyperslab selection succeeded");
-
- /* The first selection of the dataset generated by independent write */
- ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "hyperslab selection succeeded");
-
- /* The Second selection for FILE to read
- *
- * block (1,1)
-     * stride(1,1)
- * count (3,1536/mpi_size)
- * start (2,4+1536*mpi_rank/mpi_size)
- *
- */
-
- start[0] = RFSHSTART0;
- start[1] = (hsize_t)(RFSHSTART1 + RFSHCOUNT1 * mpi_rank);
- block[0] = RFSHBLOCK0;
- block[1] = RFSHBLOCK1;
- stride[0] = RFSHSTRIDE0;
- stride[1] = RFSHSTRIDE0;
- count[0] = RFSHCOUNT0;
- count[1] = RFSHCOUNT1;
-
- /* The second selection of the dataset generated by collective write */
- ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0), "hyperslab selection succeeded");
-
- /* The second selection of the dataset generated by independent write */
- ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0), "hyperslab selection succeeded");
-
- /*
- * Create memory dataspace.
- * rank = 2
- * mdim1 = 9
- * mdim2 = 3600
- *
- */
- mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL);
-
- /*
- * Select two hyperslabs in memory. Hyperslabs have the same
-     * size and shape as the selected hyperslabs for the file dataspace.
- * Only the starting point is different.
- * The first selection
- * block (1,1)
-     * stride(1,1)
- * count (3,768/mpi_size)
- * start (0,768*mpi_rank/mpi_size)
- *
- */
-
- start[0] = RMFHSTART0;
- start[1] = (hsize_t)(RMFHSTART1 + mpi_rank * RMFHCOUNT1);
- block[0] = RMFHBLOCK0;
- block[1] = RMFHBLOCK1;
- stride[0] = RMFHSTRIDE0;
- stride[1] = RMFHSTRIDE1;
- count[0] = RMFHCOUNT0;
- count[1] = RMFHCOUNT1;
-
- ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "hyperslab selection succeeded");
-
- /*
-     * Select two hyperslabs in memory. The hyperslabs have the same
-     * size and shape as the selected hyperslabs for the file dataspace.
-     * Only the starting point is different.
- * The second selection
- * block (1,1)
- * stride(1,1)
- * count (3,1536/mpi_size)
- * start (1,2+1536*mpi_rank/mpi_size)
- *
- */
- start[0] = RMSHSTART0;
- start[1] = (hsize_t)(RMSHSTART1 + mpi_rank * RMSHCOUNT1);
- block[0] = RMSHBLOCK0;
- block[1] = RMSHBLOCK1;
- stride[0] = RMSHSTRIDE0;
- stride[1] = RMSHSTRIDE1;
- count[0] = RMSHCOUNT0;
- count[1] = RMSHCOUNT1;
-
- ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0), "hyperslab selection succeeded");
-
- /*
- * Initialize data buffer.
- */
-
- memset(matrix_out, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
- memset(matrix_out1, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
- /*
- * Read data back to the buffer matrix_out.
- */
-
- ret = H5Dread(datasetc, H5T_NATIVE_INT, mspaceid, fspaceid, H5P_DEFAULT, matrix_out);
- VRFY((ret >= 0), "H5D independent read succeed");
-
- ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid, H5P_DEFAULT, matrix_out1);
- VRFY((ret >= 0), "H5D independent read succeed");
-
- ret = 0;
-
- for (i = 0; i < MSPACE_DIM1 * MSPACE_DIM2 * mpi_size; i++) {
- if (matrix_out[i] != matrix_out1[i])
- ret = -1;
- if (ret < 0)
- break;
- }
-
- VRFY((ret >= 0), "H5D irregular collective write succeed");
-
- /*
-     * Close memory and file dataspaces.
- */
- ret = H5Sclose(mspaceid);
- VRFY((ret >= 0), "");
- ret = H5Sclose(fspaceid);
- VRFY((ret >= 0), "");
-
- /*
- * Close dataset.
- */
- ret = H5Dclose(dataseti);
- VRFY((ret >= 0), "");
-
- ret = H5Dclose(datasetc);
- VRFY((ret >= 0), "");
-
- /*
- * Close property list
- */
-
- ret = H5Pclose(facc_plist);
- VRFY((ret >= 0), "");
-
- /*
- * Close the file.
- */
- ret = H5Fclose(file);
- VRFY((ret >= 0), "");
-
- if (vector)
- free(vector);
- if (matrix_out)
- free(matrix_out);
- if (matrix_out1)
- free(matrix_out1);
-
- return;
-}
-
-/*-------------------------------------------------------------------------
- * Function: coll_read_test
- *
- * Purpose: To test the collectively irregular hyperslab read in
- *          contiguous or chunked storage (takes no input; the storage
- *          layout is whatever the preceding write test created)
- * Return: Success: 0
- *
- * Failure: -1
- *
- *-------------------------------------------------------------------------
- */
-static void
-coll_read_test(void)
-{
-
- const char *filename;
- hid_t facc_plist, dxfer_plist;
- hid_t file, dataseti; /* File and dataset identifiers */
- hid_t mspaceid, fspaceid1; /* Dataspace identifiers */
-
- /* Dimension sizes of the dataset (on disk) */
- hsize_t mdim[2]; /* Dimension sizes of the dataset in memory when we
- * read selection from the dataset on the disk
- */
-
- hsize_t start[2]; /* Start of hyperslab */
- hsize_t stride[2]; /* Stride of hyperslab */
- hsize_t count[2]; /* Block count */
- hsize_t block[2]; /* Block sizes */
- herr_t ret;
-
- int i;
-
- int *matrix_out;
- int *matrix_out1; /* Buffer to read from the dataset */
-
- int mpi_size, mpi_rank;
-
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- /*set up MPI parameters */
- MPI_Comm_size(comm, &mpi_size);
- MPI_Comm_rank(comm, &mpi_rank);
-
- /* Obtain file name */
- filename = PARATESTFILE /* GetTestParameters() */;
-
- /* Initialize the buffer */
-
- mdim[0] = MSPACE_DIM1;
- mdim[1] = (hsize_t)(MSPACE_DIM2 * mpi_size);
- matrix_out = (int *)malloc(sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
- matrix_out1 = (int *)malloc(sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
-
- /*** For testing collective hyperslab selection read ***/
-
- /* Obtain file access property list */
- facc_plist = create_faccess_plist(comm, info, facc_type);
- VRFY((facc_plist >= 0), "");
-
- /*
- * Open the file.
- */
- file = H5Fopen(filename, H5F_ACC_RDONLY, facc_plist);
- VRFY((file >= 0), "H5Fopen succeeded");
-
- /*
- * Open the dataset.
- */
- dataseti = H5Dopen2(file, "independ_write", H5P_DEFAULT);
- VRFY((dataseti >= 0), "H5Dopen2 succeeded");
-
- /*
- * Get dataspace of the open dataset.
- */
- fspaceid1 = H5Dget_space(dataseti);
- VRFY((fspaceid1 >= 0), "file dataspace obtained succeeded");
-
- /* The First selection for FILE to read
- *
- * block (1,1)
-     * stride(1,1)
- * count (3,768/mpi_size)
- * start (1,2+768*mpi_rank/mpi_size)
- *
- */
- start[0] = RFFHSTART0;
- start[1] = (hsize_t)(RFFHSTART1 + mpi_rank * RFFHCOUNT1);
- block[0] = RFFHBLOCK0;
- block[1] = RFFHBLOCK1;
- stride[0] = RFFHSTRIDE0;
- stride[1] = RFFHSTRIDE1;
- count[0] = RFFHCOUNT0;
- count[1] = RFFHCOUNT1;
-
- ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "hyperslab selection succeeded");
-
- /* The Second selection for FILE to read
- *
- * block (1,1)
-     * stride(1,1)
- * count (3,1536/mpi_size)
- * start (2,4+1536*mpi_rank/mpi_size)
- *
- */
- start[0] = RFSHSTART0;
- start[1] = (hsize_t)(RFSHSTART1 + RFSHCOUNT1 * mpi_rank);
- block[0] = RFSHBLOCK0;
- block[1] = RFSHBLOCK1;
- stride[0] = RFSHSTRIDE0;
- stride[1] = RFSHSTRIDE0;
- count[0] = RFSHCOUNT0;
- count[1] = RFSHCOUNT1;
-
- ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0), "hyperslab selection succeeded");
-
- /*
- * Create memory dataspace.
- */
- mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL);
-
- /*
- * Select two hyperslabs in memory. Hyperslabs have the same
- * size and shape as the selected hyperslabs for the file dataspace.
- * Only the starting point is different.
- * The first selection
- * block (1,1)
-     * stride(1,1)
- * count (3,768/mpi_size)
- * start (0,768*mpi_rank/mpi_size)
- *
- */
-
- start[0] = RMFHSTART0;
- start[1] = (hsize_t)(RMFHSTART1 + mpi_rank * RMFHCOUNT1);
- block[0] = RMFHBLOCK0;
- block[1] = RMFHBLOCK1;
- stride[0] = RMFHSTRIDE0;
- stride[1] = RMFHSTRIDE1;
- count[0] = RMFHCOUNT0;
- count[1] = RMFHCOUNT1;
- ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "hyperslab selection succeeded");
-
- /*
-     * Select two hyperslabs in memory. The hyperslabs have the same
-     * size and shape as the selected hyperslabs for the file dataspace.
-     * Only the starting point is different.
- * The second selection
- * block (1,1)
- * stride(1,1)
- * count (3,1536/mpi_size)
- * start (1,2+1536*mpi_rank/mpi_size)
- *
- */
- start[0] = RMSHSTART0;
- start[1] = (hsize_t)(RMSHSTART1 + mpi_rank * RMSHCOUNT1);
- block[0] = RMSHBLOCK0;
- block[1] = RMSHBLOCK1;
- stride[0] = RMSHSTRIDE0;
- stride[1] = RMSHSTRIDE1;
- count[0] = RMSHCOUNT0;
- count[1] = RMSHCOUNT1;
- ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0), "hyperslab selection succeeded");
-
- /*
- * Initialize data buffer.
- */
-
- memset(matrix_out, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
- memset(matrix_out1, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
-
- /*
- * Read data back to the buffer matrix_out.
- */
-
- dxfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxfer_plist >= 0), "");
-
- ret = H5Pset_dxpl_mpio(dxfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "MPIO data transfer property list succeed");
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(dxfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "set independent IO collectively succeeded");
- }
-
- /* Collective read */
- ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1, dxfer_plist, matrix_out);
-    VRFY((ret >= 0), "H5D collective read succeeded");
-
- ret = H5Pclose(dxfer_plist);
- VRFY((ret >= 0), "");
-
- /* Independent read */
- ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1, H5P_DEFAULT, matrix_out1);
- VRFY((ret >= 0), "H5D independent read succeed");
-
- ret = 0;
- for (i = 0; i < MSPACE_DIM1 * MSPACE_DIM2 * mpi_size; i++) {
- if (matrix_out[i] != matrix_out1[i])
- ret = -1;
- if (ret < 0)
- break;
- }
- VRFY((ret >= 0), "H5D contiguous irregular collective read succeed");
-
- /*
- * Free read buffers.
- */
- free(matrix_out);
- free(matrix_out1);
-
- /*
-     * Close memory and file dataspaces.
- */
- ret = H5Sclose(mspaceid);
- VRFY((ret >= 0), "");
- ret = H5Sclose(fspaceid1);
- VRFY((ret >= 0), "");
-
- /*
- * Close dataset.
- */
- ret = H5Dclose(dataseti);
- VRFY((ret >= 0), "");
-
- /*
- * Close property list
- */
- ret = H5Pclose(facc_plist);
- VRFY((ret >= 0), "");
-
- /*
- * Close the file.
- */
- ret = H5Fclose(file);
- VRFY((ret >= 0), "");
-
- return;
-}
-
-/****************************************************************
-**
-** lower_dim_size_comp_test__select_checker_board():
-**
-** Given a dataspace of tgt_rank, and dimensions:
-**
-** (mpi_size + 1), edge_size, ... , edge_size
-**
-** edge_size, and a checker_edge_size, select a checker
-** board selection of a sel_rank (sel_rank < tgt_rank)
-** dimensional slice through the dataspace parallel to the
-** sel_rank fastest changing indices, with origin (in the
-** higher indices) as indicated by the start array.
-**
-** Note that this function is hard-coded to presume a
-** maximum dataspace rank of 5.
-**
-** While this maximum is declared as a constant, increasing
-** it will require extensive coding in addition to changing
-** the value of the constant.
-**
-** JRM -- 11/11/09
-**
-****************************************************************/
-
-#define LDSCT_DS_RANK 5
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
-#define LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK 0
-#endif
-
-#define LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG 0
-
-static void
-lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t tgt_sid, const int tgt_rank,
- const hsize_t dims[LDSCT_DS_RANK], const int checker_edge_size,
- const int sel_rank, hsize_t sel_start[])
-{
-#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
- const char *fcnName = "lower_dim_size_comp_test__select_checker_board():";
-#endif
- bool first_selection = true;
- int i, j, k, l, m;
- int ds_offset;
- int sel_offset;
- const int test_max_rank = LDSCT_DS_RANK; /* must update code if */
- /* this changes */
- hsize_t base_count;
- hsize_t offset_count;
- hsize_t start[LDSCT_DS_RANK];
- hsize_t stride[LDSCT_DS_RANK];
- hsize_t count[LDSCT_DS_RANK];
- hsize_t block[LDSCT_DS_RANK];
- herr_t ret; /* Generic return value */
-
-#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%s:%d: dims/checker_edge_size = %d %d %d %d %d / %d\n", fcnName, mpi_rank,
- (int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)dims[4], checker_edge_size);
- }
-#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
-
- assert(0 < checker_edge_size);
- assert(0 < sel_rank);
- assert(sel_rank <= tgt_rank);
- assert(tgt_rank <= test_max_rank);
- assert(test_max_rank <= LDSCT_DS_RANK);
-
- sel_offset = test_max_rank - sel_rank;
- assert(sel_offset >= 0);
-
- ds_offset = test_max_rank - tgt_rank;
- assert(ds_offset >= 0);
- assert(ds_offset <= sel_offset);
-
- assert((hsize_t)checker_edge_size <= dims[sel_offset]);
- assert(dims[sel_offset] == 10);
-
-#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n", fcnName, mpi_rank, sel_rank, sel_offset);
- fprintf(stdout, "%s:%d: tgt_rank/ds_offset = %d/%d.\n", fcnName, mpi_rank, tgt_rank, ds_offset);
- }
-#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
-
- /* First, compute the base count (which assumes start == 0
- * for the associated offset) and offset_count (which
- * assumes start == checker_edge_size for the associated
- * offset).
- *
- * Note that the following computation depends on the C99
- * requirement that integer division discard any fraction
- * (truncation towards zero) to function correctly. As we
- * now require C99, this shouldn't be a problem, but note
- * it may save us some pain if we are ever obliged to support
- * pre-C99 compilers again.
- */
-
- base_count = dims[sel_offset] / (hsize_t)(checker_edge_size * 2);
-
- if ((dims[sel_rank] % (hsize_t)(checker_edge_size * 2)) > 0) {
-
- base_count++;
- }
-
- offset_count =
- (hsize_t)((dims[sel_offset] - (hsize_t)checker_edge_size) / ((hsize_t)(checker_edge_size * 2)));
-
- if (((dims[sel_rank] - (hsize_t)checker_edge_size) % ((hsize_t)(checker_edge_size * 2))) > 0) {
-
- offset_count++;
- }
-
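    /* Illustrative worked example (not part of the original source): with
     * dims[sel_offset] == 10 (asserted above) and, say, checker_edge_size == 3,
     * and assuming the remainder tests above, which use dims[sel_rank], also
     * see a value of 10, the code above yields base_count = 10 / 6 = 1, then
     * +1 for the nonzero remainder, giving 2 (selected blocks starting at
     * offsets 0 and 6), and offset_count = (10 - 3) / 6 = 1, then +1 for the
     * nonzero remainder, giving 2 (blocks starting at offsets 3 and 9, the
     * last of which is clipped by the final H5S_SELECT_AND selection below).
     */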
-#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
-        fprintf(stdout, "%s:%d: base_count/offset_count = %d/%d.\n", fcnName, mpi_rank, (int)base_count,
-                (int)offset_count);
- }
-#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
-
- /* Now set up the stride and block arrays, and portions of the start
- * and count arrays that will not be altered during the selection of
- * the checkerboard.
- */
- i = 0;
- while (i < ds_offset) {
-
- /* these values should never be used */
- start[i] = 0;
- stride[i] = 0;
- count[i] = 0;
- block[i] = 0;
-
- i++;
- }
-
- while (i < sel_offset) {
-
- start[i] = sel_start[i];
- stride[i] = 2 * dims[i];
- count[i] = 1;
- block[i] = 1;
-
- i++;
- }
-
- while (i < test_max_rank) {
-
- stride[i] = (hsize_t)(2 * checker_edge_size);
- block[i] = (hsize_t)checker_edge_size;
-
- i++;
- }
-
- i = 0;
- do {
- if (0 >= sel_offset) {
-
- if (i == 0) {
-
- start[0] = 0;
- count[0] = base_count;
- }
- else {
-
- start[0] = (hsize_t)checker_edge_size;
- count[0] = offset_count;
- }
- }
-
- j = 0;
- do {
- if (1 >= sel_offset) {
-
- if (j == 0) {
-
- start[1] = 0;
- count[1] = base_count;
- }
- else {
-
- start[1] = (hsize_t)checker_edge_size;
- count[1] = offset_count;
- }
- }
-
- k = 0;
- do {
- if (2 >= sel_offset) {
-
- if (k == 0) {
-
- start[2] = 0;
- count[2] = base_count;
- }
- else {
-
- start[2] = (hsize_t)checker_edge_size;
- count[2] = offset_count;
- }
- }
-
- l = 0;
- do {
- if (3 >= sel_offset) {
-
- if (l == 0) {
-
- start[3] = 0;
- count[3] = base_count;
- }
- else {
-
- start[3] = (hsize_t)checker_edge_size;
- count[3] = offset_count;
- }
- }
-
- m = 0;
- do {
- if (4 >= sel_offset) {
-
- if (m == 0) {
-
- start[4] = 0;
- count[4] = base_count;
- }
- else {
-
- start[4] = (hsize_t)checker_edge_size;
- count[4] = offset_count;
- }
- }
-
- if (((i + j + k + l + m) % 2) == 0) {
-
-#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
-
- fprintf(stdout, "%s%d: *** first_selection = %d ***\n", fcnName, mpi_rank,
- (int)first_selection);
- fprintf(stdout, "%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n", fcnName, mpi_rank, i,
- j, k, l, m);
- fprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, mpi_rank,
- (int)start[0], (int)start[1], (int)start[2], (int)start[3],
- (int)start[4]);
- fprintf(stdout, "%s:%d: stride = %d %d %d %d %d.\n", fcnName, mpi_rank,
- (int)stride[0], (int)stride[1], (int)stride[2], (int)stride[3],
- (int)stride[4]);
- fprintf(stdout, "%s:%d: count = %d %d %d %d %d.\n", fcnName, mpi_rank,
- (int)count[0], (int)count[1], (int)count[2], (int)count[3],
- (int)count[4]);
- fprintf(stdout, "%s:%d: block = %d %d %d %d %d.\n", fcnName, mpi_rank,
- (int)block[0], (int)block[1], (int)block[2], (int)block[3],
- (int)block[4]);
- fprintf(stdout, "%s:%d: n-cube extent dims = %d.\n", fcnName, mpi_rank,
- H5Sget_simple_extent_ndims(tgt_sid));
- fprintf(stdout, "%s:%d: selection rank = %d.\n", fcnName, mpi_rank, sel_rank);
- }
-#endif
-
- if (first_selection) {
-
- first_selection = false;
-
- ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_SET, &(start[ds_offset]),
- &(stride[ds_offset]), &(count[ds_offset]),
- &(block[ds_offset]));
-
- VRFY((ret != FAIL), "H5Sselect_hyperslab(SET) succeeded");
- }
- else {
-
- ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_OR, &(start[ds_offset]),
- &(stride[ds_offset]), &(count[ds_offset]),
- &(block[ds_offset]));
-
- VRFY((ret != FAIL), "H5Sselect_hyperslab(OR) succeeded");
- }
- }
-
- m++;
-
- } while ((m <= 1) && (4 >= sel_offset));
-
- l++;
-
- } while ((l <= 1) && (3 >= sel_offset));
-
- k++;
-
- } while ((k <= 1) && (2 >= sel_offset));
-
- j++;
-
- } while ((j <= 1) && (1 >= sel_offset));
-
- i++;
-
- } while ((i <= 1) && (0 >= sel_offset));
-
-#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank,
- (int)H5Sget_select_npoints(tgt_sid));
- }
-#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
-
- /* Clip the selection back to the dataspace proper. */
-
- for (i = 0; i < test_max_rank; i++) {
-
- start[i] = 0;
- stride[i] = dims[i];
- count[i] = 1;
- block[i] = dims[i];
- }
-
- ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_AND, start, stride, count, block);
-
- VRFY((ret != FAIL), "H5Sselect_hyperslab(AND) succeeded");
-
-#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank,
- (int)H5Sget_select_npoints(tgt_sid));
- fprintf(stdout, "%s%d: done.\n", fcnName, mpi_rank);
- }
-#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
-
- return;
-
-} /* lower_dim_size_comp_test__select_checker_board() */
-
-/****************************************************************
-**
-** lower_dim_size_comp_test__verify_data():
-**
-** Examine the supplied buffer to see if it contains the
-** expected data. Return true if it does, and false
-** otherwise.
-**
-** The supplied buffer is presumed to be this process's slice
-** of the target data set. Each such slice will be an
-** n-cube of rank (rank -1) and the supplied edge_size with
-** origin (mpi_rank, 0, ... , 0) in the target data set.
-**
-** Further, the buffer is presumed to be the result of reading
-** or writing a checkerboard selection of an m (1 <= m <
-**    rank) dimensional slice through this process's slice
-** of the target data set. Also, this slice must be parallel
-** to the fastest changing indices.
-**
-** It is further presumed that the buffer was zeroed before
-** the read/write, and that the full target data set (i.e.
-** the buffer/data set for all processes) was initialized
-** with the natural numbers listed in order from the origin
-** along the fastest changing axis.
-**
-** Thus, for a 20x10x10 dataset, the value stored in location
-** (x, y, z) (assuming that z is the fastest changing index
-** and x the slowest) is assumed to be:
-**
-** (10 * 10 * x) + (10 * y) + z
-**
-** Further, supposing that this is process 10, this process's
-** slice of the dataset would be a 10 x 10 2-cube with origin
-** (10, 0, 0) in the data set, and would be initialized (prior
-** to the checkerboard selection) as follows:
-**
-** 1000, 1001, 1002, ... 1008, 1009
-** 1010, 1011, 1012, ... 1018, 1019
-** . . . . .
-** . . . . .
-** . . . . .
-** 1090, 1091, 1092, ... 1098, 1099
-**
-**    In the case of a read from the process's slice of another
-** data set of different rank, the values expected will have
-** to be adjusted accordingly. This is done via the
-** first_expected_val parameter.
-**
-** Finally, the function presumes that the first element
-**    of the buffer resides at the origin of either
-** a selected or an unselected checker. (Translation:
-** if partial checkers appear in the buffer, they will
-** intersect the edges of the n-cube opposite the origin.)
-**
-****************************************************************/
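As a quick check of the formula above (illustrative only): in the 20 x 10 x 10 case, the element at (x, y, z) = (10, 3, 7) is expected to hold (10 * 10 * 10) + (10 * 3) + 7 = 1037, which is consistent with process 10's slice as laid out above (the row for y = 3 runs 1030 ... 1039).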
-
-#define LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG 0
-
-static bool
-lower_dim_size_comp_test__verify_data(uint32_t *buf_ptr,
-#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- const int mpi_rank,
-#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
- const int rank, const int edge_size, const int checker_edge_size,
- uint32_t first_expected_val, bool buf_starts_in_checker)
-{
-#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- const char *fcnName = "lower_dim_size_comp_test__verify_data():";
-#endif
- bool good_data = true;
- bool in_checker;
- bool start_in_checker[5];
- uint32_t expected_value;
- uint32_t *val_ptr;
- int i, j, k, l, m; /* to track position in n-cube */
- int v, w, x, y, z; /* to track position in checker */
- const int test_max_rank = 5; /* code changes needed if this is increased */
-
- assert(buf_ptr != NULL);
- assert(0 < rank);
- assert(rank <= test_max_rank);
- assert(edge_size >= 6);
- assert(0 < checker_edge_size);
- assert(checker_edge_size <= edge_size);
- assert(test_max_rank <= LDSCT_DS_RANK);
-
-#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%s mpi_rank = %d.\n", fcnName, mpi_rank);
- fprintf(stdout, "%s rank = %d.\n", fcnName, rank);
- fprintf(stdout, "%s edge_size = %d.\n", fcnName, edge_size);
- fprintf(stdout, "%s checker_edge_size = %d.\n", fcnName, checker_edge_size);
- fprintf(stdout, "%s first_expected_val = %d.\n", fcnName, (int)first_expected_val);
- fprintf(stdout, "%s starts_in_checker = %d.\n", fcnName, (int)buf_starts_in_checker);
- }
-#endif
-
- val_ptr = buf_ptr;
- expected_value = first_expected_val;
-
- i = 0;
- v = 0;
- start_in_checker[0] = buf_starts_in_checker;
- do {
- if (v >= checker_edge_size) {
-
- start_in_checker[0] = !start_in_checker[0];
- v = 0;
- }
-
- j = 0;
- w = 0;
- start_in_checker[1] = start_in_checker[0];
- do {
- if (w >= checker_edge_size) {
-
- start_in_checker[1] = !start_in_checker[1];
- w = 0;
- }
-
- k = 0;
- x = 0;
- start_in_checker[2] = start_in_checker[1];
- do {
- if (x >= checker_edge_size) {
-
- start_in_checker[2] = !start_in_checker[2];
- x = 0;
- }
-
- l = 0;
- y = 0;
- start_in_checker[3] = start_in_checker[2];
- do {
- if (y >= checker_edge_size) {
-
- start_in_checker[3] = !start_in_checker[3];
- y = 0;
- }
-
- m = 0;
- z = 0;
-#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%d, %d, %d, %d, %d:", i, j, k, l, m);
- }
-#endif
- in_checker = start_in_checker[3];
- do {
-#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, " %d", (int)(*val_ptr));
- }
-#endif
- if (z >= checker_edge_size) {
-
- in_checker = !in_checker;
- z = 0;
- }
-
- if (in_checker) {
-
- if (*val_ptr != expected_value) {
-
- good_data = false;
- }
-
- /* zero out buffer for reuse */
- *val_ptr = 0;
- }
- else if (*val_ptr != 0) {
-
- good_data = false;
-
- /* zero out buffer for reuse */
- *val_ptr = 0;
- }
-
- val_ptr++;
- expected_value++;
- m++;
- z++;
-
- } while ((rank >= (test_max_rank - 4)) && (m < edge_size));
-#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "\n");
- }
-#endif
- l++;
- y++;
- } while ((rank >= (test_max_rank - 3)) && (l < edge_size));
- k++;
- x++;
- } while ((rank >= (test_max_rank - 2)) && (k < edge_size));
- j++;
- w++;
- } while ((rank >= (test_max_rank - 1)) && (j < edge_size));
- i++;
- v++;
- } while ((rank >= test_max_rank) && (i < edge_size));
-
- return (good_data);
-
-} /* lower_dim_size_comp_test__verify_data() */
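-
-/* Illustrative sketch (not part of the original test source; the helper name
- * is hypothetical): for the 20 x 10 x 10 example in the comment above, with
- * z the fastest changing index and x the slowest, the value expected at
- * position (x, y, z) of the full data set can be computed as follows.
- */
-static uint32_t
-lower_dim_size_comp_test__example_expected_val(int x, int y, int z)
-{
-    return (uint32_t)((10 * 10 * x) + (10 * y) + z);
-}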
-
-/*-------------------------------------------------------------------------
- * Function: lower_dim_size_comp_test__run_test()
- *
- * Purpose: Verify that a bug in the computation of the size of the
- * lower dimensions of a dataspace in H5S_obtain_datatype()
- * has been corrected.
- *
- * Return: void
- *-------------------------------------------------------------------------
- */
-
-#define LDSCT_DS_RANK 5
-
-static void
-lower_dim_size_comp_test__run_test(const int chunk_edge_size, const bool use_collective_io,
- const hid_t dset_type)
-{
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- const char *fcnName = "lower_dim_size_comp_test__run_test()";
- int rank;
- hsize_t dims[32];
- hsize_t max_dims[32];
-#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
- const char *filename;
- bool data_ok = false;
- bool mis_match = false;
- int i;
- int start_index;
- int stop_index;
- int mrc;
- int mpi_rank;
- int mpi_size;
- MPI_Comm mpi_comm = MPI_COMM_NULL;
- MPI_Info mpi_info = MPI_INFO_NULL;
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist = H5P_DEFAULT;
- size_t small_ds_size;
- size_t small_ds_slice_size;
- size_t large_ds_size;
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- size_t large_ds_slice_size;
-#endif
- uint32_t expected_value;
- uint32_t *small_ds_buf_0 = NULL;
- uint32_t *small_ds_buf_1 = NULL;
- uint32_t *large_ds_buf_0 = NULL;
- uint32_t *large_ds_buf_1 = NULL;
- uint32_t *ptr_0;
- uint32_t *ptr_1;
- hsize_t small_chunk_dims[LDSCT_DS_RANK];
- hsize_t large_chunk_dims[LDSCT_DS_RANK];
- hsize_t small_dims[LDSCT_DS_RANK];
- hsize_t large_dims[LDSCT_DS_RANK];
- hsize_t start[LDSCT_DS_RANK];
- hsize_t stride[LDSCT_DS_RANK];
- hsize_t count[LDSCT_DS_RANK];
- hsize_t block[LDSCT_DS_RANK];
- hsize_t small_sel_start[LDSCT_DS_RANK];
- hsize_t large_sel_start[LDSCT_DS_RANK];
- hid_t full_mem_small_ds_sid;
- hid_t full_file_small_ds_sid;
- hid_t mem_small_ds_sid;
- hid_t file_small_ds_sid;
- hid_t full_mem_large_ds_sid;
- hid_t full_file_large_ds_sid;
- hid_t mem_large_ds_sid;
- hid_t file_large_ds_sid;
- hid_t small_ds_dcpl_id = H5P_DEFAULT;
- hid_t large_ds_dcpl_id = H5P_DEFAULT;
- hid_t small_dataset; /* Dataset ID */
- hid_t large_dataset; /* Dataset ID */
- htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
-
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- assert(mpi_size >= 1);
-
- mpi_comm = MPI_COMM_WORLD;
- mpi_info = MPI_INFO_NULL;
-
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%s:%d: chunk_edge_size = %d.\n", fcnName, mpi_rank, (int)chunk_edge_size);
- fprintf(stdout, "%s:%d: use_collective_io = %d.\n", fcnName, mpi_rank, (int)use_collective_io);
- }
-#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
-
- small_ds_size = (size_t)((mpi_size + 1) * 1 * 1 * 10 * 10);
- small_ds_slice_size = (size_t)(1 * 1 * 10 * 10);
- large_ds_size = (size_t)((mpi_size + 1) * 10 * 10 * 10 * 10);
-
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- large_ds_slice_size = (size_t)(10 * 10 * 10 * 10);
-
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%s:%d: small ds size / slice size = %d / %d.\n", fcnName, mpi_rank,
- (int)small_ds_size, (int)small_ds_slice_size);
- fprintf(stdout, "%s:%d: large ds size / slice size = %d / %d.\n", fcnName, mpi_rank,
- (int)large_ds_size, (int)large_ds_slice_size);
- }
-#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
-
- /* Allocate buffers */
- small_ds_buf_0 = (uint32_t *)malloc(sizeof(uint32_t) * small_ds_size);
- VRFY((small_ds_buf_0 != NULL), "malloc of small_ds_buf_0 succeeded");
-
- small_ds_buf_1 = (uint32_t *)malloc(sizeof(uint32_t) * small_ds_size);
- VRFY((small_ds_buf_1 != NULL), "malloc of small_ds_buf_1 succeeded");
-
- large_ds_buf_0 = (uint32_t *)malloc(sizeof(uint32_t) * large_ds_size);
- VRFY((large_ds_buf_0 != NULL), "malloc of large_ds_buf_0 succeeded");
-
- large_ds_buf_1 = (uint32_t *)malloc(sizeof(uint32_t) * large_ds_size);
- VRFY((large_ds_buf_1 != NULL), "malloc of large_ds_buf_1 succeeded");
-
- /* initialize the buffers */
-
- ptr_0 = small_ds_buf_0;
- ptr_1 = small_ds_buf_1;
-
- for (i = 0; i < (int)small_ds_size; i++) {
-
- *ptr_0 = (uint32_t)i;
- *ptr_1 = 0;
-
- ptr_0++;
- ptr_1++;
- }
-
- ptr_0 = large_ds_buf_0;
- ptr_1 = large_ds_buf_1;
-
- for (i = 0; i < (int)large_ds_size; i++) {
-
- *ptr_0 = (uint32_t)i;
- *ptr_1 = 0;
-
- ptr_0++;
- ptr_1++;
- }
-
- /* get the file name */
-
- filename = (const char *)PARATESTFILE /* GetTestParameters() */;
- assert(filename != NULL);
-
- /* ----------------------------------------
- * CREATE AN HDF5 FILE WITH PARALLEL ACCESS
- * ---------------------------------------*/
- /* setup file access template */
- acc_tpl = create_faccess_plist(mpi_comm, mpi_info, facc_type);
- VRFY((acc_tpl >= 0), "create_faccess_plist() succeeded");
-
- /* create the file collectively */
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
- VRFY((fid >= 0), "H5Fcreate succeeded");
-
- MESG("File opened.");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded");
-
- /* setup dims: */
- small_dims[0] = (hsize_t)(mpi_size + 1);
- small_dims[1] = 1;
- small_dims[2] = 1;
- small_dims[3] = 10;
- small_dims[4] = 10;
-
- large_dims[0] = (hsize_t)(mpi_size + 1);
- large_dims[1] = 10;
- large_dims[2] = 10;
- large_dims[3] = 10;
- large_dims[4] = 10;
-
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%s:%d: small_dims[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)small_dims[0],
- (int)small_dims[1], (int)small_dims[2], (int)small_dims[3], (int)small_dims[4]);
- fprintf(stdout, "%s:%d: large_dims[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)large_dims[0],
- (int)large_dims[1], (int)large_dims[2], (int)large_dims[3], (int)large_dims[4]);
- }
-#endif
-
- /* create dataspaces */
-
- full_mem_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
- VRFY((full_mem_small_ds_sid != 0), "H5Screate_simple() full_mem_small_ds_sid succeeded");
-
- full_file_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
- VRFY((full_file_small_ds_sid != 0), "H5Screate_simple() full_file_small_ds_sid succeeded");
-
- mem_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
- VRFY((mem_small_ds_sid != 0), "H5Screate_simple() mem_small_ds_sid succeeded");
-
- file_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
- VRFY((file_small_ds_sid != 0), "H5Screate_simple() file_small_ds_sid succeeded");
-
- full_mem_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
- VRFY((full_mem_large_ds_sid != 0), "H5Screate_simple() full_mem_large_ds_sid succeeded");
-
- full_file_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
- VRFY((full_file_large_ds_sid != 0), "H5Screate_simple() full_file_large_ds_sid succeeded");
-
- mem_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
- VRFY((mem_large_ds_sid != 0), "H5Screate_simple() mem_large_ds_sid succeeded");
-
- file_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
- VRFY((file_large_ds_sid != 0), "H5Screate_simple() file_large_ds_sid succeeded");
-
- /* Select the entire extent of the full small ds dataspaces */
- ret = H5Sselect_all(full_mem_small_ds_sid);
- VRFY((ret != FAIL), "H5Sselect_all(full_mem_small_ds_sid) succeeded");
-
- ret = H5Sselect_all(full_file_small_ds_sid);
- VRFY((ret != FAIL), "H5Sselect_all(full_file_small_ds_sid) succeeded");
-
- /* Select the entire extent of the full large ds dataspaces */
- ret = H5Sselect_all(full_mem_large_ds_sid);
- VRFY((ret != FAIL), "H5Sselect_all(full_mem_large_ds_sid) succeeded");
-
- ret = H5Sselect_all(full_file_large_ds_sid);
- VRFY((ret != FAIL), "H5Sselect_all(full_file_large_ds_sid) succeeded");
-
- /* if chunk edge size is greater than zero, set up the small and
- * large data set creation property lists to specify chunked
- * datasets.
- */
- if (chunk_edge_size > 0) {
-
- small_chunk_dims[0] = (hsize_t)(1);
- small_chunk_dims[1] = small_chunk_dims[2] = (hsize_t)1;
- small_chunk_dims[3] = small_chunk_dims[4] = (hsize_t)chunk_edge_size;
-
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%s:%d: small chunk dims[] = %d %d %d %d %d\n", fcnName, mpi_rank,
- (int)small_chunk_dims[0], (int)small_chunk_dims[1], (int)small_chunk_dims[2],
- (int)small_chunk_dims[3], (int)small_chunk_dims[4]);
- }
-#endif
-
- small_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((small_ds_dcpl_id >= 0), "H5Pcreate() small_ds_dcpl_id succeeded");
-
- ret = H5Pset_layout(small_ds_dcpl_id, H5D_CHUNKED);
- VRFY((ret != FAIL), "H5Pset_layout() small_ds_dcpl_id succeeded");
-
- ret = H5Pset_chunk(small_ds_dcpl_id, 5, small_chunk_dims);
- VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded");
-
- large_chunk_dims[0] = (hsize_t)(1);
- large_chunk_dims[1] = large_chunk_dims[2] = large_chunk_dims[3] = large_chunk_dims[4] =
- (hsize_t)chunk_edge_size;
-
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%s:%d: large chunk dims[] = %d %d %d %d %d\n", fcnName, mpi_rank,
- (int)large_chunk_dims[0], (int)large_chunk_dims[1], (int)large_chunk_dims[2],
- (int)large_chunk_dims[3], (int)large_chunk_dims[4]);
- }
-#endif
-
- large_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((large_ds_dcpl_id >= 0), "H5Pcreate() large_ds_dcpl_id succeeded");
-
- ret = H5Pset_layout(large_ds_dcpl_id, H5D_CHUNKED);
- VRFY((ret != FAIL), "H5Pset_layout() large_ds_dcpl_id succeeded");
-
- ret = H5Pset_chunk(large_ds_dcpl_id, 5, large_chunk_dims);
- VRFY((ret != FAIL), "H5Pset_chunk() large_ds_dcpl_id succeeded");
- }
-
- /* create the small dataset */
- small_dataset = H5Dcreate2(fid, "small_dataset", dset_type, file_small_ds_sid, H5P_DEFAULT,
- small_ds_dcpl_id, H5P_DEFAULT);
- VRFY((small_dataset >= 0), "H5Dcreate2() small_dataset succeeded");
-
- /* create the large dataset */
- large_dataset = H5Dcreate2(fid, "large_dataset", dset_type, file_large_ds_sid, H5P_DEFAULT,
- large_ds_dcpl_id, H5P_DEFAULT);
- VRFY((large_dataset >= 0), "H5Dcreate2() large_dataset succeeded");
-
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%s:%d: small/large ds id = %d / %d.\n", fcnName, mpi_rank, (int)small_dataset,
- (int)large_dataset);
- }
-#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
-
- /* setup xfer property list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
-
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
- if (!use_collective_io) {
-
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio_collective_opt() succeeded");
- }
-
- /* setup selection to write initial data to the small data sets */
- start[0] = (hsize_t)(mpi_rank + 1);
- start[1] = start[2] = start[3] = start[4] = 0;
-
- stride[0] = (hsize_t)(2 * (mpi_size + 1));
- stride[1] = stride[2] = 2;
- stride[3] = stride[4] = 2 * 10;
-
- count[0] = count[1] = count[2] = count[3] = count[4] = 1;
-
- block[0] = block[1] = block[2] = 1;
- block[3] = block[4] = 10;
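-
- /* With these settings each process selects a single 1 x 1 x 1 x 10 x 10
- * block (one 10 x 10 slice of the small data set) with origin
- * (mpi_rank + 1, 0, 0, 0, 0); since every count is 1, the stride values
- * have no effect on the selection.
- */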
-
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%s:%d: settings for small data set initialization.\n", fcnName, mpi_rank);
- fprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0], (int)start[1],
- (int)start[2], (int)start[3], (int)start[4]);
- fprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0],
- (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]);
- fprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0], (int)count[1],
- (int)count[2], (int)count[3], (int)count[4]);
- fprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0], (int)block[1],
- (int)block[2], (int)block[3], (int)block[4]);
- }
-#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
-
- /* setup selections for writing initial data to the small data set */
- ret = H5Sselect_hyperslab(mem_small_ds_sid, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
-
- ret = H5Sselect_hyperslab(file_small_ds_sid, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, set) succeeded");
-
- if (MAINPROCESS) { /* add an additional slice to the selections */
-
- start[0] = 0;
-
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%s:%d: added settings for main process.\n", fcnName, mpi_rank);
- fprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0],
- (int)start[1], (int)start[2], (int)start[3], (int)start[4]);
- fprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0],
- (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]);
- fprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0],
- (int)count[1], (int)count[2], (int)count[3], (int)count[4]);
- fprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0],
- (int)block[1], (int)block[2], (int)block[3], (int)block[4]);
- }
-#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
-
- ret = H5Sselect_hyperslab(mem_small_ds_sid, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) succeeded");
-
- ret = H5Sselect_hyperslab(file_small_ds_sid, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, or) succeeded");
- }
-
- check = H5Sselect_valid(mem_small_ds_sid);
- VRFY((check == true), "H5Sselect_valid(mem_small_ds_sid) returns true");
-
- check = H5Sselect_valid(file_small_ds_sid);
- VRFY((check == true), "H5Sselect_valid(file_small_ds_sid) returns true");
-
- /* write the initial value of the small data set to file */
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%s:%d: writing init value of small ds to file.\n", fcnName, mpi_rank);
- }
-#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
- ret = H5Dwrite(small_dataset, dset_type, mem_small_ds_sid, file_small_ds_sid, xfer_plist, small_ds_buf_0);
- VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded");
-
- /* sync with the other processes before reading data */
- mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes");
-
- /* read the small data set back to verify that it contains the
- * expected data. Note that each process reads in the entire
- * data set and verifies it.
- */
- ret = H5Dread(small_dataset, H5T_NATIVE_UINT32, full_mem_small_ds_sid, full_file_small_ds_sid, xfer_plist,
- small_ds_buf_1);
- VRFY((ret >= 0), "H5Dread() small_dataset initial read succeeded");
-
- /* sync with the other processes before checking data */
- mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc == MPI_SUCCESS), "Sync after small dataset read");
-
- /* verify that the correct data was written to the small data set,
- * and reset the buffer to zero in passing.
- */
- expected_value = 0;
- mis_match = false;
- ptr_1 = small_ds_buf_1;
-
- i = 0;
- for (i = 0; i < (int)small_ds_size; i++) {
-
- if (*ptr_1 != expected_value) {
-
- mis_match = true;
- }
-
- *ptr_1 = (uint32_t)0;
-
- ptr_1++;
- expected_value++;
- }
- VRFY((mis_match == false), "small ds init data good.");
-
- /* setup selections for writing initial data to the large data set */
- start[0] = (hsize_t)(mpi_rank + 1);
- start[1] = start[2] = start[3] = start[4] = (hsize_t)0;
-
- stride[0] = (hsize_t)(2 * (mpi_size + 1));
- stride[1] = stride[2] = stride[3] = stride[4] = (hsize_t)(2 * 10);
-
- count[0] = count[1] = count[2] = count[3] = count[4] = (hsize_t)1;
-
- block[0] = (hsize_t)1;
- block[1] = block[2] = block[3] = block[4] = (hsize_t)10;
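-
- /* With these settings each process selects a single 1 x 10 x 10 x 10 x 10
- * block (one 4-dimensional slice of the large data set) with origin
- * (mpi_rank + 1, 0, 0, 0, 0).
- */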
-
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%s:%d: settings for large data set initialization.\n", fcnName, mpi_rank);
- fprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0], (int)start[1],
- (int)start[2], (int)start[3], (int)start[4]);
- fprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0],
- (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]);
- fprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0], (int)count[1],
- (int)count[2], (int)count[3], (int)count[4]);
- fprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0], (int)block[1],
- (int)block[2], (int)block[3], (int)block[4]);
- }
-#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
-
- ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) succeeded");
-
- ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, set) succeeded");
-
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n", fcnName, mpi_rank,
- (int)H5Sget_select_npoints(mem_large_ds_sid));
- fprintf(stdout, "%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n", fcnName, mpi_rank,
- (int)H5Sget_select_npoints(file_large_ds_sid));
- }
-#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
-
- if (MAINPROCESS) { /* add an additional slice to the selections */
-
- start[0] = (hsize_t)0;
-
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%s:%d: added settings for main process.\n", fcnName, mpi_rank);
- fprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0],
- (int)start[1], (int)start[2], (int)start[3], (int)start[4]);
- fprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0],
- (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]);
- fprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0],
- (int)count[1], (int)count[2], (int)count[3], (int)count[4]);
- fprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0],
- (int)block[1], (int)block[2], (int)block[3], (int)block[4]);
- }
-#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
-
- ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) succeeded");
-
- ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, or) succeeded");
-
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n", fcnName, mpi_rank,
- (int)H5Sget_select_npoints(mem_large_ds_sid));
- fprintf(stdout, "%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n", fcnName, mpi_rank,
- (int)H5Sget_select_npoints(file_large_ds_sid));
- }
-#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
- }
-
- /* try clipping the selection back to the large dataspace proper */
- start[0] = start[1] = start[2] = start[3] = start[4] = (hsize_t)0;
-
- stride[0] = (hsize_t)(2 * (mpi_size + 1));
- stride[1] = stride[2] = stride[3] = stride[4] = (hsize_t)(2 * 10);
-
- count[0] = count[1] = count[2] = count[3] = count[4] = (hsize_t)1;
-
- block[0] = (hsize_t)(mpi_size + 1);
- block[1] = block[2] = block[3] = block[4] = (hsize_t)10;
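-
- /* The start/stride/count/block values above describe a hyperslab covering
- * the full (mpi_size + 1) x 10 x 10 x 10 x 10 extent, so the AND operations
- * below simply intersect the current selections with the dataspace extent.
- */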
-
- ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_AND, start, stride, count, block);
- VRFY((ret != FAIL), "H5Sselect_hyperslab(mem_large_ds_sid, and) succeeded");
-
- ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_AND, start, stride, count, block);
- VRFY((ret != FAIL), "H5Sselect_hyperslab(file_large_ds_sid, and) succeeded");
-
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
-
- rank = H5Sget_simple_extent_dims(mem_large_ds_sid, dims, max_dims);
- fprintf(stdout, "%s:%d: mem_large_ds_sid dims[%d] = %d %d %d %d %d\n", fcnName, mpi_rank, rank,
- (int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)dims[4]);
-
- rank = H5Sget_simple_extent_dims(file_large_ds_sid, dims, max_dims);
- fprintf(stdout, "%s:%d: file_large_ds_sid dims[%d] = %d %d %d %d %d\n", fcnName, mpi_rank, rank,
- (int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)dims[4]);
- }
-#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
-
- check = H5Sselect_valid(mem_large_ds_sid);
- VRFY((check == true), "H5Sselect_valid(mem_large_ds_sid) returns true");
-
- check = H5Sselect_valid(file_large_ds_sid);
- VRFY((check == true), "H5Sselect_valid(file_large_ds_sid) returns true");
-
- /* write the initial value of the large data set to file */
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%s:%d: writing init value of large ds to file.\n", fcnName, mpi_rank);
- fprintf(stdout, "%s:%d: large_dataset = %d.\n", fcnName, mpi_rank, (int)large_dataset);
- fprintf(stdout, "%s:%d: mem_large_ds_sid = %d, file_large_ds_sid = %d.\n", fcnName, mpi_rank,
- (int)mem_large_ds_sid, (int)file_large_ds_sid);
- }
-#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
-
- ret = H5Dwrite(large_dataset, dset_type, mem_large_ds_sid, file_large_ds_sid, xfer_plist, large_ds_buf_0);
-
- if (ret < 0)
- H5Eprint2(H5E_DEFAULT, stderr);
- VRFY((ret >= 0), "H5Dwrite() large_dataset initial write succeeded");
-
- /* sync with the other processes before checking data */
- mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc == MPI_SUCCESS), "Sync after large dataset writes");
-
- /* read the large data set back to verify that it contains the
- * expected data. Note that each process reads in the entire
- * data set.
- */
- ret = H5Dread(large_dataset, H5T_NATIVE_UINT32, full_mem_large_ds_sid, full_file_large_ds_sid, xfer_plist,
- large_ds_buf_1);
- VRFY((ret >= 0), "H5Dread() large_dataset initial read succeeded");
-
- /* verify that the correct data was written to the large data set.
- * in passing, reset the buffer to zeros
- */
- expected_value = 0;
- mis_match = false;
- ptr_1 = large_ds_buf_1;
-
- i = 0;
- for (i = 0; i < (int)large_ds_size; i++) {
-
- if (*ptr_1 != expected_value) {
-
- mis_match = true;
- }
-
- *ptr_1 = (uint32_t)0;
-
- ptr_1++;
- expected_value++;
- }
- VRFY((mis_match == false), "large ds init data good.");
-
- /***********************************/
- /***** INITIALIZATION COMPLETE *****/
- /***********************************/
-
- /* read a checkerboard selection of the process's slice of the
- * small on disk data set into the process's slice of the large
- * in memory data set, and verify the data read.
- */
-
- small_sel_start[0] = (hsize_t)(mpi_rank + 1);
- small_sel_start[1] = small_sel_start[2] = small_sel_start[3] = small_sel_start[4] = 0;
-
- lower_dim_size_comp_test__select_checker_board(mpi_rank, file_small_ds_sid,
- /* tgt_rank = */ 5, small_dims,
- /* checker_edge_size = */ 3,
- /* sel_rank */ 2, small_sel_start);
-
- expected_value =
- (uint32_t)((small_sel_start[0] * small_dims[1] * small_dims[2] * small_dims[3] * small_dims[4]) +
- (small_sel_start[1] * small_dims[2] * small_dims[3] * small_dims[4]) +
- (small_sel_start[2] * small_dims[3] * small_dims[4]) +
- (small_sel_start[3] * small_dims[4]) + (small_sel_start[4]));
-
- large_sel_start[0] = (hsize_t)(mpi_rank + 1);
- large_sel_start[1] = 5;
- large_sel_start[2] = large_sel_start[3] = large_sel_start[4] = 0;
-
- lower_dim_size_comp_test__select_checker_board(mpi_rank, mem_large_ds_sid,
- /* tgt_rank = */ 5, large_dims,
- /* checker_edge_size = */ 3,
- /* sel_rank = */ 2, large_sel_start);
-
- /* verify that H5Sselect_shape_same() reports the two
- * selections as having the same shape.
- */
- check = H5Sselect_shape_same(mem_large_ds_sid, file_small_ds_sid);
- VRFY((check == true), "H5Sselect_shape_same passed (1)");
-
- ret = H5Dread(small_dataset, H5T_NATIVE_UINT32, mem_large_ds_sid, file_small_ds_sid, xfer_plist,
- large_ds_buf_1);
-
- VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
-
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, mpi_rank);
- }
-#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
-
- /* verify that expected data is retrieved */
-
- data_ok = true;
-
- start_index = (int)((large_sel_start[0] * large_dims[1] * large_dims[2] * large_dims[3] * large_dims[4]) +
- (large_sel_start[1] * large_dims[2] * large_dims[3] * large_dims[4]) +
- (large_sel_start[2] * large_dims[3] * large_dims[4]) +
- (large_sel_start[3] * large_dims[4]) + (large_sel_start[4]));
-
- stop_index = start_index + (int)small_ds_slice_size;
-
- assert(0 <= start_index);
- assert(start_index < stop_index);
- assert(stop_index <= (int)large_ds_size);
-
- ptr_1 = large_ds_buf_1;
-
- for (i = 0; i < start_index; i++) {
-
- if (*ptr_1 != (uint32_t)0) {
-
- data_ok = false;
- *ptr_1 = (uint32_t)0;
- }
-
- ptr_1++;
- }
-
- VRFY((data_ok == true), "slice read from small ds data good(1).");
-
- data_ok = lower_dim_size_comp_test__verify_data(ptr_1,
-#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- mpi_rank,
-#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
- /* rank */ 2,
- /* edge_size */ 10,
- /* checker_edge_size */ 3, expected_value,
- /* buf_starts_in_checker */ true);
-
- VRFY((data_ok == true), "slice read from small ds data good(2).");
-
- data_ok = true;
-
- ptr_1 += small_ds_slice_size;
-
- for (i = stop_index; i < (int)large_ds_size; i++) {
-
- if (*ptr_1 != (uint32_t)0) {
-
- data_ok = false;
- *ptr_1 = (uint32_t)0;
- }
-
- ptr_1++;
- }
-
- VRFY((data_ok == true), "slice read from small ds data good(3).");
-
- /* read a checkerboard selection of a slice of the process's slice of
- * the large on disk data set into the process's slice of the small
- * in memory data set, and verify the data read.
- */
-
- small_sel_start[0] = (hsize_t)(mpi_rank + 1);
- small_sel_start[1] = small_sel_start[2] = small_sel_start[3] = small_sel_start[4] = 0;
-
- lower_dim_size_comp_test__select_checker_board(mpi_rank, mem_small_ds_sid,
- /* tgt_rank = */ 5, small_dims,
- /* checker_edge_size = */ 3,
- /* sel_rank */ 2, small_sel_start);
-
- large_sel_start[0] = (hsize_t)(mpi_rank + 1);
- large_sel_start[1] = 5;
- large_sel_start[2] = large_sel_start[3] = large_sel_start[4] = 0;
-
- lower_dim_size_comp_test__select_checker_board(mpi_rank, file_large_ds_sid,
- /* tgt_rank = */ 5, large_dims,
- /* checker_edge_size = */ 3,
- /* sel_rank = */ 2, large_sel_start);
-
- /* verify that H5Sselect_shape_same() reports the two
- * selections as having the same shape.
- */
- check = H5Sselect_shape_same(mem_small_ds_sid, file_large_ds_sid);
- VRFY((check == true), "H5Sselect_shape_same passed (2)");
-
- ret = H5Dread(large_dataset, H5T_NATIVE_UINT32, mem_small_ds_sid, file_large_ds_sid, xfer_plist,
- small_ds_buf_1);
-
- VRFY((ret >= 0), "H5Dread() slice from large ds succeeded.");
-
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, mpi_rank);
- }
-#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
-
- /* verify that expected data is retrieved */
-
- data_ok = true;
-
- expected_value =
- (uint32_t)((large_sel_start[0] * large_dims[1] * large_dims[2] * large_dims[3] * large_dims[4]) +
- (large_sel_start[1] * large_dims[2] * large_dims[3] * large_dims[4]) +
- (large_sel_start[2] * large_dims[3] * large_dims[4]) +
- (large_sel_start[3] * large_dims[4]) + (large_sel_start[4]));
-
- start_index = (int)(mpi_rank + 1) * (int)small_ds_slice_size;
-
- stop_index = start_index + (int)small_ds_slice_size;
-
- assert(0 <= start_index);
- assert(start_index < stop_index);
- assert(stop_index <= (int)small_ds_size);
-
- ptr_1 = small_ds_buf_1;
-
- for (i = 0; i < start_index; i++) {
-
- if (*ptr_1 != (uint32_t)0) {
-
- data_ok = false;
- *ptr_1 = (uint32_t)0;
- }
-
- ptr_1++;
- }
-
- VRFY((data_ok == true), "slice read from large ds data good(1).");
-
- data_ok = lower_dim_size_comp_test__verify_data(ptr_1,
-#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- mpi_rank,
-#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
- /* rank */ 2,
- /* edge_size */ 10,
- /* checker_edge_size */ 3, expected_value,
- /* buf_starts_in_checker */ true);
-
- VRFY((data_ok == true), "slice read from large ds data good(2).");
-
- data_ok = true;
-
- ptr_1 += small_ds_slice_size;
-
- for (i = stop_index; i < (int)small_ds_size; i++) {
-
- if (*ptr_1 != (uint32_t)0) {
-
-#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
- fprintf(stdout, "%s:%d: unexpected value at index %d: %d.\n", fcnName, mpi_rank, (int)i,
- (int)(*ptr_1));
- }
-#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
-
- data_ok = false;
- *ptr_1 = (uint32_t)0;
- }
-
- ptr_1++;
- }
-
- VRFY((data_ok == true), "slice read from large ds data good(3).");
-
- /* Close dataspaces */
- ret = H5Sclose(full_mem_small_ds_sid);
- VRFY((ret != FAIL), "H5Sclose(full_mem_small_ds_sid) succeeded");
-
- ret = H5Sclose(full_file_small_ds_sid);
- VRFY((ret != FAIL), "H5Sclose(full_file_small_ds_sid) succeeded");
-
- ret = H5Sclose(mem_small_ds_sid);
- VRFY((ret != FAIL), "H5Sclose(mem_small_ds_sid) succeeded");
-
- ret = H5Sclose(file_small_ds_sid);
- VRFY((ret != FAIL), "H5Sclose(file_small_ds_sid) succeeded");
-
- ret = H5Sclose(full_mem_large_ds_sid);
- VRFY((ret != FAIL), "H5Sclose(full_mem_large_ds_sid) succeeded");
-
- ret = H5Sclose(full_file_large_ds_sid);
- VRFY((ret != FAIL), "H5Sclose(full_file_large_ds_sid) succeeded");
-
- ret = H5Sclose(mem_large_ds_sid);
- VRFY((ret != FAIL), "H5Sclose(mem_large_ds_sid) succeeded");
-
- ret = H5Sclose(file_large_ds_sid);
- VRFY((ret != FAIL), "H5Sclose(file_large_ds_sid) succeeded");
-
- /* Close Datasets */
- ret = H5Dclose(small_dataset);
- VRFY((ret != FAIL), "H5Dclose(small_dataset) succeeded");
-
- ret = H5Dclose(large_dataset);
- VRFY((ret != FAIL), "H5Dclose(large_dataset) succeeded");
-
- /* close the file collectively */
- MESG("about to close file.");
- ret = H5Fclose(fid);
- VRFY((ret != FAIL), "file close succeeded");
-
- /* Free memory buffers */
- if (small_ds_buf_0 != NULL)
- free(small_ds_buf_0);
- if (small_ds_buf_1 != NULL)
- free(small_ds_buf_1);
-
- if (large_ds_buf_0 != NULL)
- free(large_ds_buf_0);
- if (large_ds_buf_1 != NULL)
- free(large_ds_buf_1);
-
- return;
-
-} /* lower_dim_size_comp_test__run_test() */
-
-/*-------------------------------------------------------------------------
- * Function: lower_dim_size_comp_test()
- *
- * Purpose: Test to see if an error in the computation of the size
- * of the lower dimensions in H5S_obtain_datatype() has
- * been corrected.
- *
- * Return: void
- *-------------------------------------------------------------------------
- */
-
-void
-lower_dim_size_comp_test(void)
-{
- /* const char *fcnName = "lower_dim_size_comp_test()"; */
- int chunk_edge_size = 0;
- int use_collective_io;
- int mpi_rank;
-
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file or dataset aren't supported with this connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned));
- for (use_collective_io = 0; use_collective_io <= 1; use_collective_io++) {
- chunk_edge_size = 0;
- lower_dim_size_comp_test__run_test(chunk_edge_size, (bool)use_collective_io, H5T_NATIVE_UINT);
-
- chunk_edge_size = 5;
- lower_dim_size_comp_test__run_test(chunk_edge_size, (bool)use_collective_io, H5T_NATIVE_UINT);
- } /* end for */
-
- return;
-} /* lower_dim_size_comp_test() */
-
-/*-------------------------------------------------------------------------
- * Function: link_chunk_collective_io_test()
- *
- * Purpose: Test to verify that an error in MPI type management in
- * H5D_link_chunk_collective_io() has been corrected.
- * In this bug, we used to free MPI types regardless of
- * whether they were basic or derived.
- *
- * This test is based on a bug report kindly provided by
- * Rob Latham of the MPICH team and ANL.
- *
- * The basic thrust of the test is to cause a process
- * to participate in a collective I/O in which it:
- *
- * 1) Reads or writes exactly one chunk,
- *
- * 2) Has no in-memory buffer for any other chunk.
- *
- * The test differs from Rob Latham's bug report in
- * that it runs with an arbitrary number of processes,
- * and uses a 1-dimensional dataset.
- *
- * Return: void
- *-------------------------------------------------------------------------
- */
-
-#define LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE 16
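-
-/* In the test below the dataset is 1-dimensional, with one
- * LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE-element chunk per process, so each
- * process reads and writes exactly one chunk: the one starting at element
- * mpi_rank * LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE.
- */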
-
-void
-link_chunk_collective_io_test(void)
-{
- /* const char *fcnName = "link_chunk_collective_io_test()"; */
- const char *filename;
- bool mis_match = false;
- int i;
- int mrc;
- int mpi_rank;
- int mpi_size;
- MPI_Comm mpi_comm = MPI_COMM_WORLD;
- MPI_Info mpi_info = MPI_INFO_NULL;
- hsize_t count[1] = {1};
- hsize_t stride[1] = {2 * LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE};
- hsize_t block[1] = {LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE};
- hsize_t start[1];
- hsize_t dims[1];
- hsize_t chunk_dims[1] = {LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE};
- herr_t ret; /* Generic return value */
- hid_t file_id;
- hid_t acc_tpl;
- hid_t dset_id;
- hid_t file_ds_sid;
- hid_t write_mem_ds_sid;
- hid_t read_mem_ds_sid;
- hid_t ds_dcpl_id;
- hid_t xfer_plist;
- double diff;
- double expected_value;
- double local_data_written[LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE];
- double local_data_read[LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE];
-
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* Make sure the connector supports the API functions being tested */
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
- if (MAINPROCESS) {
- puts("SKIPPED");
- printf(" API functions for basic file or dataset aren't supported with this connector\n");
- fflush(stdout);
- }
-
- return;
- }
-
- assert(mpi_size > 0);
-
- /* get the file name */
- filename = (const char *)PARATESTFILE /* GetTestParameters() */;
- assert(filename != NULL);
-
- /* setup file access template */
- acc_tpl = create_faccess_plist(mpi_comm, mpi_info, facc_type);
- VRFY((acc_tpl >= 0), "create_faccess_plist() succeeded");
-
- /* create the file collectively */
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
- VRFY((file_id >= 0), "H5Fcreate succeeded");
-
- MESG("File opened.");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded");
-
- /* setup dims */
- dims[0] = ((hsize_t)mpi_size) * ((hsize_t)(LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE));
-
- /* setup mem and file dataspaces */
- write_mem_ds_sid = H5Screate_simple(1, chunk_dims, NULL);
- VRFY((write_mem_ds_sid != 0), "H5Screate_simple() write_mem_ds_sid succeeded");
-
- read_mem_ds_sid = H5Screate_simple(1, chunk_dims, NULL);
- VRFY((read_mem_ds_sid != 0), "H5Screate_simple() read_mem_ds_sid succeeded");
-
- file_ds_sid = H5Screate_simple(1, dims, NULL);
- VRFY((file_ds_sid != 0), "H5Screate_simple() file_ds_sid succeeded");
-
- /* setup data set creation property list */
- ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((ds_dcpl_id != FAIL), "H5Pcreate() ds_dcpl_id succeeded");
-
- ret = H5Pset_layout(ds_dcpl_id, H5D_CHUNKED);
- VRFY((ret != FAIL), "H5Pset_layout() ds_dcpl_id succeeded");
-
- ret = H5Pset_chunk(ds_dcpl_id, 1, chunk_dims);
- VRFY((ret != FAIL), "H5Pset_chunk() ds_dcpl_id succeeded");
-
- /* create the data set */
- dset_id =
- H5Dcreate2(file_id, "dataset", H5T_NATIVE_DOUBLE, file_ds_sid, H5P_DEFAULT, ds_dcpl_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "H5Dcreate2() dataset succeeded");
-
- /* close the dataset creation property list */
- ret = H5Pclose(ds_dcpl_id);
- VRFY((ret >= 0), "H5Pclose(ds_dcpl_id) succeeded");
-
- /* setup local data */
- expected_value = (double)(LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE) * (double)(mpi_rank);
- for (i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++) {
-
- local_data_written[i] = expected_value;
- local_data_read[i] = 0.0;
- expected_value += 1.0;
- }
-
- /* select the file and mem spaces */
- start[0] = (hsize_t)(mpi_rank * LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE);
- ret = H5Sselect_hyperslab(file_ds_sid, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_ds_sid, set) succeeded");
-
- ret = H5Sselect_all(write_mem_ds_sid);
- VRFY((ret != FAIL), "H5Sselect_all(write_mem_ds_sid) succeeded");
-
- /* Note that we use NO SELECTION on the read memory dataspace */
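- /* (A newly created simple dataspace carries an "all" selection by default,
- * so the read below still transfers the full chunk-sized memory buffer even
- * though no explicit selection call is made on read_mem_ds_sid.)
- */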
-
- /* setup xfer property list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
-
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
- /* write the data set */
- ret = H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, write_mem_ds_sid, file_ds_sid, xfer_plist, local_data_written);
-
- VRFY((ret >= 0), "H5Dwrite() dataset initial write succeeded");
-
- /* sync with the other processes before checking data */
- mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc == MPI_SUCCESS), "Sync after dataset write");
-
- /* read this process's slice of the dataset back in */
- ret = H5Dread(dset_id, H5T_NATIVE_DOUBLE, read_mem_ds_sid, file_ds_sid, xfer_plist, local_data_read);
- VRFY((ret >= 0), "H5Dread() dataset read succeeded");
-
- /* close the xfer property list */
- ret = H5Pclose(xfer_plist);
- VRFY((ret >= 0), "H5Pclose(xfer_plist) succeeded");
-
- /* verify the data */
- mis_match = false;
- for (i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++) {
-
- diff = local_data_written[i] - local_data_read[i];
- diff = fabs(diff);
-
- if (diff >= 0.001) {
-
- mis_match = true;
- }
- }
- VRFY((mis_match == false), "dataset data good.");
-
- /* Close dataspaces */
- ret = H5Sclose(write_mem_ds_sid);
- VRFY((ret != FAIL), "H5Sclose(write_mem_ds_sid) succeeded");
-
- ret = H5Sclose(read_mem_ds_sid);
- VRFY((ret != FAIL), "H5Sclose(read_mem_ds_sid) succeeded");
-
- ret = H5Sclose(file_ds_sid);
- VRFY((ret != FAIL), "H5Sclose(file_ds_sid) succeeded");
-
- /* Close Dataset */
- ret = H5Dclose(dset_id);
- VRFY((ret != FAIL), "H5Dclose(dset_id) succeeded");
-
- /* close the file collectively */
- ret = H5Fclose(file_id);
- VRFY((ret != FAIL), "file close succeeded");
-
- return;
-
-} /* link_chunk_collective_io_test() */
diff --git a/testpar/API/testphdf5.c b/testpar/API/testphdf5.c
deleted file mode 100644
index 1d42c61..0000000
--- a/testpar/API/testphdf5.c
+++ /dev/null
@@ -1,1006 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
- * Main driver of the Parallel HDF5 tests
- */
-
-#include "hdf5.h"
-#include "testphdf5.h"
-
-#ifndef PATH_MAX
-#define PATH_MAX 512
-#endif /* !PATH_MAX */
-
-/* global variables */
-int dim0;
-int dim1;
-int chunkdim0;
-int chunkdim1;
-int nerrors = 0; /* errors count */
-int ndatasets = 300; /* number of datasets to create*/
-int ngroups = 512; /* number of groups to create in root
- * group. */
-int facc_type = FACC_MPIO; /*Test file access type */
-int dxfer_coll_type = DXFER_COLLECTIVE_IO;
-
-H5E_auto2_t old_func; /* previous error handler */
-void *old_client_data; /* previous error handler arg.*/
-
-/* other option flags */
-
-/* FILENAME and filenames must have the same number of names.
- * Use PARATESTFILE in general and use a separated filename only if the file
- * created in one test is accessed by a different test.
- * filenames[0] is reserved as the file name for PARATESTFILE.
- */
-#define NFILENAME 2
-/* #define PARATESTFILE filenames[0] */
-const char *FILENAME[NFILENAME] = {"ParaTest.h5", NULL};
-char filenames[NFILENAME][PATH_MAX];
-hid_t fapl; /* file access property list */
-
-#ifdef USE_PAUSE
-/* pause the process for a moment to allow debugger to attach if desired. */
-/* Will pause more if greenlight file is not present but will eventually */
-/* continue. */
-#include <sys/types.h>
-#include <sys/stat.h>
-
-void
-pause_proc(void)
-{
-
- int pid;
- h5_stat_t statbuf;
- char greenlight[] = "go";
- int maxloop = 10;
- int loops = 0;
- int time_int = 10;
-
- /* mpi variables */
- int mpi_size, mpi_rank;
- int mpi_namelen;
- char mpi_name[MPI_MAX_PROCESSOR_NAME];
-
- pid = getpid();
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- MPI_Get_processor_name(mpi_name, &mpi_namelen);
-
- if (MAINPROCESS)
- while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop) {
- if (!loops++) {
- printf("Proc %d (%*s, %d): to debug, attach %d\n", mpi_rank, mpi_namelen, mpi_name, pid, pid);
- }
- printf("waiting(%ds) for file %s ...\n", time_int, greenlight);
- fflush(stdout);
- HDsleep(time_int);
- }
- MPI_Barrier(MPI_COMM_WORLD);
-}
-
-/* Use the Profile feature of MPI to call the pause_proc() */
-int
-MPI_Init(int *argc, char ***argv)
-{
- int ret_code;
- ret_code = PMPI_Init(argc, argv);
- pause_proc();
- return (ret_code);
-}
-#endif /* USE_PAUSE */
-
-/*
- * Show command usage
- */
-static void
-usage(void)
-{
- printf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] "
- "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n");
- printf("\t-m<n_datasets>"
- "\tset number of datasets for the multiple dataset test\n");
- printf("\t-n<n_groups>"
- "\tset number of groups for the multiple group test\n");
-#if 0
- printf("\t-f <prefix>\tfilename prefix\n");
-#endif
- printf("\t-2\t\tuse Split-file together with MPIO\n");
- printf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n", ROW_FACTOR,
- COL_FACTOR);
- printf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n");
- printf("\n");
-}
-
-/*
- * parse the command line options
- */
-static int
-parse_options(int argc, char **argv)
-{
- int mpi_size, mpi_rank; /* mpi variables */
-
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- /* setup default chunk-size. Make sure sizes are > 0 */
-
- chunkdim0 = (dim0 + 9) / 10;
- chunkdim1 = (dim1 + 9) / 10;
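- /* (dim + 9) / 10 rounds up, i.e. the defaults are ceil(dim0 / 10) and
- * ceil(dim1 / 10), which are at least 1 whenever the dimensions are
- * positive.
- */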
-
- while (--argc) {
- if (**(++argv) != '-') {
- break;
- }
- else {
- switch (*(*argv + 1)) {
- case 'm':
- ndatasets = atoi((*argv + 1) + 1);
- if (ndatasets < 0) {
- nerrors++;
- return (1);
- }
- break;
- case 'n':
- ngroups = atoi((*argv + 1) + 1);
- if (ngroups < 0) {
- nerrors++;
- return (1);
- }
- break;
-#if 0
- case 'f': if (--argc < 1) {
- nerrors++;
- return(1);
- }
- if (**(++argv) == '-') {
- nerrors++;
- return(1);
- }
- paraprefix = *argv;
- break;
-#endif
- case 'i': /* Collective MPI-IO access with independent IO */
- dxfer_coll_type = DXFER_INDEPENDENT_IO;
- break;
- case '2': /* Use the split-file driver with MPIO access */
- /* Can use $HDF5_METAPREFIX to define the */
- /* meta-file-prefix. */
- facc_type = FACC_MPIO | FACC_SPLIT;
- break;
- case 'd': /* dimension sizes */
- if (--argc < 2) {
- nerrors++;
- return (1);
- }
- dim0 = atoi(*(++argv)) * mpi_size;
- argc--;
- dim1 = atoi(*(++argv)) * mpi_size;
- /* set default chunkdim sizes too */
- chunkdim0 = (dim0 + 9) / 10;
- chunkdim1 = (dim1 + 9) / 10;
- break;
- case 'c': /* chunk dimensions */
- if (--argc < 2) {
- nerrors++;
- return (1);
- }
- chunkdim0 = atoi(*(++argv));
- argc--;
- chunkdim1 = atoi(*(++argv));
- break;
- case 'h': /* print help message--return with nerrors set */
- return (1);
- default:
- printf("Illegal option(%s)\n", *argv);
- nerrors++;
- return (1);
- }
- }
- } /*while*/
-
- /* check validity of dimension and chunk sizes */
- if (dim0 <= 0 || dim1 <= 0) {
- printf("Illegal dim sizes (%d, %d)\n", dim0, dim1);
- nerrors++;
- return (1);
- }
- if (chunkdim0 <= 0 || chunkdim1 <= 0) {
- printf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1);
- nerrors++;
- return (1);
- }
-
- /* Make sure datasets can be divided into equal portions by the processes */
- if ((dim0 % mpi_size) || (dim1 % mpi_size)) {
- if (MAINPROCESS)
- printf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", dim0, dim1, mpi_size);
- nerrors++;
- return (1);
- }
-
- /* compose the test filenames */
- {
- int i, n;
-
- n = sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; /* exclude the NULL */
-
- for (i = 0; i < n; i++)
- strncpy(filenames[i], FILENAME[i], PATH_MAX);
-#if 0 /* no support for VFDs right now */
- if (h5_fixname(FILENAME[i], fapl, filenames[i], PATH_MAX) == NULL) {
- printf("h5_fixname failed\n");
- nerrors++;
- return (1);
- }
-#endif
- if (MAINPROCESS) {
- printf("Test filenames are:\n");
- for (i = 0; i < n; i++)
- printf(" %s\n", filenames[i]);
- }
- }
-
- return (0);
-}
-
-/*
- * Create the appropriate File access property list
- */
-hid_t
-create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
-{
- hid_t ret_pl = -1;
- herr_t ret; /* generic return value */
- int mpi_rank; /* mpi variables */
-
- /* need the rank for error checking macros */
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- ret_pl = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((ret_pl >= 0), "H5P_FILE_ACCESS");
-
- if (l_facc_type == FACC_DEFAULT)
- return (ret_pl);
-
- if (l_facc_type == FACC_MPIO) {
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(ret_pl, comm, info);
- VRFY((ret >= 0), "");
- ret = H5Pset_all_coll_metadata_ops(ret_pl, true);
- VRFY((ret >= 0), "");
- ret = H5Pset_coll_metadata_write(ret_pl, true);
- VRFY((ret >= 0), "");
- return (ret_pl);
- }
-
- if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) {
- hid_t mpio_pl;
-
- mpio_pl = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((mpio_pl >= 0), "");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
- VRFY((ret >= 0), "");
-
- /* setup file access template */
- ret_pl = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((ret_pl >= 0), "");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
- VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
- H5Pclose(mpio_pl);
- return (ret_pl);
- }
-
- /* unknown file access types */
- return (ret_pl);
-}
-
-int
-main(int argc, char **argv)
-{
- int mpi_size, mpi_rank; /* mpi variables */
- herr_t ret;
-
-#if 0
- H5Ptest_param_t ndsets_params, ngroups_params;
- H5Ptest_param_t collngroups_params;
- H5Ptest_param_t io_mode_confusion_params;
- H5Ptest_param_t rr_obj_flush_confusion_params;
-#endif
-
-#ifndef H5_HAVE_WIN32_API
- /* Un-buffer the stdout and stderr */
- HDsetbuf(stderr, NULL);
- HDsetbuf(stdout, NULL);
-#endif
-
- MPI_Init(&argc, &argv);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- dim0 = ROW_FACTOR * mpi_size;
- dim1 = COL_FACTOR * mpi_size;
-
- if (MAINPROCESS) {
- printf("===================================\n");
- printf("PHDF5 TESTS START\n");
- printf("===================================\n");
- }
-
- /* Attempt to turn off atexit post processing so that in case errors
- * happen during the test and the process is aborted, it will not get
- * hung in the atexit post processing in which it may try to make MPI
- * calls. By then, MPI calls may not work.
- */
- if (H5dont_atexit() < 0) {
- printf("Failed to turn off atexit processing. Continue.\n");
- }
- H5open();
- /* h5_show_hostname(); */
-
-#if 0
- memset(filenames, 0, sizeof(filenames));
- for (int i = 0; i < NFILENAME; i++) {
- if (NULL == (filenames[i] = malloc(PATH_MAX))) {
- printf("couldn't allocate filename array\n");
- MPI_Abort(MPI_COMM_WORLD, -1);
- }
- }
-#endif
-
- /* Set up file access property list with parallel I/O access */
- fapl = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((fapl >= 0), "H5Pcreate succeeded");
-
- vol_cap_flags_g = H5VL_CAP_FLAG_NONE;
-
- /* Get the capability flag of the VOL connector being used */
- ret = H5Pget_vol_cap_flags(fapl, &vol_cap_flags_g);
- VRFY((ret >= 0), "H5Pget_vol_cap_flags succeeded");
-
- /* Initialize testing framework */
- /* TestInit(argv[0], usage, parse_options); */
-
- if (parse_options(argc, argv)) {
- usage();
- return 1;
- }
-
- /* Tests are generally arranged from least to most complex... */
-#if 0
- AddTest("mpiodup", test_fapl_mpio_dup, NULL,
- "fapl_mpio duplicate", NULL);
-#endif
-
- if (MAINPROCESS) {
- printf("fapl_mpio duplicate\n");
- fflush(stdout);
- }
- test_fapl_mpio_dup();
-
-#if 0
- AddTest("split", test_split_comm_access, NULL,
- "dataset using split communicators", PARATESTFILE);
- AddTest("props", test_file_properties, NULL,
- "Coll Metadata file property settings", PARATESTFILE);
-#endif
-
- if (MAINPROCESS) {
- printf("dataset using split communicators\n");
- fflush(stdout);
- }
- test_split_comm_access();
-
- if (MAINPROCESS) {
- printf("Coll Metadata file property settings\n");
- fflush(stdout);
- }
- test_file_properties();
-
-#if 0
- AddTest("idsetw", dataset_writeInd, NULL,
- "dataset independent write", PARATESTFILE);
- AddTest("idsetr", dataset_readInd, NULL,
- "dataset independent read", PARATESTFILE);
-#endif
-
- if (MAINPROCESS) {
- printf("dataset independent write\n");
- fflush(stdout);
- }
- dataset_writeInd();
- if (MAINPROCESS) {
- printf("dataset independent read\n");
- fflush(stdout);
- }
- dataset_readInd();
-
-#if 0
- AddTest("cdsetw", dataset_writeAll, NULL,
- "dataset collective write", PARATESTFILE);
- AddTest("cdsetr", dataset_readAll, NULL,
- "dataset collective read", PARATESTFILE);
-#endif
-
- if (MAINPROCESS) {
- printf("dataset collective write\n");
- fflush(stdout);
- }
- dataset_writeAll();
- if (MAINPROCESS) {
- printf("dataset collective read\n");
- fflush(stdout);
- }
- dataset_readAll();
-
-#if 0
- AddTest("eidsetw", extend_writeInd, NULL,
- "extendible dataset independent write", PARATESTFILE);
- AddTest("eidsetr", extend_readInd, NULL,
- "extendible dataset independent read", PARATESTFILE);
- AddTest("ecdsetw", extend_writeAll, NULL,
- "extendible dataset collective write", PARATESTFILE);
- AddTest("ecdsetr", extend_readAll, NULL,
- "extendible dataset collective read", PARATESTFILE);
- AddTest("eidsetw2", extend_writeInd2, NULL,
- "extendible dataset independent write #2", PARATESTFILE);
- AddTest("selnone", none_selection_chunk, NULL,
- "chunked dataset with none-selection", PARATESTFILE);
- AddTest("calloc", test_chunk_alloc, NULL,
- "parallel extend Chunked allocation on serial file", PARATESTFILE);
- AddTest("fltread", test_filter_read, NULL,
- "parallel read of dataset written serially with filters", PARATESTFILE);
-#endif
-
- if (MAINPROCESS) {
- printf("extendible dataset independent write\n");
- fflush(stdout);
- }
- extend_writeInd();
- if (MAINPROCESS) {
- printf("extendible dataset independent read\n");
- fflush(stdout);
- }
- extend_readInd();
- if (MAINPROCESS) {
- printf("extendible dataset collective write\n");
- fflush(stdout);
- }
- extend_writeAll();
- if (MAINPROCESS) {
- printf("extendible dataset collective read\n");
- fflush(stdout);
- }
- extend_readAll();
- if (MAINPROCESS) {
- printf("extendible dataset independent write #2\n");
- fflush(stdout);
- }
- extend_writeInd2();
- if (MAINPROCESS) {
- printf("chunked dataset with none-selection\n");
- fflush(stdout);
- }
- none_selection_chunk();
- if (MAINPROCESS) {
- printf("parallel extend Chunked allocation on serial file\n");
- fflush(stdout);
- }
- test_chunk_alloc();
- if (MAINPROCESS) {
- printf("parallel read of dataset written serially with filters\n");
- fflush(stdout);
- }
- test_filter_read();
-
-#ifdef H5_HAVE_FILTER_DEFLATE
-#if 0
- AddTest("cmpdsetr", compress_readAll, NULL,
- "compressed dataset collective read", PARATESTFILE);
-#endif
-
- if (MAINPROCESS) {
- printf("compressed dataset collective read\n");
- fflush(stdout);
- }
- compress_readAll();
-#endif /* H5_HAVE_FILTER_DEFLATE */
-
-#if 0
- AddTest("zerodsetr", zero_dim_dset, NULL,
- "zero dim dset", PARATESTFILE);
-#endif
-
- if (MAINPROCESS) {
- printf("zero dim dset\n");
- fflush(stdout);
- }
- zero_dim_dset();
-
-#if 0
- ndsets_params.name = PARATESTFILE;
- ndsets_params.count = ndatasets;
- AddTest("ndsetw", multiple_dset_write, NULL,
- "multiple datasets write", &ndsets_params);
-#endif
-
- if (MAINPROCESS) {
- printf("multiple datasets write\n");
- fflush(stdout);
- }
- multiple_dset_write();
-
-#if 0
- ngroups_params.name = PARATESTFILE;
- ngroups_params.count = ngroups;
- AddTest("ngrpw", multiple_group_write, NULL,
- "multiple groups write", &ngroups_params);
- AddTest("ngrpr", multiple_group_read, NULL,
- "multiple groups read", &ngroups_params);
-#endif
-
- if (MAINPROCESS) {
- printf("multiple groups write\n");
- fflush(stdout);
- }
- multiple_group_write();
- if (MAINPROCESS) {
- printf("multiple groups read\n");
- fflush(stdout);
- }
- multiple_group_read();
-
-#if 0
- AddTest("compact", compact_dataset, NULL,
- "compact dataset test", PARATESTFILE);
-#endif
-
- if (MAINPROCESS) {
- printf("compact dataset test\n");
- fflush(stdout);
- }
- compact_dataset();
-
-#if 0
- collngroups_params.name = PARATESTFILE;
- collngroups_params.count = ngroups;
- /* combined cngrpw and ingrpr tests because ingrpr reads file created by cngrpw. */
- AddTest("cngrpw-ingrpr", collective_group_write_independent_group_read, NULL,
- "collective grp/dset write - independent grp/dset read",
- &collngroups_params);
-#ifndef H5_HAVE_WIN32_API
- AddTest("bigdset", big_dataset, NULL,
- "big dataset test", PARATESTFILE);
-#else
- printf("big dataset test will be skipped on Windows (JIRA HDDFV-8064)\n");
-#endif
-#endif
-
- if (MAINPROCESS) {
- printf("collective grp/dset write - independent grp/dset read\n");
- fflush(stdout);
- }
- collective_group_write_independent_group_read();
- if (MAINPROCESS) {
- printf("big dataset test\n");
- fflush(stdout);
- }
- big_dataset();
-
-#if 0
- AddTest("fill", dataset_fillvalue, NULL,
- "dataset fill value", PARATESTFILE);
-#endif
-
- if (MAINPROCESS) {
- printf("dataset fill value\n");
- fflush(stdout);
- }
- dataset_fillvalue();
-
-#if 0
- AddTest("cchunk1",
- coll_chunk1,NULL, "simple collective chunk io",PARATESTFILE);
- AddTest("cchunk2",
- coll_chunk2,NULL, "noncontiguous collective chunk io",PARATESTFILE);
- AddTest("cchunk3",
- coll_chunk3,NULL, "multi-chunk collective chunk io",PARATESTFILE);
- AddTest("cchunk4",
- coll_chunk4,NULL, "collective chunk io with partial non-selection ",PARATESTFILE);
-#endif
-
- if (MAINPROCESS) {
- printf("simple collective chunk io\n");
- fflush(stdout);
- }
- coll_chunk1();
- if (MAINPROCESS) {
- printf("noncontiguous collective chunk io\n");
- fflush(stdout);
- }
- coll_chunk2();
- if (MAINPROCESS) {
- printf("multi-chunk collective chunk io\n");
- fflush(stdout);
- }
- coll_chunk3();
- if (MAINPROCESS) {
- printf("collective chunk io with partial non-selection\n");
- fflush(stdout);
- }
- coll_chunk4();
-
- if ((mpi_size < 3) && MAINPROCESS) {
- printf("Collective chunk IO optimization APIs ");
- printf("needs at least 3 processes to participate\n");
- printf("Collective chunk IO API tests will be skipped \n");
- }
-
-#if 0
- AddTest((mpi_size <3)? "-cchunk5":"cchunk5" ,
- coll_chunk5,NULL,
- "linked chunk collective IO without optimization",PARATESTFILE);
- AddTest((mpi_size < 3)? "-cchunk6" : "cchunk6",
- coll_chunk6,NULL,
- "multi-chunk collective IO with direct request",PARATESTFILE);
- AddTest((mpi_size < 3)? "-cchunk7" : "cchunk7",
- coll_chunk7,NULL,
- "linked chunk collective IO with optimization",PARATESTFILE);
- AddTest((mpi_size < 3)? "-cchunk8" : "cchunk8",
- coll_chunk8,NULL,
- "linked chunk collective IO transferring to multi-chunk",PARATESTFILE);
- AddTest((mpi_size < 3)? "-cchunk9" : "cchunk9",
- coll_chunk9,NULL,
- "multiple chunk collective IO with optimization",PARATESTFILE);
- AddTest((mpi_size < 3)? "-cchunk10" : "cchunk10",
- coll_chunk10,NULL,
- "multiple chunk collective IO transferring to independent IO",PARATESTFILE);
-#endif
-
- if (mpi_size >= 3) {
- if (MAINPROCESS) {
- printf("linked chunk collective IO without optimization\n");
- fflush(stdout);
- }
- coll_chunk5();
- if (MAINPROCESS) {
- printf("multi-chunk collective IO with direct request\n");
- fflush(stdout);
- }
- coll_chunk6();
- if (MAINPROCESS) {
- printf("linked chunk collective IO with optimization\n");
- fflush(stdout);
- }
- coll_chunk7();
- if (MAINPROCESS) {
- printf("linked chunk collective IO transferring to multi-chunk\n");
- fflush(stdout);
- }
- coll_chunk8();
- if (MAINPROCESS) {
- printf("multiple chunk collective IO with optimization\n");
- fflush(stdout);
- }
- coll_chunk9();
- if (MAINPROCESS) {
- printf("multiple chunk collective IO transferring to independent IO\n");
- fflush(stdout);
- }
- coll_chunk10();
- }
-
-#if 0
- /* irregular collective IO tests*/
- AddTest("ccontw",
- coll_irregular_cont_write,NULL,
- "collective irregular contiguous write",PARATESTFILE);
- AddTest("ccontr",
- coll_irregular_cont_read,NULL,
- "collective irregular contiguous read",PARATESTFILE);
- AddTest("cschunkw",
- coll_irregular_simple_chunk_write,NULL,
- "collective irregular simple chunk write",PARATESTFILE);
- AddTest("cschunkr",
- coll_irregular_simple_chunk_read,NULL,
- "collective irregular simple chunk read",PARATESTFILE);
- AddTest("ccchunkw",
- coll_irregular_complex_chunk_write,NULL,
- "collective irregular complex chunk write",PARATESTFILE);
- AddTest("ccchunkr",
- coll_irregular_complex_chunk_read,NULL,
- "collective irregular complex chunk read",PARATESTFILE);
-#endif
-
- if (MAINPROCESS) {
- printf("collective irregular contiguous write\n");
- fflush(stdout);
- }
- coll_irregular_cont_write();
- if (MAINPROCESS) {
- printf("collective irregular contiguous read\n");
- fflush(stdout);
- }
- coll_irregular_cont_read();
- if (MAINPROCESS) {
- printf("collective irregular simple chunk write\n");
- fflush(stdout);
- }
- coll_irregular_simple_chunk_write();
- if (MAINPROCESS) {
- printf("collective irregular simple chunk read\n");
- fflush(stdout);
- }
- coll_irregular_simple_chunk_read();
- if (MAINPROCESS) {
- printf("collective irregular complex chunk write\n");
- fflush(stdout);
- }
- coll_irregular_complex_chunk_write();
- if (MAINPROCESS) {
- printf("collective irregular complex chunk read\n");
- fflush(stdout);
- }
- coll_irregular_complex_chunk_read();
-
-#if 0
- AddTest("null", null_dataset, NULL,
- "null dataset test", PARATESTFILE);
-#endif
-
- if (MAINPROCESS) {
- printf("null dataset test\n");
- fflush(stdout);
- }
- null_dataset();
-
-#if 0
- io_mode_confusion_params.name = PARATESTFILE;
- io_mode_confusion_params.count = 0; /* value not used */
-
- AddTest("I/Omodeconf", io_mode_confusion, NULL,
- "I/O mode confusion test",
- &io_mode_confusion_params);
-#endif
-
- if (MAINPROCESS) {
- printf("I/O mode confusion test\n");
- fflush(stdout);
- }
- io_mode_confusion();
-
- if ((mpi_size < 3) && MAINPROCESS) {
- printf("rr_obj_hdr_flush_confusion test needs at least 3 processes.\n");
- printf("rr_obj_hdr_flush_confusion test will be skipped \n");
- }
-
- if (mpi_size > 2) {
-#if 0
- rr_obj_flush_confusion_params.name = PARATESTFILE;
- rr_obj_flush_confusion_params.count = 0; /* value not used */
- AddTest("rrobjflushconf", rr_obj_hdr_flush_confusion, NULL,
- "round robin object header flush confusion test",
- &rr_obj_flush_confusion_params);
-#endif
-
- if (MAINPROCESS) {
- printf("round robin object header flush confusion test\n");
- fflush(stdout);
- }
- rr_obj_hdr_flush_confusion();
- }
-
-#if 0
- AddTest("alnbg1",
- chunk_align_bug_1, NULL,
- "Chunk allocation with alignment bug.",
- PARATESTFILE);
-
- AddTest("tldsc",
- lower_dim_size_comp_test, NULL,
- "test lower dim size comp in span tree to mpi derived type",
- PARATESTFILE);
-
- AddTest("lccio",
- link_chunk_collective_io_test, NULL,
- "test mpi derived type management",
- PARATESTFILE);
-
- AddTest("actualio", actual_io_mode_tests, NULL,
- "test actual io mode proprerty",
- PARATESTFILE);
-
- AddTest("nocolcause", no_collective_cause_tests, NULL,
- "test cause for broken collective io",
- PARATESTFILE);
-
- AddTest("edpl", test_plist_ed, NULL,
- "encode/decode Property Lists", NULL);
-#endif
-
- if (MAINPROCESS) {
- printf("Chunk allocation with alignment bug\n");
- fflush(stdout);
- }
- chunk_align_bug_1();
- if (MAINPROCESS) {
- printf("test lower dim size comp in span tree to mpi derived type\n");
- fflush(stdout);
- }
- lower_dim_size_comp_test();
- if (MAINPROCESS) {
- printf("test mpi derived type management\n");
- fflush(stdout);
- }
- link_chunk_collective_io_test();
- if (MAINPROCESS) {
- printf("test actual io mode property - SKIPPED currently due to native-specific testing\n");
- fflush(stdout);
- }
- /* actual_io_mode_tests(); */
- if (MAINPROCESS) {
- printf("test cause for broken collective io - SKIPPED currently due to native-specific testing\n");
- fflush(stdout);
- }
- /* no_collective_cause_tests(); */
- if (MAINPROCESS) {
- printf("encode/decode Property Lists\n");
- fflush(stdout);
- }
- test_plist_ed();
-
- if ((mpi_size < 2) && MAINPROCESS) {
- printf("File Image Ops daisy chain test needs at least 2 processes.\n");
- printf("File Image Ops daisy chain test will be skipped \n");
- }
-
-#if 0
- AddTest((mpi_size < 2)? "-fiodc" : "fiodc", file_image_daisy_chain_test, NULL,
- "file image ops daisy chain", NULL);
-#endif
-
- if (mpi_size >= 2) {
- if (MAINPROCESS) {
- printf("file image ops daisy chain - SKIPPED currently due to native-specific testing\n");
- fflush(stdout);
- }
- /* file_image_daisy_chain_test(); */
- }
-
- if ((mpi_size < 2) && MAINPROCESS) {
- printf("Atomicity tests need at least 2 processes to participate\n");
- printf("8 is more recommended.. Atomicity tests will be skipped \n");
- }
- else if (facc_type != FACC_MPIO && MAINPROCESS) {
- printf("Atomicity tests will not work with a non MPIO VFD\n");
- }
- else if (mpi_size >= 2 && facc_type == FACC_MPIO) {
-#if 0
- AddTest("atomicity", dataset_atomicity, NULL,
- "dataset atomic updates", PARATESTFILE);
-#endif
-
- if (MAINPROCESS) {
- printf("dataset atomic updates - SKIPPED currently due to native-specific testing\n");
- fflush(stdout);
- }
- /* dataset_atomicity(); */
- }
-
-#if 0
- AddTest("denseattr", test_dense_attr, NULL,
- "Store Dense Attributes", PARATESTFILE);
-#endif
-
- if (MAINPROCESS) {
- printf("Store Dense Attributes\n");
- fflush(stdout);
- }
- test_dense_attr();
-
-#if 0
- AddTest("noselcollmdread", test_partial_no_selection_coll_md_read, NULL,
- "Collective Metadata read with some ranks having no selection", PARATESTFILE);
- AddTest("MC_coll_MD_read", test_multi_chunk_io_addrmap_issue, NULL,
- "Collective MD read with multi chunk I/O (H5D__chunk_addrmap)", PARATESTFILE);
- AddTest("LC_coll_MD_read", test_link_chunk_io_sort_chunk_issue, NULL,
- "Collective MD read with link chunk I/O (H5D__sort_chunk)", PARATESTFILE);
-#endif
-
- if (MAINPROCESS) {
- printf("Collective Metadata read with some ranks having no selection\n");
- fflush(stdout);
- }
- test_partial_no_selection_coll_md_read();
- if (MAINPROCESS) {
- printf("Collective MD read with multi chunk I/O\n");
- fflush(stdout);
- }
- test_multi_chunk_io_addrmap_issue();
- if (MAINPROCESS) {
- printf("Collective MD read with link chunk I/O\n");
- fflush(stdout);
- }
- test_link_chunk_io_sort_chunk_issue();
-
- /* Display testing information */
- /* TestInfo(argv[0]); */
-
- /* setup file access property list */
- H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
-
- /* Parse command line arguments */
- /* TestParseCmdLine(argc, argv); */
-
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS) {
- printf("===================================\n"
- " Using Independent I/O with file set view to replace collective I/O \n"
- "===================================\n");
- }
-
- /* Perform requested testing */
- /* PerformTests(); */
-
- /* make sure all processes are finished before final report, cleanup
- * and exit.
- */
- MPI_Barrier(MPI_COMM_WORLD);
-
- /* Display test summary, if requested */
- /* if (MAINPROCESS && GetTestSummary())
- TestSummary(); */
-
- /* Clean up test files */
- /* h5_clean_files(FILENAME, fapl); */
- H5Fdelete(FILENAME[0], fapl);
- H5Pclose(fapl);
-
- /* nerrors += GetTestNumErrs(); */
-
- /* Gather errors from all processes */
- {
- int temp;
- MPI_Allreduce(&nerrors, &temp, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
- nerrors = temp;
- }
-
- if (MAINPROCESS) { /* only process 0 reports */
- printf("===================================\n");
- if (nerrors)
- printf("***PHDF5 tests detected %d errors***\n", nerrors);
- else
- printf("PHDF5 tests finished successfully\n");
- printf("===================================\n");
- }
-
-#if 0
- for (int i = 0; i < NFILENAME; i++) {
- free(filenames[i]);
- filenames[i] = NULL;
- }
-#endif
-
- /* close HDF5 library */
- H5close();
-
- /* Release test infrastructure */
- /* TestShutdown(); */
-
- /* MPI_Finalize must be called AFTER H5close which may use MPI calls */
- MPI_Finalize();
-
- /* cannot just return (nerrors) because exit code is limited to 1byte */
- return (nerrors != 0);
-}
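
The deleted testphdf5.c main() above folds per-rank error counts into a single result with an MPI_MAX reduction before reporting, and returns a 0/1 exit status because exit codes only carry one byte. A minimal standalone sketch of that reduction idiom, with illustrative names, is:

#include <mpi.h>

/* Reduce a per-rank error count so every rank sees the worst case;
 * MPI_MAX means a single failing rank fails the whole run. */
static int
gather_worst_error_count(int local_nerrors)
{
    int global_nerrors = 0;

    MPI_Allreduce(&local_nerrors, &global_nerrors, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);

    return global_nerrors;
}
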
diff --git a/testpar/API/testphdf5.h b/testpar/API/testphdf5.h
deleted file mode 100644
index 59dd577..0000000
--- a/testpar/API/testphdf5.h
+++ /dev/null
@@ -1,342 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/* common definitions used by all parallel hdf5 test programs. */
-
-#ifndef PHDF5TEST_H
-#define PHDF5TEST_H
-
-#include "H5private.h"
-#include "testpar.h"
-
-/*
- * Define parameters for various tests since we do not have access to
- * passing parameters to tests via the testphdf5 test framework.
- */
-#define PARATESTFILE "ParaTest.h5"
-#define NDATASETS 300
-#define NGROUPS 256
-
-/* Disable express testing by default */
-#define EXPRESS_MODE 0
-
-enum H5TEST_COLL_CHUNK_API {
- API_NONE = 0,
- API_LINK_HARD,
- API_MULTI_HARD,
- API_LINK_TRUE,
- API_LINK_FALSE,
- API_MULTI_COLL,
- API_MULTI_IND
-};
-
-#ifndef false
-#define false 0
-#endif
-
-#ifndef true
-#define true 1
-#endif
-
-/* Constants definitions */
-#define DIM0 600 /* Default dataset sizes. */
-#define DIM1 1200 /* Values are from a monitor pixel sizes */
-#define ROW_FACTOR 8 /* Nominal row factor for dataset size */
-#define COL_FACTOR 16 /* Nominal column factor for dataset size */
-#define RANK 2
-#define DATASETNAME1 "Data1"
-#define DATASETNAME2 "Data2"
-#define DATASETNAME3 "Data3"
-#define DATASETNAME4 "Data4"
-#define DATASETNAME5 "Data5"
-#define DATASETNAME6 "Data6"
-#define DATASETNAME7 "Data7"
-#define DATASETNAME8 "Data8"
-#define DATASETNAME9 "Data9"
-
-/* point selection order */
-#define IN_ORDER 1
-#define OUT_OF_ORDER 2
-
-/* Hyperslab layout styles */
-#define BYROW 1 /* divide into slabs of rows */
-#define BYCOL 2 /* divide into blocks of columns */
-#define ZROW 3 /* same as BYCOL except process 0 gets 0 rows */
-#define ZCOL 4 /* same as BYCOL except process 0 gets 0 columns */
-
-/* File_Access_type bits */
-#define FACC_DEFAULT 0x0 /* default */
-#define FACC_MPIO 0x1 /* MPIO */
-#define FACC_SPLIT 0x2 /* Split File */
-
-#define DXFER_COLLECTIVE_IO 0x1 /* Collective IO*/
-#define DXFER_INDEPENDENT_IO 0x2 /* Independent IO collectively */
-/*Constants for collective chunk definitions */
-#define SPACE_DIM1 24
-#define SPACE_DIM2 4
-#define BYROW_CONT 1
-#define BYROW_DISCONT 2
-#define BYROW_SELECTNONE 3
-#define BYROW_SELECTUNBALANCE 4
-#define BYROW_SELECTINCHUNK 5
-
-#define DIMO_NUM_CHUNK 4
-#define DIM1_NUM_CHUNK 2
-#define LINK_TRUE_NUM_CHUNK 2
-#define LINK_FALSE_NUM_CHUNK 6
-#define MULTI_TRUE_PERCENT 50
-#define LINK_TRUE_CHUNK_NAME "h5_link_chunk_TRUE"
-#define LINK_FALSE_CHUNK_NAME "h5_link_chunk_FALSE"
-#define LINK_HARD_CHUNK_NAME "h5_link_chunk_hard"
-#define MULTI_HARD_CHUNK_NAME "h5_multi_chunk_hard"
-#define MULTI_COLL_CHUNK_NAME "h5_multi_chunk_coll"
-#define MULTI_INDP_CHUNK_NAME "h5_multi_chunk_indp"
-
-#define DSET_COLLECTIVE_CHUNK_NAME "coll_chunk_name"
-
-/*Constants for MPI derived data type generated from span tree */
-
-#define MSPACE1_RANK 1 /* Rank of the first dataset in memory */
-#define MSPACE1_DIM 27000 /* Dataset size in memory */
-#define FSPACE_RANK 2 /* Dataset rank as it is stored in the file */
-#define FSPACE_DIM1 9 /* Dimension sizes of the dataset as it is stored in the file */
-#define FSPACE_DIM2 3600
-/* We will read dataset back from the file to the dataset in memory with these dataspace parameters. */
-#define MSPACE_RANK 2
-#define MSPACE_DIM1 9
-#define MSPACE_DIM2 3600
-#define FHCOUNT0 1 /* Count of the first dimension of the first hyperslab selection*/
-#define FHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
-#define FHSTRIDE0 4 /* Stride of the first dimension of the first hyperslab selection*/
-#define FHSTRIDE1 3 /* Stride of the second dimension of the first hyperslab selection*/
-#define FHBLOCK0 3 /* Block of the first dimension of the first hyperslab selection*/
-#define FHBLOCK1 2 /* Block of the second dimension of the first hyperslab selection*/
-#define FHSTART0 0 /* start of the first dimension of the first hyperslab selection*/
-#define FHSTART1 1 /* start of the second dimension of the first hyperslab selection*/
-
-#define SHCOUNT0 1 /* Count of the first dimension of the first hyperslab selection*/
-#define SHCOUNT1 1 /* Count of the second dimension of the first hyperslab selection*/
-#define SHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define SHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
-#define SHBLOCK0 3 /* Block of the first dimension of the first hyperslab selection*/
-#define SHBLOCK1 768 /* Block of the second dimension of the first hyperslab selection*/
-#define SHSTART0 4 /* start of the first dimension of the first hyperslab selection*/
-#define SHSTART1 0 /* start of the second dimension of the first hyperslab selection*/
-
-#define MHCOUNT0 6912 /* Count of the first dimension of the first hyperslab selection*/
-#define MHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define MHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
-#define MHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
-
-#define RFFHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
-#define RFFHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
-#define RFFHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define RFFHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
-#define RFFHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
-#define RFFHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
-#define RFFHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
-#define RFFHSTART1 2 /* start of the second dimension of the first hyperslab selection*/
-
-#define RFSHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
-#define RFSHCOUNT1 1536 /* Count of the second dimension of the first hyperslab selection*/
-#define RFSHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define RFSHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
-#define RFSHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
-#define RFSHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
-#define RFSHSTART0 2 /* start of the first dimension of the first hyperslab selection*/
-#define RFSHSTART1 4 /* start of the second dimension of the first hyperslab selection*/
-
-#define RMFHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
-#define RMFHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
-#define RMFHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define RMFHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
-#define RMFHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
-#define RMFHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
-#define RMFHSTART0 0 /* start of the first dimension of the first hyperslab selection*/
-#define RMFHSTART1 0 /* start of the second dimension of the first hyperslab selection*/
-
-#define RMSHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
-#define RMSHCOUNT1 1536 /* Count of the second dimension of the first hyperslab selection*/
-#define RMSHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define RMSHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
-#define RMSHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
-#define RMSHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
-#define RMSHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
-#define RMSHSTART1 2 /* start of the second dimension of the first hyperslab selection*/
-
-#define NPOINTS \
- 4 /* Number of points that will be selected \
- and overwritten */
-
-/* Definitions of the selection mode for the test_actual_io_function. */
-#define TEST_ACTUAL_IO_NO_COLLECTIVE 0
-#define TEST_ACTUAL_IO_RESET 1
-#define TEST_ACTUAL_IO_MULTI_CHUNK_IND 2
-#define TEST_ACTUAL_IO_MULTI_CHUNK_COL 3
-#define TEST_ACTUAL_IO_MULTI_CHUNK_MIX 4
-#define TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE 5
-#define TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND 6
-#define TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL 7
-#define TEST_ACTUAL_IO_LINK_CHUNK 8
-#define TEST_ACTUAL_IO_CONTIGUOUS 9
-
-/* Definitions of the selection mode for the no_collective_cause_tests function. */
-#define TEST_COLLECTIVE 0x001
-#define TEST_SET_INDEPENDENT 0x002
-#define TEST_DATATYPE_CONVERSION 0x004
-#define TEST_DATA_TRANSFORMS 0x008
-#define TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES 0x010
-#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT 0x020
-#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL 0x040
-
-/* Don't erase these lines, they are put here for debugging purposes */
-/*
-#define MSPACE1_RANK 1
-#define MSPACE1_DIM 50
-#define MSPACE2_RANK 1
-#define MSPACE2_DIM 4
-#define FSPACE_RANK 2
-#define FSPACE_DIM1 8
-#define FSPACE_DIM2 12
-#define MSPACE_RANK 2
-#define MSPACE_DIM1 8
-#define MSPACE_DIM2 9
-#define NPOINTS 4
-*/ /* end of debugging macro */
-
-#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
-/* Collective chunk instrumentation properties */
-#define H5D_XFER_COLL_CHUNK_LINK_HARD_NAME "coll_chunk_link_hard"
-#define H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME "coll_chunk_multi_hard"
-#define H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME "coll_chunk_link_TRUE"
-#define H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME "coll_chunk_link_FALSE"
-#define H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME "coll_chunk_multi_coll"
-#define H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME "coll_chunk_multi_ind"
-
-/* Definitions for all collective chunk instrumentation properties */
-#define H5D_XFER_COLL_CHUNK_SIZE sizeof(unsigned)
-#define H5D_XFER_COLL_CHUNK_DEF 1
-
-/* General collective I/O instrumentation properties */
-#define H5D_XFER_COLL_RANK0_BCAST_NAME "coll_rank0_bcast"
-
-/* Definitions for general collective I/O instrumentation properties */
-#define H5D_XFER_COLL_RANK0_BCAST_SIZE sizeof(bool)
-#define H5D_XFER_COLL_RANK0_BCAST_DEF false
-#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
-
-/* type definitions */
-typedef struct H5Ptest_param_t /* holds extra test parameters */
-{
- char *name;
- int count;
-} H5Ptest_param_t;
-
-/* Dataset data type. Int's can be easily octo dumped. */
-typedef int DATATYPE;
-
-/* Shape Same Tests Definitions */
-typedef enum {
- IND_CONTIG, /* Independent IO on contiguous datasets */
- COL_CONTIG, /* Collective IO on contiguous datasets */
- IND_CHUNKED, /* Independent IO on chunked datasets */
- COL_CHUNKED /* Collective IO on chunked datasets */
-} ShapeSameTestMethods;
-
-/* Shared global variables */
-extern int dim0, dim1; /*Dataset dimensions */
-extern int chunkdim0, chunkdim1; /*Chunk dimensions */
-extern int nerrors; /*errors count */
-extern H5E_auto2_t old_func; /* previous error handler */
-extern void *old_client_data; /*previous error handler arg.*/
-extern int facc_type; /*Test file access type */
-extern int dxfer_coll_type;
-
-/* Test program prototypes */
-void test_plist_ed(void);
-#if 0
-void external_links(void);
-#endif
-void zero_dim_dset(void);
-void test_file_properties(void);
-void test_delete(void);
-void multiple_dset_write(void);
-void multiple_group_write(void);
-void multiple_group_read(void);
-void collective_group_write_independent_group_read(void);
-void collective_group_write(void);
-void independent_group_read(void);
-void test_fapl_mpio_dup(void);
-void test_split_comm_access(void);
-void test_page_buffer_access(void);
-void dataset_atomicity(void);
-void dataset_writeInd(void);
-void dataset_writeAll(void);
-void extend_writeInd(void);
-void extend_writeInd2(void);
-void extend_writeAll(void);
-void dataset_readInd(void);
-void dataset_readAll(void);
-void extend_readInd(void);
-void extend_readAll(void);
-void none_selection_chunk(void);
-void actual_io_mode_tests(void);
-void no_collective_cause_tests(void);
-void test_chunk_alloc(void);
-void test_filter_read(void);
-void compact_dataset(void);
-void null_dataset(void);
-void big_dataset(void);
-void dataset_fillvalue(void);
-void coll_chunk1(void);
-void coll_chunk2(void);
-void coll_chunk3(void);
-void coll_chunk4(void);
-void coll_chunk5(void);
-void coll_chunk6(void);
-void coll_chunk7(void);
-void coll_chunk8(void);
-void coll_chunk9(void);
-void coll_chunk10(void);
-void coll_irregular_cont_read(void);
-void coll_irregular_cont_write(void);
-void coll_irregular_simple_chunk_read(void);
-void coll_irregular_simple_chunk_write(void);
-void coll_irregular_complex_chunk_read(void);
-void coll_irregular_complex_chunk_write(void);
-void io_mode_confusion(void);
-void rr_obj_hdr_flush_confusion(void);
-void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm);
-void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm);
-void chunk_align_bug_1(void);
-void lower_dim_size_comp_test(void);
-void link_chunk_collective_io_test(void);
-void contig_hyperslab_dr_pio_test(ShapeSameTestMethods sstest_type);
-void checker_board_hyperslab_dr_pio_test(ShapeSameTestMethods sstest_type);
-void file_image_daisy_chain_test(void);
-#ifdef H5_HAVE_FILTER_DEFLATE
-void compress_readAll(void);
-#endif /* H5_HAVE_FILTER_DEFLATE */
-void test_dense_attr(void);
-void test_partial_no_selection_coll_md_read(void);
-void test_multi_chunk_io_addrmap_issue(void);
-void test_link_chunk_io_sort_chunk_issue(void);
-void test_collective_global_heap_write(void);
-
-/* commonly used prototypes */
-hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type);
-MPI_Offset h5_mpi_get_file_size(const char *filename, MPI_Comm comm, MPI_Info info);
-int dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset,
- DATATYPE *original);
-void point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points,
- hsize_t coords[], int order);
-#endif /* PHDF5TEST_H */
diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c
index 2726f91..910c7a2 100644
--- a/testpar/t_bigio.c
+++ b/testpar/t_bigio.c
@@ -1,3 +1,14 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5.h"
#include "testphdf5.h"
@@ -1854,7 +1865,8 @@ main(int argc, char **argv)
{
hsize_t newsize = 1048576;
/* Set the bigio processing limit to be 'newsize' bytes */
- hsize_t oldsize = H5_mpi_set_bigio_count(newsize);
+ hsize_t oldsize = H5_mpi_set_bigio_count(newsize);
+ hid_t acc_plist = H5I_INVALID_HID;
/* Having set the bigio handling to a size that is manageable,
* we'll set our 'bigcount' variable to be 2X that limit so
@@ -1879,6 +1891,30 @@ main(int argc, char **argv)
/* set alarm. */
TestAlarmOn();
+ acc_plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+
+ /* Get the capability flag of the VOL connector being used */
+ if (H5Pget_vol_cap_flags(acc_plist, &vol_cap_flags_g) < 0) {
+ if (MAIN_PROCESS)
+ printf("Failed to get the capability flag of the VOL connector being used\n");
+
+ MPI_Finalize();
+ return -1;
+ }
+
+ /* Make sure the connector supports the API functions being tested. This test only
+ * uses a few API functions, such as H5Fcreate/open/close/delete, H5Dcreate/write/read/close,
+ * and H5Dget_space. */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAIN_PROCESS)
+ printf(
+ "API functions for basic file, dataset basic or more aren't supported with this connector\n");
+
+ MPI_Finalize();
+ return 0;
+ }
+
dataset_big_write();
MPI_Barrier(MPI_COMM_WORLD);
@@ -1900,9 +1936,6 @@ main(int argc, char **argv)
H5_mpi_set_bigio_count(oldsize);
single_rank_independent_io();
- /* turn off alarm */
- TestAlarmOff();
-
if (mpi_rank_g == 0) {
hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1926,6 +1959,11 @@ main(int argc, char **argv)
printf("==================================================\n");
}
+ H5Pclose(acc_plist);
+
+ /* turn off alarm */
+ TestAlarmOff();
+
/* close HDF5 library */
H5close();
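
The t_bigio.c hunks above add the VOL capability-flag gate that this commit repeats in the individual test functions further down: query the flags from the file-access property list, then skip tests whose operations the connector cannot support. A minimal sketch of that gate, assuming a FAPL already configured for the target connector and using the hypothetical helper name connector_supports_basic_io():

#include "hdf5.h"

/* Return nonzero when the VOL connector resolved through fapl_id supports
 * the basic file and dataset operations (plus "dataset more") that most of
 * these parallel tests exercise. */
static int
connector_supports_basic_io(hid_t fapl_id)
{
    uint64_t cap_flags = 0;

    if (H5Pget_vol_cap_flags(fapl_id, &cap_flags) < 0)
        return 0; /* treat a failed query as "not supported" */

    return (cap_flags & H5VL_CAP_FLAG_FILE_BASIC) != 0 &&
           (cap_flags & H5VL_CAP_FLAG_DATASET_BASIC) != 0 &&
           (cap_flags & H5VL_CAP_FLAG_DATASET_MORE) != 0;
}
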
diff --git a/testpar/t_chunk_alloc.c b/testpar/t_chunk_alloc.c
index d02951d..1d59783 100644
--- a/testpar/t_chunk_alloc.c
+++ b/testpar/t_chunk_alloc.c
@@ -80,6 +80,8 @@ create_chunked_dataset(const char *filename, int chunk_factor, write_type write_
/* Only MAINPROCESS should create the file. Others just wait. */
if (MAINPROCESS) {
+ bool vol_is_native;
+
nchunks = chunk_factor * mpi_size;
dims[0] = (hsize_t)(nchunks * CHUNK_SIZE);
/* Create the data space with unlimited dimensions. */
@@ -93,6 +95,9 @@ create_chunked_dataset(const char *filename, int chunk_factor, write_type write_
file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
VRFY((file_id >= 0), "H5Fcreate");
+ /* Check if native VOL is being used */
+ VRFY((h5_using_native_vol(H5P_DEFAULT, file_id, &vol_is_native) >= 0), "h5_using_native_vol");
+
/* Modify dataset creation properties, i.e. enable chunking */
cparms = H5Pcreate(H5P_DATASET_CREATE);
VRFY((cparms >= 0), "");
@@ -142,10 +147,12 @@ create_chunked_dataset(const char *filename, int chunk_factor, write_type write_
VRFY((hrc >= 0), "");
file_id = -1;
- /* verify file size */
- filesize = get_filesize(filename);
- est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char);
- VRFY((filesize >= est_filesize), "file size check");
+ if (vol_is_native) {
+ /* verify file size */
+ filesize = get_filesize(filename);
+ est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char);
+ VRFY((filesize >= est_filesize), "file size check");
+ }
}
/* Make sure all processes are done before exiting this routine. Otherwise,
@@ -187,6 +194,8 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti
MPI_Offset filesize, /* actual file size */
est_filesize; /* estimated file size */
+ bool vol_is_native;
+
/* Initialize MPI */
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -206,12 +215,20 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti
VRFY((*file_id >= 0), "");
}
+ /* Check if native VOL is being used */
+ VRFY((h5_using_native_vol(H5P_DEFAULT, *file_id, &vol_is_native) >= 0), "h5_using_native_vol");
+
/* Open dataset*/
if (*dataset < 0) {
*dataset = H5Dopen2(*file_id, DSET_NAME, H5P_DEFAULT);
VRFY((*dataset >= 0), "");
}
+ /* Make sure all processes are done before continuing. Otherwise, one
+ * process could change the dataset extent before another finishes opening
+ * it, resulting in only some of the processes calling H5Dset_extent(). */
+ MPI_Barrier(MPI_COMM_WORLD);
+
memspace = H5Screate_simple(1, chunk_dims, NULL);
VRFY((memspace >= 0), "");
@@ -277,10 +294,12 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti
VRFY((hrc >= 0), "");
*file_id = -1;
- /* verify file size */
- filesize = get_filesize(filename);
- est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char);
- VRFY((filesize >= est_filesize), "file size check");
+ if (vol_is_native) {
+ /* verify file size */
+ filesize = get_filesize(filename);
+ est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char);
+ VRFY((filesize >= est_filesize), "file size check");
+ }
/* Can close some plists */
hrc = H5Pclose(access_plist);
@@ -448,6 +467,19 @@ test_chunk_alloc(void)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset, or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
filename = (const char *)GetTestParameters();
if (VERBOSE_MED)
printf("Extend Chunked allocation test on file %s\n", filename);
@@ -530,6 +562,7 @@ test_chunk_alloc_incr_ser_to_par(void)
int *data = NULL;
int *correct_data = NULL;
int *read_data = NULL;
+ bool vol_is_native;
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -598,6 +631,9 @@ test_chunk_alloc_incr_ser_to_par(void)
fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
VRFY((fid >= 0), "H5Fopen");
+ /* Check if native VOL is being used */
+ VRFY((h5_using_native_vol(H5P_DEFAULT, fid, &vol_is_native) >= 0), "h5_using_native_vol");
+
data = malloc((dset_dims[0] / (hsize_t)mpi_size) * sizeof(int));
VRFY(data, "malloc");
read_data = malloc(dset_dims[0] * sizeof(int));
@@ -613,13 +649,17 @@ test_chunk_alloc_incr_ser_to_par(void)
dset_id = H5Dopen2(fid, "dset_no_filter", H5P_DEFAULT);
VRFY((dset_id >= 0), "H5Dopen2");
- ret = H5Dget_space_status(dset_id, &space_status);
- VRFY((ret == SUCCEED), "H5Dread");
+ if (vol_is_native) {
+ ret = H5Dget_space_status(dset_id, &space_status);
+ VRFY((ret == SUCCEED), "H5Dread");
- VRFY((space_status == H5D_SPACE_STATUS_ALLOCATED), "file space allocation status verification succeeded");
+ VRFY((space_status == H5D_SPACE_STATUS_ALLOCATED),
+ "file space allocation status verification succeeded");
- alloc_size = H5Dget_storage_size(dset_id);
- VRFY(((dset_dims[0] * sizeof(int)) == alloc_size), "file space allocation size verification succeeded");
+ alloc_size = H5Dget_storage_size(dset_id);
+ VRFY(((dset_dims[0] * sizeof(int)) == alloc_size),
+ "file space allocation size verification succeeded");
+ }
memset(read_data, 255, dset_dims[0] * sizeof(int));
memset(correct_data, 0, dset_dims[0] * sizeof(int));
@@ -649,13 +689,17 @@ test_chunk_alloc_incr_ser_to_par(void)
MPI_Barrier(MPI_COMM_WORLD);
- ret = H5Dget_space_status(dset_id, &space_status);
- VRFY((ret == SUCCEED), "H5Dread");
+ if (vol_is_native) {
+ ret = H5Dget_space_status(dset_id, &space_status);
+ VRFY((ret == SUCCEED), "H5Dread");
- VRFY((space_status == H5D_SPACE_STATUS_ALLOCATED), "file space allocation status verification succeeded");
+ VRFY((space_status == H5D_SPACE_STATUS_ALLOCATED),
+ "file space allocation status verification succeeded");
- alloc_size = H5Dget_storage_size(dset_id);
- VRFY(((dset_dims[0] * sizeof(int)) == alloc_size), "file space allocation size verification succeeded");
+ alloc_size = H5Dget_storage_size(dset_id);
+ VRFY(((dset_dims[0] * sizeof(int)) == alloc_size),
+ "file space allocation size verification succeeded");
+ }
memset(read_data, 0, dset_dims[0] * sizeof(int));
memset(correct_data, 255, dset_dims[0] * sizeof(int));
@@ -680,14 +724,16 @@ test_chunk_alloc_incr_ser_to_par(void)
dset_id = H5Dopen2(fid, "dset_filter", H5P_DEFAULT);
VRFY((dset_id >= 0), "H5Dopen2");
- ret = H5Dget_space_status(dset_id, &space_status);
- VRFY((ret == SUCCEED), "H5Dread");
+ if (vol_is_native) {
+ ret = H5Dget_space_status(dset_id, &space_status);
+ VRFY((ret == SUCCEED), "H5Dread");
- VRFY((space_status == H5D_SPACE_STATUS_NOT_ALLOCATED),
- "file space allocation status verification succeeded");
+ VRFY((space_status == H5D_SPACE_STATUS_NOT_ALLOCATED),
+ "file space allocation status verification succeeded");
- alloc_size = H5Dget_storage_size(dset_id);
- VRFY((0 == alloc_size), "file space allocation size verification succeeded");
+ alloc_size = H5Dget_storage_size(dset_id);
+ VRFY((0 == alloc_size), "file space allocation size verification succeeded");
+ }
memset(read_data, 255, dset_dims[0] * sizeof(int));
memset(correct_data, 0, dset_dims[0] * sizeof(int));
@@ -723,13 +769,17 @@ test_chunk_alloc_incr_ser_to_par(void)
MPI_Barrier(MPI_COMM_WORLD);
- ret = H5Dget_space_status(dset_id, &space_status);
- VRFY((ret == SUCCEED), "H5Dread");
+ if (vol_is_native) {
+ ret = H5Dget_space_status(dset_id, &space_status);
+ VRFY((ret == SUCCEED), "H5Dread");
- VRFY((space_status == H5D_SPACE_STATUS_ALLOCATED), "file space allocation status verification succeeded");
+ VRFY((space_status == H5D_SPACE_STATUS_ALLOCATED),
+ "file space allocation status verification succeeded");
- alloc_size = H5Dget_storage_size(dset_id);
- VRFY(((dset_dims[0] * sizeof(int)) == alloc_size), "file space allocation size verification succeeded");
+ alloc_size = H5Dget_storage_size(dset_id);
+ VRFY(((dset_dims[0] * sizeof(int)) == alloc_size),
+ "file space allocation size verification succeeded");
+ }
memset(read_data, 0, dset_dims[0] * sizeof(int));
memset(correct_data, 255, dset_dims[0] * sizeof(int));
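
The t_chunk_alloc.c changes above guard file-size and storage-allocation checks behind h5_using_native_vol(), since only the native connector guarantees the data lands in a single POSIX file whose size can be compared against an estimate. A small sketch of that gating pattern follows; the function and variable names other than the HDF5 calls and the h5_using_native_vol() helper seen in the diff are illustrative, and the helper's prototype is an assumption based on its use above:

#include <stdbool.h>
#include <stdio.h>
#include "hdf5.h"

/* Assumed prototype of the test-support helper used in the diff above */
herr_t h5_using_native_vol(hid_t fapl_id, hid_t obj_id, bool *is_native);

/* Only verify on-disk storage size when the native VOL connector is in use */
static void
verify_storage_size_if_native(hid_t file_id, hid_t dset_id, hsize_t expected_bytes)
{
    bool vol_is_native = false;

    if (h5_using_native_vol(H5P_DEFAULT, file_id, &vol_is_native) < 0)
        return; /* could not determine the connector; skip the check */

    if (vol_is_native) {
        hsize_t alloc_size = H5Dget_storage_size(dset_id);

        if (alloc_size != expected_bytes)
            fprintf(stderr, "storage size mismatch: %llu != %llu\n",
                    (unsigned long long)alloc_size, (unsigned long long)expected_bytes);
    }
}
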
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index 1ff7a8e..fa3459d 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -67,6 +67,22 @@ void
coll_chunk1(void)
{
const char *filename = GetTestParameters();
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
@@ -113,6 +129,22 @@ void
coll_chunk2(void)
{
const char *filename = GetTestParameters();
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
@@ -161,8 +193,24 @@ coll_chunk3(void)
{
const char *filename = GetTestParameters();
int mpi_size;
+ int mpi_rank;
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
@@ -209,6 +257,22 @@ void
coll_chunk4(void)
{
const char *filename = GetTestParameters();
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, HYPER, POINT, OUT_OF_ORDER);
@@ -256,6 +320,22 @@ void
coll_chunk5(void)
{
const char *filename = GetTestParameters();
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, HYPER, POINT, OUT_OF_ORDER);
@@ -305,6 +385,22 @@ void
coll_chunk6(void)
{
const char *filename = GetTestParameters();
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, HYPER, POINT, OUT_OF_ORDER);
@@ -352,6 +448,22 @@ void
coll_chunk7(void)
{
const char *filename = GetTestParameters();
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, HYPER, POINT, OUT_OF_ORDER);
@@ -399,6 +511,22 @@ void
coll_chunk8(void)
{
const char *filename = GetTestParameters();
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, HYPER, POINT, OUT_OF_ORDER);
@@ -446,6 +574,22 @@ void
coll_chunk9(void)
{
const char *filename = GetTestParameters();
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, POINT, OUT_OF_ORDER);
@@ -493,6 +637,22 @@ void
coll_chunk10(void)
{
const char *filename = GetTestParameters();
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, POINT, OUT_OF_ORDER);
@@ -506,15 +666,15 @@ coll_chunk10(void)
}
/*-------------------------------------------------------------------------
- * Function: coll_chunktest
+ * Function: coll_chunktest
*
* Purpose: The real testing routine for regular selection of collective
* chunking storage testing both write and read,
* If anything fails, it may be read or write. There is no
* separation test between read and write.
*
- * Return: Success: 0
- * Failure: -1
+ * Return: Success: 0
+ * Failure: -1
*
*-------------------------------------------------------------------------
*/
diff --git a/testpar/t_coll_md.c b/testpar/t_coll_md.c
index 9c6fc71..043ecf8 100644
--- a/testpar/t_coll_md.c
+++ b/testpar/t_coll_md.c
@@ -89,6 +89,19 @@ test_partial_no_selection_coll_md_read(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or file flush aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
filename = GetTestParameters();
fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
@@ -271,6 +284,19 @@ test_multi_chunk_io_addrmap_issue(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or file flush aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
filename = GetTestParameters();
fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
@@ -388,6 +414,19 @@ test_link_chunk_io_sort_chunk_issue(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or file flush aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
filename = GetTestParameters();
fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
@@ -531,6 +570,19 @@ test_collective_global_heap_write(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or file flush aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
filename = GetTestParameters();
fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index 83d7511..67d11d2 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -301,6 +301,19 @@ dataset_writeInd(void)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
/* allocate memory for data buffer */
data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
@@ -439,6 +452,19 @@ dataset_readInd(void)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
/* allocate memory for data buffer */
data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
@@ -569,6 +595,19 @@ dataset_writeAll(void)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
/* set up the coords array selection */
num_points = (size_t)dim1;
coords = (hsize_t *)malloc((size_t)dim1 * (size_t)RANK * sizeof(hsize_t));
@@ -1085,6 +1124,19 @@ dataset_readAll(void)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
/* set up the coords array selection */
num_points = (size_t)dim1;
coords = (hsize_t *)malloc((size_t)dim0 * (size_t)dim1 * RANK * sizeof(hsize_t));
@@ -1499,6 +1551,19 @@ extend_writeInd(void)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
/* setup chunk-size. Make sure sizes are > 0 */
chunk_dims[0] = (hsize_t)chunkdim0;
chunk_dims[1] = (hsize_t)chunkdim1;
@@ -1714,6 +1779,19 @@ extend_writeInd2(void)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
/* -------------------
* START AN HDF5 FILE
* -------------------*/
@@ -1877,6 +1955,19 @@ extend_readInd(void)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
/* allocate memory for data buffer */
data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
@@ -2058,6 +2149,19 @@ extend_writeAll(void)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
/* setup chunk-size. Make sure sizes are > 0 */
chunk_dims[0] = (hsize_t)chunkdim0;
chunk_dims[1] = (hsize_t)chunkdim1;
@@ -2295,6 +2399,19 @@ extend_readAll(void)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
/* allocate memory for data buffer */
data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
@@ -2485,6 +2602,17 @@ compress_readAll(void)
MPI_Comm_size(comm, &mpi_size);
MPI_Comm_rank(comm, &mpi_rank);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
/* Allocate data buffer */
data_orig = (DATATYPE *)malloc((size_t)dim * sizeof(DATATYPE));
VRFY((data_orig != NULL), "data_origin1 malloc succeeded");
@@ -2677,6 +2805,17 @@ none_selection_chunk(void)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
/* setup chunk-size. Make sure sizes are > 0 */
chunk_dims[0] = (hsize_t)chunkdim0;
chunk_dims[1] = (hsize_t)chunkdim1;
@@ -2954,6 +3093,17 @@ test_actual_io_mode(int selection_mode)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_size >= 1);
@@ -3474,6 +3624,19 @@ test_no_collective_cause_mode(int selection_mode)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset, or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_size >= 1);
@@ -3739,8 +3902,6 @@ test_no_collective_cause_mode(int selection_mode)
/* Release some resources */
if (sid)
H5Sclose(sid);
- if (fapl)
- H5Pclose(fapl);
if (dcpl)
H5Pclose(dcpl);
if (dxpl_write)
@@ -3759,7 +3920,10 @@ test_no_collective_cause_mode(int selection_mode)
/* clean up external file */
if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL)
- HDremove(FILE_EXTERNAL);
+ H5Fdelete(FILE_EXTERNAL, fapl);
+
+ if (fapl)
+ H5Pclose(fapl);
return;
}
@@ -3845,6 +4009,19 @@ dataset_atomicity(void)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
buf_size = dim0 * dim1;
/* allocate memory for data buffer */
write_buf = (int *)calloc((size_t)buf_size, sizeof(int));
@@ -4151,14 +4328,27 @@ test_dense_attr(void)
herr_t status;
const char *filename;
- /* get filename */
- filename = (const char *)GetTestParameters();
- assert(filename != NULL);
-
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, group, dataset, or attribute aren't supported with "
+ "this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* get filename */
+ filename = (const char *)GetTestParameters();
+ assert(filename != NULL);
+
fpid = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fpid > 0), "H5Pcreate succeeded");
status = H5Pset_libver_bounds(fpid, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
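The hunks above all open with the same guard: read the connector's capability bits out of vol_cap_flags_g and skip the test when a required API class is missing. A minimal sketch of that guard, factored into a hypothetical check_required_caps() helper, follows; vol_cap_flags_g, MAINPROCESS, and the H5VL_CAP_FLAG_* bits are the ones used throughout this patch, while the helper itself and its "what" string are illustrative only.

    /* Sketch: skip a test unless the connector supports every required API class.
     * Assumes the testpar headers (for MAINPROCESS and vol_cap_flags_g) are included. */
    static bool
    check_required_caps(uint64_t required, const char *what)
    {
        if ((vol_cap_flags_g & required) != required) {
            if (MAINPROCESS) {
                puts("SKIPPED");
                printf("    API functions for %s aren't supported with this connector\n", what);
                fflush(stdout);
            }
            return false;
        }
        return true;
    }

    /* typical use at the top of a test function */
    if (!check_required_caps(H5VL_CAP_FLAG_FILE_BASIC | H5VL_CAP_FLAG_DATASET_BASIC, "basic file or dataset"))
        return;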
diff --git a/testpar/t_file.c b/testpar/t_file.c
index 8f8b291..493e6d2 100644
--- a/testpar/t_file.c
+++ b/testpar/t_file.c
@@ -71,6 +71,18 @@ test_split_comm_access(void)
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
is_old = mpi_rank % 2;
mrc = MPI_Comm_split(MPI_COMM_WORLD, is_old, mpi_rank, &comm);
VRFY((mrc == MPI_SUCCESS), "");
@@ -771,13 +783,25 @@ test_file_properties(void)
int mpi_ret; /* MPI return value */
int cmp; /* Compare value */
- filename = (const char *)GetTestParameters();
-
/* set up MPI parameters */
mpi_ret = MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
VRFY((mpi_ret >= 0), "MPI_Comm_size succeeded");
mpi_ret = MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
VRFY((mpi_ret >= 0), "MPI_Comm_rank succeeded");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ filename = (const char *)GetTestParameters();
+
mpi_ret = MPI_Info_create(&info);
VRFY((mpi_ret >= 0), "MPI_Info_create succeeded");
mpi_ret = MPI_Info_set(info, "hdf_info_prop1", "xyz");
@@ -964,6 +988,18 @@ test_delete(void)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or file more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
/* setup file access plist */
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl_id != H5I_INVALID_HID), "H5Pcreate");
diff --git a/testpar/t_file_image.c b/testpar/t_file_image.c
index 755831b..1790685 100644
--- a/testpar/t_file_image.c
+++ b/testpar/t_file_image.c
@@ -84,6 +84,20 @@ file_image_daisy_chain_test(void)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset, or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
/* setup file name */
snprintf(file_name, 1024, "file_image_daisy_chain_test_%05d.h5", (int)mpi_rank);
diff --git a/testpar/t_filter_read.c b/testpar/t_filter_read.c
index 01695ab..f001cc9 100644
--- a/testpar/t_filter_read.c
+++ b/testpar/t_filter_read.c
@@ -52,7 +52,8 @@ filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size)
hsize_t hs_size[2]; /* Hyperslab size */
size_t i, j; /* Local index variables */
char name[32] = "dataset";
- herr_t hrc; /* Error status */
+ herr_t hrc; /* Error status */
+ bool vol_is_native;
int *points = NULL; /* Writing buffer for entire dataset */
int *check = NULL; /* Reading buffer for selected hyperslab */
@@ -93,6 +94,9 @@ filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size)
file = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
VRFY(file >= 0, "H5Fcreate");
+ /* Check if native VOL is being used */
+ VRFY((h5_using_native_vol(H5P_DEFAULT, file, &vol_is_native) >= 0), "h5_using_native_vol");
+
/* Create the dataset */
dataset = H5Dcreate2(file, name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY(dataset >= 0, "H5Dcreate2");
@@ -100,8 +104,10 @@ filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size)
hrc = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, points);
VRFY(hrc >= 0, "H5Dwrite");
- *dset_size = H5Dget_storage_size(dataset);
- VRFY(*dset_size > 0, "H5Dget_storage_size");
+ if (vol_is_native) {
+ *dset_size = H5Dget_storage_size(dataset);
+ VRFY(*dset_size > 0, "H5Dget_storage_size");
+ }
hrc = H5Dclose(dataset);
VRFY(hrc >= 0, "H5Dclose");
@@ -124,6 +130,9 @@ filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size)
file = H5Fopen(filename, H5F_ACC_RDWR, access_plist);
VRFY((file >= 0), "H5Fopen");
+ /* Check if native VOL is being used */
+ VRFY((h5_using_native_vol(H5P_DEFAULT, file, &vol_is_native) >= 0), "h5_using_native_vol");
+
dataset = H5Dopen2(file, name, H5P_DEFAULT);
VRFY((dataset >= 0), "H5Dopen2");
@@ -150,9 +159,11 @@ filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size)
}
}
- /* Get the storage size of the dataset */
- *dset_size = H5Dget_storage_size(dataset);
- VRFY(*dset_size != 0, "H5Dget_storage_size");
+ if (vol_is_native) {
+ /* Get the storage size of the dataset */
+ *dset_size = H5Dget_storage_size(dataset);
+ VRFY(*dset_size != 0, "H5Dget_storage_size");
+ }
/* Clean up objects used for this test */
hrc = H5Dclose(dataset);
@@ -194,9 +205,8 @@ test_filter_read(void)
unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
herr_t hrc;
const char *filename;
-#ifdef H5_HAVE_FILTER_FLETCHER32
- hsize_t fletcher32_size; /* Size of dataset with Fletcher32 checksum */
-#endif
+ bool vol_is_native;
+ hsize_t fletcher32_size; /* Size of dataset with Fletcher32 checksum */
#ifdef H5_HAVE_FILTER_DEFLATE
hsize_t deflate_size; /* Size of dataset with deflate filter */
@@ -208,7 +218,7 @@ test_filter_read(void)
unsigned szip_pixels_per_block = 4;
#endif /* H5_HAVE_FILTER_SZIP */
- hsize_t shuffle_size; /* Size of dataset with shuffle filter */
+ hsize_t shuffle_size = 0; /* Size of dataset with shuffle filter */
#if (defined H5_HAVE_FILTER_DEFLATE || defined H5_HAVE_FILTER_SZIP)
hsize_t combo_size; /* Size of dataset with multiple filters */
@@ -219,6 +229,24 @@ test_filter_read(void)
if (VERBOSE_MED)
printf("Parallel reading of dataset written with filters %s\n", filename);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FILTERS)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(
+ " API functions for basic file, dataset or filter aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* Check if native VOL is being used */
+ VRFY(h5_using_native_vol(H5P_DEFAULT, H5I_INVALID_HID, &vol_is_native) >= 0, "h5_using_native_vol");
+
/*----------------------------------------------------------
* STEP 0: Test without filters.
*----------------------------------------------------------
@@ -258,7 +286,6 @@ test_filter_read(void)
* STEP 1: Test Fletcher32 Checksum by itself.
*----------------------------------------------------------
*/
-#ifdef H5_HAVE_FILTER_FLETCHER32
dc = H5Pcreate(H5P_DATASET_CREATE);
VRFY(dc >= 0, "H5Pset_filter");
@@ -273,14 +300,14 @@ test_filter_read(void)
VRFY(hrc >= 0, "H5Pset_filter");
filter_read_internal(filename, dc, &fletcher32_size);
- VRFY(fletcher32_size > null_size, "Size after checksumming is incorrect.");
+
+ if (vol_is_native)
+ VRFY(fletcher32_size > null_size, "Size after checksumming is incorrect.");
/* Clean up objects used for this test */
hrc = H5Pclose(dc);
VRFY(hrc >= 0, "H5Pclose");
-#endif /* H5_HAVE_FILTER_FLETCHER32 */
-
/*----------------------------------------------------------
* STEP 2: Test deflation by itself.
*----------------------------------------------------------
@@ -349,7 +376,9 @@ test_filter_read(void)
VRFY(hrc >= 0, "H5Pset_shuffle");
filter_read_internal(filename, dc, &shuffle_size);
- VRFY(shuffle_size == null_size, "Shuffled size not the same as uncompressed size.");
+
+ if (vol_is_native)
+ VRFY(shuffle_size == null_size, "Shuffled size not the same as uncompressed size.");
/* Clean up objects used for this test */
hrc = H5Pclose(dc);
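The t_filter_read.c changes rely on the new h5_using_native_vol() helper from this commit so that storage-size checks only run when the VOL stack resolves to the native connector. A minimal sketch of that call pattern, assuming an already-open file handle file_id and dataset handle dset_id:

    bool    vol_is_native = false;
    hsize_t dset_size     = 0;

    /* Ask the test framework whether the native VOL connector is ultimately in use */
    VRFY((h5_using_native_vol(H5P_DEFAULT, file_id, &vol_is_native) >= 0), "h5_using_native_vol");

    if (vol_is_native) {
        /* Storage size is only meaningful for the native HDF5 file format */
        dset_size = H5Dget_storage_size(dset_id);
        VRFY((dset_size > 0), "H5Dget_storage_size");
    }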
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index 582e441..b9cb4cc 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -84,6 +84,17 @@ zero_dim_dset(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
filename = GetTestParameters();
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
@@ -156,6 +167,17 @@ multiple_dset_write(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
outme = malloc((size_t)size * (size_t)size * sizeof(double));
VRFY((outme != NULL), "malloc succeeded for outme");
@@ -235,6 +257,17 @@ compact_dataset(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
outme = malloc((size_t)((size_t)size * (size_t)size * sizeof(double)));
VRFY((outme != NULL), "malloc succeeded for outme");
@@ -357,6 +390,19 @@ null_dataset(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset, or attribute aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
filename = GetTestParameters();
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
@@ -456,12 +502,24 @@ big_dataset(void)
hsize_t file_dims[4]; /* Dimensions of dataspace */
char dname[] = "dataset"; /* Name of dataset */
MPI_Offset file_size; /* Size of file on disk */
- herr_t ret; /* Generic return value */
+ bool vol_is_native;
+ herr_t ret; /* Generic return value */
const char *filename;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
/* Verify MPI_Offset can handle larger than 2GB sizes */
VRFY((sizeof(MPI_Offset) > 4), "sizeof(MPI_Offset)>4");
@@ -476,6 +534,9 @@ big_dataset(void)
iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
VRFY((iof >= 0), "H5Fcreate succeeded");
+ /* Check if native VOL is being used */
+ VRFY((h5_using_native_vol(H5P_DEFAULT, iof, &vol_is_native) >= 0), "h5_using_native_vol");
+
/* Define dataspace for 2GB dataspace */
file_dims[0] = 2;
file_dims[1] = 1024;
@@ -495,9 +556,11 @@ big_dataset(void)
ret = H5Fclose(iof);
VRFY((ret >= 0), "H5Fclose succeeded");
- /* Check that file of the correct size was created */
- file_size = h5_get_file_size(filename, fapl);
- VRFY((file_size == 2147485696ULL), "File is correct size(~2GB)");
+ if (vol_is_native) {
+ /* Check that file of the correct size was created */
+ file_size = h5_get_file_size(filename, fapl);
+ VRFY((file_size == 2147485696ULL), "File is correct size(~2GB)");
+ }
/*
* Create >4GB HDF5 file
@@ -524,9 +587,11 @@ big_dataset(void)
ret = H5Fclose(iof);
VRFY((ret >= 0), "H5Fclose succeeded");
- /* Check that file of the correct size was created */
- file_size = h5_get_file_size(filename, fapl);
- VRFY((file_size == 4294969344ULL), "File is correct size(~4GB)");
+ if (vol_is_native) {
+ /* Check that file of the correct size was created */
+ file_size = h5_get_file_size(filename, fapl);
+ VRFY((file_size == 4294969344ULL), "File is correct size(~4GB)");
+ }
/*
* Create >8GB HDF5 file
@@ -553,9 +618,11 @@ big_dataset(void)
ret = H5Fclose(iof);
VRFY((ret >= 0), "H5Fclose succeeded");
- /* Check that file of the correct size was created */
- file_size = h5_get_file_size(filename, fapl);
- VRFY((file_size == 8589936640ULL), "File is correct size(~8GB)");
+ if (vol_is_native) {
+ /* Check that file of the correct size was created */
+ file_size = h5_get_file_size(filename, fapl);
+ VRFY((file_size == 8589936640ULL), "File is correct size(~8GB)");
+ }
/* Close fapl */
ret = H5Pclose(fapl);
@@ -594,6 +661,17 @@ dataset_fillvalue(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
filename = GetTestParameters();
/* Set the dataset dimension to be one row more than number of processes */
@@ -842,6 +920,19 @@ collective_group_write(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
size = get_size();
chunk_size[0] = (hsize_t)(size / 2);
@@ -935,6 +1026,19 @@ independent_group_read(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
H5Pset_all_coll_metadata_ops(plist, false);
@@ -1055,6 +1159,19 @@ multiple_group_write(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, group, dataset, or attribute aren't supported with "
+ "this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
size = get_size();
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
@@ -1210,6 +1327,19 @@ multiple_group_read(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, group, dataset, or attribute aren't supported with "
+ "this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
size = get_size();
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
@@ -1526,6 +1656,19 @@ io_mode_confusion(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset, or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
/*
* Set up file access property list with parallel I/O access
*/
@@ -1775,6 +1918,20 @@ rr_obj_hdr_flush_confusion(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset, attribute, dataset more, attribute more, or "
+ "file flush aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
assert(mpi_size > 2);
is_reader = mpi_rank % 2;
@@ -2551,12 +2708,24 @@ chunk_align_bug_1(void)
hid_t file_id, dset_id, fapl_id, dcpl_id, space_id;
hsize_t dims = CHUNK_SIZE * NCHUNKS, cdims = CHUNK_SIZE;
h5_stat_size_t file_size;
- hsize_t align;
+ hsize_t align = 1;
+ bool vol_is_native;
herr_t ret;
const char *filename;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
filename = (const char *)GetTestParameters();
/* Create file without alignment */
@@ -2565,18 +2734,23 @@ chunk_align_bug_1(void)
file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
VRFY((file_id >= 0), "H5Fcreate succeeded");
+ /* Check if native VOL is being used */
+ VRFY((h5_using_native_vol(H5P_DEFAULT, file_id, &vol_is_native) >= 0), "h5_using_native_vol");
+
/* Close file */
ret = H5Fclose(file_id);
VRFY((ret >= 0), "H5Fclose succeeded");
- /* Get file size */
- file_size = h5_get_file_size(filename, fapl_id);
- VRFY((file_size >= 0), "h5_get_file_size succeeded");
+ if (vol_is_native) {
+ /* Get file size */
+ file_size = h5_get_file_size(filename, fapl_id);
+ VRFY((file_size >= 0), "h5_get_file_size succeeded");
- /* Calculate alignment value, set to allow a chunk to squeak in between the
- * original EOF and the aligned location of the aggregator. Add some space
- * for the dataset metadata */
- align = (hsize_t)file_size + CHUNK_SIZE + EXTRA_ALIGN;
+ /* Calculate alignment value, set to allow a chunk to squeak in between the
+ * original EOF and the aligned location of the aggregator. Add some space
+ * for the dataset metadata */
+ align = (hsize_t)file_size + CHUNK_SIZE + EXTRA_ALIGN;
+ }
/* Set aggregator size and alignment, disable metadata aggregator */
assert(AGGR_SIZE > CHUNK_SIZE);
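In chunk_align_bug_1() the alignment is now derived from the on-disk file size only when the native VOL is in use; otherwise align keeps its new default of 1, which requests no alignment. A short sketch of how such a value is applied to a file access property list (fapl_id, align, and ret match the test's variables; the threshold of 1 is illustrative):

    /* Ask for every allocation of at least 1 byte to be placed on a multiple of
     * 'align' bytes; with align == 1 this leaves allocation addresses unchanged. */
    ret = H5Pset_alignment(fapl_id, 1, align);
    VRFY((ret >= 0), "H5Pset_alignment succeeded");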
diff --git a/testpar/t_prop.c b/testpar/t_prop.c
index de36abf..23710d7 100644
--- a/testpar/t_prop.c
+++ b/testpar/t_prop.c
@@ -52,6 +52,7 @@ test_encode_decode(hid_t orig_pl, int mpi_rank, int recv_proc)
void *rbuf;
MPI_Recv(&recv_size, 1, MPI_INT, 0, 123, MPI_COMM_WORLD, &status);
+ VRFY((recv_size >= 0), "MPI_Recv succeeded");
buf_size = (size_t)recv_size;
rbuf = (uint8_t *)malloc(buf_size);
MPI_Recv(rbuf, recv_size, MPI_BYTE, 0, 124, MPI_COMM_WORLD, &status);
diff --git a/testpar/t_pshutdown.c b/testpar/t_pshutdown.c
index b0b5da7..47c78d0 100644
--- a/testpar/t_pshutdown.c
+++ b/testpar/t_pshutdown.c
@@ -52,6 +52,25 @@ main(int argc, char **argv)
/* Set up file access property list with parallel I/O access */
fapl = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl >= 0), "H5Pcreate succeeded");
+
+ /* Get the capability flag of the VOL connector being used */
+ ret = H5Pget_vol_cap_flags(fapl, &vol_cap_flags_g);
+ VRFY((ret >= 0), "H5Pget_vol_cap_flags succeeded");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ MPI_Finalize();
+ return 0;
+ }
+
ret = H5Pset_fapl_mpio(fapl, comm, info);
VRFY((ret >= 0), "");
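t_pshutdown.c now queries the connector's capability flags from a file access property list before doing any work and exits cleanly when the needed bits are absent. A minimal sketch of that startup sequence inside main(), assuming MPI_Init has already been called and only the basic-file capability is required:

    hid_t    fapl;
    uint64_t cap_flags = H5VL_CAP_FLAG_NONE;

    fapl = H5Pcreate(H5P_FILE_ACCESS);
    VRFY((fapl >= 0), "H5Pcreate succeeded");

    /* Query the capability flags of the VOL connector this FAPL resolves to */
    VRFY((H5Pget_vol_cap_flags(fapl, &cap_flags) >= 0), "H5Pget_vol_cap_flags succeeded");

    if (!(cap_flags & H5VL_CAP_FLAG_FILE_BASIC)) {
        if (MAINPROCESS)
            puts("SKIPPED");
        H5Pclose(fapl);
        MPI_Finalize();
        return 0;
    }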
diff --git a/testpar/t_shapesame.c b/testpar/t_shapesame.c
index 0a3d3d0..4f48f93 100644
--- a/testpar/t_shapesame.c
+++ b/testpar/t_shapesame.c
@@ -24,6 +24,21 @@
#include "H5Spkg.h" /* Dataspaces */
#include "testphdf5.h"
+#ifndef PATH_MAX
+#define PATH_MAX 512
+#endif
+
+/* FILENAME and filenames must have the same number of names.
+ * Use PARATESTFILE in general and use a separated filename only if the file
+ * created in one test is accessed by a different test.
+ * filenames[0] is reserved as the file name for PARATESTFILE.
+ */
+#define NFILENAME 2
+#define PARATESTFILE filenames[0]
+const char *FILENAME[NFILENAME] = {"ShapeSameTest", NULL};
+char *filenames[NFILENAME];
+hid_t fapl; /* file access property list */
+
/* On Lustre (and perhaps other parallel file systems?), we have severe
* slow downs if two or more processes attempt to access the same file system
* block. To minimize this problem, we set alignment in the shape same tests
@@ -1685,7 +1700,8 @@ static void
contig_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const int chunk_edge_size,
const int small_rank, const int large_rank, const bool use_collective_io,
const hid_t dset_type, int express_test, int *skips_ptr, int max_skips,
- int64_t *total_tests_ptr, int64_t *tests_run_ptr, int64_t *tests_skipped_ptr)
+ int64_t *total_tests_ptr, int64_t *tests_run_ptr, int64_t *tests_skipped_ptr,
+ int mpi_rank)
{
#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__run_test()";
@@ -1751,6 +1767,10 @@ contig_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const i
/* int64_t tests_skipped = */ 0};
struct hs_dr_pio_test_vars_t *tv_ptr = &test_vars;
+ if (MAINPROCESS)
+ printf("\r - running test #%lld: small rank = %d, large rank = %d", (long long)(test_num + 1),
+ small_rank, large_rank);
+
hs_dr_pio_test__setup(test_num, edge_size, -1, chunk_edge_size, small_rank, large_rank, use_collective_io,
dset_type, express_test, tv_ptr);
@@ -1923,9 +1943,9 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
/* contiguous data set, independent I/O */
chunk_edge_size = 0;
- contig_hs_dr_pio_test__run_test(test_num, edge_size, chunk_edge_size, small_rank,
- large_rank, false, dset_type, express_test, &skips,
- max_skips, &total_tests, &tests_run, &tests_skipped);
+ contig_hs_dr_pio_test__run_test(
+ test_num, edge_size, chunk_edge_size, small_rank, large_rank, false, dset_type,
+ express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank);
test_num++;
break;
/* end of case IND_CONTIG */
@@ -1934,9 +1954,9 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
/* contiguous data set, collective I/O */
chunk_edge_size = 0;
- contig_hs_dr_pio_test__run_test(test_num, edge_size, chunk_edge_size, small_rank,
- large_rank, true, dset_type, express_test, &skips,
- max_skips, &total_tests, &tests_run, &tests_skipped);
+ contig_hs_dr_pio_test__run_test(
+ test_num, edge_size, chunk_edge_size, small_rank, large_rank, true, dset_type,
+ express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank);
test_num++;
break;
/* end of case COL_CONTIG */
@@ -1945,9 +1965,9 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
/* chunked data set, independent I/O */
chunk_edge_size = 5;
- contig_hs_dr_pio_test__run_test(test_num, edge_size, chunk_edge_size, small_rank,
- large_rank, false, dset_type, express_test, &skips,
- max_skips, &total_tests, &tests_run, &tests_skipped);
+ contig_hs_dr_pio_test__run_test(
+ test_num, edge_size, chunk_edge_size, small_rank, large_rank, false, dset_type,
+ express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank);
test_num++;
break;
/* end of case IND_CHUNKED */
@@ -1956,9 +1976,9 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
/* chunked data set, collective I/O */
chunk_edge_size = 5;
- contig_hs_dr_pio_test__run_test(test_num, edge_size, chunk_edge_size, small_rank,
- large_rank, true, dset_type, express_test, &skips,
- max_skips, &total_tests, &tests_run, &tests_skipped);
+ contig_hs_dr_pio_test__run_test(
+ test_num, edge_size, chunk_edge_size, small_rank, large_rank, true, dset_type,
+ express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank);
test_num++;
break;
/* end of case COL_CHUNKED */
@@ -1977,9 +1997,13 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
}
}
- if ((MAINPROCESS) && (tests_skipped > 0)) {
- fprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n",
- tests_skipped, total_tests);
+ if (MAINPROCESS) {
+ if (tests_skipped > 0) {
+ fprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n",
+ tests_skipped, total_tests);
+ }
+ else
+ printf("\n");
}
return;
@@ -3609,7 +3633,7 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const i
const int chunk_edge_size, const int small_rank, const int large_rank,
const bool use_collective_io, const hid_t dset_type, const int express_test,
int *skips_ptr, int max_skips, int64_t *total_tests_ptr,
- int64_t *tests_run_ptr, int64_t *tests_skipped_ptr)
+ int64_t *tests_run_ptr, int64_t *tests_skipped_ptr, int mpi_rank)
{
#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
@@ -3676,6 +3700,10 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const i
/* int64_t tests_skipped = */ 0};
struct hs_dr_pio_test_vars_t *tv_ptr = &test_vars;
+ if (MAINPROCESS)
+ printf("\r - running test #%lld: small rank = %d, large rank = %d", (long long)(test_num + 1),
+ small_rank, large_rank);
+
hs_dr_pio_test__setup(test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank,
use_collective_io, dset_type, express_test, tv_ptr);
@@ -3840,7 +3868,7 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
small_rank, large_rank, false, dset_type, express_test,
&skips, max_skips, &total_tests, &tests_run,
- &tests_skipped);
+ &tests_skipped, mpi_rank);
test_num++;
break;
/* end of case IND_CONTIG */
@@ -3848,9 +3876,10 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
case COL_CONTIG:
/* contiguous data set, collective I/O */
chunk_edge_size = 0;
- ckrbrd_hs_dr_pio_test__run_test(
- test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank, true,
- dset_type, express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped);
+ ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
+ small_rank, large_rank, true, dset_type, express_test,
+ &skips, max_skips, &total_tests, &tests_run,
+ &tests_skipped, mpi_rank);
test_num++;
break;
/* end of case COL_CONTIG */
@@ -3861,7 +3890,7 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
small_rank, large_rank, false, dset_type, express_test,
&skips, max_skips, &total_tests, &tests_run,
- &tests_skipped);
+ &tests_skipped, mpi_rank);
test_num++;
break;
/* end of case IND_CHUNKED */
@@ -3869,9 +3898,10 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
case COL_CHUNKED:
/* chunked data set, collective I/O */
chunk_edge_size = 5;
- ckrbrd_hs_dr_pio_test__run_test(
- test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank, true,
- dset_type, express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped);
+ ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
+ small_rank, large_rank, true, dset_type, express_test,
+ &skips, max_skips, &total_tests, &tests_run,
+ &tests_skipped, mpi_rank);
test_num++;
break;
/* end of case COL_CHUNKED */
@@ -3890,9 +3920,13 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
}
}
- if ((MAINPROCESS) && (tests_skipped > 0)) {
- fprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n",
- tests_skipped, total_tests);
+ if (MAINPROCESS) {
+ if (tests_skipped > 0) {
+ fprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n",
+ tests_skipped, total_tests);
+ }
+ else
+ printf("\n");
}
return;
@@ -3905,12 +3939,6 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
* Main driver of the Parallel HDF5 tests
*/
-#include "testphdf5.h"
-
-#ifndef PATH_MAX
-#define PATH_MAX 512
-#endif /* !PATH_MAX */
-
/* global variables */
int dim0;
int dim1;
@@ -3928,17 +3956,6 @@ void *old_client_data; /* previous error handler arg.*/
/* other option flags */
-/* FILENAME and filenames must have the same number of names.
- * Use PARATESTFILE in general and use a separated filename only if the file
- * created in one test is accessed by a different test.
- * filenames[0] is reserved as the file name for PARATESTFILE.
- */
-#define NFILENAME 2
-#define PARATESTFILE filenames[0]
-const char *FILENAME[NFILENAME] = {"ShapeSameTest", NULL};
-char *filenames[NFILENAME];
-hid_t fapl; /* file access property list */
-
#ifdef USE_PAUSE
/* pause the process for a moment to allow debugger to attach if desired. */
/* Will pause more if greenlight file is not present but will eventually */
@@ -4289,6 +4306,28 @@ main(int argc, char **argv)
H5open();
h5_show_hostname();
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+
+ /* Get the capability flag of the VOL connector being used */
+ if (H5Pget_vol_cap_flags(fapl, &vol_cap_flags_g) < 0) {
+ if (MAINPROCESS)
+ printf("Failed to get the capability flag of the VOL connector being used\n");
+
+ MPI_Finalize();
+ return -1;
+ }
+
+ /* Make sure the connector supports the API functions being tested. This test only
+     * uses a few API functions, such as H5Fcreate/close/delete, H5Dcreate/write/read/close, etc.
+ */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS)
+ printf("API functions for basic file and dataset aren't supported with this connector\n");
+
+ MPI_Finalize();
+ return 0;
+ }
+
memset(filenames, 0, sizeof(filenames));
for (int i = 0; i < NFILENAME; i++) {
if (NULL == (filenames[i] = malloc(PATH_MAX))) {
@@ -4316,7 +4355,6 @@ main(int argc, char **argv)
TestInfo(argv[0]);
/* setup file access property list */
- fapl = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
/* Parse command line arguments */
@@ -4343,6 +4381,8 @@ main(int argc, char **argv)
/* Clean up test files */
h5_clean_files(FILENAME, fapl);
+ H5Pclose(fapl);
+
nerrors += GetTestNumErrs();
/* Gather errors from all processes */
diff --git a/testpar/t_span_tree.c b/testpar/t_span_tree.c
index e4ff258..b381ef5 100644
--- a/testpar/t_span_tree.c
+++ b/testpar/t_span_tree.c
@@ -21,7 +21,7 @@
one in collective mode,
2) We will read two datasets with the same hyperslab selection settings,
1. independent read to read independent output,
- independent read to read collecive output,
+ independent read to read collective output,
Compare the result,
If the result is the same, then collective write succeeds.
2. collective read to read independent output,
@@ -54,6 +54,22 @@ static void coll_read_test(void);
void
coll_irregular_cont_write(void)
{
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file dataset, or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
coll_write_test(0);
}
@@ -73,6 +89,22 @@ coll_irregular_cont_write(void)
void
coll_irregular_cont_read(void)
{
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file dataset, or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
coll_read_test();
}
@@ -92,6 +124,22 @@ coll_irregular_cont_read(void)
void
coll_irregular_simple_chunk_write(void)
{
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file dataset, or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
coll_write_test(1);
}
@@ -111,6 +159,22 @@ coll_irregular_simple_chunk_write(void)
void
coll_irregular_simple_chunk_read(void)
{
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file dataset, or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
coll_read_test();
}
@@ -130,6 +194,22 @@ coll_irregular_simple_chunk_read(void)
void
coll_irregular_complex_chunk_write(void)
{
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file dataset, or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
coll_write_test(4);
}
@@ -149,6 +229,22 @@ coll_irregular_complex_chunk_write(void)
void
coll_irregular_complex_chunk_read(void)
{
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file dataset, or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
coll_read_test();
}
@@ -1775,6 +1871,10 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const bool use_col
ret = H5Dwrite(small_dataset, dset_type, mem_small_ds_sid, file_small_ds_sid, xfer_plist, small_ds_buf_0);
VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded");
+ /* sync with the other processes before reading data */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes");
+
/* read the small data set back to verify that it contains the
* expected data. Note that each process reads in the entire
* data set and verifies it.
@@ -2254,6 +2354,20 @@ lower_dim_size_comp_test(void)
/* const char *fcnName = "lower_dim_size_comp_test()"; */
int chunk_edge_size = 0;
int use_collective_io;
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned));
for (use_collective_io = 0; use_collective_io <= 1; use_collective_io++) {
@@ -2331,6 +2445,17 @@ link_chunk_collective_io_test(void)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
assert(mpi_size > 0);
/* get the file name */
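The barrier added in lower_dim_size_comp_test__run_test() ensures every rank finishes writing before any rank starts the full read-back verification. A minimal sketch of that write / sync / verify ordering, where dset, mem_sid, file_sid, dxpl, wbuf, and rbuf stand in for the test's handles and buffers:

    herr_t ret;
    int    mrc;

    /* each rank writes its own selection */
    ret = H5Dwrite(dset, H5T_NATIVE_UINT32, mem_sid, file_sid, dxpl, wbuf);
    VRFY((ret >= 0), "H5Dwrite succeeded");

    /* sync so no rank reads before all writes have landed */
    mrc = MPI_Barrier(MPI_COMM_WORLD);
    VRFY((mrc == MPI_SUCCESS), "Sync after writes");

    /* every rank reads the whole dataset back and verifies it */
    ret = H5Dread(dset, H5T_NATIVE_UINT32, H5S_ALL, H5S_ALL, dxpl, rbuf);
    VRFY((ret >= 0), "H5Dread succeeded");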
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index e094ad6..57ef5c9 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -234,7 +234,7 @@ parse_options(int argc, char **argv)
nerrors++;
return (1);
}
- if (mpi_rank == 0) {
+ if (MAINPROCESS) {
printf("Test filenames are:\n");
for (i = 0; i < n; i++)
printf(" %s\n", filenames[i]);
@@ -346,6 +346,15 @@ main(int argc, char **argv)
}
}
+ /* Set up file access property list with parallel I/O access */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl >= 0), "H5Pcreate succeeded");
+
+ vol_cap_flags_g = H5VL_CAP_FLAG_NONE;
+
+ /* Get the capability flag of the VOL connector being used */
+ VRFY((H5Pget_vol_cap_flags(fapl, &vol_cap_flags_g) >= 0), "H5Pget_vol_cap_flags succeeded");
+
/* Initialize testing framework */
TestInit(argv[0], usage, parse_options);
@@ -534,7 +543,6 @@ main(int argc, char **argv)
TestInfo(argv[0]);
/* setup file access property list */
- fapl = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
/* Parse command line arguments */
@@ -561,6 +569,8 @@ main(int argc, char **argv)
/* Clean up test files */
h5_clean_files(FILENAME, fapl);
+ H5Pclose(fapl);
+
nerrors += GetTestNumErrs();
/* Gather errors from all processes */