author     jhendersonHDF <jhenderson@hdfgroup.org>  2023-05-02 19:52:39 (GMT)
committer  GitHub <noreply@github.com>              2023-05-02 19:52:39 (GMT)
commit     f8a1b3ceec485829ccdd3034ef2be68029f1a66e (patch)
tree       5563ca3059b8cbda43f7f1e0ffedf7985f71a4bc /testpar
parent     41fd8e66a9f837a1adf36a0253e29440d82ff522 (diff)
Add initial version of HDF5 API tests (#2877)
Diffstat (limited to 'testpar')
-rw-r--r--  testpar/API/CMakeLists.txt                    279
-rw-r--r--  testpar/API/H5_api_async_test_parallel.c     3668
-rw-r--r--  testpar/API/H5_api_async_test_parallel.h       29
-rw-r--r--  testpar/API/H5_api_attribute_test_parallel.c   47
-rw-r--r--  testpar/API/H5_api_attribute_test_parallel.h   20
-rw-r--r--  testpar/API/H5_api_dataset_test_parallel.c   8149
-rw-r--r--  testpar/API/H5_api_dataset_test_parallel.h     20
-rw-r--r--  testpar/API/H5_api_datatype_test_parallel.c    47
-rw-r--r--  testpar/API/H5_api_datatype_test_parallel.h    20
-rw-r--r--  testpar/API/H5_api_file_test_parallel.c       367
-rw-r--r--  testpar/API/H5_api_file_test_parallel.h        20
-rw-r--r--  testpar/API/H5_api_group_test_parallel.c       47
-rw-r--r--  testpar/API/H5_api_group_test_parallel.h       20
-rw-r--r--  testpar/API/H5_api_link_test_parallel.c        47
-rw-r--r--  testpar/API/H5_api_link_test_parallel.h        20
-rw-r--r--  testpar/API/H5_api_misc_test_parallel.c        47
-rw-r--r--  testpar/API/H5_api_misc_test_parallel.h        20
-rw-r--r--  testpar/API/H5_api_object_test_parallel.c      47
-rw-r--r--  testpar/API/H5_api_object_test_parallel.h      20
-rw-r--r--  testpar/API/H5_api_test_parallel.c            338
-rw-r--r--  testpar/API/H5_api_test_parallel.h            188
-rw-r--r--  testpar/API/t_bigio.c                        1942
-rw-r--r--  testpar/API/t_chunk_alloc.c                   512
-rw-r--r--  testpar/API/t_coll_chunk.c                   1417
-rw-r--r--  testpar/API/t_coll_md_read.c                  654
-rw-r--r--  testpar/API/t_dset.c                         4335
-rw-r--r--  testpar/API/t_file.c                         1032
-rw-r--r--  testpar/API/t_file_image.c                    371
-rw-r--r--  testpar/API/t_filter_read.c                   564
-rw-r--r--  testpar/API/t_mdset.c                        2814
-rw-r--r--  testpar/API/t_ph5basic.c                      192
-rw-r--r--  testpar/API/t_prop.c                          646
-rw-r--r--  testpar/API/t_pshutdown.c                     150
-rw-r--r--  testpar/API/t_shapesame.c                    4516
-rw-r--r--  testpar/API/t_span_tree.c                    2622
-rw-r--r--  testpar/API/testphdf5.c                      1007
-rw-r--r--  testpar/API/testphdf5.h                       343
-rw-r--r--  testpar/CMakeLists.txt                         20
38 files changed, 36597 insertions, 0 deletions
diff --git a/testpar/API/CMakeLists.txt b/testpar/API/CMakeLists.txt
new file mode 100644
index 0000000..5eb69c4
--- /dev/null
+++ b/testpar/API/CMakeLists.txt
@@ -0,0 +1,279 @@
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://www.hdfgroup.org/licenses.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+
+#------------------------------------------------------------------------------
+# Set module path
+#------------------------------------------------------------------------------
+set(HDF5_TEST_API_CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake")
+set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${HDF5_TEST_API_CMAKE_MODULE_PATH})
+
+#------------------------------------------------------------------------------
+# Setup for API tests
+#------------------------------------------------------------------------------
+
+# Ported HDF5 tests
+set (HDF5_API_PAR_TESTS_EXTRA
+ t_bigio
+ t_pshutdown
+ t_shapesame
+ testphdf5
+)
+
+# List of files generated by the HDF5 API tests which
+# should be cleaned up in case the test failed to remove
+# them
+set (HDF5_API_PAR_TESTS_FILES
+ H5_api_test_parallel.h5
+ H5_api_async_test_parallel.h5
+ H5_api_async_test_parallel_0.h5
+ H5_api_async_test_parallel_1.h5
+ H5_api_async_test_parallel_2.h5
+ H5_api_async_test_parallel_3.h5
+ H5_api_async_test_parallel_4.h5
+ test_file_parallel.h5
+ split_comm_file.h5
+)
+
+#-----------------------------------------------------------------------------
+# Build the main API test executable
+#-----------------------------------------------------------------------------
+foreach (api_test ${HDF5_API_TESTS})
+ set (HDF5_API_PAR_TEST_SRCS
+ ${HDF5_API_PAR_TEST_SRCS}
+ ${CMAKE_CURRENT_SOURCE_DIR}/H5_api_${api_test}_test_parallel.c
+ )
+endforeach ()
+
+set (HDF5_API_PAR_TEST_SRCS
+ ${HDF5_API_PAR_TEST_SRCS}
+ ${CMAKE_CURRENT_SOURCE_DIR}/H5_api_test_parallel.c
+ ${HDF5_TEST_API_SRC_DIR}/H5_api_test_util.c
+)
+
+add_executable (h5_api_test_parallel ${HDF5_API_PAR_TEST_SRCS})
+target_include_directories (
+ h5_api_test_parallel
+ PRIVATE
+ "${HDF5_SRC_INCLUDE_DIRS}"
+ "${HDF5_TEST_PAR_DIR}"
+ "${HDF5_TEST_API_SRC_DIR}"
+ "${HDF5_TEST_API_PAR_SRC_DIR}"
+ "${HDF5_SRC_BINARY_DIR}"
+ "${HDF5_TEST_BINARY_DIR}"
+ "${HDF5_TEST_API_SRC_DIR}"
+ "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>"
+)
+target_compile_options (
+ h5_api_test_parallel
+ PRIVATE
+ "${HDF5_CMAKE_C_FLAGS}"
+)
+target_compile_definitions (
+ h5_api_test_parallel
+ PRIVATE
+ $<$<CONFIG:Developer>:${HDF5_DEVELOPER_DEFS}>
+)
+if (NOT BUILD_SHARED_LIBS)
+ TARGET_C_PROPERTIES (h5_api_test_parallel STATIC)
+ target_link_libraries (
+ h5_api_test_parallel
+ PRIVATE
+ ${HDF5_TEST_LIB_TARGET}
+ ${HDF5_LIB_TARGET}
+ "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:MPI::MPI_C>"
+ )
+else ()
+ TARGET_C_PROPERTIES (h5_api_test_parallel SHARED)
+ target_link_libraries (
+ h5_api_test_parallel
+ PRIVATE
+ ${HDF5_TEST_LIBSH_TARGET}
+ ${HDF5_LIBSH_TARGET}
+ "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:MPI::MPI_C>"
+ )
+endif ()
+set_target_properties (
+ h5_api_test_parallel
+ PROPERTIES
+ FOLDER test/par/API
+)
+# Add Target to clang-format
+if (HDF5_ENABLE_FORMATTERS)
+ clang_format (HDF5_TEST_h5_api_test_parallel_FORMAT h5_api_test_parallel)
+endif ()
+
+#-----------------------------------------------------------------------------
+# Build the ported HDF5 test executables
+#-----------------------------------------------------------------------------
+foreach (api_test_extra ${HDF5_API_PAR_TESTS_EXTRA})
+ unset (HDF5_API_PAR_TEST_EXTRA_SRCS)
+
+ set (HDF5_API_PAR_TEST_EXTRA_SRCS
+ ${HDF5_API_PAR_TEST_EXTRA_SRCS}
+ ${CMAKE_CURRENT_SOURCE_DIR}/${api_test_extra}.c
+ )
+
+ if (${api_test_extra} STREQUAL "testphdf5")
+ set (HDF5_API_PAR_TEST_EXTRA_SRCS
+ ${HDF5_API_PAR_TEST_EXTRA_SRCS}
+ ${CMAKE_CURRENT_SOURCE_DIR}/t_ph5basic.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/t_file.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/t_dset.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/t_mdset.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/t_coll_chunk.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/t_span_tree.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/t_prop.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/t_file_image.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/t_coll_md_read.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/t_chunk_alloc.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/t_filter_read.c
+ )
+ endif ()
+
+ add_executable (h5_api_test_parallel_${api_test_extra} ${HDF5_API_PAR_TEST_EXTRA_SRCS})
+ target_include_directories (
+ h5_api_test_parallel_${api_test_extra}
+ PRIVATE
+ "${HDF5_SRC_INCLUDE_DIRS}"
+ "${HDF5_TEST_PAR_DIR}"
+ "${HDF5_TEST_API_SRC_DIR}"
+ "${HDF5_TEST_API_PAR_SRC_DIR}"
+ "${HDF5_SRC_BINARY_DIR}"
+ "${HDF5_TEST_BINARY_DIR}"
+ "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>"
+ )
+ target_compile_options (
+ h5_api_test_parallel_${api_test_extra}
+ PRIVATE
+ "${HDF5_CMAKE_C_FLAGS}"
+ )
+ target_compile_definitions (
+ h5_api_test_parallel_${api_test_extra}
+ PRIVATE
+ $<$<CONFIG:Developer>:${HDF5_DEVELOPER_DEFS}>
+ )
+ if (NOT BUILD_SHARED_LIBS)
+ TARGET_C_PROPERTIES (h5_api_test_parallel_${api_test_extra} STATIC)
+ target_link_libraries (
+ h5_api_test_parallel_${api_test_extra}
+ PRIVATE
+ ${HDF5_TEST_LIB_TARGET}
+ ${HDF5_LIB_TARGET}
+ "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:MPI::MPI_C>"
+ )
+ else ()
+ TARGET_C_PROPERTIES (h5_api_test_parallel_${api_test_extra} SHARED)
+ target_link_libraries (
+ h5_api_test_parallel_${api_test_extra}
+ PRIVATE
+ ${HDF5_TEST_LIBSH_TARGET}
+ ${HDF5_LIBSH_TARGET}
+ "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:MPI::MPI_C>"
+ )
+ endif ()
+ set_target_properties (
+ h5_api_test_parallel_${api_test_extra}
+ PROPERTIES
+ FOLDER test/par/API
+ )
+ # Add Target to clang-format
+ if (HDF5_ENABLE_FORMATTERS)
+ clang_format (HDF5_TEST_h5_api_test_parallel_${api_test_extra}_FORMAT h5_api_test_parallel_${api_test_extra})
+ endif ()
+endforeach ()
+
+#-----------------------------------------------------------------------------
+# Add tests if HDF5 parallel testing is enabled
+#-----------------------------------------------------------------------------
+if (HDF5_TEST_PARALLEL)
+ if (HDF5_TEST_API_ENABLE_DRIVER)
+ if ("${HDF5_TEST_API_SERVER}" STREQUAL "")
+ message (FATAL_ERROR "Please set HDF5_TEST_API_SERVER to point to a server executable for the test driver program.")
+ endif ()
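+
+        # For example, a configuration that uses the test driver might pass
+        # (server path is a placeholder):
+        #   -DHDF5_TEST_API_ENABLE_DRIVER=ON -DHDF5_TEST_API_SERVER="/path/to/server"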
+
+ # Driver options
+ if (HDF5_TEST_API_SERVER_ALLOW_ERRORS)
+ set (HDF5_TEST_API_DRIVER_EXTRA_FLAGS --allow-server-errors)
+ endif ()
+ if (HDF5_TEST_API_CLIENT_HELPER)
+ set (HDF5_TEST_API_DRIVER_EXTRA_FLAGS ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS}
+ --client-helper ${HDF5_TEST_API_CLIENT_HELPER}
+ )
+ endif ()
+ if (HDF5_TEST_API_CLIENT_INIT)
+ set (HDF5_TEST_API_DRIVER_EXTRA_FLAGS ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS}
+ --client-init ${HDF5_TEST_API_CLIENT_INIT}
+ )
+ endif ()
+
+ set(last_api_test "")
+ foreach (api_test ${HDF5_API_TESTS})
+ add_test (
+ NAME "h5_api_test_parallel_${api_test}"
+ COMMAND $<TARGET_FILE:h5_api_test_driver>
+ --server ${HDF5_TEST_API_SERVER}
+ --client $<TARGET_FILE:h5_api_test_parallel> "${api_test}"
+ --serial
+ ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS}
+ )
+
+ set_tests_properties("h5_api_test_parallel_${api_test}" PROPERTIES DEPENDS "${last_api_test}")
+
+ set(last_api_test "h5_api_test_parallel_${api_test}")
+ endforeach ()
+
+ foreach (hdf5_test ${HDF5_API_PAR_TESTS_EXTRA})
+ add_test (
+ NAME "h5_api_test_parallel_${hdf5_test}"
+ COMMAND $<TARGET_FILE:h5_api_test_driver>
+ --server ${HDF5_TEST_API_SERVER}
+ --client $<TARGET_FILE:h5_api_test_parallel_${hdf5_test}>
+ --serial
+ ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS}
+ )
+ endforeach ()
+
+ # Hook external tests to same test suite
+ foreach (ext_api_test ${HDF5_API_EXT_PARALLEL_TESTS})
+ add_test (
+ NAME "h5_api_ext_test_parallel_${ext_api_test}"
+ COMMAND $<TARGET_FILE:h5_api_test_driver>
+ --server ${HDF5_TEST_API_SERVER}
+ --client $<TARGET_FILE:${ext_api_test}>
+ --serial
+ ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS}
+ )
+ endforeach ()
+ else ()
+ set(last_api_test "")
+ foreach (api_test ${HDF5_API_TESTS})
+ add_test (
+ NAME "h5_api_test_parallel_${api_test}"
+ COMMAND ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS}
+ ${MPIEXEC_PREFLAGS} $<TARGET_FILE:h5_api_test_parallel> "${api_test}"
+ ${MPIEXEC_POSTFLAGS}
+ )
+
+ set_tests_properties("h5_api_test_parallel_${api_test}" PROPERTIES DEPENDS "${last_api_test}")
+
+ set(last_api_test "h5_api_test_parallel_${api_test}")
+ endforeach ()
+
+ foreach (hdf5_test ${HDF5_API_PAR_TESTS_EXTRA})
+ add_test (
+ NAME "h5_api_test_parallel_${hdf5_test}"
+ COMMAND ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS}
+ ${MPIEXEC_PREFLAGS} $<TARGET_FILE:h5_api_test_parallel_${hdf5_test}>
+ ${MPIEXEC_POSTFLAGS}
+ )
+ endforeach ()
+ endif ()
+endif ()
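The HDF5_API_EXT_PARALLEL_TESTS loop above lets external API tests join the same driver-based suite. A minimal sketch of how an external VOL connector built in-tree might register one, assuming a hypothetical target named my_vol_api_test_parallel (all names below are placeholders):

    # Hypothetical external-connector CMake code; the variable must be set
    # before this directory is processed so the foreach above sees it.
    add_executable (my_vol_api_test_parallel my_vol_api_test_parallel.c)
    target_link_libraries (my_vol_api_test_parallel PRIVATE ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET} MPI::MPI_C)
    list (APPEND HDF5_API_EXT_PARALLEL_TESTS my_vol_api_test_parallel)
    # The loop then adds a test named "h5_api_ext_test_parallel_my_vol_api_test_parallel".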
diff --git a/testpar/API/H5_api_async_test_parallel.c b/testpar/API/H5_api_async_test_parallel.c
new file mode 100644
index 0000000..dcb5e8d
--- /dev/null
+++ b/testpar/API/H5_api_async_test_parallel.c
@@ -0,0 +1,3668 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_async_test_parallel.h"
+
+#ifdef H5ESpublic_H
+
+static int test_one_dataset_io(void);
+static int test_multi_dataset_io(void);
+static int test_multi_file_dataset_io(void);
+static int test_multi_file_grp_dset_io(void);
+static int test_set_extent(void);
+static int test_attribute_exists(void);
+static int test_attribute_io(void);
+static int test_attribute_io_tconv(void);
+static int test_attribute_io_compound(void);
+static int test_group(void);
+static int test_link(void);
+static int test_ocopy_orefresh(void);
+static int test_file_reopen(void);
+
+/*
+ * The array of parallel async tests to be performed.
+ */
+static int (*par_async_tests[])(void) = {
+ test_one_dataset_io,
+ test_multi_dataset_io,
+ test_multi_file_dataset_io,
+ test_multi_file_grp_dset_io,
+ test_set_extent,
+ test_attribute_exists,
+ test_attribute_io,
+ test_attribute_io_tconv,
+ test_attribute_io_compound,
+ test_group,
+ test_link,
+ test_ocopy_orefresh,
+ test_file_reopen,
+};
+
+hbool_t coll_metadata_read = TRUE;
+
+/* Highest "printf" file created (starting at 0) */
+int max_printf_file = -1;
+
+/*
+ * Create file and dataset. Each rank writes to a portion
+ * of the dataset.
+ */
+#define ONE_DATASET_IO_TEST_SPACE_RANK 2
+static int
+test_one_dataset_io(void)
+{
+ hsize_t *dims = NULL;
+ hsize_t start[ONE_DATASET_IO_TEST_SPACE_RANK];
+ hsize_t stride[ONE_DATASET_IO_TEST_SPACE_RANK];
+ hsize_t count[ONE_DATASET_IO_TEST_SPACE_RANK];
+ hsize_t block[ONE_DATASET_IO_TEST_SPACE_RANK];
+ hbool_t op_failed = false;
+ hbool_t is_native_vol = false;
+ size_t i, data_size, num_in_progress;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ int *write_buf = NULL;
+ int *read_buf = NULL;
+
+ TESTING_MULTIPART("single dataset I/O")
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, dataset, or flush aren't supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if (generate_random_parallel_dimensions(ONE_DATASET_IO_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if ((space_id = H5Screate_simple(ONE_DATASET_IO_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Create file asynchronously */
+ if ((file_id = H5Fcreate_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Find out if the native connector is used */
+ if (H5VLobject_is_native(file_id, &is_native_vol) < 0)
+ TEST_ERROR;
+
+ /* Create the dataset asynchronously */
+ if ((dset_id = H5Dcreate_async(file_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Calculate size of data buffers - first dimension is skipped in calculation */
+ for (i = 1, data_size = 1; i < ONE_DATASET_IO_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= sizeof(int);
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ TEST_ERROR;
+ }
+
+ if (NULL == (read_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ TEST_ERROR;
+ }
+
+ /* Select this rank's portion of the dataspace */
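+    /* For example, on rank 2 of a 2-D dataspace this picks the single "row"
+     * at index 2: start = {2, 0}, block = {1, dims[1]}, stride = count = {1, 1},
+     * so every rank writes to a disjoint slab of the dataset. */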
+ for (i = 0; i < ONE_DATASET_IO_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ if (H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ goto error;
+ }
+
+ /* Setup memory space for write_buf */
+ {
+ hsize_t mdims[] = {data_size / sizeof(int)};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
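+    /* The 1-D memory dataspace holds data_size / sizeof(int) elements, which
+     * matches the number of elements in this rank's file-space selection, as
+     * H5Dwrite()/H5Dread() require. */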
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(single_dset_eswait)
+ {
+ TESTING_2("synchronization using H5ESwait()");
+
+ /* Initialize write_buf */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ ((int *)write_buf)[i] = mpi_rank;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, write_buf, es_id) <
+ 0)
+ PART_TEST_ERROR(single_dset_eswait);
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(single_dset_eswait);
+ if (op_failed)
+ PART_TEST_ERROR(single_dset_eswait);
+
+ /* Read the dataset asynchronously */
+ if (H5Dread_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, read_buf, es_id) < 0)
+ PART_TEST_ERROR(single_dset_eswait);
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(single_dset_eswait);
+ if (op_failed)
+ PART_TEST_ERROR(single_dset_eswait);
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(single_dset_eswait);
+ } /* end if */
+
+ PASSED();
+ }
+ PART_END(single_dset_eswait);
+
+ PART_BEGIN(single_dset_dclose)
+ {
+ TESTING_2("synchronization using H5Dclose()");
+
+ /* Initialize write_buf */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ ((int *)write_buf)[i] = (int)i;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, write_buf, es_id) <
+ 0)
+ PART_TEST_ERROR(single_dset_dclose);
+
+ /* Close the dataset synchronously */
+ if (H5Dclose(dset_id) < 0)
+ PART_TEST_ERROR(single_dset_dclose);
+
+ /* Re-open the dataset asynchronously */
+ if ((dset_id = H5Dopen_async(file_id, "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(single_dset_dclose);
+
+ /* Read the dataset asynchronously */
+ if (H5Dread_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, read_buf, es_id) < 0)
+ PART_TEST_ERROR(single_dset_dclose);
+
+ /* Close the dataset synchronously */
+ if (H5Dclose(dset_id) < 0)
+ PART_TEST_ERROR(single_dset_dclose);
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(single_dset_dclose);
+ } /* end if */
+
+ /* Re-open the dataset asynchronously */
+ if ((dset_id = H5Dopen_async(file_id, "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(single_dset_dclose);
+
+ PASSED();
+ }
+ PART_END(single_dset_dclose);
+
+ PART_BEGIN(single_dset_dflush)
+ {
+ TESTING_2("synchronization using H5Oflush_async()");
+
+ /* Initialize write_buf */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ ((int *)write_buf)[i] = 10 * (int)i;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, write_buf, es_id) <
+ 0)
+ PART_TEST_ERROR(single_dset_dflush);
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. Skip this
+ * function because it isn't supported for the native vol in parallel. */
+ if (!is_native_vol && H5Oflush_async(dset_id, es_id) < 0)
+ PART_TEST_ERROR(single_dset_dflush);
+
+ /* Read the dataset asynchronously */
+ if (H5Dread_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, read_buf, es_id) < 0)
+ PART_TEST_ERROR(single_dset_dflush);
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(single_dset_dflush);
+ if (op_failed)
+ PART_TEST_ERROR(single_dset_dflush);
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(single_dset_dflush);
+ } /* end if */
+
+ PASSED();
+ }
+ PART_END(single_dset_dflush);
+
+ PART_BEGIN(single_dset_fclose)
+ {
+ TESTING_2("synchronization using H5Fclose()");
+
+ /* Initialize write_buf */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ ((int *)write_buf)[i] = (int)i + 5;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, write_buf, es_id) <
+ 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Close the file synchronously */
+ if (H5Fclose(file_id) < 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Reopen the file asynchronously. */
+ if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDONLY, fapl_id, es_id)) < 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Re-open the dataset asynchronously */
+ if ((dset_id = H5Dopen_async(file_id, "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Read the dataset asynchronously */
+ if (H5Dread_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, read_buf, es_id) < 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Close the file synchronously */
+ if (H5Fclose(file_id) < 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(single_dset_fclose);
+ } /* end if */
+
+ PASSED();
+ }
+ PART_END(single_dset_fclose);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(space_id);
+ H5Sclose(mspace_id);
+ H5Dclose(dset_id);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+#undef ONE_DATASET_IO_TEST_SPACE_RANK
+
+/*
+ * Create file and multiple datasets. Each rank writes to a
+ * portion of each dataset and reads back their portion of
+ * each dataset.
+ */
+#define MULTI_DATASET_IO_TEST_SPACE_RANK 2
+#define MULTI_DATASET_IO_TEST_NDSETS 5
+static int
+test_multi_dataset_io(void)
+{
+ hsize_t *dims = NULL;
+ hsize_t start[MULTI_DATASET_IO_TEST_SPACE_RANK];
+ hsize_t stride[MULTI_DATASET_IO_TEST_SPACE_RANK];
+ hsize_t count[MULTI_DATASET_IO_TEST_SPACE_RANK];
+ hsize_t block[MULTI_DATASET_IO_TEST_SPACE_RANK];
+ hbool_t op_failed;
+ size_t i, j, data_size, num_in_progress;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id[MULTI_DATASET_IO_TEST_NDSETS] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID,
+ H5I_INVALID_HID, H5I_INVALID_HID};
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ char dset_name[32];
+ int *write_buf = NULL;
+ int *read_buf = NULL;
+
+ TESTING_MULTIPART("multi dataset I/O")
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, dataset, or flush aren't supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if (generate_random_parallel_dimensions(MULTI_DATASET_IO_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(MULTI_DATASET_IO_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Create file asynchronously */
+ if ((file_id = H5Fcreate_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Calculate size of data buffers - first dimension is skipped in calculation */
+ for (i = 1, data_size = 1; i < MULTI_DATASET_IO_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= sizeof(int);
+ data_size *= MULTI_DATASET_IO_TEST_NDSETS;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ TEST_ERROR;
+ }
+
+ if (NULL == (read_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ TEST_ERROR;
+ }
+
+ /* Select this rank's portion of the dataspace */
+ for (i = 0; i < MULTI_DATASET_IO_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ if (H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ goto error;
+ }
+
+ /* Setup memory space for write_buf */
+ {
+ hsize_t mdims[] = {data_size / MULTI_DATASET_IO_TEST_NDSETS / sizeof(int)};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(multi_dset_open)
+ {
+ size_t buf_start_idx;
+
+ TESTING_2("keeping datasets open");
+
+ /* Loop over datasets */
+ for (i = 0; i < MULTI_DATASET_IO_TEST_NDSETS; i++) {
+ size_t buf_end_idx;
+
+ /* Set dataset name */
+ sprintf(dset_name, "dset%d", (int)i);
+
+ /* Create the dataset asynchronously */
+ if ((dset_id[i] = H5Dcreate_async(file_id, dset_name, H5T_NATIVE_INT, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_dset_open);
+
+ /* Initialize write_buf. Must use a new slice of write_buf for
+ * each dset since we can't overwrite the buffers until I/O is done. */
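+                /* For example, if each dataset's slab holds 100 ints, dataset i
+                 * uses write_buf indices [i * 100, (i + 1) * 100). */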
+ buf_start_idx = i * (data_size / MULTI_DATASET_IO_TEST_NDSETS / sizeof(int));
+ buf_end_idx = buf_start_idx + (data_size / MULTI_DATASET_IO_TEST_NDSETS / sizeof(int));
+ for (j = buf_start_idx; j < buf_end_idx; j++)
+ ((int *)write_buf)[j] = mpi_rank;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id[i], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &write_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_dset_open);
+ } /* end for */
+
+ /* Flush the file asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0)
+ PART_TEST_ERROR(multi_dset_open);
+
+ /* Loop over datasets */
+ for (i = 0; i < MULTI_DATASET_IO_TEST_NDSETS; i++) {
+ buf_start_idx = i * (data_size / MULTI_DATASET_IO_TEST_NDSETS / sizeof(int));
+
+ /* Read the dataset asynchronously */
+ if (H5Dread_async(dset_id[i], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &read_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_dset_open);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_dset_open);
+ if (op_failed)
+ PART_TEST_ERROR(multi_dset_open);
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(multi_dset_open);
+ } /* end if */
+
+ /* Close the datasets */
+ for (i = 0; i < MULTI_DATASET_IO_TEST_NDSETS; i++)
+ if (H5Dclose(dset_id[i]) < 0)
+ PART_TEST_ERROR(multi_dset_open);
+
+ PASSED();
+ }
+ PART_END(multi_dset_open);
+
+ PART_BEGIN(multi_dset_close)
+ {
+ size_t buf_start_idx;
+
+ TESTING_2("closing datasets between I/O");
+
+ /* Loop over datasets */
+ for (i = 0; i < MULTI_DATASET_IO_TEST_NDSETS; i++) {
+ size_t buf_end_idx;
+
+ /* Set dataset name */
+ sprintf(dset_name, "dset%d", (int)i);
+
+ /* Open the dataset asynchronously */
+ if ((dset_id[0] = H5Dopen_async(file_id, dset_name, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+
+ /* Initialize write_buf. */
+ buf_start_idx = i * (data_size / MULTI_DATASET_IO_TEST_NDSETS / sizeof(int));
+ buf_end_idx = buf_start_idx + (data_size / MULTI_DATASET_IO_TEST_NDSETS / sizeof(int));
+ for (j = buf_start_idx; j < buf_end_idx; j++)
+ ((int *)write_buf)[j] = mpi_rank * 10;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id[0], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &write_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+ } /* end for */
+
+ /* Flush the file asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+
+ /* Loop over datasets */
+ for (i = 0; i < MULTI_DATASET_IO_TEST_NDSETS; i++) {
+ /* Set dataset name */
+ sprintf(dset_name, "dset%d", (int)i);
+
+ /* Open the dataset asynchronously */
+ if ((dset_id[0] = H5Dopen_async(file_id, dset_name, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+
+ /* Read the dataset asynchronously */
+ buf_start_idx = i * (data_size / MULTI_DATASET_IO_TEST_NDSETS / sizeof(int));
+ if (H5Dread_async(dset_id[0], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &read_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+ if (op_failed)
+ PART_TEST_ERROR(multi_dset_close);
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(multi_dset_close);
+ } /* end if */
+
+ PASSED();
+ }
+ PART_END(multi_dset_close);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(space_id);
+ H5Sclose(mspace_id);
+ for (i = 0; i < MULTI_DATASET_IO_TEST_NDSETS; i++)
+ H5Dclose(dset_id[i]);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+#undef MULTI_DATASET_IO_TEST_SPACE_RANK
+#undef MULTI_DATASET_IO_TEST_NDSETS
+
+/*
+ * Create multiple files, each with a single dataset. Each rank writes
+ * to a portion of each dataset and reads from a portion of each dataset.
+ */
+#define MULTI_FILE_DATASET_IO_TEST_SPACE_RANK 2
+#define MULTI_FILE_DATASET_IO_TEST_NFILES 5
+static int
+test_multi_file_dataset_io(void)
+{
+ hsize_t *dims = NULL;
+ hsize_t start[MULTI_FILE_DATASET_IO_TEST_SPACE_RANK];
+ hsize_t stride[MULTI_FILE_DATASET_IO_TEST_SPACE_RANK];
+ hsize_t count[MULTI_FILE_DATASET_IO_TEST_SPACE_RANK];
+ hsize_t block[MULTI_FILE_DATASET_IO_TEST_SPACE_RANK];
+ hbool_t op_failed = false;
+ hbool_t is_native_vol = false;
+ size_t i, j, data_size, num_in_progress;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t file_id[MULTI_FILE_DATASET_IO_TEST_NFILES] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID,
+ H5I_INVALID_HID, H5I_INVALID_HID};
+ hid_t dset_id[MULTI_FILE_DATASET_IO_TEST_NFILES] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID,
+ H5I_INVALID_HID, H5I_INVALID_HID};
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ char file_name[32];
+ int *write_buf = NULL;
+ int *read_buf = NULL;
+
+ TESTING_MULTIPART("multi file dataset I/O")
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, dataset, or flush aren't supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if (generate_random_parallel_dimensions(MULTI_FILE_DATASET_IO_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(MULTI_FILE_DATASET_IO_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Calculate size of data buffers - first dimension is skipped in calculation */
+ for (i = 1, data_size = 1; i < MULTI_FILE_DATASET_IO_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= sizeof(int);
+ data_size *= MULTI_FILE_DATASET_IO_TEST_NFILES;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ TEST_ERROR;
+ }
+
+ if (NULL == (read_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ TEST_ERROR;
+ }
+
+ /* Select this rank's portion of the dataspace */
+ for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ if (H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ goto error;
+ }
+
+ /* Setup memory space for write_buf */
+ {
+ hsize_t mdims[] = {data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int)};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(multi_file_dset_open)
+ {
+ size_t buf_start_idx;
+
+ TESTING_2("keeping files and datasets open");
+
+ /* Loop over files */
+ for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++) {
+ size_t buf_end_idx;
+
+ /* Set file name */
+ sprintf(file_name, PAR_ASYNC_API_TEST_FILE_PRINTF, (int)i);
+
+ /* Create file asynchronously */
+ if ((file_id[i] = H5Fcreate_async(file_name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+ if ((int)i > max_printf_file)
+ max_printf_file = (int)i;
+
+ /* Create the dataset asynchronously */
+ if ((dset_id[i] = H5Dcreate_async(file_id[i], "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+
+ /* Initialize write_buf. Must use a new slice of write_buf for
+ * each dset since we can't overwrite the buffers until I/O is done. */
+ buf_start_idx = i * (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int));
+ buf_end_idx = buf_start_idx + (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int));
+ for (j = buf_start_idx; j < buf_end_idx; j++)
+ ((int *)write_buf)[j] = mpi_rank;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id[i], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &write_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+ } /* end for */
+
+ /* Find out if the native connector is used */
+ if (H5VLobject_is_native(file_id[0], &is_native_vol) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+
+ /* Loop over files */
+ for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++) {
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. Skip this
+ * function because it isn't supported for the native vol in parallel. */
+ if (!is_native_vol && H5Oflush_async(dset_id[i], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+
+ /* Read the dataset asynchronously */
+ buf_start_idx = i * (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int));
+ if (H5Dread_async(dset_id[i], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &read_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_dset_open);
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(multi_file_dset_open);
+ } /* end if */
+
+ /* Close the datasets */
+ for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++)
+ if (H5Dclose(dset_id[i]) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+
+ PASSED();
+ }
+ PART_END(multi_file_dset_open);
+
+ PART_BEGIN(multi_file_dset_dclose)
+ {
+ size_t buf_start_idx;
+
+ TESTING_2("closing datasets between I/O");
+
+ /* Loop over files */
+ for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++) {
+ size_t buf_end_idx;
+
+ /* Open the dataset asynchronously */
+ if ((dset_id[0] = H5Dopen_async(file_id[i], "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+
+ /* Initialize write_buf. */
+ buf_start_idx = i * (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int));
+ buf_end_idx = buf_start_idx + (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int));
+ for (j = buf_start_idx; j < buf_end_idx; j++)
+ ((int *)write_buf)[j] = mpi_rank * 10;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id[0], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &write_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+ } /* end for */
+
+ /* Loop over files */
+ for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++) {
+ /* Flush the file asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id[i], H5F_SCOPE_LOCAL, es_id) < 0)
+                PART_TEST_ERROR(multi_file_dset_dclose);
+
+ /* Open the dataset asynchronously */
+ if ((dset_id[0] = H5Dopen_async(file_id[i], "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+
+ /* Read the dataset asynchronously */
+ buf_start_idx = i * (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int));
+ if (H5Dread_async(dset_id[0], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &read_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(multi_file_dset_dclose);
+ } /* end if */
+
+ /* Close the files */
+ for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++)
+ if (H5Fclose(file_id[i]) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+
+ PASSED();
+ }
+ PART_END(multi_file_dset_dclose);
+
+ PART_BEGIN(multi_file_dset_fclose)
+ {
+ size_t buf_start_idx;
+
+ TESTING_2("closing files between I/O");
+
+ /* Loop over files */
+ for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++) {
+ size_t buf_end_idx;
+
+ /* Set file name */
+ sprintf(file_name, PAR_ASYNC_API_TEST_FILE_PRINTF, (int)i);
+
+ /* Open the file asynchronously */
+ if ((file_id[0] = H5Fopen_async(file_name, H5F_ACC_RDWR, fapl_id, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Open the dataset asynchronously */
+ if ((dset_id[0] = H5Dopen_async(file_id[0], "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Initialize write_buf. */
+ buf_start_idx = i * (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int));
+ buf_end_idx = buf_start_idx + (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int));
+ for (j = buf_start_idx; j < buf_end_idx; j++)
+ ((int *)write_buf)[j] = mpi_rank + 5;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id[0], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &write_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Close the file asynchronously */
+ if (H5Fclose_async(file_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Loop over files */
+ for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++) {
+ /* Set file name */
+ sprintf(file_name, PAR_ASYNC_API_TEST_FILE_PRINTF, (int)i);
+
+ /* Open the file asynchronously */
+ if ((file_id[0] = H5Fopen_async(file_name, H5F_ACC_RDONLY, fapl_id, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Open the dataset asynchronously */
+ if ((dset_id[0] = H5Dopen_async(file_id[0], "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Read the dataset asynchronously */
+ buf_start_idx = i * (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int));
+ if (H5Dread_async(dset_id[0], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &read_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Close the file asynchronously */
+ if (H5Fclose_async(file_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(multi_file_dset_fclose);
+ } /* end if */
+
+ PASSED();
+ }
+ PART_END(multi_file_dset_fclose);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(space_id);
+ H5Sclose(mspace_id);
+ for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++) {
+ H5Dclose(dset_id[i]);
+ H5Fclose(file_id[i]);
+ }
+ H5Pclose(fapl_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+#undef MULTI_FILE_DATASET_IO_TEST_SPACE_RANK
+#undef MULTI_FILE_DATASET_IO_TEST_NFILES
+
+/*
+ * Create multiple files, each with a single group and dataset. Each rank
+ * writes to a portion of each dataset and reads from a portion of each dataset.
+ */
+#define MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK 2
+#define MULTI_FILE_GRP_DSET_IO_TEST_NFILES 5
+static int
+test_multi_file_grp_dset_io(void)
+{
+ hsize_t *dims = NULL;
+ hsize_t start[MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK];
+ hsize_t stride[MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK];
+ hsize_t count[MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK];
+ hsize_t block[MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK];
+ hbool_t op_failed;
+ size_t i, j, data_size, num_in_progress;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t grp_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ char file_name[32];
+ int *write_buf = NULL;
+ int *read_buf = NULL;
+
+ TESTING_MULTIPART("multi file dataset I/O with groups")
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if (generate_random_parallel_dimensions(MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Calculate size of data buffers - first dimension is skipped in calculation */
+ for (i = 1, data_size = 1; i < MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= sizeof(int);
+ data_size *= MULTI_FILE_GRP_DSET_IO_TEST_NFILES;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ TEST_ERROR;
+ }
+
+ if (NULL == (read_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ TEST_ERROR;
+ }
+
+ /* Select this rank's portion of the dataspace */
+ for (i = 0; i < MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ if (H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ goto error;
+ }
+
+ /* Setup memory space for write_buf */
+ {
+ hsize_t mdims[] = {data_size / MULTI_FILE_GRP_DSET_IO_TEST_NFILES / sizeof(int)};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(multi_file_grp_dset_no_kick)
+ {
+ size_t buf_start_idx;
+
+ TESTING_2("without intermediate calls to H5ESwait()");
+
+ /* Loop over files */
+ for (i = 0; i < MULTI_FILE_GRP_DSET_IO_TEST_NFILES; i++) {
+ size_t buf_end_idx;
+
+ /* Set file name */
+ sprintf(file_name, PAR_ASYNC_API_TEST_FILE_PRINTF, (int)i);
+
+ /* Create file asynchronously */
+ if ((file_id = H5Fcreate_async(file_name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+ if ((int)i > max_printf_file)
+ max_printf_file = (int)i;
+
+ /* Create the group asynchronously */
+ if ((grp_id = H5Gcreate_async(file_id, "grp", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) <
+ 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Create the dataset asynchronously */
+ if ((dset_id = H5Dcreate_async(grp_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Initialize write_buf. Must use a new slice of write_buf for
+ * each dset since we can't overwrite the buffers until I/O is done. */
+ buf_start_idx = i * (data_size / MULTI_FILE_GRP_DSET_IO_TEST_NFILES / sizeof(int));
+ buf_end_idx = buf_start_idx + (data_size / MULTI_FILE_GRP_DSET_IO_TEST_NFILES / sizeof(int));
+ for (j = buf_start_idx; j < buf_end_idx; j++)
+ ((int *)write_buf)[j] = mpi_rank;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &write_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Close the group asynchronously */
+ if (H5Gclose_async(grp_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Close the file asynchronously */
+ if (H5Fclose_async(file_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Loop over files */
+ for (i = 0; i < MULTI_FILE_GRP_DSET_IO_TEST_NFILES; i++) {
+ /* Set file name */
+ sprintf(file_name, PAR_ASYNC_API_TEST_FILE_PRINTF, (int)i);
+
+ /* Open the file asynchronously */
+ if ((file_id = H5Fopen_async(file_name, H5F_ACC_RDONLY, fapl_id, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Open the group asynchronously */
+ if ((grp_id = H5Gopen_async(file_id, "grp", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Open the dataset asynchronously */
+ if ((dset_id = H5Dopen_async(grp_id, "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Read the dataset asynchronously */
+ buf_start_idx = i * (data_size / MULTI_FILE_GRP_DSET_IO_TEST_NFILES / sizeof(int));
+ if (H5Dread_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &read_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Close the group asynchronously */
+ if (H5Gclose_async(grp_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Close the file asynchronously */
+ if (H5Fclose_async(file_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(multi_file_grp_dset_no_kick);
+ } /* end if */
+
+ PASSED();
+ }
+ PART_END(multi_file_grp_dset_no_kick);
+
+ PART_BEGIN(multi_file_grp_dset_kick)
+ {
+ size_t buf_start_idx;
+
+ TESTING_2("with intermediate calls to H5ESwait() (0 timeout)");
+
+ /* Loop over files */
+ for (i = 0; i < MULTI_FILE_GRP_DSET_IO_TEST_NFILES; i++) {
+ size_t buf_end_idx;
+
+ /* Set file name */
+ sprintf(file_name, PAR_ASYNC_API_TEST_FILE_PRINTF, (int)i);
+
+ /* Create file asynchronously */
+ if ((file_id = H5Fcreate_async(file_name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+ if ((int)i > max_printf_file)
+ max_printf_file = (int)i;
+
+ /* Create the group asynchronously */
+ if ((grp_id = H5Gcreate_async(file_id, "grp", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) <
+ 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Create the dataset asynchronously */
+ if ((dset_id = H5Dcreate_async(grp_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Initialize write_buf. Must use a new slice of write_buf for
+ * each dset since we can't overwrite the buffers until I/O is done. */
+ buf_start_idx = i * (data_size / MULTI_FILE_GRP_DSET_IO_TEST_NFILES / sizeof(int));
+ buf_end_idx = buf_start_idx + (data_size / MULTI_FILE_GRP_DSET_IO_TEST_NFILES / sizeof(int));
+ for (j = buf_start_idx; j < buf_end_idx; j++)
+ ((int *)write_buf)[j] = mpi_rank;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &write_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Close the group asynchronously */
+ if (H5Gclose_async(grp_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Close the file asynchronously */
+ if (H5Fclose_async(file_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Kick the event stack to make progress */
+ if (H5ESwait(es_id, 0, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Loop over files */
+ for (i = 0; i < MULTI_FILE_GRP_DSET_IO_TEST_NFILES; i++) {
+ /* Set file name */
+ sprintf(file_name, PAR_ASYNC_API_TEST_FILE_PRINTF, (int)i);
+
+ /* Open the file asynchronously */
+ if ((file_id = H5Fopen_async(file_name, H5F_ACC_RDONLY, fapl_id, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Open the group asynchronously */
+ if ((grp_id = H5Gopen_async(file_id, "grp", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Open the dataset asynchronously */
+ if ((dset_id = H5Dopen_async(grp_id, "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Read the dataset asynchronously */
+ buf_start_idx = i * (data_size / MULTI_FILE_GRP_DSET_IO_TEST_NFILES / sizeof(int));
+ if (H5Dread_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &read_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Close the group asynchronously */
+ if (H5Gclose_async(grp_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Close the file asynchronously */
+ if (H5Fclose_async(file_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Kick the event stack to make progress */
+ if (H5ESwait(es_id, 0, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(multi_file_grp_dset_kick);
+ } /* end if */
+
+ PASSED();
+ }
+ PART_END(multi_file_grp_dset_kick);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(space_id);
+ H5Sclose(mspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(grp_id);
+ H5Fclose(file_id);
+ H5Pclose(fapl_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+#undef MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK
+#undef MULTI_FILE_GRP_DSET_IO_TEST_NFILES
+
+/*
+ * Creates a single file and dataset, then each rank writes to a portion
+ * of the dataset. Next, the dataset is continually extended in the first
+ * dimension by 1 "row" per mpi rank and partially written to by each rank.
+ * Finally, each rank reads from a portion of the dataset.
+ */
+#define SET_EXTENT_TEST_SPACE_RANK 2
+#define SET_EXTENT_TEST_NUM_EXTENDS 6
+static int
+test_set_extent(void)
+{
+ hsize_t *dims = NULL;
+ hsize_t *maxdims = NULL;
+ hsize_t *cdims = NULL;
+ hsize_t start[SET_EXTENT_TEST_SPACE_RANK];
+ hsize_t stride[SET_EXTENT_TEST_SPACE_RANK];
+ hsize_t count[SET_EXTENT_TEST_SPACE_RANK];
+ hsize_t block[SET_EXTENT_TEST_SPACE_RANK];
+ hbool_t op_failed = false;
+ hbool_t is_native_vol = false;
+ size_t i, j, data_size, num_in_progress;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t space_id_out = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ htri_t tri_ret;
+ int *write_buf = NULL;
+ int *read_buf = NULL;
+
+ TESTING("extending dataset");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, dataset, dataset more, or flush aren't supported "
+ "with this connector\n");
+ }
+
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Generate random dataspace dimensions */
+ if (generate_random_parallel_dimensions(SET_EXTENT_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if (NULL == (maxdims = HDmalloc(SET_EXTENT_TEST_SPACE_RANK * sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate max dataspace dimension buffer\n");
+ TEST_ERROR;
+ }
+
+ if (NULL == (cdims = HDmalloc(SET_EXTENT_TEST_SPACE_RANK * sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate chunk dimension buffer\n");
+ TEST_ERROR;
+ }
+
+ for (i = 0; i < SET_EXTENT_TEST_SPACE_RANK; i++) {
+ maxdims[i] = (i == 0) ? dims[i] + (hsize_t)(SET_EXTENT_TEST_NUM_EXTENDS * mpi_size) : dims[i];
+ cdims[i] = (dims[i] == 1) ? 1 : dims[i] / 2;
+ }
+
+ /* Create file dataspace */
+ if ((space_id = H5Screate_simple(SET_EXTENT_TEST_SPACE_RANK, dims, maxdims)) < 0)
+ TEST_ERROR;
+
+ /* Create DCPL */
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+
+ /* Set chunking */
+ if (H5Pset_chunk(dcpl_id, SET_EXTENT_TEST_SPACE_RANK, cdims) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Create file asynchronously */
+ if ((file_id = H5Fcreate_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Find out if the native connector is used */
+ if (H5VLobject_is_native(file_id, &is_native_vol) < 0)
+ TEST_ERROR;
+
+ /* Create the dataset asynchronously */
+ if ((dset_id = H5Dcreate_async(file_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, dcpl_id,
+ H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Calculate size of data buffers - first dimension is skipped in calculation */
+ for (i = 1, data_size = 1; i < SET_EXTENT_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= sizeof(int);
+ data_size *= SET_EXTENT_TEST_NUM_EXTENDS;
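+ /* Each rank writes one new "row" per extend iteration, so the buffers must
+ * hold SET_EXTENT_TEST_NUM_EXTENDS rows' worth of elements for this rank */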
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ TEST_ERROR;
+ }
+
+ if (NULL == (read_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ TEST_ERROR;
+ }
+
+ /* Select this rank's portion of the dataspace */
+ for (i = 0; i < SET_EXTENT_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ if (H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ goto error;
+ }
+
+ /* Setup memory space for write_buf */
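+ /* The memory space initially covers a single "row" of elements; it is
+ * extended along with the dataset in the loop below */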
+ {
+ hsize_t mdims[] = {data_size / SET_EXTENT_TEST_NUM_EXTENDS / sizeof(int)};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ /* Initialize write_buf */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ ((int *)write_buf)[i] = mpi_rank;
+
+ /* Extend the dataset in the first dimension n times, extending by 1 "row" per
+ * mpi rank involved on each iteration. Each rank will claim one of the new
+ * "rows" for I/O in an interleaved fashion. */
+ for (i = 0; i < SET_EXTENT_TEST_NUM_EXTENDS; i++) {
+ /* No need to extend on the first iteration */
+ if (i) {
+ /* Extend dataspace */
+ dims[0] += (hsize_t)mpi_size;
+ if (H5Sset_extent_simple(space_id, SET_EXTENT_TEST_SPACE_RANK, dims, maxdims) < 0)
+ TEST_ERROR;
+
+ /* Extend dataset asynchronously */
+ if (H5Dset_extent_async(dset_id, dims, es_id) < 0)
+ TEST_ERROR;
+
+ /* Select hyperslab in file space to match new region */
+ for (j = 0; j < SET_EXTENT_TEST_SPACE_RANK; j++) {
+ if (j == 0) {
+ start[j] = (hsize_t)mpi_rank;
+ block[j] = 1;
+ stride[j] = (hsize_t)mpi_size;
+ count[j] = i + 1;
+ }
+ else {
+ start[j] = 0;
+ block[j] = dims[j];
+ stride[j] = 1;
+ count[j] = 1;
+ }
+ }
+
+ if (H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ goto error;
+ }
+
+ /* Adjust memory dataspace to match as well */
+ {
+ hsize_t mdims[] = {(i + 1) * (data_size / SET_EXTENT_TEST_NUM_EXTENDS / sizeof(int))};
+
+ if (H5Sset_extent_simple(mspace_id, 1, mdims, NULL) < 0)
+ TEST_ERROR;
+
+ if (H5Sselect_all(mspace_id) < 0)
+ TEST_ERROR;
+ }
+ } /* end if */
+
+ /* Get dataset dataspace */
+ if ((space_id_out = H5Dget_space_async(dset_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Verify extent is correct */
+ if ((tri_ret = H5Sextent_equal(space_id, space_id_out)) < 0)
+ TEST_ERROR;
+ if (!tri_ret)
+ FAIL_PUTS_ERROR(" dataspaces are not equal\n");
+
+ /* Close output dataspace */
+ if (H5Sclose(space_id_out) < 0)
+ TEST_ERROR;
+
+ /* Write the dataset slice asynchronously */
+ if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, write_buf, es_id) < 0)
+ TEST_ERROR;
+ }
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. Skip this
+ * function because it isn't supported for the native vol in parallel. */
+ if (!is_native_vol && H5Oflush_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Read back the dataset asynchronously (each rank reads its own portion) */
+ if (H5Dread_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, read_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed, expected %d but got %d\n", write_buf[i], read_buf[i]);
+ goto error;
+ } /* end if */
+
+ /* Close dataset asynchronously */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Open dataset asynchronously */
+ if ((dset_id = H5Dopen_async(file_id, "dset", H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Get dataset dataspace asynchronously */
+ if ((space_id_out = H5Dget_space_async(dset_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Verify the extents match */
+ if ((tri_ret = H5Sextent_equal(space_id, space_id_out)) < 0)
+ TEST_ERROR;
+ if (!tri_ret)
+ FAIL_PUTS_ERROR(" dataspaces are not equal\n");
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (cdims) {
+ HDfree(cdims);
+ cdims = NULL;
+ }
+
+ if (maxdims) {
+ HDfree(maxdims);
+ maxdims = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (cdims)
+ HDfree(cdims);
+ if (maxdims)
+ HDfree(maxdims);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(space_id);
+ H5Sclose(mspace_id);
+ H5Sclose(space_id_out);
+ H5Dclose(dset_id);
+ H5Pclose(dcpl_id);
+ H5Fclose(file_id);
+ H5Pclose(fapl_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+#undef SET_EXTENT_TEST_SPACE_RANK
+#undef SET_EXTENT_TEST_NUM_EXTENDS
+
+/*
+ * Creates an attribute on a dataset. All ranks check to see
+ * if the attribute exists before and after creating the
+ * attribute on the dataset.
+ */
+#define ATTRIBUTE_EXISTS_TEST_SPACE_RANK 2
+static int
+test_attribute_exists(void)
+{
+ hsize_t *dims = NULL;
+ hbool_t op_failed = false;
+ hbool_t is_native_vol = false;
+ size_t num_in_progress;
+ hbool_t exists1 = false;
+ hbool_t exists2 = false;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+
+ TESTING("H5Aexists()");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, dataset, dataset more, attribute, or flush aren't "
+ "supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Generate random dataspace dimensions */
+ if (generate_random_parallel_dimensions(ATTRIBUTE_EXISTS_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(ATTRIBUTE_EXISTS_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Find out if the native connector is used */
+ if (H5VLobject_is_native(file_id, &is_native_vol) < 0)
+ TEST_ERROR;
+
+ /* Create the dataset asynchronously */
+ if ((dset_id = H5Dcreate_async(file_id, "attr_exists_dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Check if the attribute exists asynchronously */
+ if (H5Aexists_async(dset_id, "attr", &exists1, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the create takes place after the existence check.
+ * Skip this function because it isn't supported for the native vol in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Create the attribute asynchronously */
+ if ((attr_id =
+ H5Acreate_async(dset_id, "attr", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the existence check takes place after the create.
+ * Skip this function because it isn't supported for the native vol in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Check if the attribute exists asynchronously */
+ if (H5Aexists_async(dset_id, "attr", &exists2, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Check if H5Aexists returned the correct values */
+ if (exists1)
+ FAIL_PUTS_ERROR(" H5Aexists returned TRUE for an attribute that should not exist")
+ if (!exists2)
+ FAIL_PUTS_ERROR(" H5Aexists returned FALSE for an attribute that should exist")
+
+ /* Close */
+ if (H5Aclose_async(attr_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (dims)
+ HDfree(dims);
+ H5Sclose(space_id);
+ H5Aclose(attr_id);
+ H5Dclose(dset_id);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+#undef ATTRIBUTE_EXISTS_TEST_SPACE_RANK
+
+/*
+ * Creates a file, dataset and attribute. Each rank writes to
+ * the attribute. Then, each rank reads the attribute and
+ * verifies the data is correct.
+ */
+#define ATTRIBUTE_IO_TEST_SPACE_RANK 2
+static int
+test_attribute_io(void)
+{
+ hsize_t *dims = NULL;
+ hbool_t op_failed = false;
+ hbool_t is_native_vol = false;
+ size_t num_in_progress;
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ int *write_buf = NULL;
+ int *read_buf = NULL;
+
+ TESTING("attribute I/O");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, dataset, dataset more, attribute, or flush aren't "
+ "supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Generate random dataspace dimensions */
+ if (generate_random_parallel_dimensions(ATTRIBUTE_IO_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(ATTRIBUTE_IO_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Find out if the native connector is used */
+ if (H5VLobject_is_native(file_id, &is_native_vol) < 0)
+ TEST_ERROR;
+
+ /* Create the dataset asynchronously */
+ if ((dset_id = H5Dcreate_async(file_id, "attr_dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create the attribute asynchronously */
+ if ((attr_id =
+ H5Acreate_async(dset_id, "attr", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Calculate size of data buffers */
+ for (i = 0, data_size = 1; i < ATTRIBUTE_IO_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= sizeof(int);
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for attribute write\n");
+ TEST_ERROR;
+ }
+
+ if (NULL == (read_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for attribute read\n");
+ TEST_ERROR;
+ }
+
+ /* Initialize write_buf. */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ write_buf[i] = 10 * (int)i;
+
+ /* Write the attribute asynchronously */
+ if (H5Awrite_async(attr_id, H5T_NATIVE_INT, write_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write.
+ * Skip this function because it isn't supported for the native vol in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Read the attribute asynchronously */
+ if (H5Aread_async(attr_id, H5T_NATIVE_INT, read_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ } /* end if */
+
+ /* Close the attribute asynchronously */
+ if (H5Aclose_async(attr_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Open the attribute asynchronously */
+ if ((attr_id = H5Aopen_async(dset_id, "attr", H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Read the attribute asynchronously */
+ if (H5Aread_async(attr_id, H5T_NATIVE_INT, read_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ } /* end if */
+
+ /* Close out of order to see if it trips things up */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Aclose_async(attr_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(space_id);
+ H5Aclose(attr_id);
+ H5Dclose(dset_id);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Creates a file, dataset and attribute in parallel. Each rank writes to
+ * the attribute with datatype conversion involved, then reads back the
+ * attribute and verifies the data is correct.
+ */
+#define ATTRIBUTE_IO_TCONV_TEST_SPACE_RANK 2
+static int
+test_attribute_io_tconv(void)
+{
+ hsize_t *dims = NULL;
+ hbool_t op_failed;
+ size_t num_in_progress;
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ int *write_buf = NULL;
+ int *read_buf = NULL;
+
+ TESTING("attribute I/O with type conversion");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, attribute, or flush aren't supported with this "
+ "connector\n");
+ }
+
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Generate random dataspace dimensions */
+ if (generate_random_parallel_dimensions(ATTRIBUTE_IO_TCONV_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(ATTRIBUTE_IO_TCONV_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create the attribute asynchronously by name */
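+ /* The attribute is stored as 16-bit big-endian unsigned integers in the file
+ * while the I/O buffers use native ints, so each write and read below goes
+ * through a datatype conversion */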
+ if ((attr_id = H5Acreate_by_name_async(file_id, "attr_dset", "attr_tconv", H5T_STD_U16BE, space_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Calculate size of data buffers */
+ for (i = 0, data_size = 1; i < ATTRIBUTE_IO_TCONV_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= sizeof(int);
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for attribute write\n");
+ TEST_ERROR;
+ }
+
+ if (NULL == (read_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for attribute read\n");
+ TEST_ERROR;
+ }
+
+ /* Initialize write_buf. */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ write_buf[i] = 10 * (int)i;
+
+ /* Write the attribute asynchronously */
+ if (H5Awrite_async(attr_id, H5T_NATIVE_INT, write_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0)
+ TEST_ERROR;
+
+ /* Read the attribute asynchronously */
+ if (H5Aread_async(attr_id, H5T_NATIVE_INT, read_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ } /* end if */
+
+ /* Close the attribute asynchronously */
+ if (H5Aclose_async(attr_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Open the attribute asynchronously */
+ if ((attr_id =
+ H5Aopen_by_name_async(file_id, "attr_dset", "attr_tconv", H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Read the attribute asynchronously */
+ if (H5Aread_async(attr_id, H5T_NATIVE_INT, read_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ } /* end if */
+
+ /* Close */
+ if (H5Aclose_async(attr_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(space_id);
+ H5Aclose(attr_id);
+ H5Dclose(dset_id);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Creates a file, dataset and attribute in parallel. Each rank writes to
+ * the attribute with a compound datatype, then reads back the attribute
+ * and verifies the data is correct.
+ */
+typedef struct tattr_cmpd_t {
+ int a;
+ int b;
+} tattr_cmpd_t;
+
+#define ATTRIBUTE_IO_COMPOUND_TEST_SPACE_RANK 2
+static int
+test_attribute_io_compound(void)
+{
+ hsize_t *dims = NULL;
+ hbool_t op_failed;
+ size_t num_in_progress;
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t mtype_id = H5I_INVALID_HID;
+ hid_t ftype_id = H5I_INVALID_HID;
+ hid_t mtypea_id = H5I_INVALID_HID;
+ hid_t mtypeb_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ tattr_cmpd_t *write_buf = NULL;
+ tattr_cmpd_t *read_buf = NULL;
+ tattr_cmpd_t *fbuf = NULL;
+
+ TESTING("attribute I/O with compound type conversion");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, dataset, dataset more, attribute, or flush aren't "
+ "supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Generate random dataspace dimensions */
+ if (generate_random_parallel_dimensions(ATTRIBUTE_IO_COMPOUND_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /* Create datatype */
+ if ((mtype_id = H5Tcreate(H5T_COMPOUND, sizeof(tattr_cmpd_t))) < 0)
+ TEST_ERROR;
+ if (H5Tinsert(mtype_id, "a_name", HOFFSET(tattr_cmpd_t, a), H5T_NATIVE_INT) < 0)
+ TEST_ERROR;
+ if (H5Tinsert(mtype_id, "b_name", HOFFSET(tattr_cmpd_t, b), H5T_NATIVE_INT) < 0)
+ TEST_ERROR;
+
+ if ((mtypea_id = H5Tcreate(H5T_COMPOUND, sizeof(tattr_cmpd_t))) < 0)
+ TEST_ERROR;
+ if (H5Tinsert(mtypea_id, "a_name", HOFFSET(tattr_cmpd_t, a), H5T_NATIVE_INT) < 0)
+ TEST_ERROR;
+
+ if ((mtypeb_id = H5Tcreate(H5T_COMPOUND, sizeof(tattr_cmpd_t))) < 0)
+ TEST_ERROR;
+ if (H5Tinsert(mtypeb_id, "b_name", HOFFSET(tattr_cmpd_t, b), H5T_NATIVE_INT) < 0)
+ TEST_ERROR;
+
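+ /* The file datatype is packed: "a_name" is a 2-byte big-endian unsigned
+ * field at offset 0 and "b_name" an 8-byte little-endian signed field at
+ * offset 2, 10 bytes total, so I/O through the memory types above involves
+ * a datatype conversion */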
+ if ((ftype_id = H5Tcreate(H5T_COMPOUND, 2 + 8)) < 0)
+ TEST_ERROR;
+ if (H5Tinsert(ftype_id, "a_name", 0, H5T_STD_U16BE) < 0)
+ TEST_ERROR;
+ if (H5Tinsert(ftype_id, "b_name", 2, H5T_STD_I64LE) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(ATTRIBUTE_IO_COMPOUND_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create the attribute asynchronously by name */
+ if ((attr_id = H5Acreate_by_name_async(file_id, "attr_dset", "attr_cmpd", ftype_id, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Calculate size of data buffers */
+ for (i = 0, data_size = 1; i < ATTRIBUTE_IO_COMPOUND_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= sizeof(tattr_cmpd_t);
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for attribute write\n");
+ TEST_ERROR;
+ }
+
+ if (NULL == (read_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for attribute read\n");
+ TEST_ERROR;
+ }
+
+ if (NULL == (fbuf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for attribute read verification\n");
+ TEST_ERROR;
+ }
+
+ /* Initialize write_buf. */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ write_buf[i].a = 10 * (int)i;
+ write_buf[i].b = (10 * (int)i) + 1;
+ }
+
+ /* Write the attribute asynchronously */
+ if (H5Awrite_async(attr_id, mtype_id, write_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Update fbuf */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ fbuf[i].a = write_buf[i].a;
+ fbuf[i].b = write_buf[i].b;
+ }
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0)
+ TEST_ERROR;
+
+ /* Read the attribute asynchronously */
+ if (H5Aread_async(attr_id, mtype_id, read_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ if (read_buf[i].a != fbuf[i].a) {
+ H5_FAILED();
+ HDprintf(" data verification failed for field 'a'\n");
+ goto error;
+ } /* end if */
+ if (read_buf[i].b != fbuf[i].b) {
+ H5_FAILED();
+ HDprintf(" data verification failed for field 'b'\n");
+ goto error;
+ } /* end if */
+ }
+
+ /* Clear the read buffer */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ read_buf[i].a = -2;
+ read_buf[i].b = -2;
+ }
+
+ /* Read the attribute asynchronously (element a only) */
+ if (H5Aread_async(attr_id, mtypea_id, read_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ if (read_buf[i].a != fbuf[i].a) {
+ H5_FAILED();
+ HDprintf(" data verification failed for field 'a'\n");
+ goto error;
+ } /* end if */
+ if (read_buf[i].b != -2) {
+ H5_FAILED();
+ HDprintf(" data verification failed for field 'b'\n");
+ goto error;
+ } /* end if */
+ }
+
+ /* Clear the read buffer */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ read_buf[i].a = -2;
+ read_buf[i].b = -2;
+ }
+
+ /* Read the attribute asynchronously (element b only) */
+ if (H5Aread_async(attr_id, mtypeb_id, read_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ if (read_buf[i].a != -2) {
+ H5_FAILED();
+ HDprintf(" data verification failed for field 'a'\n");
+ goto error;
+ } /* end if */
+ if (read_buf[i].b != fbuf[i].b) {
+ H5_FAILED();
+ HDprintf(" data verification failed for field 'b'\n");
+ goto error;
+ } /* end if */
+ }
+
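+ /* Make sure no rank starts overwriting the attribute below while other
+ * ranks are still reading back and verifying the values written above */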
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+
+ /* Update write_buf */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ write_buf[i].a += 2 * 6 * 10;
+ write_buf[i].b += 2 * 6 * 10;
+ }
+
+ /* Write the attribute asynchronously (element a only) */
+ if (H5Awrite_async(attr_id, mtypea_id, write_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Update fbuf */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ fbuf[i].a = write_buf[i].a;
+ }
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0)
+ TEST_ERROR;
+
+ /* Clear the read buffer */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ read_buf[i].a = -2;
+ read_buf[i].b = -2;
+ }
+
+ /* Read the attribute asynchronously */
+ if (H5Aread_async(attr_id, mtype_id, read_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ if (read_buf[i].a != fbuf[i].a) {
+ H5_FAILED();
+ HDprintf(" data verification failed for field 'a'\n");
+ goto error;
+ } /* end if */
+ if (read_buf[i].b != fbuf[i].b) {
+ H5_FAILED();
+ HDprintf(" data verification failed for field 'b'\n");
+ goto error;
+ } /* end if */
+ }
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+
+ /* Update write_buf */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ write_buf[i].a += 2 * 6 * 10;
+ write_buf[i].b += 2 * 6 * 10;
+ }
+
+ /* Write the attribute asynchronously (element b only) */
+ if (H5Awrite_async(attr_id, mtypeb_id, write_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Update fbuf */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ fbuf[i].b = write_buf[i].b;
+ }
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0)
+ TEST_ERROR;
+
+ /* Clear the read buffer */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ read_buf[i].a = -2;
+ read_buf[i].b = -2;
+ }
+
+ /* Read the attribute asynchronously */
+ if (H5Aread_async(attr_id, mtype_id, read_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ if (read_buf[i].a != fbuf[i].a) {
+ H5_FAILED();
+ HDprintf(" data verification failed for field 'a'\n");
+ goto error;
+ } /* end if */
+ if (read_buf[i].b != fbuf[i].b) {
+ H5_FAILED();
+ HDprintf(" data verification failed for field 'b'\n");
+ goto error;
+ } /* end if */
+ }
+
+ /* Close */
+ if (H5Aclose_async(attr_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(mtype_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(ftype_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(mtypea_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(mtypeb_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (fbuf) {
+ HDfree(fbuf);
+ fbuf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (fbuf)
+ HDfree(fbuf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(space_id);
+ H5Tclose(mtype_id);
+ H5Tclose(ftype_id);
+ H5Tclose(mtypea_id);
+ H5Tclose(mtypeb_id);
+ H5Aclose(attr_id);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Tests async group interfaces in parallel
+ */
+static int
+test_group(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t parent_group_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t subgroup_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ H5G_info_t info1;
+ H5G_info_t info2;
+ H5G_info_t info3;
+ size_t num_in_progress;
+ hbool_t op_failed;
+
+ TESTING("group operations");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_MORE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, group more, creation order, or flush aren't "
+ "supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Create GCPL */
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0)
+ TEST_ERROR;
+
+ /* Track creation order */
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create the parent group asynchronously */
+ if ((parent_group_id =
+ H5Gcreate_async(file_id, "group_parent", H5P_DEFAULT, gcpl_id, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create 3 subgroups asynchronously, the first with no sub-subgroups, the
+ * second with 1, and the third with 2 */
+ if ((group_id =
+ H5Gcreate_async(parent_group_id, "group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+ if (H5Gclose_async(group_id, es_id) < 0)
+ TEST_ERROR;
+
+ if ((group_id =
+ H5Gcreate_async(parent_group_id, "group2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+ if ((subgroup_id = H5Gcreate_async(group_id, "subgroup1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) <
+ 0)
+ TEST_ERROR;
+ if (H5Gclose_async(subgroup_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose_async(group_id, es_id) < 0)
+ TEST_ERROR;
+
+ if ((group_id =
+ H5Gcreate_async(parent_group_id, "group3", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+ if ((subgroup_id = H5Gcreate_async(group_id, "subgroup1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) <
+ 0)
+ TEST_ERROR;
+ if (H5Gclose_async(subgroup_id, es_id) < 0)
+ TEST_ERROR;
+ if ((subgroup_id = H5Gcreate_async(group_id, "subgroup2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) <
+ 0)
+ TEST_ERROR;
+ if (H5Gclose_async(subgroup_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose_async(group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the file asynchronously. This will effectively work as a barrier,
+ * guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0)
+ TEST_ERROR;
+
+ /* Test H5Gget_info_async */
+ /* Open group1 asynchronously */
+ if ((group_id = H5Gopen_async(parent_group_id, "group1", H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Get info */
+ if (H5Gget_info_async(group_id, &info1, es_id) < 0)
+ TEST_ERROR;
+
+ /* Test H5Gget_info_by_idx_async */
+ if (H5Gget_info_by_idx_async(parent_group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, &info2,
+ H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Test H5Gget_info_by_name_async */
+ if (H5Gget_info_by_name_async(parent_group_id, "group3", &info3, H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify group infos */
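+ /* "group1" has no members, the group at creation-order index 1 ("group2")
+ * has one subgroup, and "group3" has two */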
+ if (info1.nlinks != 0)
+ FAIL_PUTS_ERROR(" incorrect number of links")
+ if (info2.nlinks != 1)
+ FAIL_PUTS_ERROR(" incorrect number of links")
+ if (info3.nlinks != 2)
+ FAIL_PUTS_ERROR(" incorrect number of links")
+
+ /* Close */
+ if (H5Gclose_async(group_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ H5Gclose(group_id);
+ H5Gclose(parent_group_id);
+ H5Fclose(file_id);
+ H5Pclose(fapl_id);
+ H5Pclose(gcpl_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Tests async link interfaces in parallel
+ */
+static int
+test_link(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t parent_group_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ hbool_t existsh1;
+ hbool_t existsh2;
+ hbool_t existsh3;
+ hbool_t existss1;
+ hbool_t existss2;
+ hbool_t existss3;
+ size_t num_in_progress;
+ hbool_t op_failed = false;
+ hbool_t is_native_vol = false;
+
+ TESTING("link operations");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, link, hard link, soft link, flush, or creation order "
+ "aren't supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Create GCPL */
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0)
+ TEST_ERROR;
+
+ /* Track creation order */
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Find out if the native connector is used */
+ if (H5VLobject_is_native(file_id, &is_native_vol) < 0)
+ TEST_ERROR;
+
+ /* Create the parent group asynchronously */
+ if ((parent_group_id =
+ H5Gcreate_async(file_id, "link_parent", H5P_DEFAULT, gcpl_id, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create subgroup asynchronously. */
+ if ((group_id = H5Gcreate_async(parent_group_id, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) <
+ 0)
+ TEST_ERROR;
+ if (H5Gclose_async(group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the link to the subgroup is visible to later tasks.
+ * Skip this function for the native vol because it isn't supported in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Create hard link asynchronously */
+ if (H5Lcreate_hard_async(parent_group_id, "group", parent_group_id, "hard_link", H5P_DEFAULT, H5P_DEFAULT,
+ es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the soft link create takes place after the hard
+ * link create. Skip this function for the native vol because it isn't supported in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Create soft link asynchronously */
+ if (H5Lcreate_soft_async("/link_parent/group", parent_group_id, "soft_link", H5P_DEFAULT, H5P_DEFAULT,
+ es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the writes.
+ * Skip this function for the native vol because it isn't supported in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+
+ /* Check if hard link exists */
+ if (H5Lexists_async(parent_group_id, "hard_link", &existsh1, H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Check if soft link exists */
+ if (H5Lexists_async(parent_group_id, "soft_link", &existss1, H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the delete takes place after the reads.
+ * Skip this function for the native vol because it isn't supported in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Delete soft link by index */
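+ /* (creation-order indices under "link_parent": "group" = 0, "hard_link" = 1,
+ * "soft_link" = 2) */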
+ if (H5Ldelete_by_idx_async(parent_group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, H5P_DEFAULT, es_id) <
+ 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the delete.
+ * Skip this function for the native vol because it isn't supported in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+
+ /* Check if hard link exists */
+ if (H5Lexists_async(parent_group_id, "hard_link", &existsh2, H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Check if soft link exists */
+ if (H5Lexists_async(parent_group_id, "soft_link", &existss2, H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the delete takes place after the reads.
+ * Skip this function for the native vol because it isn't supported in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Delete hard link */
+ if (H5Ldelete_async(parent_group_id, "hard_link", H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the delete.
+ * Skip this function for the native vol because it isn't supported in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+
+ /* Check if hard link exists */
+ if (H5Lexists_async(parent_group_id, "hard_link", &existsh3, H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Check if soft link exists */
+ if (H5Lexists_async(parent_group_id, "soft_link", &existss3, H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Check if existence returns were correct */
+ if (!existsh1)
+ FAIL_PUTS_ERROR(" link exists returned FALSE for link that should exist")
+ if (!existss1)
+ FAIL_PUTS_ERROR(" link exists returned FALSE for link that should exist")
+ if (!existsh2)
+ FAIL_PUTS_ERROR(" link exists returned FALSE for link that should exist")
+ if (existss2)
+ FAIL_PUTS_ERROR(" link exists returned TRUE for link that should not exist")
+ if (existsh3)
+ FAIL_PUTS_ERROR(" link exists returned TRUE for link that should not exist")
+ if (existss3)
+ FAIL_PUTS_ERROR(" link exists returned TRUE for link that should not exist")
+
+ /* Close */
+ if (H5Gclose_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Gclose(parent_group_id);
+ H5Fclose(file_id);
+ H5Pclose(fapl_id);
+ H5Pclose(gcpl_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Tests H5Ocopy_async and H5Orefresh_async in parallel
+ */
+#define OCOPY_REFRESH_TEST_SPACE_RANK 2
+static int
+test_ocopy_orefresh(void)
+{
+ hsize_t *dims = NULL;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t parent_group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ size_t num_in_progress;
+ hbool_t op_failed = false;
+ hbool_t is_native_vol = false;
+
+ TESTING("H5Ocopy() and H5Orefresh()");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, object more, flush, or refresh "
+ "aren't supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Generate random dataspace dimensions */
+ if (generate_random_parallel_dimensions(OCOPY_REFRESH_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(OCOPY_REFRESH_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Find out if the native connector is used */
+ if (H5VLobject_is_native(file_id, &is_native_vol) < 0)
+ TEST_ERROR;
+
+ /* Create the parent group asynchronously */
+ if ((parent_group_id =
+ H5Gcreate_async(file_id, "ocopy_parent", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create dataset asynchronously. */
+ if ((dset_id = H5Dcreate_async(parent_group_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the copy takes place after dataset create.
+ * Skip this function for the native vol because it isn't supported in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Copy dataset */
+ if (H5Ocopy_async(parent_group_id, "dset", parent_group_id, "copied_dset", H5P_DEFAULT, H5P_DEFAULT,
+ es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the dataset open takes place after the copy.
+ * Skip this function for the native vol because it isn't supported in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ if (!coll_metadata_read) {
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+ }
+
+ /* Open the copied dataset asynchronously */
+ if ((dset_id = H5Dopen_async(parent_group_id, "copied_dset", H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Refresh the copied dataset with the synchronous H5Orefresh call */
+ if (H5Orefresh(dset_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Close */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (dims)
+ HDfree(dims);
+ H5Sclose(space_id);
+ H5Dclose(dset_id);
+ H5Gclose(parent_group_id);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+#undef OCOPY_REFRESH_TEST_SPACE_RANK
+
+/*
+ * Tests H5Freopen_async in parallel
+ */
+static int
+test_file_reopen(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t reopened_file_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ size_t num_in_progress;
+ hbool_t op_failed;
+
+ TESTING("H5Freopen()");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or file more aren't supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Reopen file asynchronously */
+ if ((reopened_file_id = H5Freopen_async(file_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Close */
+ if (H5Fclose_async(reopened_file_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(reopened_file_id);
+ H5Fclose(file_id);
+ H5Pclose(fapl_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
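test_file_reopen() above reduces every queued failure to the single op_failed boolean returned by H5ESwait. If finer-grained reporting were ever wanted, the public H5ES error queries could be layered on top of the same wait; a hedged sketch follows (report_es_errors is a hypothetical helper, not part of this file):

static void
report_es_errors(hid_t es_id)
{
    size_t  num_in_progress = 0;
    size_t  num_errs        = 0;
    hbool_t op_failed       = FALSE;

    if (H5ESwait(es_id, H5ES_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
        return;

    /* H5ESget_err_count reports how many queued operations failed */
    if (op_failed && H5ESget_err_count(es_id, &num_errs) >= 0)
        HDprintf("    %zu queued operation(s) failed\n", num_errs);
}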
+
+/*
+ * Cleanup temporary test files
+ */
+static void
+cleanup_files(void)
+{
+ char file_name[64];
+ int i;
+
+ if (MAINPROCESS) {
+ H5Fdelete(PAR_ASYNC_API_TEST_FILE, H5P_DEFAULT);
+ for (i = 0; i <= max_printf_file; i++) {
+ snprintf(file_name, 64, PAR_ASYNC_API_TEST_FILE_PRINTF, i);
+ H5Fdelete(file_name, H5P_DEFAULT);
+ } /* end for */
+ }
+}
+
+int
+H5_api_async_test_parallel(void)
+{
+ size_t i;
+ int nerrors;
+
+ if (MAINPROCESS) {
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Parallel Async Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+ }
+
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_ASYNC)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(" Async APIs aren't supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_async_tests); i++) {
+ nerrors += (*par_async_tests[i])() ? 1 : 0;
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" MPI_Barrier() failed!\n");
+ }
+ }
+
+ if (MAINPROCESS) {
+ HDprintf("\n");
+ HDprintf("Cleaning up testing files\n");
+ }
+
+ cleanup_files();
+
+ if (MAINPROCESS) {
+ HDprintf("\n * Re-testing with independent metadata reads *\n");
+ }
+
+ coll_metadata_read = FALSE;
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_async_tests); i++) {
+ nerrors += (*par_async_tests[i])() ? 1 : 0;
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" MPI_Barrier() failed!\n");
+ }
+ }
+
+ if (MAINPROCESS) {
+ HDprintf("\n");
+ HDprintf("Cleaning up testing files\n");
+ }
+
+ cleanup_files();
+
+ return nerrors;
+}
+
+#else /* H5ESpublic_H */
+
+int
+H5_api_async_test_parallel(void)
+{
+ if (MAINPROCESS) {
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Parallel Async Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+ }
+
+ HDprintf("SKIPPED due to no async support in HDF5 library\n");
+
+ return 0;
+}
+
+#endif
diff --git a/testpar/API/H5_api_async_test_parallel.h b/testpar/API/H5_api_async_test_parallel.h
new file mode 100644
index 0000000..9e4340c
--- /dev/null
+++ b/testpar/API/H5_api_async_test_parallel.h
@@ -0,0 +1,29 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_ASYNC_TEST_PARALLEL_H_
+#define H5_API_ASYNC_TEST_PARALLEL_H_
+
+#include "H5_api_test_parallel.h"
+
+int H5_api_async_test_parallel(void);
+
+/********************************************************
+ * *
+ * API parallel async test defines *
+ * *
+ ********************************************************/
+
+#define PAR_ASYNC_API_TEST_FILE "H5_api_async_test_parallel.h5"
+#define PAR_ASYNC_API_TEST_FILE_PRINTF "H5_api_async_test_parallel_%d.h5"
+
+#endif /* H5_API_ASYNC_TEST_PARALLEL_H_ */
diff --git a/testpar/API/H5_api_attribute_test_parallel.c b/testpar/API/H5_api_attribute_test_parallel.c
new file mode 100644
index 0000000..cffbfcd
--- /dev/null
+++ b/testpar/API/H5_api_attribute_test_parallel.c
@@ -0,0 +1,47 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_attribute_test_parallel.h"
+
+/*
+ * The array of parallel attribute tests to be performed.
+ */
+static int (*par_attribute_tests[])(void) = {NULL};
+
+int
+H5_api_attribute_test_parallel(void)
+{
+ size_t i;
+ int nerrors;
+
+ if (MAINPROCESS) {
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Parallel Attribute Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+ }
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_attribute_tests); i++) {
+ /* nerrors += (*par_attribute_tests[i])() ? 1 : 0; */
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" MPI_Barrier() failed!\n");
+ }
+ }
+
+ if (MAINPROCESS)
+ HDprintf("\n");
+
+ return nerrors;
+}
diff --git a/testpar/API/H5_api_attribute_test_parallel.h b/testpar/API/H5_api_attribute_test_parallel.h
new file mode 100644
index 0000000..81802ae
--- /dev/null
+++ b/testpar/API/H5_api_attribute_test_parallel.h
@@ -0,0 +1,20 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_ATTRIBUTE_TEST_PARALLEL_H_
+#define H5_API_ATTRIBUTE_TEST_PARALLEL_H_
+
+#include "H5_api_test_parallel.h"
+
+int H5_api_attribute_test_parallel(void);
+
+#endif /* H5_API_ATTRIBUTE_TEST_PARALLEL_H_ */
diff --git a/testpar/API/H5_api_dataset_test_parallel.c b/testpar/API/H5_api_dataset_test_parallel.c
new file mode 100644
index 0000000..fd02a7f
--- /dev/null
+++ b/testpar/API/H5_api_dataset_test_parallel.c
@@ -0,0 +1,8149 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * XXX: Better documentation for each test about how the selections get
+ * split up among MPI ranks.
+ */
+#include "H5_api_dataset_test_parallel.h"
+
+static int test_write_dataset_data_verification(void);
+static int test_write_dataset_independent(void);
+static int test_write_dataset_one_proc_0_selection(void);
+static int test_write_dataset_one_proc_none_selection(void);
+static int test_write_dataset_one_proc_all_selection(void);
+static int test_write_dataset_hyper_file_all_mem(void);
+static int test_write_dataset_all_file_hyper_mem(void);
+static int test_write_dataset_point_file_all_mem(void);
+static int test_write_dataset_all_file_point_mem(void);
+static int test_write_dataset_hyper_file_point_mem(void);
+static int test_write_dataset_point_file_hyper_mem(void);
+static int test_read_dataset_one_proc_0_selection(void);
+static int test_read_dataset_one_proc_none_selection(void);
+static int test_read_dataset_one_proc_all_selection(void);
+static int test_read_dataset_hyper_file_all_mem(void);
+static int test_read_dataset_all_file_hyper_mem(void);
+static int test_read_dataset_point_file_all_mem(void);
+static int test_read_dataset_all_file_point_mem(void);
+static int test_read_dataset_hyper_file_point_mem(void);
+static int test_read_dataset_point_file_hyper_mem(void);
+
+/*
+ * Chunking tests
+ */
+static int test_write_multi_chunk_dataset_same_shape_read(void);
+static int test_write_multi_chunk_dataset_diff_shape_read(void);
+static int test_overwrite_multi_chunk_dataset_same_shape_read(void);
+static int test_overwrite_multi_chunk_dataset_diff_shape_read(void);
+
+/*
+ * The array of parallel dataset tests to be performed.
+ */
+static int (*par_dataset_tests[])(void) = {
+ test_write_dataset_data_verification,
+ test_write_dataset_independent,
+ test_write_dataset_one_proc_0_selection,
+ test_write_dataset_one_proc_none_selection,
+ test_write_dataset_one_proc_all_selection,
+ test_write_dataset_hyper_file_all_mem,
+ test_write_dataset_all_file_hyper_mem,
+ test_write_dataset_point_file_all_mem,
+ test_write_dataset_all_file_point_mem,
+ test_write_dataset_hyper_file_point_mem,
+ test_write_dataset_point_file_hyper_mem,
+ test_read_dataset_one_proc_0_selection,
+ test_read_dataset_one_proc_none_selection,
+ test_read_dataset_one_proc_all_selection,
+ test_read_dataset_hyper_file_all_mem,
+ test_read_dataset_all_file_hyper_mem,
+ test_read_dataset_point_file_all_mem,
+ test_read_dataset_all_file_point_mem,
+ test_read_dataset_hyper_file_point_mem,
+ test_read_dataset_point_file_hyper_mem,
+ test_write_multi_chunk_dataset_same_shape_read,
+ test_write_multi_chunk_dataset_diff_shape_read,
+ test_overwrite_multi_chunk_dataset_same_shape_read,
+ test_overwrite_multi_chunk_dataset_diff_shape_read,
+};
+
+/*
+ * A test to ensure that data is read back correctly from
+ * a dataset after it has been written in parallel. The test
+ * covers simple examples of using H5S_ALL selections,
+ * hyperslab selections and point selections.
+ */
+#define DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK 3
+#define DATASET_WRITE_DATA_VERIFY_TEST_NUM_POINTS 10
+#define DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME "dataset_write_data_verification_test"
+#define DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1 "dataset_write_data_verification_all"
+#define DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2 "dataset_write_data_verification_hyperslab"
+#define DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3 "dataset_write_data_verification_points"
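The hyperslab and point parts of the test below both decompose the dataset so that rank r owns index r of the first dimension and all of the remaining dimensions. A compact sketch of that per-rank selection for a rank-3 dataset (select_rank_slab_sketch is a hypothetical helper; dims, fspace_id, and mpi_rank stand in for values the test derives at run time):

/* Illustrative sketch: the per-rank hyperslab used by this test, for a rank-3
 * dataset whose first dimension is sized to the number of MPI ranks */
static hid_t
select_rank_slab_sketch(hid_t fspace_id, const hsize_t dims[3], int mpi_rank)
{
    hsize_t start[3]  = {(hsize_t)mpi_rank, 0, 0};
    hsize_t stride[3] = {1, 1, 1};
    hsize_t count[3]  = {1, 1, 1};
    hsize_t block[3]  = {1, dims[1], dims[2]};
    hsize_t mdims[1]  = {dims[1] * dims[2]};

    /* Rank r claims the slab [r, 0:dims[1], 0:dims[2]]; all ranks together
     * cover the whole dataset */
    if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0)
        return H5I_INVALID_HID;

    /* Return a matching 1-D memory space holding this rank's elements */
    return H5Screate_simple(1, mdims, NULL);
}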
+static int
+test_write_dataset_data_verification(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ hsize_t start[DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK];
+ hsize_t stride[DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK];
+ hsize_t count[DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK];
+ hsize_t block[DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK];
+ hsize_t *points = NULL;
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING_MULTIPART("verification of dataset data using H5Dwrite then H5Dread");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = H5Screate_simple(DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1,
+ DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1);
+ goto error;
+ }
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2,
+ DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2);
+ goto error;
+ }
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3,
+ DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3);
+ goto error;
+ }
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Dwrite_all_read)
+ {
+ hbool_t op_failed = FALSE;
+
+ TESTING_2("H5Dwrite using H5S_ALL then H5Dread");
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1);
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ /*
+ * Write data to dataset on rank 0 only. All ranks will read the data back.
+ */
+ if (MAINPROCESS) {
+ for (i = 0, data_size = 1; i < DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE;
+
+ if (NULL != (write_buf = HDmalloc(data_size))) {
+ for (i = 0; i < data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE; i++)
+ ((int *)write_buf)[i] = (int)i;
+
+ if (H5Dwrite(dset_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0)
+ op_failed = TRUE;
+ }
+ else
+ op_failed = TRUE;
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ }
+
+            /* Logical OR so that a failure on rank 0 is seen by all ranks */
+            if (MPI_SUCCESS !=
+                MPI_Allreduce(MPI_IN_PLACE, &op_failed, 1, MPI_C_BOOL, MPI_LOR, MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if dataset write on rank 0 succeeded\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if (op_failed == TRUE) {
+ H5_FAILED();
+ HDprintf(" dataset write on rank 0 failed!\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ PART_ERROR(H5Dwrite_all_read);
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ PART_ERROR(H5Dwrite_all_read);
+ }
+ if ((group_id =
+ H5Gopen2(container_group, DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME);
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1);
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if (H5Dread(dset_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1);
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ for (i = 0; i < (hsize_t)space_npoints; i++)
+ if (((int *)read_buf)[i] != (int)i) {
+ H5_FAILED();
+ HDprintf(" H5S_ALL selection data verification failed\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dwrite_all_read);
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Dwrite_hyperslab_read)
+ {
+ TESTING_2("H5Dwrite using hyperslab selection then H5Dread");
+
+ for (i = 1, data_size = 1; i < DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ for (i = 0; i < data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE; i++)
+ ((int *)write_buf)[i] = mpi_rank;
+
+            /* Each MPI rank writes to a single slice along the first dimension,
+             * covering the entirety of the remaining dimensions. The combined
+             * selections from all MPI ranks span the first dimension.
+             */
+ for (i = 0; i < DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2);
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ {
+ hsize_t mdims[] = {data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2);
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+ if ((group_id =
+ H5Gopen2(container_group, DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME);
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2);
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if (H5Dread(dset_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2);
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+
+ for (j = 0; j < data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE; j++) {
+ if (((int *)
+ read_buf)[j + (i * (data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE))] !=
+ (int)i) {
+ H5_FAILED();
+ HDprintf(" hyperslab selection data verification failed\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dwrite_hyperslab_read);
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Dwrite_point_sel_read)
+ {
+ TESTING_2("H5Dwrite using point selection then H5Dread");
+
+ for (i = 1, data_size = 1; i < DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ /* Use different data than the previous test to ensure that the data actually changed. */
+ for (i = 0; i < data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE; i++)
+ ((int *)write_buf)[i] = mpi_size - mpi_rank;
+
+ if (NULL == (points = HDmalloc(DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK *
+ (data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE) *
+ sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for point selection\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+            /* Each MPI rank writes to a single slice along the first dimension,
+             * covering the entirety of the remaining dimensions. The combined
+             * selections from all MPI ranks span the first dimension.
+             */
+ for (i = 0; i < data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE; i++) {
+ size_t j;
+
+ for (j = 0; j < DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK; j++) {
+ size_t idx = (i * DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK) + j;
+
+ if (j == 0)
+ points[idx] = (hsize_t)mpi_rank;
+ else if (j != DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK - 1)
+ points[idx] = i / dims[j + 1];
+ else
+ points[idx] = i % dims[j];
+ }
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3);
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if (H5Sselect_elements(fspace_id, H5S_SELECT_SET,
+ data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE, points) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select elements in dataspace\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ {
+ hsize_t mdims[] = {data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3);
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+ if ((group_id =
+ H5Gopen2(container_group, DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME);
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3);
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if (H5Dread(dset_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3);
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+
+ for (j = 0; j < data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE; j++) {
+ if (((int *)
+ read_buf)[j + (i * (data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE))] !=
+ (mpi_size - (int)i)) {
+ H5_FAILED();
+ HDprintf(" point selection data verification failed\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dwrite_point_sel_read);
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+ if (points) {
+ HDfree(points);
+ points = NULL;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (points) {
+ HDfree(points);
+ points = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (points)
+ HDfree(points);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that independent dataset writes function
+ * as expected. First, two datasets are created in the file.
+ * Then, the even MPI ranks first write to dataset 1, followed
+ * by dataset 2. The odd MPI ranks first write to dataset 2,
+ * followed by dataset 1. After this, the data is read back from
+ * each dataset and verified.
+ */
+#define DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK 3
+#define DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_INDEPENDENT_WRITE_TEST_GROUP_NAME "independent_dataset_write_test"
+#define DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME1 "dset1"
+#define DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME2 "dset2"
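The test below never creates a transfer property list, so its H5Dwrite calls use H5P_DEFAULT, which for the MPI-IO file driver means independent I/O. For comparison, a hedged sketch of selecting the transfer mode explicitly (relevant to the native connector's MPI-IO driver only; write_with_xfer_mode_sketch and its arguments are illustrative):

static herr_t
write_with_xfer_mode_sketch(hid_t dset_id, hid_t mspace_id, hid_t fspace_id, const void *buf)
{
    herr_t ret;
    hid_t  dxpl_id = H5Pcreate(H5P_DATASET_XFER);

    if (dxpl_id < 0)
        return -1;

    /* Independent: each rank performs its own I/O with no coordination.
     * Swap in H5FD_MPIO_COLLECTIVE to require every rank to take part in
     * the same H5Dwrite call instead. */
    if (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_INDEPENDENT) < 0) {
        H5Pclose(dxpl_id);
        return -1;
    }

    ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, buf);

    H5Pclose(dxpl_id);
    return ret;
}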
+static int
+test_write_dataset_independent(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ hsize_t start[DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK];
+ hsize_t stride[DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK];
+ hsize_t count[DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK];
+ hsize_t block[DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id1 = H5I_INVALID_HID, dset_id2 = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("independent writing to different datasets by different ranks");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_INDEPENDENT_WRITE_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATASET_INDEPENDENT_WRITE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ /*
+     * Set up the dimensions of the overall datasets and of the slab local
+     * to this MPI rank.
+ */
+ if (generate_random_parallel_dimensions(DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = H5Screate_simple(DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* create a dataset collectively */
+ if ((dset_id1 = H5Dcreate2(group_id, DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME1,
+ DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create first dataset\n");
+ goto error;
+ }
+ if ((dset_id2 = H5Dcreate2(group_id, DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME2,
+ DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create second dataset\n");
+ goto error;
+ }
+
+ for (i = 1, data_size = 1; i < DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ goto error;
+ }
+
+ for (i = 0; i < data_size / DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE; i++)
+ ((int *)write_buf)[i] = mpi_rank;
+
+ for (i = 0; i < DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ goto error;
+ }
+
+ {
+ hsize_t mdims[] = {data_size / DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ /*
+ * To test the independent orders of writes between processes, all
+ * even number processes write to dataset1 first, then dataset2.
+ * All odd number processes write to dataset2 first, then dataset1.
+ */
+ BEGIN_INDEPENDENT_OP(dset_write)
+ {
+ if (mpi_rank % 2 == 0) {
+ if (H5Dwrite(dset_id1, DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" even ranks failed to write to dataset 1\n");
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+ if (H5Dwrite(dset_id2, DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" even ranks failed to write to dataset 2\n");
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+ }
+ else {
+ if (H5Dwrite(dset_id2, DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" odd ranks failed to write to dataset 2\n");
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+ if (H5Dwrite(dset_id1, DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" odd ranks failed to write to dataset 1\n");
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_write);
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ H5Sclose(mspace_id);
+ mspace_id = H5I_INVALID_HID;
+ H5Sclose(fspace_id);
+ fspace_id = H5I_INVALID_HID;
+ H5Dclose(dset_id1);
+ dset_id1 = H5I_INVALID_HID;
+ H5Dclose(dset_id2);
+ dset_id2 = H5I_INVALID_HID;
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ goto error;
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ goto error;
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ goto error;
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_INDEPENDENT_WRITE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n", DATASET_INDEPENDENT_WRITE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id1 = H5Dopen2(group_id, DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME1, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME1);
+ goto error;
+ }
+ if ((dset_id2 = H5Dopen2(group_id, DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME2);
+ goto error;
+ }
+
+ /*
+ * Verify that data has been written correctly.
+ */
+ if ((fspace_id = H5Dget_space(dset_id1)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ if (NULL == (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ if (H5Dread(dset_id1, DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME1);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+
+ for (j = 0; j < data_size / DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE; j++) {
+ if (((int *)read_buf)[j + (i * (data_size / DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE))] !=
+ (int)i) {
+ H5_FAILED();
+ HDprintf(" dataset 1 data verification failed\n");
+ goto error;
+ }
+ }
+ }
+
+ if (H5Dread(dset_id2, DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME2);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+
+ for (j = 0; j < data_size / DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE; j++) {
+ if (((int *)read_buf)[j + (i * (data_size / DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE))] !=
+ (int)i) {
+ H5_FAILED();
+ HDprintf(" dataset 2 data verification failed\n");
+ goto error;
+ }
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id1) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id1);
+ H5Dclose(dset_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be written to by having
+ * one of the MPI ranks select 0 rows in a hyperslab selection.
+ */
+#define DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK 2
+#define DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_WRITE_ONE_PROC_0_SEL_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_WRITE_ONE_PROC_0_SEL_TEST_GROUP_NAME "one_rank_0_sel_write_test"
+#define DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_NAME "one_rank_0_sel_dset"
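In the test below, the rank that contributes nothing builds a hyperslab with zero-sized count/block values and a zero-length memory space. A small hedged sketch of that empty selection, with an explicit sanity check the test itself does not perform (empty_selection_sketch is hypothetical):

static herr_t
empty_selection_sketch(hid_t fspace_id, hid_t *mspace_id_out)
{
    hsize_t start[2] = {0, 0};
    hsize_t zero[2]  = {0, 0};
    hsize_t mdims[1] = {0};

    /* A hyperslab with 0 counts/blocks selects no elements at all */
    if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, zero, zero) < 0)
        return -1;

    /* Optional sanity check: the selection really is empty */
    if (H5Sget_select_npoints(fspace_id) != 0)
        return -1;

    /* The matching memory space is simply a 0-length 1-D space */
    if ((*mspace_id_out = H5Screate_simple(1, mdims, NULL)) < 0)
        return -1;

    return 0;
}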
+static int
+test_write_dataset_one_proc_0_selection(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ hsize_t start[DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK];
+ hsize_t stride[DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK];
+ hsize_t count[DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK];
+ hsize_t block[DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("write to dataset with one rank selecting 0 rows");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_ONE_PROC_0_SEL_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_WRITE_ONE_PROC_0_SEL_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = H5Screate_simple(DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_NAME,
+ DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 1, data_size = 1; i < DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_WRITE_ONE_PROC_0_SEL_TEST_DTYPE_SIZE;
+
+ BEGIN_INDEPENDENT_OP(write_buf_alloc)
+ {
+ if (!MAINPROCESS) {
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(write_buf_alloc);
+ }
+
+ for (i = 0; i < data_size / DATASET_WRITE_ONE_PROC_0_SEL_TEST_DTYPE_SIZE; i++)
+ ((int *)write_buf)[i] = mpi_rank;
+ }
+ }
+ END_INDEPENDENT_OP(write_buf_alloc);
+
+ for (i = 0; i < DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = MAINPROCESS ? 0 : 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = MAINPROCESS ? 0 : dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = MAINPROCESS ? 0 : 1;
+ }
+
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ goto error;
+ }
+
+ {
+ hsize_t mdims[] = {data_size / DATASET_WRITE_ONE_PROC_0_SEL_TEST_DTYPE_SIZE};
+
+ if (MAINPROCESS)
+ mdims[0] = 0;
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ BEGIN_INDEPENDENT_OP(dset_write)
+ {
+ if (H5Dwrite(dset_id, DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT,
+ write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+ }
+ END_INDEPENDENT_OP(dset_write);
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ goto error;
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ goto error;
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ goto error;
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_WRITE_ONE_PROC_0_SEL_TEST_GROUP_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_WRITE_ONE_PROC_0_SEL_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_ONE_PROC_0_SEL_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ if (H5Dread(dset_id, DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+
+ if (i != 0) {
+ for (j = 0; j < data_size / DATASET_WRITE_ONE_PROC_0_SEL_TEST_DTYPE_SIZE; j++) {
+ if (((int *)read_buf)[j + (i * (data_size / DATASET_WRITE_ONE_PROC_0_SEL_TEST_DTYPE_SIZE))] !=
+ (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be written to by having
+ * one of the MPI ranks call H5Sselect_none.
+ */
+#define DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK 2
+#define DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_GROUP_NAME "one_rank_none_sel_write_test"
+#define DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_NAME "one_rank_none_sel_dset"
+static int
+test_write_dataset_one_proc_none_selection(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ hsize_t start[DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK];
+ hsize_t stride[DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK];
+ hsize_t count[DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK];
+ hsize_t block[DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("write to dataset with one rank using 'none' selection");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = H5Screate_simple(DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_NAME,
+ DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 1, data_size = 1; i < DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE;
+
+ BEGIN_INDEPENDENT_OP(write_buf_alloc)
+ {
+ if (!MAINPROCESS) {
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(write_buf_alloc);
+ }
+
+ for (i = 0; i < data_size / DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE; i++)
+ ((int *)write_buf)[i] = mpi_rank;
+ }
+ }
+ END_INDEPENDENT_OP(write_buf_alloc);
+
+ for (i = 0; i < DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ BEGIN_INDEPENDENT_OP(set_space_sel)
+ {
+ if (MAINPROCESS) {
+ if (H5Sselect_none(fspace_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set 'none' selection for dataset write\n");
+ INDEPENDENT_OP_ERROR(set_space_sel);
+ }
+ }
+ else {
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ INDEPENDENT_OP_ERROR(set_space_sel);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(set_space_sel);
+
+ {
+ hsize_t mdims[] = {data_size / DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE};
+
+ if (MAINPROCESS)
+ mdims[0] = 0;
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ BEGIN_INDEPENDENT_OP(dset_write)
+ {
+ if (H5Dwrite(dset_id, DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+ }
+ END_INDEPENDENT_OP(dset_write);
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ goto error;
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ goto error;
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ goto error;
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_GROUP_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ if (H5Dread(dset_id, DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
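+    /* Verify every rank's row except rank 0's, which selected nothing and wrote no data */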
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+
+ if (i != 0) {
+ for (j = 0; j < data_size / DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE; j++) {
+ if (((int *)
+ read_buf)[j + (i * (data_size / DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE))] !=
+ (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be written to by having
+ * one of the MPI ranks use an ALL selection, while the other
+ * ranks write nothing.
+ */
+#define DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_SPACE_RANK 2
+#define DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_GROUP_NAME "one_rank_all_sel_write_test"
+#define DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_NAME "one_rank_all_sel_dset"
+static int
+test_write_dataset_one_proc_all_selection(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("write to dataset with one rank using all selection; others none selection");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = H5Screate_simple(DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_NAME,
+ DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
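+    /* Rank 0 writes the entire dataset with an 'all' selection, so the buffer covers every dimension */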
+ for (i = 0, data_size = 1; i < DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE;
+
+ BEGIN_INDEPENDENT_OP(write_buf_alloc)
+ {
+ if (MAINPROCESS) {
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(write_buf_alloc);
+ }
+
+ for (i = 0; i < data_size / DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE; i++)
+ ((int *)write_buf)[i] = (int)i;
+ }
+ }
+ END_INDEPENDENT_OP(write_buf_alloc);
+
+ BEGIN_INDEPENDENT_OP(set_space_sel)
+ {
+ if (MAINPROCESS) {
+ if (H5Sselect_all(fspace_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set 'all' selection for dataset write\n");
+ INDEPENDENT_OP_ERROR(set_space_sel);
+ }
+ }
+ else {
+ if (H5Sselect_none(fspace_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set 'none' selection for dataset write\n");
+ INDEPENDENT_OP_ERROR(set_space_sel);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(set_space_sel);
+
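+    /* Only rank 0 contributes data; the other ranks get a zero-sized memory dataspace to pair with
+     * their 'none' file selection */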
+ {
+ hsize_t mdims[] = {data_size / DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE};
+
+ if (!MAINPROCESS)
+ mdims[0] = 0;
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ BEGIN_INDEPENDENT_OP(dset_write)
+ {
+ if (H5Dwrite(dset_id, DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+ }
+ END_INDEPENDENT_OP(dset_write);
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ goto error;
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ goto error;
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ goto error;
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_GROUP_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ if (H5Dread(dset_id, DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
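+    /* Rank 0 wrote the values 0..N-1 across the whole dataset, so every element should match its
+     * linear index */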
+ for (i = 0; i < data_size / DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE; i++) {
+ if (((int *)read_buf)[i] != (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be written to by having
+ * a hyperslab selection in the file dataspace and an all selection
+ * in the memory dataspace.
+ *
+ * XXX: Currently reads from invalid memory locations; the test body is guarded by the BROKEN macro.
+ */
+#define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK 2
+#define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_GROUP_NAME "hyper_sel_file_all_sel_mem_write_test"
+#define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME "hyper_sel_file_all_sel_mem_dset"
+static int
+test_write_dataset_hyper_file_all_mem(void)
+{
+#ifdef BROKEN
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ hsize_t start[DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK];
+ hsize_t stride[DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK];
+ hsize_t count[DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK];
+ hsize_t block[DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+#endif
+
+ TESTING("write to dataset with hyperslab sel. for file space; all sel. for memory");
+
+#ifdef BROKEN
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = H5Screate_simple(DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME,
+ DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 1, data_size = 1; i < DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DTYPE_SIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ goto error;
+ }
+
+ for (i = 0; i < data_size / DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DTYPE_SIZE; i++)
+ ((int *)write_buf)[i] = mpi_rank;
+
+ for (i = 0; i < DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+            start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ goto error;
+ }
+
+ if (H5Dwrite(dset_id, DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_DTYPE, H5S_ALL, fspace_id, H5P_DEFAULT,
+ write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ goto error;
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ goto error;
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ goto error;
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id =
+ H5Gopen2(container_group, DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ if (H5Dread(dset_id, DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+
+ for (j = 0; j < data_size / DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DTYPE_SIZE; j++) {
+ if (((int *)read_buf)[j + (i * (data_size / DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DTYPE_SIZE))] !=
+ (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+#else
+ SKIPPED();
+#endif
+
+ return 0;
+
+#ifdef BROKEN
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+#endif
+}
+
+/*
+ * A test to ensure that a dataset can be written to by having
+ * an all selection in the file dataspace and a hyperslab
+ * selection in the memory dataspace.
+ */
+#define DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK 2
+#define DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME "all_sel_file_hyper_sel_mem_write_test"
+#define DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_NAME "all_sel_file_hyper_sel_mem_dset"
+static int
+test_write_dataset_all_file_hyper_mem(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("write to dataset with all sel. for file space; hyperslab sel. for memory");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = H5Screate_simple(DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_NAME,
+ DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE;
+
+ BEGIN_INDEPENDENT_OP(write_buf_alloc)
+ {
+ if (MAINPROCESS) {
+ /*
+ * Allocate twice the amount of memory needed and leave "holes" in the memory
+ * buffer in order to prove that the mapping from hyperslab selection <-> all
+ * selection works correctly.
+ */
+ if (NULL == (write_buf = HDmalloc(2 * data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(write_buf_alloc);
+ }
+
+ for (i = 0; i < 2 * (data_size / DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE); i++) {
+ /* Write actual data to even indices */
+ if (i % 2 == 0)
+ ((int *)write_buf)[i] = (int)((i / 2) + (i % 2));
+ else
+ ((int *)write_buf)[i] = 0;
+ }
+ }
+ }
+ END_INDEPENDENT_OP(write_buf_alloc);
+
+ /*
+     * Only rank 0 performs the dataset write: writing the entire dataset from every rank would
+     * strain system resources, and the overlapping writes would leave the final contents
+     * unpredictable.
+ */
+ BEGIN_INDEPENDENT_OP(dset_write)
+ {
+ if (MAINPROCESS) {
+ hsize_t start[1] = {0};
+ hsize_t stride[1] = {2};
+ hsize_t count[1] = {data_size / DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE};
+ hsize_t block[1] = {1};
+ hsize_t mdims[] = {2 * (data_size / DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE)};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+
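+            /* Stride-2 hyperslab selects the even indices of the doubled memory buffer, i.e. only the
+             * "real" data, skipping the holes */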
+ if (H5Sselect_hyperslab(mspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+
+ if (H5Dwrite(dset_id, DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_write);
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ goto error;
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ goto error;
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ goto error;
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id =
+ H5Gopen2(container_group, DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ if (H5Dread(dset_id, DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < data_size / DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE; i++) {
+ if (((int *)read_buf)[i] != (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be written to by having
+ * a point selection in the file dataspace and an all selection
+ * in the memory dataspace.
+ */
+static int
+test_write_dataset_point_file_all_mem(void)
+{
+ TESTING("write to dataset with point sel. for file space; all sel. for memory");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * A test to ensure that a dataset can be written to by having
+ * an all selection in the file dataspace and a point selection
+ * in the memory dataspace.
+ */
+#define DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_SPACE_RANK 2
+#define DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_GROUP_NAME "all_sel_file_point_sel_mem_write_test"
+#define DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_NAME "all_sel_file_point_sel_mem_dset"
+static int
+test_write_dataset_all_file_point_mem(void)
+{
+ hssize_t space_npoints;
+ hsize_t *points = NULL;
+ hsize_t *dims = NULL;
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("write to dataset with all sel. for file space; point sel. for memory");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = H5Screate_simple(DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_NAME,
+ DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE;
+
+ BEGIN_INDEPENDENT_OP(write_buf_alloc)
+ {
+ if (MAINPROCESS) {
+ /*
+ * Allocate twice the amount of memory needed and leave "holes" in the memory
+ * buffer in order to prove that the mapping from point selection <-> all
+ * selection works correctly.
+ */
+ if (NULL == (write_buf = HDmalloc(2 * data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(write_buf_alloc);
+ }
+
+ for (i = 0; i < 2 * (data_size / DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE); i++) {
+ /* Write actual data to even indices */
+ if (i % 2 == 0)
+ ((int *)write_buf)[i] = (int)((i / 2) + (i % 2));
+ else
+ ((int *)write_buf)[i] = 0;
+ }
+ }
+ }
+ END_INDEPENDENT_OP(write_buf_alloc);
+
+ /*
+     * Only rank 0 performs the dataset write: writing the entire dataset from every rank would
+     * strain system resources, and the overlapping writes would leave the final contents
+     * unpredictable.
+ */
+ BEGIN_INDEPENDENT_OP(dset_write)
+ {
+ if (MAINPROCESS) {
+ hsize_t mdims[] = {2 * (data_size / DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE)};
+ int j;
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+
+ if (NULL == (points = HDmalloc((data_size / DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE) *
+ sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for point selection\n");
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+
+ /* Select every other point in the 1-dimensional memory dataspace */
+ for (i = 0, j = 0; i < 2 * (data_size / DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE); i++) {
+ if (i % 2 == 0)
+ points[j++] = (hsize_t)i;
+ }
+
+ if (H5Sselect_elements(mspace_id, H5S_SELECT_SET,
+ data_size / DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE,
+ points) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set point selection for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+
+ if (H5Dwrite(dset_id, DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_write);
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (points) {
+ HDfree(points);
+ points = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ goto error;
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ goto error;
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ goto error;
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id =
+ H5Gopen2(container_group, DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ if (H5Dread(dset_id, DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < data_size / DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE; i++) {
+ if (((int *)read_buf)[i] != (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (points)
+ HDfree(points);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be written to by having
+ * a hyperslab selection in the file dataspace and a point
+ * selection in the memory dataspace.
+ */
+#define DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK 2
+#define DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME "hyper_sel_file_point_sel_mem_write_test"
+#define DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_NAME "hyper_sel_file_point_sel_mem_dset"
+static int
+test_write_dataset_hyper_file_point_mem(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ hsize_t *points = NULL;
+ hsize_t start[DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK];
+ hsize_t stride[DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK];
+ hsize_t count[DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK];
+ hsize_t block[DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("write to dataset with hyperslab sel. for file space; point sel. for memory");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = H5Screate_simple(DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_NAME,
+ DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
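+    /* Each rank writes one "row" along dimension 0, so the per-rank buffer size excludes dims[0] */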
+ for (i = 1, data_size = 1; i < DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE;
+
+ /*
+ * Allocate twice the amount of memory needed and leave "holes" in the memory
+ * buffer in order to prove that the mapping from point selection <-> hyperslab
+ * selection works correctly.
+ */
+ if (NULL == (write_buf = HDmalloc(2 * data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ goto error;
+ }
+
+ for (i = 0; i < 2 * (data_size / DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE); i++) {
+ /* Write actual data to even indices */
+ if (i % 2 == 0)
+ ((int *)write_buf)[i] = mpi_rank;
+ else
+ ((int *)write_buf)[i] = 0;
+ }
+
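+    /* Each rank's file hyperslab is the single row at index mpi_rank along dimension 0 */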
+ for (i = 0; i < DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ goto error;
+ }
+
+ {
+ hsize_t mdims[] = {2 * (data_size / DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE)};
+ int j;
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+
+ if (NULL == (points = HDmalloc((data_size / DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE) *
+ sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for point selection\n");
+ goto error;
+ }
+
+ /* Select every other point in the 1-dimensional memory dataspace */
+ for (i = 0, j = 0; i < 2 * (data_size / DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE); i++) {
+ if (i % 2 == 0)
+ points[j++] = (hsize_t)i;
+ }
+
+ if (H5Sselect_elements(mspace_id, H5S_SELECT_SET,
+ data_size / DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE, points) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set point selection for dataset write\n");
+ goto error;
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (points) {
+ HDfree(points);
+ points = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ goto error;
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ goto error;
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ goto error;
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ if (H5Dread(dset_id, DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
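+    /* Each rank's row of the dataset should now contain that rank's number */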
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+
+ for (j = 0; j < data_size / DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE; j++) {
+ if (((int *)
+ read_buf)[j + (i * (data_size / DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE))] !=
+ (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (points)
+ HDfree(points);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be written to by having
+ * a point selection in the file dataspace and a hyperslab
+ * selection in the memory dataspace.
+ */
+#define DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK 2
+#define DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME "point_sel_file_hyper_sel_mem_write_test"
+#define DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_NAME "point_sel_file_hyper_sel_mem_dset"
+static int
+test_write_dataset_point_file_hyper_mem(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ hsize_t *points = NULL;
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("write to dataset with point sel. for file space; hyperslab sel. for memory");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = H5Screate_simple(DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_NAME,
+ DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 1, data_size = 1; i < DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE;
+
+ /*
+ * Allocate twice the amount of memory needed and leave "holes" in the memory
+ * buffer in order to prove that the mapping from hyperslab selection <-> point
+ * selection works correctly.
+ */
+ if (NULL == (write_buf = HDmalloc(2 * data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ goto error;
+ }
+
+ for (i = 0; i < 2 * (data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE); i++) {
+ /* Write actual data to even indices */
+ if (i % 2 == 0)
+ ((int *)write_buf)[i] = mpi_rank;
+ else
+ ((int *)write_buf)[i] = 0;
+ }
+
+ if (NULL == (points = HDmalloc((data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE) *
+ DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK * sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for point selection\n");
+ goto error;
+ }
+
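+    /* Build the file-space coordinates for this rank's row: dimension 0 is pinned to mpi_rank and the
+     * remaining coordinates walk the row element by element */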
+ for (i = 0; i < data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE; i++) {
+ size_t j;
+
+ for (j = 0; j < DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK; j++) {
+ size_t idx = (i * (size_t)DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK) + j;
+
+ if (j == 0)
+ points[idx] = (hsize_t)mpi_rank;
+ else if (j != (size_t)DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK - 1)
+ points[idx] = i / dims[j + 1];
+ else
+ points[idx] = i % dims[j];
+ }
+ }
+
+ if (H5Sselect_elements(fspace_id, H5S_SELECT_SET,
+ data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE, points) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set point selection for dataset write\n");
+ goto error;
+ }
+
+ {
+ hsize_t start[1] = {0};
+ hsize_t stride[1] = {2};
+ hsize_t count[1] = {data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE};
+ hsize_t block[1] = {1};
+ hsize_t mdims[] = {2 * (data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE)};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+
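+        /* Stride-2 hyperslab picks the even indices of the doubled memory buffer so only the "real"
+         * data is written */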
+ if (H5Sselect_hyperslab(mspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set hyperslab selection for dataset write\n");
+ goto error;
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (points) {
+ HDfree(points);
+ points = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ goto error;
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ goto error;
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ goto error;
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ if (H5Dread(dset_id, DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
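+    /* Verify that each rank's row of the dataset holds that rank's number */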
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+
+ for (j = 0; j < data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE; j++) {
+ if (((int *)
+ read_buf)[j + (i * (data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE))] !=
+ (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (points)
+ HDfree(points);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be read from by having
+ * one of the MPI ranks select 0 rows in a hyperslab selection.
+ */
+#define DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK 2
+#define DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_READ_ONE_PROC_0_SEL_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_READ_ONE_PROC_0_SEL_TEST_GROUP_NAME "one_rank_0_sel_read_test"
+#define DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_NAME "one_rank_0_sel_dset"
+static int
+test_read_dataset_one_proc_0_selection(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ hsize_t start[DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK];
+ hsize_t stride[DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK];
+ hsize_t count[DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK];
+ hsize_t block[DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK];
+ size_t i, data_size, read_buf_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("read from dataset with one rank selecting 0 rows");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /*
+ * Have rank 0 create the dataset and completely fill it with data.
+ */
+ BEGIN_INDEPENDENT_OP(dset_create)
+ {
+ if (MAINPROCESS) {
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_READ_ONE_PROC_0_SEL_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_READ_ONE_PROC_0_SEL_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK, dims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create file dataspace for dataset\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_NAME,
+ DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_READ_ONE_PROC_0_SEL_TEST_DTYPE_SIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+ size_t elem_per_proc = (data_size / DATASET_READ_ONE_PROC_0_SEL_TEST_DTYPE_SIZE) / dims[0];
+
+ for (j = 0; j < elem_per_proc; j++) {
+ size_t idx = (i * elem_per_proc) + j;
+
+ ((int *)write_buf)[idx] = (int)i;
+ }
+ }
+
+ {
+ hsize_t mdims[] = {data_size / DATASET_READ_ONE_PROC_0_SEL_TEST_DTYPE_SIZE};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_DTYPE, mspace_id, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_create);
+
+ /*
+ * Re-open file on all ranks.
+ */
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_READ_ONE_PROC_0_SEL_TEST_GROUP_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n", DATASET_READ_ONE_PROC_0_SEL_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ BEGIN_INDEPENDENT_OP(read_buf_alloc)
+ {
+ if (!MAINPROCESS) {
+ read_buf_size =
+ ((size_t)(space_npoints / mpi_size) * DATASET_READ_ONE_PROC_0_SEL_TEST_DTYPE_SIZE);
+
+ if (NULL == (read_buf = HDmalloc(read_buf_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ INDEPENDENT_OP_ERROR(read_buf_alloc);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(read_buf_alloc);
+
+ {
+ hsize_t mdims[] = {(hsize_t)space_npoints / (hsize_t)mpi_size};
+
+ if (MAINPROCESS)
+ mdims[0] = 0;
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ for (i = 0; i < DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = MAINPROCESS ? 0 : 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = MAINPROCESS ? 0 : dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = MAINPROCESS ? 0 : 1;
+ }
+
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset read\n");
+ goto error;
+ }
+
+ BEGIN_INDEPENDENT_OP(dset_read)
+ {
+ if (H5Dread(dset_id, DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+ }
+ END_INDEPENDENT_OP(dset_read);
+
+ BEGIN_INDEPENDENT_OP(data_verify)
+ {
+ if (!MAINPROCESS) {
+ for (i = 0; i < (size_t)space_npoints / (size_t)mpi_size; i++) {
+ if (((int *)read_buf)[i] != mpi_rank) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ INDEPENDENT_OP_ERROR(data_verify);
+ }
+ }
+ }
+ }
+ END_INDEPENDENT_OP(data_verify);
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be read from by having
+ * one of the MPI ranks call H5Sselect_none.
+ */
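+/*
+ * Sketch of the pattern exercised below (names follow the test code; error
+ * handling omitted): rank 0 calls H5Sselect_none() on the file dataspace
+ * and pairs it with a zero-sized memory dataspace, the other ranks each
+ * select their own row, and all ranks still participate in the H5Dread():
+ *
+ *     if (MAINPROCESS)
+ *         H5Sselect_none(fspace_id);
+ *     else
+ *         H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block);
+ *     H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, H5P_DEFAULT, read_buf);
+ */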
+#define DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK 2
+#define DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_READ_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_READ_ONE_PROC_NONE_SEL_TEST_GROUP_NAME "one_rank_none_sel_read_test"
+#define DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_NAME "one_rank_none_sel_dset"
+static int
+test_read_dataset_one_proc_none_selection(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ hsize_t start[DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK];
+ hsize_t stride[DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK];
+ hsize_t count[DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK];
+ hsize_t block[DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK];
+ size_t i, data_size, read_buf_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("read from dataset with one rank using 'none' selection");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /*
+ * Have rank 0 create the dataset and completely fill it with data.
+ */
+ BEGIN_INDEPENDENT_OP(dset_create)
+ {
+ if (MAINPROCESS) {
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_READ_ONE_PROC_NONE_SEL_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_READ_ONE_PROC_NONE_SEL_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK, dims, NULL)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" failed to create file dataspace for dataset\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_NAME,
+ DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_READ_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+ size_t elem_per_proc = (data_size / DATASET_READ_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE) / dims[0];
+
+ for (j = 0; j < elem_per_proc; j++) {
+ size_t idx = (i * elem_per_proc) + j;
+
+ ((int *)write_buf)[idx] = (int)i;
+ }
+ }
+
+ {
+ hsize_t mdims[] = {data_size / DATASET_READ_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE, mspace_id, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_create);
+
+ /*
+ * Re-open file on all ranks.
+ */
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_READ_ONE_PROC_NONE_SEL_TEST_GROUP_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_READ_ONE_PROC_NONE_SEL_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ BEGIN_INDEPENDENT_OP(read_buf_alloc)
+ {
+ if (!MAINPROCESS) {
+ read_buf_size =
+ ((size_t)(space_npoints / mpi_size) * DATASET_READ_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE);
+
+ if (NULL == (read_buf = HDmalloc(read_buf_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ INDEPENDENT_OP_ERROR(read_buf_alloc);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(read_buf_alloc);
+
+ {
+ hsize_t mdims[] = {(hsize_t)space_npoints / (hsize_t)mpi_size};
+
+ if (MAINPROCESS)
+ mdims[0] = 0;
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ for (i = 0; i < DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ BEGIN_INDEPENDENT_OP(set_space_sel)
+ {
+ if (MAINPROCESS) {
+ if (H5Sselect_none(fspace_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set 'none' selection for dataset read\n");
+ INDEPENDENT_OP_ERROR(set_space_sel);
+ }
+ }
+ else {
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset read\n");
+ INDEPENDENT_OP_ERROR(set_space_sel);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(set_space_sel);
+
+ BEGIN_INDEPENDENT_OP(dset_read)
+ {
+ if (H5Dread(dset_id, DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+ }
+ END_INDEPENDENT_OP(dset_read);
+
+ BEGIN_INDEPENDENT_OP(data_verify)
+ {
+ if (!MAINPROCESS) {
+ for (i = 0; i < (size_t)space_npoints / (size_t)mpi_size; i++) {
+ if (((int *)read_buf)[i] != mpi_rank) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ INDEPENDENT_OP_ERROR(data_verify);
+ }
+ }
+ }
+ }
+ END_INDEPENDENT_OP(data_verify);
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be read from by having
+ * one of the MPI ranks use an ALL selection, while the other
+ * ranks read nothing.
+ */
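+/*
+ * Roughly (names as in the test code below): rank 0 selects the entire
+ * file dataspace with H5Sselect_all() and allocates a buffer large enough
+ * for every point, while the remaining ranks select nothing and use
+ * zero-sized memory dataspaces, so only rank 0 receives data:
+ *
+ *     if (MAINPROCESS)
+ *         H5Sselect_all(fspace_id);
+ *     else
+ *         H5Sselect_none(fspace_id);
+ *     H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, H5P_DEFAULT, read_buf);
+ */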
+#define DATASET_READ_ONE_PROC_ALL_SEL_TEST_SPACE_RANK 2
+#define DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_READ_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_READ_ONE_PROC_ALL_SEL_TEST_GROUP_NAME "one_rank_all_sel_read_test"
+#define DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_NAME "one_rank_all_sel_dset"
+static int
+test_read_dataset_one_proc_all_selection(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ size_t i, data_size, read_buf_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("read from dataset with one rank using all selection; others none selection");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_READ_ONE_PROC_ALL_SEL_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /*
+ * Have rank 0 create the dataset and completely fill it with data.
+ */
+ BEGIN_INDEPENDENT_OP(dset_create)
+ {
+ if (MAINPROCESS) {
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_READ_ONE_PROC_ALL_SEL_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_READ_ONE_PROC_ALL_SEL_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_READ_ONE_PROC_ALL_SEL_TEST_SPACE_RANK, dims, NULL)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" failed to create file dataspace for dataset\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_NAME,
+ DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_READ_ONE_PROC_ALL_SEL_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_READ_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+ size_t elem_per_proc = (data_size / DATASET_READ_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE) / dims[0];
+
+ for (j = 0; j < elem_per_proc; j++) {
+ size_t idx = (i * elem_per_proc) + j;
+
+ ((int *)write_buf)[idx] = (int)i;
+ }
+ }
+
+ {
+ hsize_t mdims[] = {data_size / DATASET_READ_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE, mspace_id, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_create);
+
+ /*
+ * Re-open file on all ranks.
+ */
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_READ_ONE_PROC_ALL_SEL_TEST_GROUP_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_READ_ONE_PROC_ALL_SEL_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ BEGIN_INDEPENDENT_OP(read_buf_alloc)
+ {
+ if (MAINPROCESS) {
+ read_buf_size = (size_t)space_npoints * DATASET_READ_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE;
+
+ if (NULL == (read_buf = HDmalloc(read_buf_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ INDEPENDENT_OP_ERROR(read_buf_alloc);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(read_buf_alloc);
+
+ {
+ hsize_t mdims[] = {(hsize_t)space_npoints};
+
+ if (!MAINPROCESS)
+ mdims[0] = 0;
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ BEGIN_INDEPENDENT_OP(set_space_sel)
+ {
+ if (MAINPROCESS) {
+ if (H5Sselect_all(fspace_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set 'all' selection for dataset read\n");
+ INDEPENDENT_OP_ERROR(set_space_sel);
+ }
+ }
+ else {
+ if (H5Sselect_none(fspace_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set 'none' selection for dataset read\n");
+ INDEPENDENT_OP_ERROR(set_space_sel);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(set_space_sel);
+
+ BEGIN_INDEPENDENT_OP(dset_read)
+ {
+ if (H5Dread(dset_id, DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+ }
+ END_INDEPENDENT_OP(dset_read);
+
+ BEGIN_INDEPENDENT_OP(data_verify)
+ {
+ if (MAINPROCESS) {
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+ size_t elem_per_proc = (size_t)(space_npoints / mpi_size);
+
+ for (j = 0; j < elem_per_proc; j++) {
+ int idx = (int)((i * elem_per_proc) + j);
+
+ if (((int *)read_buf)[idx] != (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ INDEPENDENT_OP_ERROR(data_verify);
+ }
+ }
+ }
+ }
+ }
+ END_INDEPENDENT_OP(data_verify);
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be read from by having
+ * a hyperslab selection in the file dataspace and an all
+ * selection in the memory dataspace.
+ */
+static int
+test_read_dataset_hyper_file_all_mem(void)
+{
+ TESTING("read from dataset with hyperslab sel. for file space; all sel. for memory");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * A test to ensure that a dataset can be read from by having
+ * an all selection in the file dataspace and a hyperslab
+ * selection in the memory dataspace.
+ */
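+/*
+ * In outline (a sketch only; 'npoints' stands for the dataspace point
+ * count retrieved below): rank 0 reads the whole dataset (H5S_ALL on the
+ * file side) into a buffer of twice the required size, using a stride-2
+ * memory hyperslab so every other element of the buffer is left untouched:
+ *
+ *     hsize_t start[1] = {0}, stride[1] = {2}, count[1] = {npoints}, block[1] = {1};
+ *     H5Sselect_hyperslab(mspace_id, H5S_SELECT_SET, start, stride, count, block);
+ *     H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, H5S_ALL, H5P_DEFAULT, read_buf);
+ */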
+#define DATASET_READ_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK 2
+#define DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_READ_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME "all_sel_file_hyper_sel_mem_read_test"
+#define DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_NAME "all_sel_file_hyper_sel_mem_dset"
+static int
+test_read_dataset_all_file_hyper_mem(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ size_t i, data_size, read_buf_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("read from dataset with all sel. for file space; hyperslab sel. for memory");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_READ_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /*
+ * Have rank 0 create the dataset and completely fill it with data.
+ */
+ BEGIN_INDEPENDENT_OP(dset_create)
+ {
+ if (MAINPROCESS) {
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_READ_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_READ_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_READ_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK, dims, NULL)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" failed to create file dataspace for dataset\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_NAME,
+ DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_READ_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+ size_t elem_per_proc =
+ (data_size / DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE) / dims[0];
+
+ for (j = 0; j < elem_per_proc; j++) {
+ size_t idx = (i * elem_per_proc) + j;
+
+ ((int *)write_buf)[idx] = (int)i;
+ }
+ }
+
+ {
+ hsize_t mdims[] = {data_size / DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_create);
+
+ /*
+ * Re-open file on all ranks.
+ */
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_READ_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_READ_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ /*
+     * Only have rank 0 perform the dataset read, since reading the entire dataset on
+     * every rank could put excessive strain on system resources.
+ */
+ BEGIN_INDEPENDENT_OP(dset_read)
+ {
+ if (MAINPROCESS) {
+ hsize_t start[1] = {0};
+ hsize_t stride[1] = {2};
+ hsize_t count[1] = {(hsize_t)space_npoints};
+ hsize_t block[1] = {1};
+ hsize_t mdims[] = {(hsize_t)(2 * space_npoints)};
+
+ /*
+ * Allocate twice the amount of memory needed and leave "holes" in the memory
+ * buffer in order to prove that the mapping from all selection <-> hyperslab
+ * selection works correctly.
+ */
+ read_buf_size = (size_t)(2 * space_npoints) * DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE;
+ if (NULL == (read_buf = HDcalloc(1, read_buf_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+
+ if (H5Sselect_hyperslab(mspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset read\n");
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+
+ if (H5Dread(dset_id, DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL,
+ H5P_DEFAULT, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n",
+ DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+ size_t elem_per_proc = (size_t)(space_npoints / mpi_size);
+
+ for (j = 0; j < 2 * elem_per_proc; j++) {
+ size_t idx = (i * 2 * elem_per_proc) + j;
+
+ if (j % 2 == 0) {
+ if (((int *)read_buf)[idx] != (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+ }
+ else {
+ if (((int *)read_buf)[idx] != 0) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+ }
+ }
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_read);
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be read from by having
+ * a point selection in the file dataspace and an all selection
+ * in the memory dataspace.
+ */
+static int
+test_read_dataset_point_file_all_mem(void)
+{
+ TESTING("read from dataset with point sel. for file space; all sel. for memory");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * A test to ensure that a dataset can be read from by having
+ * an all selection in the file dataspace and a point selection
+ * in the memory dataspace.
+ */
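+/*
+ * In outline (a sketch only; 'npoints' stands for the dataspace point
+ * count retrieved below): rank 0 reads the whole dataset (H5S_ALL on the
+ * file side) into a double-sized buffer whose even-numbered slots are
+ * picked with a point selection, so the odd slots keep their calloc'd
+ * zeros:
+ *
+ *     for (i = 0, j = 0; i < 2 * npoints; i++)
+ *         if (i % 2 == 0)
+ *             points[j++] = (hsize_t)i;
+ *     H5Sselect_elements(mspace_id, H5S_SELECT_SET, npoints, points);
+ *     H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, H5S_ALL, H5P_DEFAULT, read_buf);
+ */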
+#define DATASET_READ_ALL_FILE_POINT_MEM_TEST_SPACE_RANK 2
+#define DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_READ_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_READ_ALL_FILE_POINT_MEM_TEST_GROUP_NAME "all_sel_file_point_sel_mem_read_test"
+#define DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_NAME "all_sel_file_point_sel_mem_dset"
+static int
+test_read_dataset_all_file_point_mem(void)
+{
+ hssize_t space_npoints;
+ hsize_t *points = NULL;
+ hsize_t *dims = NULL;
+ size_t i, data_size, read_buf_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("read from dataset with all sel. for file space; point sel. for memory");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_READ_ALL_FILE_POINT_MEM_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /*
+ * Have rank 0 create the dataset and completely fill it with data.
+ */
+ BEGIN_INDEPENDENT_OP(dset_create)
+ {
+ if (MAINPROCESS) {
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_READ_ALL_FILE_POINT_MEM_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_READ_ALL_FILE_POINT_MEM_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_READ_ALL_FILE_POINT_MEM_TEST_SPACE_RANK, dims, NULL)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" failed to create file dataspace for dataset\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_NAME,
+ DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_READ_ALL_FILE_POINT_MEM_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_READ_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+ size_t elem_per_proc =
+ (data_size / DATASET_READ_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE) / dims[0];
+
+ for (j = 0; j < elem_per_proc; j++) {
+ size_t idx = (i * elem_per_proc) + j;
+
+ ((int *)write_buf)[idx] = (int)i;
+ }
+ }
+
+ {
+ hsize_t mdims[] = {data_size / DATASET_READ_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_create);
+
+ /*
+ * Re-open file on all ranks.
+ */
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_READ_ALL_FILE_POINT_MEM_TEST_GROUP_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_READ_ALL_FILE_POINT_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ /*
+     * Only have rank 0 perform the dataset read, since reading the entire dataset on
+     * every rank could put excessive strain on system resources.
+ */
+ BEGIN_INDEPENDENT_OP(dset_read)
+ {
+ if (MAINPROCESS) {
+ hsize_t mdims[] = {(hsize_t)(2 * space_npoints)};
+ size_t j;
+
+ /*
+ * Allocate twice the amount of memory needed and leave "holes" in the memory
+ * buffer in order to prove that the mapping from all selection <-> point
+ * selection works correctly.
+ */
+ read_buf_size = (size_t)(2 * space_npoints) * DATASET_READ_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE;
+ if (NULL == (read_buf = HDcalloc(1, read_buf_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+
+ if (NULL == (points = HDmalloc((size_t)space_npoints * sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for point selection\n");
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+
+ /* Select every other point in the 1-dimensional memory dataspace */
+ for (i = 0, j = 0; i < 2 * (size_t)space_npoints; i++) {
+ if (i % 2 == 0)
+ points[j++] = (hsize_t)i;
+ }
+
+ if (H5Sselect_elements(mspace_id, H5S_SELECT_SET, (size_t)space_npoints, points) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set point selection for dataset read\n");
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+
+ if (H5Dread(dset_id, DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL,
+ H5P_DEFAULT, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n",
+ DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t elem_per_proc = (size_t)(space_npoints / mpi_size);
+
+ for (j = 0; j < 2 * elem_per_proc; j++) {
+ size_t idx = (i * 2 * elem_per_proc) + j;
+
+ if (j % 2 == 0) {
+ if (((int *)read_buf)[idx] != (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+ }
+ else {
+ if (((int *)read_buf)[idx] != 0) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+ }
+ }
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_read);
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (points) {
+ HDfree(points);
+ points = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (points)
+ HDfree(points);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be read from by having
+ * a hyperslab selection in the file dataspace and a point
+ * selection in the memory dataspace.
+ */
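+/*
+ * Sketch of the per-rank pattern below ('points_per_rank' is shorthand
+ * for space_npoints / mpi_size; other names follow the test code): each
+ * rank selects its own row of the file dataspace as a hyperslab and
+ * scatters it into every other element of a double-sized memory buffer
+ * through a point selection:
+ *
+ *     H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block);
+ *     H5Sselect_elements(mspace_id, H5S_SELECT_SET, points_per_rank, points);
+ *     H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, H5P_DEFAULT, read_buf);
+ */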
+#define DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK 2
+#define DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_READ_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME "hyper_sel_file_point_sel_mem_read_test"
+#define DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_NAME "hyper_sel_file_point_sel_mem_dset"
+static int
+test_read_dataset_hyper_file_point_mem(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ hsize_t *points = NULL;
+ hsize_t start[DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK];
+ hsize_t stride[DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK];
+ hsize_t count[DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK];
+ hsize_t block[DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK];
+ size_t i, data_size, read_buf_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("read from dataset with hyperslab sel. for file space; point sel. for memory");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /*
+ * Have rank 0 create the dataset and completely fill it with data.
+ */
+ BEGIN_INDEPENDENT_OP(dset_create)
+ {
+ if (MAINPROCESS) {
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_READ_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_READ_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((fspace_id =
+ H5Screate_simple(DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK, dims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create file dataspace for dataset\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_NAME,
+ DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+ size_t elem_per_proc =
+ (data_size / DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE) / dims[0];
+
+ for (j = 0; j < elem_per_proc; j++) {
+ size_t idx = (i * elem_per_proc) + j;
+
+ ((int *)write_buf)[idx] = (int)i;
+ }
+ }
+
+ {
+ hsize_t mdims[] = {data_size / DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_create);
+
+ /*
+ * Re-open file on all ranks.
+ */
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id =
+ H5Gopen2(container_group, DATASET_READ_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_READ_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ /*
+ * Allocate twice the amount of memory needed and leave "holes" in the memory
+ * buffer in order to prove that the mapping from hyperslab selection <-> point
+ * selection works correctly.
+ */
+    read_buf_size =
+        (2 * (size_t)(space_npoints / mpi_size) * DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE);
+ if (NULL == (read_buf = HDcalloc(1, read_buf_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset read\n");
+ goto error;
+ }
+
+ {
+ hsize_t mdims[] = {(hsize_t)(2 * (space_npoints / mpi_size))};
+ size_t j;
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+
+ if (NULL == (points = HDmalloc((size_t)(space_npoints / mpi_size) * sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for point selection\n");
+ goto error;
+ }
+
+ /* Select every other point in the 1-dimensional memory dataspace */
+ for (i = 0, j = 0; i < (size_t)(2 * (space_npoints / mpi_size)); i++) {
+ if (i % 2 == 0)
+ points[j++] = (hsize_t)i;
+ }
+
+ if (H5Sselect_elements(mspace_id, H5S_SELECT_SET, (size_t)(space_npoints / mpi_size), points) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set point selection for dataset read\n");
+ goto error;
+ }
+ }
+
+ if (H5Dread(dset_id, DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)(2 * (space_npoints / mpi_size)); i++) {
+ if (i % 2 == 0) {
+ if (((int *)read_buf)[i] != (int)mpi_rank) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+ else {
+ if (((int *)read_buf)[i] != 0) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (points) {
+ HDfree(points);
+ points = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (points)
+ HDfree(points);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be read from by having
+ * a point selection in the file dataspace and a hyperslab
+ * selection in the memory dataspace.
+ */
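+/*
+ * The mirror image of the previous test, roughly (a sketch only; the
+ * exact selection parameters and 'points_per_rank' count are set up in
+ * the test body): each rank picks its own elements of the file dataspace
+ * with a point selection and lands them in memory through a hyperslab
+ * selection:
+ *
+ *     H5Sselect_elements(fspace_id, H5S_SELECT_SET, points_per_rank, points);
+ *     H5Sselect_hyperslab(mspace_id, H5S_SELECT_SET, start, stride, count, block);
+ *     H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, H5P_DEFAULT, read_buf);
+ */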
+#define DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK 2
+#define DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_READ_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME "point_sel_file_hyper_sel_mem_read_test"
+#define DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_NAME "point_sel_file_hyper_sel_mem_dset"
+static int
+test_read_dataset_point_file_hyper_mem(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ hsize_t *points = NULL;
+ size_t i, data_size, read_buf_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("read from dataset with point sel. for file space; hyperslab sel. for memory");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /*
+ * Have rank 0 create the dataset and completely fill it with data.
+ */
+ BEGIN_INDEPENDENT_OP(dset_create)
+ {
+ if (MAINPROCESS) {
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_READ_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_READ_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((fspace_id =
+ H5Screate_simple(DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK, dims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create file dataspace for dataset\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_NAME,
+ DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+ size_t elem_per_proc =
+ (data_size / DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE) / dims[0];
+
+ for (j = 0; j < elem_per_proc; j++) {
+ size_t idx = (i * elem_per_proc) + j;
+
+ ((int *)write_buf)[idx] = (int)i;
+ }
+ }
+
+ {
+ hsize_t mdims[] = {data_size / DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_create);
+
+ /*
+ * Re-open file on all ranks.
+ */
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id =
+ H5Gopen2(container_group, DATASET_READ_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_READ_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ /*
+ * Allocate twice the amount of memory needed and leave "holes" in the memory
+ * buffer in order to prove that the mapping from point selection <-> hyperslab
+ * selection works correctly.
+ */
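+ /*
+  * As a concrete sketch: if space_npoints / mpi_size were 4, read_buf would
+  * hold 8 ints; the strided memory selection set up below writes only slots
+  * 0, 2, 4 and 6, while slots 1, 3, 5 and 7 keep the 0 placed there by
+  * HDcalloc.
+  */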
+ read_buf_size =
+ (2 * (size_t)(space_npoints / mpi_size) * DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE);
+ if (NULL == (read_buf = HDcalloc(1, read_buf_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ if (NULL == (points = HDmalloc((size_t)((space_npoints / mpi_size) *
+ DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK) *
+ sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for point selection\n");
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)(space_npoints / mpi_size); i++) {
+ size_t j;
+
+ for (j = 0; j < DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK; j++) {
+ size_t idx = (i * DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK) + j;
+
+ if (j == 0)
+ points[idx] = (hsize_t)mpi_rank;
+ else if (j != DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK - 1)
+ points[idx] = i / dims[j + 1];
+ else
+ points[idx] = i % dims[j];
+ }
+ }
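+ /*
+  * For the 2-dimensional dataspace used here, point i therefore has the
+  * coordinates (mpi_rank, i % dims[1]), i.e. every rank selects elements
+  * from the row of the dataset matching its rank number.
+  */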
+
+ if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, (size_t)(space_npoints / mpi_size), points) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set point selection for dataset read\n");
+ goto error;
+ }
+
+ {
+ hsize_t start[1] = {0};
+ hsize_t stride[1] = {2};
+ hsize_t count[1] = {(hsize_t)(space_npoints / mpi_size)};
+ hsize_t block[1] = {1};
+ hsize_t mdims[] = {(hsize_t)(2 * (space_npoints / mpi_size))};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+
+ if (H5Sselect_hyperslab(mspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set hyperslab selection for dataset write\n");
+ goto error;
+ }
+ }
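+ /*
+  * With start = 0, stride = 2, count = space_npoints / mpi_size and
+  * block = 1, the selected memory elements are 0, 2, 4, ... -- the
+  * even-indexed slots of the oversized read buffer allocated above.
+  */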
+
+ if (H5Dread(dset_id, DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)(2 * (space_npoints / mpi_size)); i++) {
+ if (i % 2 == 0) {
+ if (((int *)read_buf)[i] != (int)mpi_rank) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+ else {
+ if (((int *)read_buf)[i] != 0) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (points) {
+ HDfree(points);
+ points = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (points)
+ HDfree(points);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset composed of multiple chunks
+ * can be written and read correctly. When reading back the
+ * chunks of the dataset, the file dataspace and memory dataspace
+ * used are the same shape. The dataset's first dimension grows
+ * with the number of MPI ranks, while the other dimensions are fixed.
+ */
+#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE \
+ 100 /* Should be an even divisor of fixed dimension size */
+#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_FIXED_DIMSIZE 1000
+#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK 2
+#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE sizeof(int)
+#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_GROUP_NAME \
+ "multi_chunk_dataset_write_same_space_read_test"
+#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME "multi_chunk_dataset"
+static int
+test_write_multi_chunk_dataset_same_shape_read(void)
+{
+ hsize_t *dims = NULL;
+ hsize_t *chunk_dims = NULL;
+ hsize_t retrieved_chunk_dims[DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK];
+ hsize_t start[DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK];
+ hsize_t count[DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK];
+ size_t i, data_size, chunk_size, n_chunks_per_rank;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ int read_buf[1][DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE];
+
+ TESTING("write to dataset with multiple chunks using same shaped dataspaces");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, or getting property list aren't "
+ "supported with this connector\n");
+ return 0;
+ }
+
+ if (NULL ==
+ (dims = HDmalloc(DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK * sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset dimensionality\n");
+ goto error;
+ }
+
+ if (NULL == (chunk_dims = HDmalloc(DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK *
+ sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset dimensionality\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ if (i == 0) {
+ dims[i] = (hsize_t)mpi_size;
+ chunk_dims[i] = 1;
+ }
+ else {
+ dims[i] = DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_FIXED_DIMSIZE;
+ chunk_dims[i] = DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE;
+ }
+ }
+
+ for (i = 0, chunk_size = 1; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ chunk_size *= chunk_dims[i];
+ chunk_size *= DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ for (i = 0, data_size = 1; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ /*
+ * Have rank 0 create the dataset and completely fill it with data.
+ */
+ BEGIN_INDEPENDENT_OP(dset_create)
+ {
+ if (MAINPROCESS) {
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((group_id =
+ H5Gcreate2(container_group, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK,
+ dims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create file dataspace for dataset\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (H5Pset_chunk(dcpl_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK,
+ chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set chunking on DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME,
+ DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, fspace_id,
+ H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ /*
+ * See if a copy of the DCPL reports the correct chunking.
+ */
+ if (H5Pclose(dcpl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve copy of DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims));
+ if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK,
+ retrieved_chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve chunking info\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ if (chunk_dims[i] != retrieved_chunk_dims[i]) {
+ H5_FAILED();
+ HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified "
+ "dimensionality\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ /*
+ * Ensure that each underlying chunk contains the values
+ *
+ * chunk_index .. (chunk_nelems - 1) + chunk_index.
+ *
+ * That is to say, for a chunk size of 10 x 10, chunk 0
+ * contains the values
+ *
+ * 0 .. 99
+ *
+ * while the next chunk contains the values
+ *
+ * 1 .. 100
+ *
+ * and so on.
+ */
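+ /*
+  * As a worked example with the layout set up above (dims = {mpi_size, 1000}
+  * and chunk_dims = {1, 100}): element (r, c) of the dataset falls into chunk
+  * r * 10 + c / 100 and has offset c % 100 within that chunk, so it is
+  * assigned the value (c % 100) + (r * 10 + c / 100).
+  */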
+ for (i = 0; i < data_size / DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE; i++) {
+ size_t j;
+ size_t base;
+ size_t tot_adjust;
+
+ /*
+ * Calculate a starting base value by taking the index value mod
+ * the size of a chunk in each dimension.
+ */
+ for (j = 0, base = i; j < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++)
+ if (chunk_dims[j] > 1 && base >= chunk_dims[j])
+ base %= chunk_dims[j];
+
+ /*
+ * Calculate the adjustment in each dimension.
+ */
+ for (j = 0, tot_adjust = 0;
+ j < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (j == (DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ tot_adjust += (i % dims[j]) / chunk_dims[j];
+ else {
+ size_t k;
+ size_t n_faster_elemts;
+
+ /*
+ * Calculate the number of elements in faster dimensions.
+ */
+ for (k = j + 1, n_faster_elemts = 1;
+ k < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; k++)
+ n_faster_elemts *= dims[k];
+
+ tot_adjust +=
+ (((i / n_faster_elemts) / chunk_dims[j]) * (dims[j + 1] / chunk_dims[j + 1])) +
+ (((i / n_faster_elemts) % chunk_dims[j]) * chunk_dims[j + 1]);
+ }
+ }
+
+ ((int *)write_buf)[i] = (int)(base + tot_adjust);
+ }
+
+ /*
+ * Write every chunk in the dataset.
+ */
+ if (H5Dwrite(dset_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dcpl_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ }
+ H5E_END_TRY;
+ dcpl_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_create);
+
+ /*
+ * Re-open file on all ranks.
+ */
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_GROUP_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id =
+ H5Dopen2(group_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ /*
+ * Create 2-dimensional memory dataspace for read buffer.
+ */
+ {
+ hsize_t mdims[] = {chunk_dims[0], chunk_dims[1]};
+
+ if ((mspace_id = H5Screate_simple(2, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ count[i] = chunk_dims[i];
+ }
+
+ /*
+ * Each rank reads their respective chunks in the dataset, checking the data for each one.
+ */
+ if (MAINPROCESS)
+ HDprintf("\n");
+ for (i = 0, n_chunks_per_rank = (data_size / (size_t)mpi_size) / chunk_size; i < n_chunks_per_rank; i++) {
+ size_t j, k;
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+
+ if (MAINPROCESS)
+ HDprintf("\r All ranks reading chunk %zu", i);
+
+ for (j = 0; j < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (j == 0)
+ start[j] = (hsize_t)mpi_rank;
+ else if (j == (DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ /* Fastest changing dimension */
+ start[j] = (i * chunk_dims[j]) % dims[j];
+ else
+ start[j] = ((i * chunk_dims[j + 1]) / dims[j + 1]) * (chunk_dims[j]);
+ }
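+ /*
+  * For the 2-dimensional layout used here this selects the 1 x 100 block at
+  * offset (mpi_rank, i * 100): chunk i within this rank's row of chunks.
+  * For example, i = 3 on rank 2 selects the block starting at (2, 300).
+  */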
+
+ /*
+ * Adjust file dataspace selection for next chunk.
+ */
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set hyperslab selection\n");
+ goto error;
+ }
+
+ for (j = 0; j < chunk_dims[0]; j++)
+ for (k = 0; k < chunk_dims[1]; k++)
+ read_buf[j][k] = 0;
+
+ if (H5Dread(dset_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (j = 0; j < chunk_dims[0]; j++) {
+ for (k = 0; k < chunk_dims[1]; k++) {
+ size_t val =
+ ((j * chunk_dims[1]) + k + i) +
+ ((hsize_t)mpi_rank * n_chunks_per_rank); /* Additional value offset for each rank */
+ if (read_buf[j][k] != (int)val) {
+ H5_FAILED();
+ HDprintf(" data verification failed for chunk %lld\n", (long long)i);
+ goto error;
+ }
+ }
+ }
+ }
+
+ if (chunk_dims) {
+ HDfree(chunk_dims);
+ chunk_dims = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (write_buf)
+ HDfree(write_buf);
+ if (chunk_dims)
+ HDfree(chunk_dims);
+ if (dims)
+ HDfree(dims);
+ H5Pclose(dcpl_id);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset composed of multiple chunks
+ * can be written and read correctly. When reading back the
+ * chunks of the dataset, the file dataspace and memory dataspace
+ * used are differently shaped. The dataset's first dimension grows
+ * with the number of MPI ranks, while the other dimensions are fixed.
+ */
+#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE \
+ 100 /* Should be an even divisor of fixed dimension size */
+#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE \
+ (DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE / 10)
+#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_FIXED_DIMSIZE 1000
+#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK 2
+#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE sizeof(int)
+#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_GROUP_NAME \
+ "multi_chunk_dataset_write_diff_space_read_test"
+#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME "multi_chunk_dataset"
+static int
+test_write_multi_chunk_dataset_diff_shape_read(void)
+{
+ hsize_t *dims = NULL;
+ hsize_t *chunk_dims = NULL;
+ hsize_t retrieved_chunk_dims[DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK];
+ hsize_t start[DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK];
+ hsize_t count[DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK];
+ size_t i, data_size, chunk_size, n_chunks_per_rank;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ int read_buf[DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE]
+ [DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE];
+
+ TESTING("write to dataset with multiple chunks using differently shaped dataspaces");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, or getting property list aren't "
+ "supported with this connector\n");
+ return 0;
+ }
+
+ if (NULL ==
+ (dims = HDmalloc(DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK * sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset dimensionality\n");
+ goto error;
+ }
+
+ if (NULL == (chunk_dims = HDmalloc(DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK *
+ sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset dimensionality\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ if (i == 0) {
+ dims[i] = (hsize_t)mpi_size;
+ chunk_dims[i] = 1;
+ }
+ else {
+ dims[i] = DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_FIXED_DIMSIZE;
+ chunk_dims[i] = DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE;
+ }
+ }
+
+ for (i = 0, chunk_size = 1; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ chunk_size *= chunk_dims[i];
+ chunk_size *= DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ for (i = 0, data_size = 1; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ /*
+ * Have rank 0 create the dataset and completely fill it with data.
+ */
+ BEGIN_INDEPENDENT_OP(dset_create)
+ {
+ if (MAINPROCESS) {
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((group_id =
+ H5Gcreate2(container_group, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK,
+ dims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create file dataspace for dataset\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (H5Pset_chunk(dcpl_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK,
+ chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set chunking on DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME,
+ DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, fspace_id,
+ H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ /*
+ * See if a copy of the DCPL reports the correct chunking.
+ */
+ if (H5Pclose(dcpl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve copy of DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims));
+ if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK,
+ retrieved_chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve chunking info\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ if (chunk_dims[i] != retrieved_chunk_dims[i]) {
+ H5_FAILED();
+ HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified "
+ "dimensionality\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ /*
+ * Ensure that each underlying chunk contains the values
+ *
+ * chunk_index .. (chunk_nelems - 1) + chunk_index.
+ *
+ * That is to say, for a chunk size of 10 x 10, chunk 0
+ * contains the values
+ *
+ * 0 .. 99
+ *
+ * while the next chunk contains the values
+ *
+ * 1 .. 100
+ *
+ * and so on.
+ */
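+ /*
+  * This is the same value pattern as in the same-shape test above: with
+  * dims = {mpi_size, 1000} and chunk_dims = {1, 100}, element (r, c) is
+  * written as (c % 100) + (r * 10 + c / 100).
+  */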
+ for (i = 0; i < data_size / DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE; i++) {
+ size_t j;
+ size_t base;
+ size_t tot_adjust;
+
+ /*
+ * Calculate a starting base value by taking the index value mod
+ * the size of a chunk in each dimension.
+ */
+ for (j = 0, base = i; j < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++)
+ if (chunk_dims[j] > 1 && base >= chunk_dims[j])
+ base %= chunk_dims[j];
+
+ /*
+ * Calculate the adjustment in each dimension.
+ */
+ for (j = 0, tot_adjust = 0;
+ j < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (j == (DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ tot_adjust += (i % dims[j]) / chunk_dims[j];
+ else {
+ size_t k;
+ size_t n_faster_elemts;
+
+ /*
+ * Calculate the number of elements in faster dimensions.
+ */
+ for (k = j + 1, n_faster_elemts = 1;
+ k < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; k++)
+ n_faster_elemts *= dims[k];
+
+ tot_adjust +=
+ (((i / n_faster_elemts) / chunk_dims[j]) * (dims[j + 1] / chunk_dims[j + 1])) +
+ (((i / n_faster_elemts) % chunk_dims[j]) * chunk_dims[j + 1]);
+ }
+ }
+
+ ((int *)write_buf)[i] = (int)(base + tot_adjust);
+ }
+
+ /*
+ * Write every chunk in the dataset.
+ */
+ if (H5Dwrite(dset_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dcpl_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ }
+ H5E_END_TRY;
+ dcpl_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_create);
+
+ /*
+ * Re-open file on all ranks.
+ */
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_GROUP_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id =
+ H5Dopen2(group_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ /*
+ * Create memory dataspace for read buffer.
+ */
+ {
+ hsize_t mdims[] = {DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE,
+ DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE};
+
+ if ((mspace_id = H5Screate_simple(DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK,
+ mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create memory dataspace\n");
+ goto error;
+ }
+ }
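+ /*
+  * Each 1 x 100 chunk selected in the file below is read into this 10 x 10
+  * buffer. Since both selections contain the same number of elements, the
+  * data is transferred in selection (row-major) order, so chunk element k
+  * ends up at read_buf[k / 10][k % 10]; the verification below relies on
+  * this mapping.
+  */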
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ count[i] = chunk_dims[i];
+ }
+
+ /*
+ * Each rank reads their respective chunks in the dataset, checking the data for each one.
+ */
+ if (MAINPROCESS)
+ HDprintf("\n");
+ for (i = 0, n_chunks_per_rank = (data_size / (size_t)mpi_size) / chunk_size; i < n_chunks_per_rank; i++) {
+ size_t j, k;
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+
+ if (MAINPROCESS)
+ HDprintf("\r All ranks reading chunk %zu", i);
+
+ for (j = 0; j < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (j == 0)
+ start[j] = (hsize_t)mpi_rank;
+ else if (j == (DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ /* Fastest changing dimension */
+ start[j] = (i * chunk_dims[j]) % dims[j];
+ else
+ start[j] = ((i * chunk_dims[j + 1]) / dims[j + 1]) * (chunk_dims[j]);
+ }
+
+ /*
+ * Adjust file dataspace selection for next chunk.
+ */
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set hyperslab selection\n");
+ goto error;
+ }
+
+ for (j = 0; j < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; j++)
+ for (k = 0; k < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; k++)
+ read_buf[j][k] = 0;
+
+ if (H5Dread(dset_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (j = 0; j < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; j++) {
+ for (k = 0; k < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; k++) {
+ size_t val = ((j * DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE) + k + i) +
+ ((hsize_t)mpi_rank * n_chunks_per_rank);
+
+ if (read_buf[j][k] != (int)val) {
+ H5_FAILED();
+ HDprintf(" data verification failed for chunk %lld\n", (long long)i);
+ goto error;
+ }
+ }
+ }
+ }
+
+ if (chunk_dims) {
+ HDfree(chunk_dims);
+ chunk_dims = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (write_buf)
+ HDfree(write_buf);
+ if (chunk_dims)
+ HDfree(chunk_dims);
+ if (dims)
+ HDfree(dims);
+ H5Pclose(dcpl_id);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset composed of multiple chunks
+ * can be written and read correctly several times in a row.
+ * When reading back the chunks of the dataset, the file
+ * dataspace and memory dataspace used are the same shape.
+ * The dataset's first dimension grows with the number of MPI
+ * ranks, while the other dimensions are fixed.
+ */
+#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE \
+ 100 /* Should be an even divisor of fixed dimension size */
+#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_FIXED_DIMSIZE 1000
+#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK 2
+#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE sizeof(int)
+#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_GROUP_NAME \
+ "multi_chunk_dataset_same_space_overwrite_test"
+#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME "multi_chunk_dataset"
+#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_NITERS 10
+static int
+test_overwrite_multi_chunk_dataset_same_shape_read(void)
+{
+ hsize_t *dims = NULL;
+ hsize_t *chunk_dims = NULL;
+ hsize_t retrieved_chunk_dims[DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK];
+ hsize_t start[DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK];
+ hsize_t count[DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK];
+ size_t i, data_size, chunk_size, n_chunks_per_rank;
+ size_t niter;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ int read_buf[1][DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE];
+
+ TESTING("several overwrites to dataset with multiple chunks using same shaped dataspaces");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, or getting property list aren't "
+ "supported with this connector\n");
+ return 0;
+ }
+
+ if (NULL == (dims = HDmalloc(DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK *
+ sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset dimensionality\n");
+ goto error;
+ }
+
+ if (NULL == (chunk_dims = HDmalloc(DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK *
+ sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset dimensionality\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ if (i == 0) {
+ dims[i] = (hsize_t)mpi_size;
+ chunk_dims[i] = 1;
+ }
+ else {
+ dims[i] = DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_FIXED_DIMSIZE;
+ chunk_dims[i] = DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE;
+ }
+ }
+
+ for (i = 0, chunk_size = 1; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ chunk_size *= chunk_dims[i];
+ chunk_size *= DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ for (i = 0, data_size = 1; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ /*
+ * Have rank 0 create the dataset, but don't fill it with data yet.
+ */
+ BEGIN_INDEPENDENT_OP(dset_create)
+ {
+ if (MAINPROCESS) {
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((group_id = H5Gcreate2(container_group,
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((fspace_id = H5Screate_simple(
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK, dims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create file dataspace for dataset\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (H5Pset_chunk(dcpl_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK,
+ chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set chunking on DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ /* Set dataset space allocation time to Early to ensure all chunk-related metadata is available to
+ * all other processes when they open the dataset */
+ if (H5Pset_alloc_time(dcpl_id, H5D_ALLOC_TIME_EARLY) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set allocation time on DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME,
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPE,
+ fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ /*
+ * See if a copy of the DCPL reports the correct chunking.
+ */
+ if (H5Pclose(dcpl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve copy of DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims));
+ if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK,
+ retrieved_chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve chunking info\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ if (chunk_dims[i] != retrieved_chunk_dims[i]) {
+ H5_FAILED();
+ HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified "
+ "dimensionality\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dcpl_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ }
+ H5E_END_TRY;
+ dcpl_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file on all ranks.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_create);
+
+ /*
+ * Re-open file on all ranks.
+ */
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_GROUP_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ /*
+ * Create 2-dimensional memory dataspace for read buffer.
+ */
+ {
+ hsize_t mdims[] = {chunk_dims[0], chunk_dims[1]};
+
+ if ((mspace_id = H5Screate_simple(2, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ count[i] = chunk_dims[i];
+ }
+
+ if (MAINPROCESS)
+ HDprintf("\n");
+ for (niter = 0; niter < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_NITERS; niter++) {
+ if ((dset_id = H5Dopen2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ BEGIN_INDEPENDENT_OP(dset_write)
+ {
+ if (MAINPROCESS) {
+ memset(write_buf, 0, data_size);
+
+ /*
+ * Ensure that each underlying chunk contains the values
+ *
+ * chunk_index .. (chunk_nelems - 1) + chunk_index.
+ *
+ * That is to say, for a chunk size of 10 x 10, chunk 0
+ * contains the values
+ *
+ * 0 .. 99
+ *
+ * while the next chunk contains the values
+ *
+ * 1 .. 100
+ *
+ * and so on. On each iteration, we add 1 to the previous
+ * values.
+ */
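+ /*
+  * Concretely, on iteration 'niter' element (r, c) of the dataset is set
+  * to (c % 100) + (r * 10 + c / 100) + niter, following the same layout
+  * as the multi-chunk write tests above.
+  */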
+ for (i = 0; i < data_size / DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE;
+ i++) {
+ size_t j;
+ size_t base;
+ size_t tot_adjust;
+
+ /*
+ * Calculate a starting base value by taking the index value mod
+ * the size of a chunk in each dimension.
+ */
+ for (j = 0, base = i;
+ j < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++)
+ if (chunk_dims[j] > 1 && base >= chunk_dims[j])
+ base %= chunk_dims[j];
+
+ /*
+ * Calculate the adjustment in each dimension.
+ */
+ for (j = 0, tot_adjust = 0;
+ j < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (j == (DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ tot_adjust += (i % dims[j]) / chunk_dims[j];
+ else {
+ size_t k;
+ size_t n_faster_elemts;
+
+ /*
+ * Calculate the number of elements in faster dimensions.
+ */
+ for (k = j + 1, n_faster_elemts = 1;
+ k < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; k++)
+ n_faster_elemts *= dims[k];
+
+ tot_adjust += (((i / n_faster_elemts) / chunk_dims[j]) *
+ (dims[j + 1] / chunk_dims[j + 1])) +
+ (((i / n_faster_elemts) % chunk_dims[j]) * chunk_dims[j + 1]);
+ }
+ }
+
+ ((int *)write_buf)[i] = (int)(base + tot_adjust + niter);
+ }
+
+ /*
+ * Write every chunk in the dataset.
+ */
+ if (H5Dwrite(dset_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, H5S_ALL,
+ H5S_ALL, H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_write);
+
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ /*
+ * Each rank reads their respective chunks in the dataset, checking the data for each one.
+ */
+ for (i = 0, n_chunks_per_rank = (data_size / (size_t)mpi_size) / chunk_size; i < n_chunks_per_rank;
+ i++) {
+ size_t j, k;
+
+ if (MAINPROCESS)
+ HDprintf("\r All ranks reading chunk %zu", i);
+
+ for (j = 0; j < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (j == 0)
+ start[j] = (hsize_t)mpi_rank;
+ else if (j == (DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ /* Fastest changing dimension */
+ start[j] = (i * chunk_dims[j]) % dims[j];
+ else
+ start[j] = ((i * chunk_dims[j + 1]) / dims[j + 1]) * (chunk_dims[j]);
+ }
+
+ /*
+ * Adjust file dataspace selection for next chunk.
+ */
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set hyperslab selection\n");
+ goto error;
+ }
+
+ for (j = 0; j < chunk_dims[0]; j++)
+ for (k = 0; k < chunk_dims[1]; k++)
+ read_buf[j][k] = 0;
+
+ if (H5Dread(dset_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, mspace_id,
+ fspace_id, H5P_DEFAULT, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (j = 0; j < chunk_dims[0]; j++) {
+ for (k = 0; k < chunk_dims[1]; k++) {
+ size_t val =
+ ((j * chunk_dims[1]) + k + i) +
+ ((hsize_t)mpi_rank * n_chunks_per_rank) /* Additional value offset for each rank */
+ + niter;
+ if (read_buf[j][k] != (int)val) {
+ H5_FAILED();
+ HDprintf(" data verification failed for chunk %lld\n", (long long)i);
+ goto error;
+ }
+ }
+ }
+ }
+
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+ }
+
+ if (chunk_dims) {
+ HDfree(chunk_dims);
+ chunk_dims = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (write_buf)
+ HDfree(write_buf);
+ if (chunk_dims)
+ HDfree(chunk_dims);
+ if (dims)
+ HDfree(dims);
+ H5Pclose(dcpl_id);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset composed of multiple chunks
+ * can be written and read correctly several times in a row.
+ * When reading back the chunks of the dataset, the file
+ * dataspace and memory dataspace used are differently shaped.
+ * The dataset's first dimension grows with the number of MPI
+ * ranks, while the other dimensions are fixed.
+ */
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE \
+ 100 /* Should be an even divisor of fixed dimension size */
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE \
+ (DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE / 10)
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_FIXED_DIMSIZE 1000
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK 2
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE sizeof(int)
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_GROUP_NAME \
+ "multi_chunk_dataset_diff_space_overwrite_test"
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME "multi_chunk_dataset"
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_NITERS 10
+static int
+test_overwrite_multi_chunk_dataset_diff_shape_read(void)
+{
+ hsize_t *dims = NULL;
+ hsize_t *chunk_dims = NULL;
+ hsize_t retrieved_chunk_dims[DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK];
+ hsize_t start[DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK];
+ hsize_t count[DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK];
+ size_t i, data_size, chunk_size, n_chunks_per_rank;
+ size_t niter;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ int read_buf[DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE]
+ [DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE];
+
+ TESTING("several overwrites to dataset with multiple chunks using differently shaped dataspaces");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, or getting property list aren't "
+ "supported with this connector\n");
+ return 0;
+ }
+
+ if (NULL == (dims = HDmalloc(DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK *
+ sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset dimensionality\n");
+ goto error;
+ }
+
+ if (NULL == (chunk_dims = HDmalloc(DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK *
+ sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset dimensionality\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ if (i == 0) {
+ dims[i] = (hsize_t)mpi_size;
+ chunk_dims[i] = 1;
+ }
+ else {
+ dims[i] = DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_FIXED_DIMSIZE;
+ chunk_dims[i] = DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE;
+ }
+ }
+
+ for (i = 0, chunk_size = 1; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ chunk_size *= chunk_dims[i];
+ chunk_size *= DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ for (i = 0, data_size = 1; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ /*
+ * Have rank 0 create the dataset, but don't fill it with data yet.
+ */
+ BEGIN_INDEPENDENT_OP(dset_create)
+ {
+ if (MAINPROCESS) {
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((group_id = H5Gcreate2(container_group,
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((fspace_id = H5Screate_simple(
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK, dims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create file dataspace for dataset\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (H5Pset_chunk(dcpl_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK,
+ chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set chunking on DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ /* Set dataset space allocation time to Early to ensure all chunk-related metadata is available to
+ * all other processes when they open the dataset */
+ if (H5Pset_alloc_time(dcpl_id, H5D_ALLOC_TIME_EARLY) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set allocation time on DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME,
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE,
+ fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ /*
+ * See if a copy of the DCPL reports the correct chunking.
+ */
+ if (H5Pclose(dcpl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve copy of DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims));
+ if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK,
+ retrieved_chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve chunking info\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ if (chunk_dims[i] != retrieved_chunk_dims[i]) {
+ H5_FAILED();
+ HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified "
+ "dimensionality\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dcpl_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ }
+ H5E_END_TRY;
+ dcpl_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close the file on rank 0; it is re-opened on all ranks below.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_create);
+
+ /*
+ * Re-open file on all ranks.
+ */
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_GROUP_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ /*
+ * Create memory dataspace for read buffer.
+ */
+ {
+ hsize_t mdims[] = {DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE,
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE};
+
+ if ((mspace_id = H5Screate_simple(DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK,
+ mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ count[i] = chunk_dims[i];
+ }
+
+ if (MAINPROCESS)
+ HDprintf("\n");
+ for (niter = 0; niter < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_NITERS; niter++) {
+ if ((dset_id = H5Dopen2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ BEGIN_INDEPENDENT_OP(dset_write)
+ {
+ if (MAINPROCESS) {
+ memset(write_buf, 0, data_size);
+
+ /*
+ * Ensure that each underlying chunk contains the values
+ *
+ * chunk_index .. (chunk_nelemts - 1) + chunk_index.
+ *
+ * That is to say, for a chunk size of 10 x 10, chunk 0
+ * contains the values
+ *
+ * 0 .. 99
+ *
+ * while the next chunk contains the values
+ *
+ * 1 .. 100
+ *
+ * and so on. On each iteration, we add 1 to the previous
+ * values.
+ */
+ for (i = 0; i < data_size / DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE;
+ i++) {
+ size_t j;
+ size_t base;
+ size_t tot_adjust;
+
+ /*
+ * Calculate a starting base value by taking the index value mod
+ * the size of a chunk in each dimension.
+ */
+ for (j = 0, base = i;
+ j < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++)
+ if (chunk_dims[j] > 1 && base >= chunk_dims[j])
+ base %= chunk_dims[j];
+
+ /*
+ * Calculate the adjustment in each dimension.
+ */
+ for (j = 0, tot_adjust = 0;
+ j < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (j == (DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ tot_adjust += (i % dims[j]) / chunk_dims[j];
+ else {
+ size_t k;
+ size_t n_faster_elemts;
+
+ /*
+ * Calculate the number of elements in faster dimensions.
+ */
+ for (k = j + 1, n_faster_elemts = 1;
+ k < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; k++)
+ n_faster_elemts *= dims[k];
+
+ tot_adjust += (((i / n_faster_elemts) / chunk_dims[j]) *
+ (dims[j + 1] / chunk_dims[j + 1])) +
+ (((i / n_faster_elemts) % chunk_dims[j]) * chunk_dims[j + 1]);
+ }
+ }
+
+ ((int *)write_buf)[i] = (int)(base + tot_adjust + niter);
+ }
+
+ /*
+ * Write every chunk in the dataset.
+ */
+ if (H5Dwrite(dset_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, H5S_ALL,
+ H5S_ALL, H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_write);
+
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ /*
+ * Each rank reads its respective chunks in the dataset, checking the data for each one.
+ */
+ for (i = 0, n_chunks_per_rank = (data_size / (size_t)mpi_size) / chunk_size; i < n_chunks_per_rank;
+ i++) {
+ size_t j, k;
+
+ if (MAINPROCESS)
+ HDprintf("\r All ranks reading chunk %zu", i);
+
+ for (j = 0; j < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (j == 0)
+ start[j] = (hsize_t)mpi_rank;
+ else if (j == (DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ /* Fastest changing dimension */
+ start[j] = (i * chunk_dims[j]) % dims[j];
+ else
+ start[j] = ((i * chunk_dims[j + 1]) / dims[j + 1]) * (chunk_dims[j]);
+ }
+
+ /*
+ * Adjust file dataspace selection for next chunk.
+ */
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set hyperslab selection\n");
+ goto error;
+ }
+
+ for (j = 0; j < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; j++)
+ for (k = 0; k < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; k++)
+ read_buf[j][k] = 0;
+
+ if (H5Dread(dset_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, mspace_id,
+ fspace_id, H5P_DEFAULT, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (j = 0; j < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; j++) {
+ for (k = 0; k < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; k++) {
+ size_t val =
+ ((j * DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE) + k + i) +
+ ((hsize_t)mpi_rank * n_chunks_per_rank) + niter;
+
+ if (read_buf[j][k] != (int)val) {
+ H5_FAILED();
+ HDprintf(" data verification failed for chunk %lld\n", (long long)i);
+ goto error;
+ }
+ }
+ }
+ }
+
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+ }
+
+ if (chunk_dims) {
+ HDfree(chunk_dims);
+ chunk_dims = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (write_buf)
+ HDfree(write_buf);
+ if (chunk_dims)
+ HDfree(chunk_dims);
+ if (dims)
+ HDfree(dims);
+ H5Pclose(dcpl_id);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+int
+H5_api_dataset_test_parallel(void)
+{
+ size_t i;
+ int nerrors;
+
+ if (MAINPROCESS) {
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Parallel Dataset Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+ }
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_dataset_tests); i++) {
+ nerrors += (*par_dataset_tests[i])() ? 1 : 0;
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" MPI_Barrier() failed!\n");
+ }
+ }
+
+ if (MAINPROCESS)
+ HDprintf("\n");
+
+ return nerrors;
+}
diff --git a/testpar/API/H5_api_dataset_test_parallel.h b/testpar/API/H5_api_dataset_test_parallel.h
new file mode 100644
index 0000000..1e2cbd0
--- /dev/null
+++ b/testpar/API/H5_api_dataset_test_parallel.h
@@ -0,0 +1,20 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_DATASET_TEST_PARALLEL_H_
+#define H5_API_DATASET_TEST_PARALLEL_H_
+
+#include "H5_api_test_parallel.h"
+
+int H5_api_dataset_test_parallel(void);
+
+#endif /* H5_API_DATASET_TEST_PARALLEL_H_ */
diff --git a/testpar/API/H5_api_datatype_test_parallel.c b/testpar/API/H5_api_datatype_test_parallel.c
new file mode 100644
index 0000000..7d090c0
--- /dev/null
+++ b/testpar/API/H5_api_datatype_test_parallel.c
@@ -0,0 +1,47 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_datatype_test_parallel.h"
+
+/*
+ * The array of parallel datatype tests to be performed.
+ */
+static int (*par_datatype_tests[])(void) = {NULL};
+
+int
+H5_api_datatype_test_parallel(void)
+{
+ size_t i;
+ int nerrors;
+
+ if (MAINPROCESS) {
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Parallel Datatype Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+ }
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_datatype_tests); i++) {
+ /* nerrors += (*par_datatype_tests[i])() ? 1 : 0; */
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" MPI_Barrier() failed!\n");
+ }
+ }
+
+ if (MAINPROCESS)
+ HDprintf("\n");
+
+ return nerrors;
+}
diff --git a/testpar/API/H5_api_datatype_test_parallel.h b/testpar/API/H5_api_datatype_test_parallel.h
new file mode 100644
index 0000000..0a2ba50
--- /dev/null
+++ b/testpar/API/H5_api_datatype_test_parallel.h
@@ -0,0 +1,20 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_DATATYPE_TEST_PARALLEL_H_
+#define H5_API_DATATYPE_TEST_PARALLEL_H_
+
+#include "H5_api_test_parallel.h"
+
+int H5_api_datatype_test_parallel(void);
+
+#endif /* H5_API_DATATYPE_TEST_PARALLEL_H_ */
diff --git a/testpar/API/H5_api_file_test_parallel.c b/testpar/API/H5_api_file_test_parallel.c
new file mode 100644
index 0000000..20fb2ba
--- /dev/null
+++ b/testpar/API/H5_api_file_test_parallel.c
@@ -0,0 +1,367 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_file_test_parallel.h"
+
+static int test_create_file(void);
+static int test_open_file(void);
+static int test_split_comm_file_access(void);
+
+/*
+ * The array of parallel file tests to be performed.
+ */
+static int (*par_file_tests[])(void) = {
+ test_create_file,
+ test_open_file,
+ test_split_comm_file_access,
+};
+
+/*
+ * A test to ensure that a file can be created in parallel.
+ */
+#define FILE_CREATE_TEST_FILENAME "test_file_parallel.h5"
+static int
+test_create_file(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+
+ TESTING("H5Fcreate");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fcreate(FILE_CREATE_TEST_FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s'\n", FILE_CREATE_TEST_FILENAME);
+ goto error;
+ }
+
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a file can be opened in parallel.
+ */
+static int
+test_open_file(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("H5Fopen");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Fopen_rdonly)
+ {
+ TESTING_2("H5Fopen in read-only mode");
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDONLY, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" unable to open file '%s' in read-only mode\n", H5_api_test_parallel_filename);
+ PART_ERROR(H5Fopen_rdonly);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fopen_rdonly);
+
+ if (file_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+ file_id = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Fopen_rdwrite)
+ {
+ TESTING_2("H5Fopen in read-write mode");
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" unable to open file '%s' in read-write mode\n", H5_api_test_parallel_filename);
+ PART_ERROR(H5Fopen_rdwrite);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fopen_rdwrite);
+
+ if (file_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+ file_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * XXX: SWMR open flags
+ */
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Tests file access by a communicator other than MPI_COMM_WORLD.
+ *
+ * Splits MPI_COMM_WORLD into two groups, where one (even_comm) contains
+ * the original processes of even rank and the other (odd_comm) contains
+ * the original processes of odd rank. Processes in even_comm create a
+ * file and then close it, using even_comm. Processes in odd_comm just do
+ * a barrier using odd_comm. Then they all do a barrier using MPI_COMM_WORLD.
+ * If the file creation and close do not perform the correct collective
+ * action according to the communicator argument, the processes will freeze
+ * up sooner or later because the MPI_Barrier calls get mixed up.
+ */
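+/*
+ * Illustrative sketch of the communicator split performed below (the
+ * four-rank layout here is an example, not something the test asserts):
+ *
+ *   MPI_Comm_split(MPI_COMM_WORLD, mpi_rank % 2, mpi_rank, &comm);
+ *
+ * With four world ranks, color 0 receives world ranks {0, 2} and color 1
+ * receives world ranks {1, 3}; each resulting communicator renumbers its
+ * members starting from 0.
+ */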
+#define SPLIT_FILE_COMM_TEST_FILE_NAME "split_comm_file.h5"
+static int
+test_split_comm_file_access(void)
+{
+ MPI_Comm comm;
+ MPI_Info info = MPI_INFO_NULL;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ int is_old;
+ int newrank;
+ int err_occurred = 0;
+
+ TESTING("file access with a split communicator");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file aren't supported with this connector\n");
+ return 0;
+ }
+
+ /* set up MPI parameters */
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ is_old = mpi_rank % 2;
+ if (MPI_SUCCESS != MPI_Comm_split(MPI_COMM_WORLD, is_old, mpi_rank, &comm)) {
+ H5_FAILED();
+ HDprintf(" failed to split communicator!\n");
+ goto error;
+ }
+ MPI_Comm_rank(comm, &newrank);
+
+ if (is_old) {
+ /* odd-rank processes */
+ if (MPI_SUCCESS != MPI_Barrier(comm)) {
+ err_occurred = 1;
+ goto access_end;
+ }
+ }
+ else {
+ /* even-rank processes */
+ int sub_mpi_rank; /* rank in the sub-comm */
+
+ MPI_Comm_rank(comm, &sub_mpi_rank);
+
+ /* setup file access template */
+ if ((fapl_id = create_mpi_fapl(comm, info, TRUE)) < 0) {
+ err_occurred = 1;
+ goto access_end;
+ }
+
+ /* create the file collectively */
+ if ((file_id = H5Fcreate(SPLIT_FILE_COMM_TEST_FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s'\n", SPLIT_FILE_COMM_TEST_FILE_NAME);
+ err_occurred = 1;
+ goto access_end;
+ }
+
+ /* close the file */
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file '%s'\n", SPLIT_FILE_COMM_TEST_FILE_NAME);
+ err_occurred = 1;
+ goto access_end;
+ }
+
+ /* delete the test file */
+ if (H5Fdelete(SPLIT_FILE_COMM_TEST_FILE_NAME, fapl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to delete file '%s'\n", SPLIT_FILE_COMM_TEST_FILE_NAME);
+ err_occurred = 1;
+ goto access_end;
+ }
+
+ /* Release file-access template */
+ if (H5Pclose(fapl_id) < 0) {
+ err_occurred = 1;
+ goto access_end;
+ }
+ }
+access_end:
+
+ /* Get the collective results about whether an error occurred */
+ if (MPI_SUCCESS != MPI_Allreduce(MPI_IN_PLACE, &err_occurred, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Allreduce failed\n");
+ goto error;
+ }
+
+ if (err_occurred) {
+ H5_FAILED();
+ HDprintf(" an error occurred on only some ranks during split-communicator file access! - "
+ "collectively failing\n");
+ goto error;
+ }
+
+ if (MPI_SUCCESS != MPI_Comm_free(&comm)) {
+ H5_FAILED();
+ HDprintf(" MPI_Comm_free failed\n");
+ goto error;
+ }
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier on MPI_COMM_WORLD failed\n");
+ goto error;
+ }
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Clean up temporary test files
+ */
+static void
+cleanup_files(void)
+{
+ hid_t fapl_id = H5I_INVALID_HID;
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) {
+ if (MAINPROCESS)
+ HDprintf(" failed to create FAPL for deleting test files\n");
+ return;
+ }
+
+ H5Fdelete(FILE_CREATE_TEST_FILENAME, fapl_id);
+
+ /* The below file is deleted as part of the test */
+ /* H5Fdelete(SPLIT_FILE_COMM_TEST_FILE_NAME, H5P_DEFAULT); */
+
+ if (H5Pclose(fapl_id) < 0) {
+ if (MAINPROCESS)
+ HDprintf(" failed to close FAPL used for deleting test files\n");
+ return;
+ }
+}
+
+int
+H5_api_file_test_parallel(void)
+{
+ size_t i;
+ int nerrors;
+
+ if (MAINPROCESS) {
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Parallel File Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+ }
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_file_tests); i++) {
+ nerrors += (*par_file_tests[i])() ? 1 : 0;
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" MPI_Barrier() failed!\n");
+ }
+ }
+
+ if (MAINPROCESS) {
+ HDprintf("\n");
+ HDprintf("Cleaning up testing files\n");
+ }
+
+ cleanup_files();
+
+ return nerrors;
+}
diff --git a/testpar/API/H5_api_file_test_parallel.h b/testpar/API/H5_api_file_test_parallel.h
new file mode 100644
index 0000000..aac9800
--- /dev/null
+++ b/testpar/API/H5_api_file_test_parallel.h
@@ -0,0 +1,20 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_FILE_TEST_PARALLEL_H_
+#define H5_API_FILE_TEST_PARALLEL_H_
+
+#include "H5_api_test_parallel.h"
+
+int H5_api_file_test_parallel(void);
+
+#endif /* H5_API_FILE_TEST_PARALLEL_H_ */
diff --git a/testpar/API/H5_api_group_test_parallel.c b/testpar/API/H5_api_group_test_parallel.c
new file mode 100644
index 0000000..d6d8f18
--- /dev/null
+++ b/testpar/API/H5_api_group_test_parallel.c
@@ -0,0 +1,47 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_group_test_parallel.h"
+
+/*
+ * The array of parallel group tests to be performed.
+ */
+static int (*par_group_tests[])(void) = {NULL};
+
+int
+H5_api_group_test_parallel(void)
+{
+ size_t i;
+ int nerrors;
+
+ if (MAINPROCESS) {
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Parallel Group Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+ }
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_group_tests); i++) {
+ /* nerrors += (*par_group_tests[i])() ? 1 : 0; */
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" MPI_Barrier() failed!\n");
+ }
+ }
+
+ if (MAINPROCESS)
+ HDprintf("\n");
+
+ return nerrors;
+}
diff --git a/testpar/API/H5_api_group_test_parallel.h b/testpar/API/H5_api_group_test_parallel.h
new file mode 100644
index 0000000..87dd24f
--- /dev/null
+++ b/testpar/API/H5_api_group_test_parallel.h
@@ -0,0 +1,20 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_GROUP_TEST_PARALLEL_H_
+#define H5_API_GROUP_TEST_PARALLEL_H_
+
+#include "H5_api_test_parallel.h"
+
+int H5_api_group_test_parallel(void);
+
+#endif /* H5_API_GROUP_TEST_PARALLEL_H_ */
diff --git a/testpar/API/H5_api_link_test_parallel.c b/testpar/API/H5_api_link_test_parallel.c
new file mode 100644
index 0000000..fb865a0
--- /dev/null
+++ b/testpar/API/H5_api_link_test_parallel.c
@@ -0,0 +1,47 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_link_test_parallel.h"
+
+/*
+ * The array of parallel link tests to be performed.
+ */
+static int (*par_link_tests[])(void) = {NULL};
+
+int
+H5_api_link_test_parallel(void)
+{
+ size_t i;
+ int nerrors;
+
+ if (MAINPROCESS) {
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Parallel Link Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+ }
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_link_tests); i++) {
+ /* nerrors += (*par_link_tests[i])() ? 1 : 0; */
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" MPI_Barrier() failed!\n");
+ }
+ }
+
+ if (MAINPROCESS)
+ HDprintf("\n");
+
+ return nerrors;
+}
diff --git a/testpar/API/H5_api_link_test_parallel.h b/testpar/API/H5_api_link_test_parallel.h
new file mode 100644
index 0000000..dbf0fc7
--- /dev/null
+++ b/testpar/API/H5_api_link_test_parallel.h
@@ -0,0 +1,20 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_LINK_TEST_PARALLEL_H_
+#define H5_API_LINK_TEST_PARALLEL_H_
+
+#include "H5_api_test_parallel.h"
+
+int H5_api_link_test_parallel(void);
+
+#endif /* H5_API_LINK_TEST_PARALLEL_H_ */
diff --git a/testpar/API/H5_api_misc_test_parallel.c b/testpar/API/H5_api_misc_test_parallel.c
new file mode 100644
index 0000000..0dc85eb
--- /dev/null
+++ b/testpar/API/H5_api_misc_test_parallel.c
@@ -0,0 +1,47 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_misc_test_parallel.h"
+
+/*
+ * The array of parallel miscellaneous tests to be performed.
+ */
+static int (*par_misc_tests[])(void) = {NULL};
+
+int
+H5_api_misc_test_parallel(void)
+{
+ size_t i;
+ int nerrors;
+
+ if (MAINPROCESS) {
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Parallel Miscellaneous Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+ }
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_misc_tests); i++) {
+ /* nerrors += (*par_misc_tests[i])() ? 1 : 0; */
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" MPI_Barrier() failed!\n");
+ }
+ }
+
+ if (MAINPROCESS)
+ HDprintf("\n");
+
+ return nerrors;
+}
diff --git a/testpar/API/H5_api_misc_test_parallel.h b/testpar/API/H5_api_misc_test_parallel.h
new file mode 100644
index 0000000..84553a9
--- /dev/null
+++ b/testpar/API/H5_api_misc_test_parallel.h
@@ -0,0 +1,20 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_MISC_TEST_PARALLEL_H_
+#define H5_API_MISC_TEST_PARALLEL_H_
+
+#include "H5_api_test_parallel.h"
+
+int H5_api_misc_test_parallel(void);
+
+#endif /* H5_API_MISC_TEST_PARALLEL_H_ */
diff --git a/testpar/API/H5_api_object_test_parallel.c b/testpar/API/H5_api_object_test_parallel.c
new file mode 100644
index 0000000..a264eb2
--- /dev/null
+++ b/testpar/API/H5_api_object_test_parallel.c
@@ -0,0 +1,47 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_object_test_parallel.h"
+
+/*
+ * The array of parallel object tests to be performed.
+ */
+static int (*par_object_tests[])(void) = {NULL};
+
+int
+H5_api_object_test_parallel(void)
+{
+ size_t i;
+ int nerrors;
+
+ if (MAINPROCESS) {
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Parallel Object Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+ }
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_object_tests); i++) {
+ /* nerrors += (*par_object_tests[i])() ? 1 : 0; */
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" MPI_Barrier() failed!\n");
+ }
+ }
+
+ if (MAINPROCESS)
+ HDprintf("\n");
+
+ return nerrors;
+}
diff --git a/testpar/API/H5_api_object_test_parallel.h b/testpar/API/H5_api_object_test_parallel.h
new file mode 100644
index 0000000..6a8569f
--- /dev/null
+++ b/testpar/API/H5_api_object_test_parallel.h
@@ -0,0 +1,20 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_OBJECT_TEST_PARALLEL_H_
+#define H5_API_OBJECT_TEST_PARALLEL_H_
+
+#include "H5_api_test_parallel.h"
+
+int H5_api_object_test_parallel(void);
+
+#endif /* H5_API_OBJECT_TEST_PARALLEL_H_ */
diff --git a/testpar/API/H5_api_test_parallel.c b/testpar/API/H5_api_test_parallel.c
new file mode 100644
index 0000000..45fa4ec
--- /dev/null
+++ b/testpar/API/H5_api_test_parallel.c
@@ -0,0 +1,338 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_test_util.h"
+#include "H5_api_test_parallel.h"
+
+#include "H5_api_attribute_test_parallel.h"
+#include "H5_api_dataset_test_parallel.h"
+#include "H5_api_datatype_test_parallel.h"
+#include "H5_api_file_test_parallel.h"
+#include "H5_api_group_test_parallel.h"
+#include "H5_api_link_test_parallel.h"
+#include "H5_api_misc_test_parallel.h"
+#include "H5_api_object_test_parallel.h"
+#ifdef H5_API_TEST_HAVE_ASYNC
+#include "H5_api_async_test_parallel.h"
+#endif
+
+char H5_api_test_parallel_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+
+const char *test_path_prefix;
+
+size_t n_tests_run_g;
+size_t n_tests_passed_g;
+size_t n_tests_failed_g;
+size_t n_tests_skipped_g;
+
+int mpi_size;
+int mpi_rank;
+
+/* X-macro to define the following for each test:
+ * - enum type
+ * - name
+ * - test function
+ * - enabled by default
+ */
+#ifdef H5_API_TEST_HAVE_ASYNC
+#define H5_API_PARALLEL_TESTS \
+ X(H5_API_TEST_NULL, "", NULL, 0) \
+ X(H5_API_TEST_FILE, "file", H5_api_file_test_parallel, 1) \
+ X(H5_API_TEST_GROUP, "group", H5_api_group_test_parallel, 1) \
+ X(H5_API_TEST_DATASET, "dataset", H5_api_dataset_test_parallel, 1) \
+ X(H5_API_TEST_DATATYPE, "datatype", H5_api_datatype_test_parallel, 1) \
+ X(H5_API_TEST_ATTRIBUTE, "attribute", H5_api_attribute_test_parallel, 1) \
+ X(H5_API_TEST_LINK, "link", H5_api_link_test_parallel, 1) \
+ X(H5_API_TEST_OBJECT, "object", H5_api_object_test_parallel, 1) \
+ X(H5_API_TEST_MISC, "misc", H5_api_misc_test_parallel, 1) \
+ X(H5_API_TEST_ASYNC, "async", H5_api_async_test_parallel, 1) \
+ X(H5_API_TEST_MAX, "", NULL, 0)
+#else
+#define H5_API_PARALLEL_TESTS \
+ X(H5_API_TEST_NULL, "", NULL, 0) \
+ X(H5_API_TEST_FILE, "file", H5_api_file_test_parallel, 1) \
+ X(H5_API_TEST_GROUP, "group", H5_api_group_test_parallel, 1) \
+ X(H5_API_TEST_DATASET, "dataset", H5_api_dataset_test_parallel, 1) \
+ X(H5_API_TEST_DATATYPE, "datatype", H5_api_datatype_test_parallel, 1) \
+ X(H5_API_TEST_ATTRIBUTE, "attribute", H5_api_attribute_test_parallel, 1) \
+ X(H5_API_TEST_LINK, "link", H5_api_link_test_parallel, 1) \
+ X(H5_API_TEST_OBJECT, "object", H5_api_object_test_parallel, 1) \
+ X(H5_API_TEST_MISC, "misc", H5_api_misc_test_parallel, 1) \
+ X(H5_API_TEST_MAX, "", NULL, 0)
+#endif
+
+#define X(a, b, c, d) a,
+enum H5_api_test_type { H5_API_PARALLEL_TESTS };
+#undef X
+#define X(a, b, c, d) b,
+static const char *const H5_api_test_name[] = {H5_API_PARALLEL_TESTS};
+#undef X
+#define X(a, b, c, d) c,
+static int (*H5_api_test_func[])(void) = {H5_API_PARALLEL_TESTS};
+#undef X
+#define X(a, b, c, d) d,
+static int H5_api_test_enabled[] = {H5_API_PARALLEL_TESTS};
+#undef X
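+
+/*
+ * Illustrative sketch of the X-macro expansion above (abbreviated; the
+ * real expansion covers every entry of H5_API_PARALLEL_TESTS):
+ *
+ *   enum H5_api_test_type { H5_API_TEST_NULL, H5_API_TEST_FILE, ... };
+ *   static const char *const H5_api_test_name[] = {"", "file", ...};
+ *   static int (*H5_api_test_func[])(void) = {NULL, H5_api_file_test_parallel, ...};
+ *   static int H5_api_test_enabled[] = {0, 1, ...};
+ */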
+
+static enum H5_api_test_type
+H5_api_test_name_to_type(const char *test_name)
+{
+ enum H5_api_test_type i = 0;
+
+ while (strcmp(H5_api_test_name[i], test_name) && i != H5_API_TEST_MAX)
+ i++;
+
+ return ((i == H5_API_TEST_MAX) ? H5_API_TEST_NULL : i);
+}
+
+static void
+H5_api_test_run(void)
+{
+ enum H5_api_test_type i;
+
+ for (i = H5_API_TEST_FILE; i < H5_API_TEST_MAX; i++)
+ if (H5_api_test_enabled[i])
+ (void)H5_api_test_func[i]();
+}
+
+hid_t
+create_mpi_fapl(MPI_Comm comm, MPI_Info info, hbool_t coll_md_read)
+{
+ hid_t ret_pl = H5I_INVALID_HID;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ if ((ret_pl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ goto error;
+
+ if (H5Pset_fapl_mpio(ret_pl, comm, info) < 0)
+ goto error;
+ if (H5Pset_all_coll_metadata_ops(ret_pl, coll_md_read) < 0)
+ goto error;
+ if (H5Pset_coll_metadata_write(ret_pl, TRUE) < 0)
+ goto error;
+
+ return ret_pl;
+
+error:
+ return H5I_INVALID_HID;
+} /* end create_mpi_fapl() */
+
+/*
+ * Generates random dimensions for a dataspace. The first dimension
+ * is always `mpi_size` to allow for convenient subsetting; the rest
+ * of the dimensions are randomized.
+ */
+int
+generate_random_parallel_dimensions(int space_rank, hsize_t **dims_out)
+{
+ hsize_t *dims = NULL;
+ size_t i;
+
+ if (space_rank <= 0)
+ goto error;
+
+ if (NULL == (dims = HDmalloc((size_t)space_rank * sizeof(hsize_t))))
+ goto error;
+ if (MAINPROCESS) {
+ for (i = 0; i < (size_t)space_rank; i++) {
+ if (i == 0)
+ dims[i] = (hsize_t)mpi_size;
+ else
+ dims[i] = (hsize_t)((rand() % MAX_DIM_SIZE) + 1);
+ }
+ }
+
+ /*
+ * Ensure that the dataset dimensions are uniform across ranks.
+ */
+ if (MPI_SUCCESS != MPI_Bcast(dims, space_rank, MPI_UNSIGNED_LONG_LONG, 0, MPI_COMM_WORLD))
+ goto error;
+
+ *dims_out = dims;
+
+ return 0;
+
+error:
+ if (dims)
+ HDfree(dims);
+
+ return -1;
+}
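+
+/*
+ * Example usage of generate_random_parallel_dimensions() (an illustrative
+ * sketch only; the rank of 3 and the surrounding error handling are
+ * placeholders, not code taken from a particular test):
+ *
+ *   hsize_t *dims = NULL;
+ *
+ *   if (generate_random_parallel_dimensions(3, &dims) < 0)
+ *       TEST_ERROR;
+ *
+ *   (dims[0] == (hsize_t)mpi_size on every rank, and the remaining entries
+ *   match across ranks because they are broadcast from rank 0.)
+ *
+ *   HDfree(dims);
+ */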
+
+int
+main(int argc, char **argv)
+{
+ const char *vol_connector_name;
+ unsigned seed;
+ hid_t fapl_id = H5I_INVALID_HID;
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Simple argument checking; TODO: improve this later */
+ if (argc > 1) {
+ enum H5_api_test_type i = H5_api_test_name_to_type(argv[1]);
+ if (i != H5_API_TEST_NULL) {
+ /* Run only specific API test */
+ memset(H5_api_test_enabled, 0, sizeof(H5_api_test_enabled));
+ H5_api_test_enabled[i] = 1;
+ }
+ }
+
+ /*
+ * Make sure that HDF5 is initialized on all MPI ranks before proceeding.
+ * This is important for certain VOL connectors which may require a
+ * collective initialization.
+ */
+ H5open();
+
+ n_tests_run_g = 0;
+ n_tests_passed_g = 0;
+ n_tests_failed_g = 0;
+ n_tests_skipped_g = 0;
+
+ if (MAINPROCESS) {
+ seed = (unsigned)HDtime(NULL);
+ }
+
+ if (mpi_size > 1) {
+ if (MPI_SUCCESS != MPI_Bcast(&seed, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf("Couldn't broadcast test seed\n");
+ goto error;
+ }
+ }
+
+ srand(seed);
+
+ if (NULL == (test_path_prefix = HDgetenv(HDF5_API_TEST_PATH_PREFIX)))
+ test_path_prefix = "";
+
+ HDsnprintf(H5_api_test_parallel_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s%s", test_path_prefix,
+ PARALLEL_TEST_FILE_NAME);
+
+ if (NULL == (vol_connector_name = HDgetenv(HDF5_VOL_CONNECTOR))) {
+ if (MAINPROCESS)
+ HDprintf("No VOL connector selected; using native VOL connector\n");
+ vol_connector_name = "native";
+ }
+
+ if (MAINPROCESS) {
+ HDprintf("Running parallel API tests with VOL connector '%s'\n\n", vol_connector_name);
+ HDprintf("Test parameters:\n");
+ HDprintf(" - Test file name: '%s'\n", H5_api_test_parallel_filename);
+ HDprintf(" - Number of MPI ranks: %d\n", mpi_size);
+ HDprintf(" - Test seed: %u\n", seed);
+ HDprintf("\n\n");
+ }
+
+ /* Retrieve the VOL connector capability flags; a FAPL is created first
+ * to work around an HDF5 library issue
+ */
+ BEGIN_INDEPENDENT_OP(get_capability_flags)
+ {
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, FALSE)) < 0) {
+ if (MAINPROCESS)
+ HDfprintf(stderr, "Unable to create FAPL\n");
+ INDEPENDENT_OP_ERROR(get_capability_flags);
+ }
+
+ vol_cap_flags_g = H5VL_CAP_FLAG_NONE;
+ if (H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags_g) < 0) {
+ if (MAINPROCESS)
+ HDfprintf(stderr, "Unable to retrieve VOL connector capability flags\n");
+ INDEPENDENT_OP_ERROR(get_capability_flags);
+ }
+ }
+ END_INDEPENDENT_OP(get_capability_flags);
+
+ /*
+ * Create the file that will be used for all of the tests,
+ * except for those which test file creation.
+ */
+ BEGIN_INDEPENDENT_OP(create_test_container)
+ {
+ if (MAINPROCESS) {
+ if (create_test_container(H5_api_test_parallel_filename, vol_cap_flags_g) < 0) {
+ HDprintf(" failed to create testing container file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(create_test_container);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(create_test_container);
+
+ /* Run all the tests that are enabled */
+ H5_api_test_run();
+
+ if (MAINPROCESS)
+ HDprintf("Cleaning up testing files\n");
+ H5Fdelete(H5_api_test_parallel_filename, fapl_id);
+
+ if (n_tests_run_g > 0) {
+ if (MAINPROCESS)
+ HDprintf("The below statistics are minimum values due to the possibility of some ranks failing a "
+ "test while others pass:\n");
+
+ if (MPI_SUCCESS != MPI_Allreduce(MPI_IN_PLACE, &n_tests_passed_g, 1, MPI_UNSIGNED_LONG_LONG, MPI_MIN,
+ MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" failed to collect consensus about the minimum number of tests that passed -- "
+ "reporting rank 0's (possibly inaccurate) value\n");
+ }
+
+ if (MAINPROCESS)
+ HDprintf("%s%zu/%zu (%.2f%%) API tests passed across all ranks with VOL connector '%s'\n",
+ n_tests_passed_g > 0 ? "At least " : "", n_tests_passed_g, n_tests_run_g,
+ ((double)n_tests_passed_g / (double)n_tests_run_g * 100.0), vol_connector_name);
+
+ if (MPI_SUCCESS != MPI_Allreduce(MPI_IN_PLACE, &n_tests_failed_g, 1, MPI_UNSIGNED_LONG_LONG, MPI_MIN,
+ MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" failed to collect consensus about the minimum number of tests that failed -- "
+ "reporting rank 0's (possibly inaccurate) value\n");
+ }
+
+ if (MAINPROCESS) {
+ HDprintf("%s%zu/%zu (%.2f%%) API tests did not pass across all ranks with VOL connector '%s'\n",
+ n_tests_failed_g > 0 ? "At least " : "", n_tests_failed_g, n_tests_run_g,
+ ((double)n_tests_failed_g / (double)n_tests_run_g * 100.0), vol_connector_name);
+
+ HDprintf("%zu/%zu (%.2f%%) API tests were skipped with VOL connector '%s'\n", n_tests_skipped_g,
+ n_tests_run_g, ((double)n_tests_skipped_g / (double)n_tests_run_g * 100.0),
+ vol_connector_name);
+ }
+ }
+
+ if (fapl_id >= 0 && H5Pclose(fapl_id) < 0) {
+ if (MAINPROCESS)
+ HDprintf(" failed to close MPI FAPL\n");
+ }
+
+ H5close();
+
+ MPI_Finalize();
+
+ HDexit(EXIT_SUCCESS);
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(fapl_id);
+ }
+ H5E_END_TRY;
+
+ MPI_Finalize();
+
+ HDexit(EXIT_FAILURE);
+}
diff --git a/testpar/API/H5_api_test_parallel.h b/testpar/API/H5_api_test_parallel.h
new file mode 100644
index 0000000..6df83e8
--- /dev/null
+++ b/testpar/API/H5_api_test_parallel.h
@@ -0,0 +1,188 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_TEST_PARALLEL_H
+#define H5_API_TEST_PARALLEL_H
+
+#include <mpi.h>
+
+#include "testpar.h"
+
+#include "H5_api_test.h"
+
+/* Define H5VL_VERSION if not already defined */
+#ifndef H5VL_VERSION
+#define H5VL_VERSION 0
+#endif
+
+/* Define macro to wait forever depending on version */
+#if H5VL_VERSION >= 2
+#define H5_API_TEST_WAIT_FOREVER H5ES_WAIT_FOREVER
+#else
+#define H5_API_TEST_WAIT_FOREVER UINT64_MAX
+#endif
+
+#define PARALLEL_TEST_FILE_NAME "H5_api_test_parallel.h5"
+extern char H5_api_test_parallel_filename[];
+
+#undef TESTING
+#undef TESTING_2
+#undef PASSED
+#undef H5_FAILED
+#undef H5_WARNING
+#undef SKIPPED
+#undef PUTS_ERROR
+#undef TEST_ERROR
+#undef STACK_ERROR
+#undef FAIL_STACK_ERROR
+#undef FAIL_PUTS_ERROR
+#undef TESTING_MULTIPART
+
+#define TESTING(WHAT) \
+ { \
+ if (MAINPROCESS) { \
+ printf("Testing %-62s", WHAT); \
+ fflush(stdout); \
+ } \
+ n_tests_run_g++; \
+ }
+#define TESTING_2(WHAT) \
+ { \
+ if (MAINPROCESS) { \
+ printf(" Testing %-60s", WHAT); \
+ fflush(stdout); \
+ } \
+ n_tests_run_g++; \
+ }
+#define PASSED() \
+ { \
+ if (MAINPROCESS) { \
+ puts(" PASSED"); \
+ fflush(stdout); \
+ } \
+ n_tests_passed_g++; \
+ }
+#define H5_FAILED() \
+ { \
+ if (MAINPROCESS) { \
+ puts("*FAILED*"); \
+ fflush(stdout); \
+ } \
+ n_tests_failed_g++; \
+ }
+#define H5_WARNING() \
+ { \
+ if (MAINPROCESS) { \
+ puts("*WARNING*"); \
+ fflush(stdout); \
+ } \
+ }
+#define SKIPPED() \
+ { \
+ if (MAINPROCESS) { \
+ puts(" -SKIP-"); \
+ fflush(stdout); \
+ } \
+ n_tests_skipped_g++; \
+ }
+#define PUTS_ERROR(s) \
+ { \
+ if (MAINPROCESS) { \
+ puts(s); \
+ AT(); \
+ } \
+ goto error; \
+ }
+#define TEST_ERROR \
+ { \
+ H5_FAILED(); \
+ if (MAINPROCESS) { \
+ AT(); \
+ } \
+ goto error; \
+ }
+#define STACK_ERROR \
+ { \
+ if (MAINPROCESS) { \
+ H5Eprint2(H5E_DEFAULT, stdout); \
+ } \
+ goto error; \
+ }
+#define FAIL_STACK_ERROR \
+ { \
+ H5_FAILED(); \
+ if (MAINPROCESS) { \
+ AT(); \
+ H5Eprint2(H5E_DEFAULT, stdout); \
+ } \
+ goto error; \
+ }
+#define FAIL_PUTS_ERROR(s) \
+ { \
+ H5_FAILED(); \
+ if (MAINPROCESS) { \
+ AT(); \
+ puts(s); \
+ } \
+ goto error; \
+ }
+#define TESTING_MULTIPART(WHAT) \
+ { \
+ if (MAINPROCESS) { \
+ printf("Testing %-62s", WHAT); \
+ HDputs(""); \
+ fflush(stdout); \
+ } \
+ }
+
+/*
+ * Macros to surround an action that will be performed non-collectively. Once the
+ * operation has completed, a consensus will be formed by all ranks on whether the
+ * operation failed.
+ */
+#define BEGIN_INDEPENDENT_OP(op_name) \
+ { \
+ hbool_t ind_op_failed = FALSE; \
+ \
+ {
+
+#define END_INDEPENDENT_OP(op_name) \
+ } \
+ \
+ op_##op_name##_end : if (MPI_SUCCESS != MPI_Allreduce(MPI_IN_PLACE, &ind_op_failed, 1, MPI_C_BOOL, \
+ MPI_LOR, MPI_COMM_WORLD)) \
+ { \
+ if (MAINPROCESS) \
+ HDprintf( \
+ " failed to collect consensus about whether non-collective operation was successful\n"); \
+ goto error; \
+ } \
+ \
+ if (ind_op_failed) { \
+ if (MAINPROCESS) \
+ HDprintf(" failure detected during non-collective operation - all other ranks will now fail " \
+ "too\n"); \
+ goto error; \
+ } \
+ }
+
+#define INDEPENDENT_OP_ERROR(op_name) \
+ ind_op_failed = TRUE; \
+ goto op_##op_name##_end;
+
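+/*
+ * Illustrative usage sketch for the independent-operation macros above
+ * (the operation name and the H5Gcreate2 call are placeholders):
+ *
+ *   BEGIN_INDEPENDENT_OP(grp_create)
+ *   {
+ *       if (MAINPROCESS) {
+ *           if ((group_id = H5Gcreate2(file_id, "group", H5P_DEFAULT,
+ *                                      H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ *               INDEPENDENT_OP_ERROR(grp_create);
+ *       }
+ *   }
+ *   END_INDEPENDENT_OP(grp_create);
+ *
+ * Ranks that skip the operation still reach the MPI_Allreduce inside
+ * END_INDEPENDENT_OP, so every rank learns whether the independent
+ * operation failed anywhere and can fail collectively.
+ */
+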
+hid_t create_mpi_fapl(MPI_Comm comm, MPI_Info info, hbool_t coll_md_read);
+int generate_random_parallel_dimensions(int space_rank, hsize_t **dims_out);
+
+extern int mpi_size;
+extern int mpi_rank;
+
+#endif
diff --git a/testpar/API/t_bigio.c b/testpar/API/t_bigio.c
new file mode 100644
index 0000000..3e18c8f
--- /dev/null
+++ b/testpar/API/t_bigio.c
@@ -0,0 +1,1942 @@
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+#if 0
+#include "H5Dprivate.h" /* For Chunk tests */
+#endif
+
+/* FILENAME and filenames must have the same number of names */
+const char *FILENAME[3] = {"bigio_test.h5", "single_rank_independent_io.h5", NULL};
+
+/* Constants definitions */
+#define MAX_ERR_REPORT 10 /* Maximum number of errors reported */
+
+/* Define some handy debugging shorthands, routines, ... */
+/* debugging tools */
+
+#define MAIN_PROCESS (mpi_rank_g == 0) /* define process 0 as main process */
+
+/* Constants definitions */
+#define RANK 2
+
+#define IN_ORDER 1
+#define OUT_OF_ORDER 2
+
+#define DATASET1 "DSET1"
+#define DATASET2 "DSET2"
+#define DATASET3 "DSET3"
+#define DATASET4 "DSET4"
+#define DXFER_COLLECTIVE_IO 0x1 /* Collective IO*/
+#define DXFER_INDEPENDENT_IO 0x2 /* Independent IO collectively */
+#define DXFER_BIGCOUNT (1 << 29)
+
+#define HYPER 1
+#define POINT 2
+#define ALL 3
+
+/* Dataset data type. Integers can easily be octal dumped. */
+typedef hsize_t B_DATATYPE;
+
+int facc_type = FACC_MPIO; /*Test file access type */
+int dxfer_coll_type = DXFER_COLLECTIVE_IO;
+size_t bigcount = (size_t) /* DXFER_BIGCOUNT */ 1310720;
+int nerrors = 0;
+static int mpi_size_g, mpi_rank_g;
+
+hsize_t space_dim1 = SPACE_DIM1 * 256; // 4096
+hsize_t space_dim2 = SPACE_DIM2;
+
+static void coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option,
+ int file_selection, int mem_selection, int mode);
+
+/*
+ * Set up the coordinates for point selection.
+ */
+static void
+set_coords(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points,
+ hsize_t coords[], int order)
+{
+ hsize_t i, j, k = 0, m, n, s1, s2;
+
+ if (OUT_OF_ORDER == order)
+ k = (num_points * RANK) - 1;
+ else if (IN_ORDER == order)
+ k = 0;
+
+ s1 = start[0];
+ s2 = start[1];
+
+ for (i = 0; i < count[0]; i++)
+ for (j = 0; j < count[1]; j++)
+ for (m = 0; m < block[0]; m++)
+ for (n = 0; n < block[1]; n++)
+ if (OUT_OF_ORDER == order) {
+ coords[k--] = s2 + (stride[1] * j) + n;
+ coords[k--] = s1 + (stride[0] * i) + m;
+ }
+ else if (IN_ORDER == order) {
+ coords[k++] = s1 + stride[0] * i + m;
+ coords[k++] = s2 + stride[1] * j + n;
+ }
+}
+
+/*
+ * Fill the dataset with trivial data for testing.
+ * Assume dimension rank is 2 and data is stored contiguously.
+ */
+static void
+fill_datasets(hsize_t start[], hsize_t block[], B_DATATYPE *dataset)
+{
+ B_DATATYPE *dataptr = dataset;
+ hsize_t i, j;
+
+ /* put some trivial data in the data_array */
+ for (i = 0; i < block[0]; i++) {
+ for (j = 0; j < block[1]; j++) {
+ *dataptr = (B_DATATYPE)((i + start[0]) * 100 + (j + start[1] + 1));
+ dataptr++;
+ }
+ }
+}
+
+/*
+ * Set up the coordinates for point selection.
+ */
+void
+point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points,
+ hsize_t coords[], int order)
+{
+ hsize_t i, j, k = 0, m, n, s1, s2;
+
+ HDcompile_assert(RANK == 2);
+
+ if (OUT_OF_ORDER == order)
+ k = (num_points * RANK) - 1;
+ else if (IN_ORDER == order)
+ k = 0;
+
+ s1 = start[0];
+ s2 = start[1];
+
+ for (i = 0; i < count[0]; i++)
+ for (j = 0; j < count[1]; j++)
+ for (m = 0; m < block[0]; m++)
+ for (n = 0; n < block[1]; n++)
+ if (OUT_OF_ORDER == order) {
+ coords[k--] = s2 + (stride[1] * j) + n;
+ coords[k--] = s1 + (stride[0] * i) + m;
+ }
+ else if (IN_ORDER == order) {
+ coords[k++] = s1 + stride[0] * i + m;
+ coords[k++] = s2 + stride[1] * j + n;
+ }
+
+ if (VERBOSE_MED) {
+ HDprintf("start[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "count[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "stride[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "block[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "total datapoints=%" PRIuHSIZE "\n",
+ start[0], start[1], count[0], count[1], stride[0], stride[1], block[0], block[1],
+ block[0] * block[1] * count[0] * count[1]);
+ k = 0;
+ for (i = 0; i < num_points; i++) {
+ HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
+ k += 2;
+ }
+ }
+}
+
+/*
+ * Print the content of the dataset.
+ */
+static void
+dataset_print(hsize_t start[], hsize_t block[], B_DATATYPE *dataset)
+{
+ B_DATATYPE *dataptr = dataset;
+ hsize_t i, j;
+
+ /* print the column heading */
+ HDprintf("%-8s", "Cols:");
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%3" PRIuHSIZE " ", start[1] + j);
+ }
+ HDprintf("\n");
+
+ /* print the slab data */
+ for (i = 0; i < block[0]; i++) {
+ HDprintf("Row %2" PRIuHSIZE ": ", i + start[0]);
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%" PRIuHSIZE " ", *dataptr++);
+ }
+ HDprintf("\n");
+ }
+}
+
+/*
+ * Verify the content of the dataset against the original data.
+ */
+static int
+verify_data(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], B_DATATYPE *dataset,
+ B_DATATYPE *original)
+{
+ hsize_t i, j;
+ int vrfyerrs;
+
+ /* print it if VERBOSE_MED */
+ if (VERBOSE_MED) {
+ HDprintf("verify_data dumping:::\n");
+ HDprintf("start(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "count(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "stride(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "block(%" PRIuHSIZE ", %" PRIuHSIZE ")\n",
+ start[0], start[1], count[0], count[1], stride[0], stride[1], block[0], block[1]);
+ HDprintf("original values:\n");
+ dataset_print(start, block, original);
+ HDprintf("compared values:\n");
+ dataset_print(start, block, dataset);
+ }
+
+ vrfyerrs = 0;
+ for (i = 0; i < block[0]; i++) {
+ for (j = 0; j < block[1]; j++) {
+ if (*dataset != *original) {
+ if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
+ HDprintf("Dataset Verify failed at [%" PRIuHSIZE "][%" PRIuHSIZE "]"
+ "(row %" PRIuHSIZE ", col %" PRIuHSIZE "): "
+ "expect %" PRIuHSIZE ", got %" PRIuHSIZE "\n",
+ i, j, i + start[0], j + start[1], *(original), *(dataset));
+ }
+ }
+ /* advance to the next element whether or not it matched */
+ dataset++;
+ original++;
+ }
+ }
+ if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("[more errors ...]\n");
+ if (vrfyerrs)
+ HDprintf("%d errors found in verify_data\n", vrfyerrs);
+ return (vrfyerrs);
+}
+
+/* Set up the selection */
+static void
+ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
+ int mode)
+{
+
+ switch (mode) {
+
+ case BYROW_CONT:
+ /* Each process takes a slab of rows. */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = space_dim1;
+ count[1] = space_dim2;
+ start[0] = (hsize_t)mpi_rank * count[0];
+ start[1] = 0;
+
+ break;
+
+ case BYROW_DISCONT:
+ /* Each process takes several disjoint blocks. */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 3;
+ stride[1] = 3;
+ count[0] = space_dim1 / (stride[0] * block[0]);
+ count[1] = (space_dim2) / (stride[1] * block[1]);
+ start[0] = space_dim1 * (hsize_t)mpi_rank;
+ start[1] = 0;
+
+ break;
+
+ case BYROW_SELECTNONE:
+ /* Each process takes a slab of rows; the highest-ranked
+ process(es) (rank >= MAX(1, mpi_size - 2)) make no selection. */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = ((mpi_rank >= MAX(1, (mpi_size - 2))) ? 0 : space_dim1);
+ count[1] = space_dim2;
+ start[0] = (hsize_t)mpi_rank * count[0];
+ start[1] = 0;
+
+ break;
+
+ case BYROW_SELECTUNBALANCE:
+ /* The first two-thirds of the processes (ranks with 3*rank < 2*size)
+ select the top half of the domain; the rest select the
+ bottom half. */
+
+ block[0] = 1;
+ count[0] = 2;
+ stride[0] = (hsize_t)(space_dim1 * (hsize_t)mpi_size / 4 + 1);
+ block[1] = space_dim2;
+ count[1] = 1;
+ start[1] = 0;
+ stride[1] = 1;
+ if ((mpi_rank * 3) < (mpi_size * 2))
+ start[0] = (hsize_t)mpi_rank;
+ else
+ start[0] = 1 + space_dim1 * (hsize_t)mpi_size / 2 + (hsize_t)(mpi_rank - 2 * mpi_size / 3);
+ break;
+
+ case BYROW_SELECTINCHUNK:
+ /* Each process will only select one chunk */
+
+ block[0] = 1;
+ count[0] = 1;
+ start[0] = (hsize_t)mpi_rank * space_dim1;
+ stride[0] = 1;
+ block[1] = space_dim2;
+ count[1] = 1;
+ stride[1] = 1;
+ start[1] = 0;
+
+ break;
+
+ default:
+ /* Unknown mode. Set it to cover the whole dataset. */
+ block[0] = space_dim1 * (hsize_t)mpi_size;
+ block[1] = space_dim2;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = 0;
+
+ break;
+ }
+ if (VERBOSE_MED) {
+ HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total "
+ "datapoints=%lu\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1],
+ (unsigned long)(block[0] * block[1] * count[0] * count[1]));
+ }
+}
+
+/*
+ * Fill the dataset with trivial data for testing.
+ * Assume dimension rank is 2.
+ */
+static void
+ccdataset_fill(hsize_t start[], hsize_t stride[], hsize_t count[], hsize_t block[], DATATYPE *dataset,
+ int mem_selection)
+{
+ DATATYPE *dataptr = dataset;
+ DATATYPE *tmptr;
+ hsize_t i, j, k1, k2, k = 0;
+ /* put some trivial data in the data_array */
+ tmptr = dataptr;
+
+ /* assign the disjoint-block (two-dimensional) data array values
+ through the pointer */
+
+ for (k1 = 0; k1 < count[0]; k1++) {
+ for (i = 0; i < block[0]; i++) {
+ for (k2 = 0; k2 < count[1]; k2++) {
+ for (j = 0; j < block[1]; j++) {
+
+ if (ALL != mem_selection) {
+ dataptr = tmptr + ((start[0] + k1 * stride[0] + i) * space_dim2 + start[1] +
+ k2 * stride[1] + j);
+ }
+ else {
+ dataptr = tmptr + k;
+ k++;
+ }
+
+ *dataptr = (DATATYPE)(k1 + k2 + i + j);
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Print the first block of the content of the dataset.
+ */
+static void
+ccdataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset)
+
+{
+ DATATYPE *dataptr = dataset;
+ hsize_t i, j;
+
+ /* print the column heading */
+ HDprintf("Print only the first block of the dataset\n");
+ HDprintf("%-8s", "Cols:");
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%3lu ", (unsigned long)(start[1] + j));
+ }
+ HDprintf("\n");
+
+ /* print the slab data */
+ for (i = 0; i < block[0]; i++) {
+ HDprintf("Row %2lu: ", (unsigned long)(i + start[0]));
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%03d ", *dataptr++);
+ }
+ HDprintf("\n");
+ }
+}
+
+/*
+ * Verify the dataset contents against the expected (original) data.
+ */
+static int
+ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset,
+ DATATYPE *original, int mem_selection)
+{
+ hsize_t i, j, k1, k2, k = 0;
+ int vrfyerrs;
+ DATATYPE *dataptr, *oriptr;
+
+ /* print it if VERBOSE_MED */
+ if (VERBOSE_MED) {
+ HDprintf("dataset_vrfy dumping:::\n");
+ HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1]);
+ HDprintf("original values:\n");
+ ccdataset_print(start, block, original);
+ HDprintf("compared values:\n");
+ ccdataset_print(start, block, dataset);
+ }
+
+ vrfyerrs = 0;
+
+ for (k1 = 0; k1 < count[0]; k1++) {
+ for (i = 0; i < block[0]; i++) {
+ for (k2 = 0; k2 < count[1]; k2++) {
+ for (j = 0; j < block[1]; j++) {
+ if (ALL != mem_selection) {
+ dataptr = dataset + ((start[0] + k1 * stride[0] + i) * space_dim2 + start[1] +
+ k2 * stride[1] + j);
+ oriptr = original + ((start[0] + k1 * stride[0] + i) * space_dim2 + start[1] +
+ k2 * stride[1] + j);
+ }
+ else {
+ dataptr = dataset + k;
+ oriptr = original + k;
+ k++;
+ }
+ if (*dataptr != *oriptr) {
+ if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
+ HDprintf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n",
+ (unsigned long)i, (unsigned long)j, *(oriptr), *(dataptr));
+ }
+ }
+ }
+ }
+ }
+ }
+ if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("[more errors ...]\n");
+ if (vrfyerrs)
+ HDprintf("%d errors found in ccdataset_vrfy\n", vrfyerrs);
+ return (vrfyerrs);
+}
+
+/*
+ * Example of using the parallel HDF5 library to create several large
+ * datasets in one HDF5 file with collective parallel access support.
+ * The datasets are of size dim0 x dim1, and each process controls a
+ * hyperslab or point selection within each dataset.
+ */
+
+static void
+dataset_big_write(void)
+{
+
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset;
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t *coords = NULL;
+ herr_t ret; /* Generic return value */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ size_t num_points;
+ B_DATATYPE *wdata;
+
+ /* allocate memory for data buffer */
+ wdata = (B_DATATYPE *)HDmalloc(bigcount * sizeof(B_DATATYPE));
+ VRFY_G((wdata != NULL), "wdata malloc succeeded");
+
+ /* setup file access template */
+ acc_tpl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY_G((acc_tpl >= 0), "H5P_FILE_ACCESS");
+ H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL);
+
+ /* create the file collectively */
+ fid = H5Fcreate(FILENAME[0], H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY_G((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY_G((ret >= 0), "");
+
+ /* Each process takes a slab of rows. */
+ if (mpi_rank_g == 0)
+ HDprintf("\nTesting Dataset1 write by ROW\n");
+ /* Create a large dataset */
+ dims[0] = bigcount;
+ dims[1] = (hsize_t)mpi_size_g;
+
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY_G((sid >= 0), "H5Screate_simple succeeded");
+ dataset = H5Dcreate2(fid, DATASET1, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
+ H5Sclose(sid);
+
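+ /* One contiguous band of block[0] rows per rank, spanning the full row
+ * width; count = 1 and stride = block give a single block per dimension. */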
+ block[0] = dims[0] / (hsize_t)mpi_size_g;
+ block[1] = dims[1];
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)mpi_rank_g * block[0];
+ start[1] = 0;
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset);
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY_G((mem_dataspace >= 0), "");
+
+ /* fill the local slab with some trivial data */
+ fill_datasets(start, block, wdata);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, wdata);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata);
+ VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ ret = H5Dclose(dataset);
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+
+ /* Each process takes a slab of columns. */
+ if (mpi_rank_g == 0)
+ HDprintf("\nTesting Dataset2 write by COL\n");
+ /* Create a large dataset */
+ dims[0] = bigcount;
+ dims[1] = (hsize_t)mpi_size_g;
+
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY_G((sid >= 0), "H5Screate_simple succeeded");
+ dataset = H5Dcreate2(fid, DATASET2, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
+ H5Sclose(sid);
+
+ block[0] = dims[0];
+ block[1] = dims[1] / (hsize_t)mpi_size_g;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = (hsize_t)mpi_rank_g * block[1];
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset);
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY_G((mem_dataspace >= 0), "");
+
+ /* fill the local slab with some trivial data */
+ fill_datasets(start, block, wdata);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, wdata);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata);
+ VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ ret = H5Dclose(dataset);
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+
+ /* ALL selection */
+ if (mpi_rank_g == 0)
+ HDprintf("\nTesting Dataset3 write select ALL proc 0, NONE others\n");
+ /* Create a large dataset */
+ dims[0] = bigcount;
+ dims[1] = 1;
+
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY_G((sid >= 0), "H5Screate_simple succeeded");
+ dataset = H5Dcreate2(fid, DATASET3, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
+ H5Sclose(sid);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset);
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+ if (mpi_rank_g == 0) {
+ ret = H5Sselect_all(file_dataspace);
+ VRFY_G((ret >= 0), "H5Sset_all succeeded");
+ }
+ else {
+ ret = H5Sselect_none(file_dataspace);
+ VRFY_G((ret >= 0), "H5Sset_none succeeded");
+ }
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, dims, NULL);
+ VRFY_G((mem_dataspace >= 0), "");
+ if (mpi_rank_g != 0) {
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY_G((ret >= 0), "H5Sset_none succeeded");
+ }
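+
+ /* Only rank 0 transfers any data; the other ranks still participate in
+ * the collective call, but with empty (none) selections. */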
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* fill the local slab with some trivial data */
+ fill_datasets(start, dims, wdata);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ }
+
+ ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata);
+ VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ ret = H5Dclose(dataset);
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+
+ /* Point selection */
+ if (mpi_rank_g == 0)
+ HDprintf("\nTesting Dataset4 write point selection\n");
+ /* Create a large dataset */
+ dims[0] = bigcount;
+ dims[1] = (hsize_t)(mpi_size_g * 4);
+
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY_G((sid >= 0), "H5Screate_simple succeeded");
+ dataset = H5Dcreate2(fid, DATASET4, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
+ H5Sclose(sid);
+
+ block[0] = dims[0] / 2;
+ block[1] = 2;
+ stride[0] = dims[0] / 2;
+ stride[1] = 2;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = dims[1] / (hsize_t)mpi_size_g * (hsize_t)mpi_rank_g;
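+ /* The selected block spans (dims[0] / 2) * 2 = dims[0] = bigcount elements,
+ * so the point list below enumerates exactly bigcount coordinates. */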
+
+ num_points = bigcount;
+
+ coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
+ VRFY_G((coords != NULL), "coords malloc succeeded");
+
+ set_coords(start, count, stride, block, num_points, coords, IN_ORDER);
+ /* create a file dataspace */
+ file_dataspace = H5Dget_space(dataset);
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY_G((ret >= 0), "H5Sselect_elements succeeded");
+
+ if (coords)
+ free(coords);
+
+ fill_datasets(start, block, wdata);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, wdata);
+ }
+
+ /* create a memory dataspace */
+ /* Warning: H5Screate_simple requires an array of hsize_t elements
+ * even if we pass only a single value. Attempting anything else
+ * appears to cause problems with 32-bit compilers.
+ */
+ mem_dataspace = H5Screate_simple(1, dims, NULL);
+ VRFY_G((mem_dataspace >= 0), "");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata);
+ VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ ret = H5Dclose(dataset);
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+
+ HDfree(wdata);
+ H5Fclose(fid);
+}
+
+/*
+ * Example of using the parallel HDF5 library to read back the
+ * datasets in one HDF5 file with collective parallel access support.
+ * The datasets are of size dim0 x dim1, and each process reads a
+ * hyperslab or point selection within each dataset.
+ */
+
+static void
+dataset_big_read(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset;
+ B_DATATYPE *rdata = NULL; /* data buffer */
+ B_DATATYPE *wdata = NULL; /* expected data buffer */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+ size_t num_points;
+ hsize_t *coords = NULL;
+ herr_t ret; /* Generic return value */
+
+ /* allocate memory for data buffer */
+ rdata = (B_DATATYPE *)HDmalloc(bigcount * sizeof(B_DATATYPE));
+ VRFY_G((rdata != NULL), "rdata malloc succeeded");
+ wdata = (B_DATATYPE *)HDmalloc(bigcount * sizeof(B_DATATYPE));
+ VRFY_G((wdata != NULL), "wdata malloc succeeded");
+
+ HDmemset(rdata, 0, bigcount * sizeof(B_DATATYPE));
+
+ /* setup file access template */
+ acc_tpl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY_G((acc_tpl >= 0), "H5P_FILE_ACCESS");
+ H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL);
+
+ /* open the file collectively */
+ fid = H5Fopen(FILENAME[0], H5F_ACC_RDONLY, acc_tpl);
+ VRFY_G((fid >= 0), "H5Fopen succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY_G((ret >= 0), "");
+
+ if (mpi_rank_g == 0)
+ HDprintf("\nRead Testing Dataset1 by COL\n");
+
+ dataset = H5Dopen2(fid, DATASET1, H5P_DEFAULT);
+ VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
+
+ dims[0] = bigcount;
+ dims[1] = (hsize_t)mpi_size_g;
+ /* Each process takes a slab of columns. */
+ block[0] = dims[0];
+ block[1] = dims[1] / (hsize_t)mpi_size_g;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = (hsize_t)mpi_rank_g * block[1];
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset);
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY_G((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ fill_datasets(start, block, wdata);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY_G((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata);
+ VRFY_G((ret >= 0), "H5Dread dataset1 succeeded");
+
+ /* verify the read data with original expected data */
+ ret = verify_data(start, count, stride, block, rdata, wdata);
+ if (ret) {
+ HDfprintf(stderr, "verify failed\n");
+ exit(1);
+ }
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+ ret = H5Dclose(dataset);
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+
+ if (mpi_rank_g == 0)
+ HDprintf("\nRead Testing Dataset2 by ROW\n");
+ HDmemset(rdata, 0, bigcount * sizeof(B_DATATYPE));
+ dataset = H5Dopen2(fid, DATASET2, H5P_DEFAULT);
+ VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
+
+ dims[0] = bigcount;
+ dims[1] = (hsize_t)mpi_size_g;
+ /* Each process takes a slab of rows. */
+ block[0] = dims[0] / (hsize_t)mpi_size_g;
+ block[1] = dims[1];
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)mpi_rank_g * block[0];
+ start[1] = 0;
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset);
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY_G((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ fill_datasets(start, block, wdata);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY_G((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata);
+ VRFY_G((ret >= 0), "H5Dread dataset2 succeeded");
+
+ /* verify the read data with original expected data */
+ ret = verify_data(start, count, stride, block, rdata, wdata);
+ if (ret) {
+ HDfprintf(stderr, "verify failed\n");
+ exit(1);
+ }
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+ ret = H5Dclose(dataset);
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+
+ if (mpi_rank_g == 0)
+ HDprintf("\nRead Testing Dataset3 read select ALL proc 0, NONE others\n");
+ HDmemset(rdata, 0, bigcount * sizeof(B_DATATYPE));
+ dataset = H5Dopen2(fid, DATASET3, H5P_DEFAULT);
+ VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
+
+ dims[0] = bigcount;
+ dims[1] = 1;
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset);
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+ if (mpi_rank_g == 0) {
+ ret = H5Sselect_all(file_dataspace);
+ VRFY_G((ret >= 0), "H5Sset_all succeeded");
+ }
+ else {
+ ret = H5Sselect_none(file_dataspace);
+ VRFY_G((ret >= 0), "H5Sset_none succeeded");
+ }
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, dims, NULL);
+ VRFY_G((mem_dataspace >= 0), "");
+ if (mpi_rank_g != 0) {
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY_G((ret >= 0), "H5Sset_none succeeded");
+ }
+
+ /* fill dataset with test data */
+ fill_datasets(start, dims, wdata);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY_G((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata);
+ VRFY_G((ret >= 0), "H5Dread dataset3 succeeded");
+
+ if (mpi_rank_g == 0) {
+ /* verify the read data with original expected data */
+ ret = verify_data(start, count, stride, block, rdata, wdata);
+ if (ret) {
+ HDfprintf(stderr, "verify failed\n");
+ exit(1);
+ }
+ }
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+ ret = H5Dclose(dataset);
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+
+ if (mpi_rank_g == 0)
+ HDprintf("\nRead Testing Dataset4 with Point selection\n");
+ dataset = H5Dopen2(fid, DATASET4, H5P_DEFAULT);
+ VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
+
+ dims[0] = bigcount;
+ dims[1] = (hsize_t)(mpi_size_g * 4);
+
+ block[0] = dims[0] / 2;
+ block[1] = 2;
+ stride[0] = dims[0] / 2;
+ stride[1] = 2;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = dims[1] / (hsize_t)mpi_size_g * (hsize_t)mpi_rank_g;
+
+ fill_datasets(start, block, wdata);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, wdata);
+ }
+
+ num_points = bigcount;
+
+ coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
+ VRFY_G((coords != NULL), "coords malloc succeeded");
+
+ set_coords(start, count, stride, block, num_points, coords, IN_ORDER);
+ /* create a file dataspace */
+ file_dataspace = H5Dget_space(dataset);
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY_G((ret >= 0), "H5Sselect_elements succeeded");
+
+ if (coords)
+ HDfree(coords);
+
+ /* create a memory dataspace */
+ /* Warning: H5Screate_simple requires an array of hsize_t elements
+ * even if we pass only a single value. Attempting anything else
+ * appears to cause problems with 32-bit compilers.
+ */
+ mem_dataspace = H5Screate_simple(1, dims, NULL);
+ VRFY_G((mem_dataspace >= 0), "");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY_G((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata);
+ VRFY_G((ret >= 0), "H5Dread dataset1 succeeded");
+
+ ret = verify_data(start, count, stride, block, rdata, wdata);
+ if (ret) {
+ HDfprintf(stderr, "verify failed\n");
+ exit(1);
+ }
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+ ret = H5Dclose(dataset);
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+
+ HDfree(wdata);
+ HDfree(rdata);
+
+ wdata = NULL;
+ rdata = NULL;
+ /* We never wrote Dataset5 in the write section, so we can't
+ * expect to read it...
+ */
+ file_dataspace = -1;
+ mem_dataspace = -1;
+ xfer_plist = -1;
+ dataset = -1;
+
+ /* release all temporary handles. */
+ if (file_dataspace != -1)
+ H5Sclose(file_dataspace);
+ if (mem_dataspace != -1)
+ H5Sclose(mem_dataspace);
+ if (xfer_plist != -1)
+ H5Pclose(xfer_plist);
+ if (dataset != -1) {
+ ret = H5Dclose(dataset);
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+ }
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if (rdata)
+ HDfree(rdata);
+ if (wdata)
+ HDfree(wdata);
+
+} /* dataset_big_read */
+
+static void
+single_rank_independent_io(void)
+{
+ if (mpi_rank_g == 0)
+ HDprintf("single_rank_independent_io\n");
+
+ if (MAIN_PROCESS) {
+ hsize_t dims[1];
+ hid_t file_id = -1;
+ hid_t fapl_id = -1;
+ hid_t dset_id = -1;
+ hid_t fspace_id = -1;
+ herr_t ret;
+ int *data = NULL;
+ uint64_t i;
+
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY_G((fapl_id >= 0), "H5P_FILE_ACCESS");
+
+ H5Pset_fapl_mpio(fapl_id, MPI_COMM_SELF, MPI_INFO_NULL);
+ file_id = H5Fcreate(FILENAME[1], H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY_G((file_id >= 0), "H5Dcreate2 succeeded");
+
+ /*
+ * Calculate the number of elements needed to exceed
+ * MPI's INT_MAX limitation
+ */
+ dims[0] = (INT_MAX / sizeof(int)) + 10;
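+ /* With 4-byte ints this is just over 2 GiB of data (more than INT_MAX
+ * bytes), exercising the single-rank >2 GB I/O path. */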
+
+ fspace_id = H5Screate_simple(1, dims, NULL);
+ VRFY_G((fspace_id >= 0), "H5Screate_simple fspace_id succeeded");
+
+ /*
+ * Create and write to a >2GB dataset from a single rank.
+ */
+ dset_id = H5Dcreate2(file_id, "test_dset", H5T_NATIVE_INT, fspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT);
+
+ VRFY_G((dset_id >= 0), "H5Dcreate2 succeeded");
+
+ data = malloc(dims[0] * sizeof(int));
+ VRFY_G((data != NULL), "data malloc succeeded");
+
+ /* Initialize data */
+ for (i = 0; i < dims[0]; i++)
+ data[i] = (int)(i % (uint64_t)DXFER_BIGCOUNT);
+
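+ /* H5S_BLOCK describes the memory buffer as one contiguous block holding
+ * the same number of elements as the file selection, so no separate
+ * memory dataspace is needed here. */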
+ /* Write data */
+ ret = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_BLOCK, fspace_id, H5P_DEFAULT, data);
+ VRFY_G((ret >= 0), "H5Dwrite succeeded");
+
+ /* Wipe buffer */
+ HDmemset(data, 0, dims[0] * sizeof(int));
+
+ /* Read data back */
+ ret = H5Dread(dset_id, H5T_NATIVE_INT, H5S_BLOCK, fspace_id, H5P_DEFAULT, data);
+ VRFY_G((ret >= 0), "H5Dread succeeded");
+
+ /* Verify data */
+ for (i = 0; i < dims[0]; i++)
+ if (data[i] != (int)(i % (uint64_t)DXFER_BIGCOUNT)) {
+ HDfprintf(stderr, "verify failed\n");
+ exit(1);
+ }
+
+ free(data);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Fclose(file_id);
+
+ H5Fdelete(FILENAME[1], fapl_id);
+
+ H5Pclose(fapl_id);
+ }
+ MPI_Barrier(MPI_COMM_WORLD);
+}
+
+/*
+ * Create the appropriate File access property list
+ */
+hid_t
+create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
+{
+ hid_t ret_pl = -1;
+ herr_t ret; /* generic return value */
+ int mpi_rank; /* mpi variables */
+
+ /* need the rank for error checking macros */
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ ret_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY_G((ret_pl >= 0), "H5P_FILE_ACCESS");
+
+ if (l_facc_type == FACC_DEFAULT)
+ return (ret_pl);
+
+ if (l_facc_type == FACC_MPIO) {
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(ret_pl, comm, info);
+ VRFY_G((ret >= 0), "");
+ ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
+ VRFY_G((ret >= 0), "");
+ ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
+ VRFY_G((ret >= 0), "");
+ return (ret_pl);
+ }
+
+ if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) {
+ hid_t mpio_pl;
+
+ mpio_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY_G((mpio_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
+ VRFY_G((ret >= 0), "");
+
+ /* setup file access template */
+ ret_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY_G((ret_pl >= 0), "");
+ /* set the split driver: metadata goes to the ".meta" file, raw data to ".raw" */
+ ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
+ VRFY_G((ret >= 0), "H5Pset_fapl_split succeeded");
+ H5Pclose(mpio_pl);
+ return (ret_pl);
+ }
+
+ /* unknown file access types */
+ return (ret_pl);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk1
+ *
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+ selection with a single chunk
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: One big singular selection inside one chunk
+ * Two dimensions,
+ *
+ * dim1 = space_dim1(5760)*mpi_size
+ * dim2 = space_dim2(3)
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = space_dim1(5760)
+ * count1 = space_dim2(3)
+ * start0 = mpi_rank*space_dim1
+ * start1 = 0
+ * ------------------------------------------------------------------------
+ */
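+
+/*
+ * For example, with 2 MPI ranks and the dimensions noted above
+ * (space_dim1 = 5760, space_dim2 = 3), the dataset and its single chunk are
+ * 11520 x 3; rank 0 selects rows 0-5759 and rank 1 selects rows 5760-11519.
+ */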
+
+void
+coll_chunk1(void)
+{
+ const char *filename = FILENAME[0];
+ if (mpi_rank_g == 0)
+ HDprintf("coll_chunk1\n");
+
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk2
+ *
+ * Purpose: Wrapper to test the collective chunk IO for regular DISJOINT
+ selection with a single chunk
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: many disjoint selections inside one chunk
+ * Two dimensions,
+ *
+ * dim1 = space_dim1(5760)*mpi_size
+ * dim2 = space_dim2(3)
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 3 for all dimensions
+ * count0 = space_dim1/stride0(5760/3)
+ * count1 = space_dim2/stride(3/3 = 1)
+ * start0 = mpi_rank*space_dim1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
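+/*
+ * For example, each rank here selects a single element (column 0) from
+ * every third row of its own space_dim1-row band, i.e. space_dim1/3
+ * disjoint 1x1 blocks, all falling inside the one chunk that covers the
+ * whole dataset.
+ */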
+void
+coll_chunk2(void)
+{
+ const char *filename = FILENAME[0];
+ if (mpi_rank_g == 0)
+ HDprintf("coll_chunk2\n");
+
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk3
+ *
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+ selection with at least number of 2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: one singular selection across many chunks
+ * Two dimensions, number of chunks >= 2 * mpi_size
+ *
+ * dim1 = space_dim1*mpi_size
+ * dim2 = space_dim2(3)
+ * chunk_dim1 = space_dim1
+ * chunk_dim2 = dim2/2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = space_dim1
+ * count1 = space_dim2(3)
+ * start0 = mpi_rank*space_dim1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
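+
+/*
+ * For example, with chunk dimensions of space_dim1 x (space_dim2 / 2) the
+ * dataset holds at least 2 * mpi_size chunks, and each rank's contiguous
+ * row band spans every chunk in its own row of chunks.
+ */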
+
+void
+coll_chunk3(void)
+{
+ const char *filename = FILENAME[0];
+ if (mpi_rank_g == 0)
+ HDprintf("coll_chunk3\n");
+
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
+}
+
+//-------------------------------------------------------------------------
+// Borrowed/Modified (slightly) from t_coll_chunk.c
+/*-------------------------------------------------------------------------
+ * Function: coll_chunktest
+ *
+ * Purpose: The main testing routine for regular selections with
+ collective chunked storage. Both write and read are tested;
+ if anything fails, it may be either, since read and write are
+ not tested separately.
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option, int file_selection,
+ int mem_selection, int mode)
+{
+ hid_t file, dataset, file_dataspace, mem_dataspace;
+ hid_t acc_plist, xfer_plist, crp_plist;
+
+ hsize_t dims[RANK], chunk_dims[RANK];
+ int *data_array1 = NULL;
+ int *data_origin1 = NULL;
+
+ hsize_t start[RANK], count[RANK], stride[RANK], block[RANK];
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ unsigned prop_value;
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ herr_t status;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ size_t num_points; /* for point selection */
+ hsize_t *coords = NULL; /* for point selection */
+
+ /* Create the data space */
+
+ acc_plist = create_faccess_plist(comm, info, facc_type);
+ VRFY_G((acc_plist >= 0), "");
+
+ file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_plist);
+ VRFY_G((file >= 0), "H5Fcreate succeeded");
+
+ status = H5Pclose(acc_plist);
+ VRFY_G((status >= 0), "");
+
+ /* setup dimensionality object */
+ dims[0] = space_dim1 * (hsize_t)mpi_size_g;
+ dims[1] = space_dim2;
+
+ /* allocate memory for data buffer */
+ data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
+ VRFY_G((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ /* set up dimensions of the slab this process accesses */
+ ccslab_set(mpi_rank_g, mpi_size_g, start, count, stride, block, select_factor);
+
+ /* set up the coords array selection */
+ num_points = block[0] * block[1] * count[0] * count[1];
+ coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
+ VRFY_G((coords != NULL), "coords malloc succeeded");
+ point_set(start, count, stride, block, num_points, coords, mode);
+
+ /* Warning: H5Screate_simple requires an array of hsize_t elements
+ * even if we pass only a single value. Attempting anything else
+ * appears to cause problems with 32-bit compilers.
+ */
+ file_dataspace = H5Screate_simple(2, dims, NULL);
+ VRFY_G((file_dataspace >= 0), "file dataspace created succeeded");
+
+ if (ALL != mem_selection) {
+ mem_dataspace = H5Screate_simple(2, dims, NULL);
+ VRFY_G((mem_dataspace >= 0), "mem dataspace created succeeded");
+ }
+ else {
+ /* Putting the warning about H5Screate_simple (above) into practice... */
+ hsize_t dsdims[1] = {num_points};
+ mem_dataspace = H5Screate_simple(1, dsdims, NULL);
+ VRFY_G((mem_dataspace >= 0), "mem_dataspace create succeeded");
+ }
+
+ crp_plist = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY_G((crp_plist >= 0), "");
+
+ /* Set up chunk information. */
+ chunk_dims[0] = dims[0] / (hsize_t)chunk_factor;
+
+ /* to decrease the testing time, keep the chunk size relatively large */
+ (chunk_factor == 1) ? (chunk_dims[1] = space_dim2) : (chunk_dims[1] = space_dim2 / 2);
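+ /* chunk_factor == 1 gives a single chunk covering the whole dataset;
+ * otherwise there are chunk_factor chunks along dim 0 and at least two
+ * along dim 1. */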
+ status = H5Pset_chunk(crp_plist, 2, chunk_dims);
+ VRFY_G((status >= 0), "chunk creation property list succeeded");
+
+ dataset = H5Dcreate2(file, DSET_COLLECTIVE_CHUNK_NAME, H5T_NATIVE_INT, file_dataspace, H5P_DEFAULT,
+ crp_plist, H5P_DEFAULT);
+ VRFY_G((dataset >= 0), "dataset created succeeded");
+
+ status = H5Pclose(crp_plist);
+ VRFY_G((status >= 0), "");
+
+ /* put some trivial data in the data array */
+ ccdataset_fill(start, stride, count, block, data_array1, mem_selection);
+
+ MESG("data_array initialized");
+
+ switch (file_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY_G((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY_G((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(file_dataspace);
+ VRFY_G((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(file_dataspace);
+ VRFY_G((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ switch (mem_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY_G((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY_G((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(mem_dataspace);
+ VRFY_G((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(mem_dataspace);
+ VRFY_G((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ /* set up the collective transfer property list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY_G((xfer_plist >= 0), "");
+
+ status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY_G((status >= 0), "MPIO collective transfer property succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((status >= 0), "set independent IO collectively succeeded");
+ }
+
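+ /* Roughly: the *_chunk_opt calls force link-chunk or multi-chunk I/O
+ * outright, *_chunk_opt_num is the per-process chunk-count threshold at
+ * or above which link-chunk I/O is chosen, and *_chunk_opt_ratio is the
+ * percentage of processes that must share a chunk for that chunk to be
+ * transferred collectively under multi-chunk I/O. */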
+ switch (api_option) {
+ case API_LINK_HARD:
+ status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_ONE_IO);
+ VRFY_G((status >= 0), "collective chunk optimization succeeded");
+ break;
+
+ case API_MULTI_HARD:
+ status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_MULTI_IO);
+ VRFY_G((status >= 0), "collective chunk optimization succeeded ");
+ break;
+
+ case API_LINK_TRUE:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 2);
+ VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded");
+ break;
+
+ case API_LINK_FALSE:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 6);
+ VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded");
+ break;
+
+ case API_MULTI_COLL:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */
+ VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded");
+ status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 50);
+ VRFY_G((status >= 0), "collective chunk optimization set chunk ratio succeeded");
+ break;
+
+ case API_MULTI_IND:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */
+ VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded");
+ status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 100);
+ VRFY_G((status >= 0), "collective chunk optimization set chunk ratio succeeded");
+ break;
+
+ default:;
+ }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ if (facc_type == FACC_MPIO) {
+ switch (api_option) {
+ case API_LINK_HARD:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY_G((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ case API_MULTI_HARD:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY_G((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ case API_LINK_TRUE:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY_G((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ case API_LINK_FALSE:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY_G((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ case API_MULTI_COLL:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME,
+ H5D_XFER_COLL_CHUNK_SIZE, &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY_G((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ case API_MULTI_IND:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY_G((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ default:;
+ }
+ }
+#endif
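+
+ /* In instrumented builds these test-only properties start at
+ * H5D_XFER_COLL_CHUNK_DEF and are expected to be reset to 0 by the
+ * library during H5Dwrite when the requested I/O path is taken; the
+ * H5Pget checks after the write verify that. */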
+
+ /* write data collectively */
+ status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY_G((status >= 0), "dataset write succeeded");
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ if (facc_type == FACC_MPIO) {
+ switch (api_option) {
+ case API_LINK_HARD:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &prop_value);
+ VRFY_G((status >= 0), "testing property list get succeeded");
+ VRFY_G((prop_value == 0), "API to set LINK COLLECTIVE IO directly succeeded");
+ break;
+
+ case API_MULTI_HARD:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, &prop_value);
+ VRFY_G((status >= 0), "testing property list get succeeded");
+ VRFY_G((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded");
+ break;
+
+ case API_LINK_TRUE:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, &prop_value);
+ VRFY_G((status >= 0), "testing property list get succeeded");
+ VRFY_G((prop_value == 0), "API to set LINK COLLECTIVE IO succeeded");
+ break;
+
+ case API_LINK_FALSE:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, &prop_value);
+ VRFY_G((status >= 0), "testing property list get succeeded");
+ VRFY_G((prop_value == 0), "API to set LINK IO transferring to multi-chunk IO succeeded");
+ break;
+
+ case API_MULTI_COLL:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, &prop_value);
+ VRFY_G((status >= 0), "testing property list get succeeded");
+ VRFY_G((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded");
+ break;
+
+ case API_MULTI_IND:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, &prop_value);
+ VRFY_G((status >= 0), "testing property list get succeeded");
+ VRFY_G((prop_value == 0),
+ "API to set MULTI-CHUNK IO transferring to independent IO succeeded");
+ break;
+
+ default:;
+ }
+ }
+#endif
+
+ status = H5Dclose(dataset);
+ VRFY_G((status >= 0), "");
+
+ status = H5Pclose(xfer_plist);
+ VRFY_G((status >= 0), "property list closed");
+
+ status = H5Sclose(file_dataspace);
+ VRFY_G((status >= 0), "");
+
+ status = H5Sclose(mem_dataspace);
+ VRFY_G((status >= 0), "");
+
+ status = H5Fclose(file);
+ VRFY_G((status >= 0), "");
+
+ if (data_array1)
+ HDfree(data_array1);
+
+ /* Use collective read to verify the correctness of collective write. */
+
+ /* allocate memory for data buffer */
+ data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
+ VRFY_G((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ /* allocate memory for data buffer */
+ data_origin1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
+ VRFY_G((data_origin1 != NULL), "data_origin1 malloc succeeded");
+
+ acc_plist = create_faccess_plist(comm, info, facc_type);
+ VRFY_G((acc_plist >= 0), "MPIO creation property list succeeded");
+
+ file = H5Fopen(FILENAME[0], H5F_ACC_RDONLY, acc_plist);
+ VRFY_G((file >= 0), "H5Fcreate succeeded");
+
+ status = H5Pclose(acc_plist);
+ VRFY_G((status >= 0), "");
+
+ /* open the collective dataset*/
+ dataset = H5Dopen2(file, DSET_COLLECTIVE_CHUNK_NAME, H5P_DEFAULT);
+ VRFY_G((dataset >= 0), "");
+
+ /* set up dimensions of the slab this process accesses */
+ ccslab_set(mpi_rank_g, mpi_size_g, start, count, stride, block, select_factor);
+
+ /* obtain the file and mem dataspace*/
+ file_dataspace = H5Dget_space(dataset);
+ VRFY_G((file_dataspace >= 0), "");
+
+ if (ALL != mem_selection) {
+ mem_dataspace = H5Dget_space(dataset);
+ VRFY_G((mem_dataspace >= 0), "");
+ }
+ else {
+ /* Warning: H5Screate_simple requires an array of hsize_t elements
+ * even if we pass only a single value. Attempting anything else
+ * appears to cause problems with 32-bit compilers.
+ */
+ hsize_t dsdims[1] = {num_points};
+ mem_dataspace = H5Screate_simple(1, dsdims, NULL);
+ VRFY_G((mem_dataspace >= 0), "mem_dataspace create succeeded");
+ }
+
+ switch (file_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY_G((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY_G((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(file_dataspace);
+ VRFY_G((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(file_dataspace);
+ VRFY_G((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ switch (mem_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY_G((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY_G((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(mem_dataspace);
+ VRFY_G((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(mem_dataspace);
+ VRFY_G((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ /* fill dataset with test data */
+ ccdataset_fill(start, stride, count, block, data_origin1, mem_selection);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY_G((xfer_plist >= 0), "");
+
+ status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY_G((status >= 0), "MPIO collective transfer property succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((status >= 0), "set independent IO collectively succeeded");
+ }
+
+ status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY_G((status >= 0), "dataset read succeeded");
+
+ /* verify the read data with original expected data */
+ status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1, mem_selection);
+ if (status)
+ nerrors++;
+
+ status = H5Pclose(xfer_plist);
+ VRFY_G((status >= 0), "property list closed");
+
+ /* close dataset collectively */
+ status = H5Dclose(dataset);
+ VRFY_G((status >= 0), "H5Dclose");
+
+ /* release all IDs created */
+ status = H5Sclose(file_dataspace);
+ VRFY_G((status >= 0), "H5Sclose");
+
+ status = H5Sclose(mem_dataspace);
+ VRFY_G((status >= 0), "H5Sclose");
+
+ /* close the file collectively */
+ status = H5Fclose(file);
+ VRFY_G((status >= 0), "H5Fclose");
+
+ /* release data buffers */
+ if (coords)
+ HDfree(coords);
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_origin1)
+ HDfree(data_origin1);
+}
+
+int
+main(int argc, char **argv)
+{
+ hid_t acc_plist = H5I_INVALID_HID;
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size_g);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank_g);
+
+ /* Attempt to turn off atexit post-processing so that, if errors occur
+ * during the test and the process is aborted, it does not hang in the
+ * atexit post-processing while trying to make MPI calls that may no
+ * longer work.
+ */
+ if (H5dont_atexit() < 0)
+ HDprintf("Failed to turn off atexit processing. Continue.\n");
+
+ /* set alarm. */
+ /* TestAlarmOn(); */
+
+ acc_plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+
+ /* Get the capability flag of the VOL connector being used */
+ if (H5Pget_vol_cap_flags(acc_plist, &vol_cap_flags_g) < 0) {
+ if (MAIN_PROCESS)
+ HDprintf("Failed to get the capability flag of the VOL connector being used\n");
+
+ MPI_Finalize();
+ return 0;
+ }
+
+ /* Make sure the connector supports the API functions being tested. This test only
+ * uses a few API functions, such as H5Fcreate/open/close/delete, H5Dcreate/write/read/close,
+ * and H5Dget_space. */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAIN_PROCESS)
+ HDprintf(
+ "API functions for basic file, dataset basic or more aren't supported with this connector\n");
+
+ MPI_Finalize();
+ return 0;
+ }
+
+ dataset_big_write();
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ dataset_big_read();
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ coll_chunk1();
+ MPI_Barrier(MPI_COMM_WORLD);
+ coll_chunk2();
+ MPI_Barrier(MPI_COMM_WORLD);
+ coll_chunk3();
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ single_rank_independent_io();
+
+ /* turn off alarm */
+ /* TestAlarmOff(); */
+
+ if (mpi_rank_g == 0) {
+ hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+
+ H5Pset_fapl_mpio(fapl_id, MPI_COMM_SELF, MPI_INFO_NULL);
+
+ H5E_BEGIN_TRY
+ {
+ H5Fdelete(FILENAME[0], fapl_id);
+ H5Fdelete(FILENAME[1], fapl_id);
+ }
+ H5E_END_TRY;
+
+ H5Pclose(fapl_id);
+ }
+
+ H5Pclose(acc_plist);
+
+ /* close HDF5 library */
+ H5close();
+
+ MPI_Finalize();
+
+ return 0;
+}
diff --git a/testpar/API/t_chunk_alloc.c b/testpar/API/t_chunk_alloc.c
new file mode 100644
index 0000000..dd78225
--- /dev/null
+++ b/testpar/API/t_chunk_alloc.c
@@ -0,0 +1,512 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * This verifies that the storage space allocation methods are compatible between
+ * serial and parallel modes.
+ *
+ * Created by: Christian Chilan and Albert Cheng
+ * Date: 2006/05/25
+ */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+static int mpi_size, mpi_rank;
+
+#define DSET_NAME "ExtendibleArray"
+#define CHUNK_SIZE 1000 /* #elements per chunk */
+#define CHUNK_FACTOR 200 /* default dataset size in terms of chunks */
+#define CLOSE 1
+#define NO_CLOSE 0
+
+#if 0
+static MPI_Offset
+get_filesize(const char *filename)
+{
+ int mpierr;
+ MPI_File fd;
+ MPI_Offset filesize;
+
+ mpierr = MPI_File_open(MPI_COMM_SELF, filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &fd);
+ VRFY((mpierr == MPI_SUCCESS), "");
+
+ mpierr = MPI_File_get_size(fd, &filesize);
+ VRFY((mpierr == MPI_SUCCESS), "");
+
+ mpierr = MPI_File_close(&fd);
+ VRFY((mpierr == MPI_SUCCESS), "");
+
+ return (filesize);
+}
+#endif
+
+typedef enum write_pattern { none, sec_last, all } write_type;
+
+typedef enum access_ { write_all, open_only, extend_only } access_type;
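+
+/* write_type selects how much data is written when the dataset is created or
+ * accessed (nothing, only the second-to-last chunk, or all chunks);
+ * access_type selects whether the parallel phase writes the whole dataset,
+ * only opens it, or only extends it. */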
+
+/*
+ * This creates a dataset serially with chunks, each of CHUNK_SIZE
+ * elements. The allocation time is set to H5D_ALLOC_TIME_EARLY. Another
+ * routine will open this in parallel for the extension test.
+ */
+static void
+create_chunked_dataset(const char *filename, int chunk_factor, write_type write_pattern)
+{
+ hid_t file_id, dataset; /* handles */
+ hid_t dataspace, memspace;
+ hid_t cparms;
+ hsize_t dims[1];
+ hsize_t maxdims[1] = {H5S_UNLIMITED};
+
+ hsize_t chunk_dims[1] = {CHUNK_SIZE};
+ hsize_t count[1];
+ hsize_t stride[1];
+ hsize_t block[1];
+ hsize_t offset[1]; /* Selection offset within dataspace */
+ /* Variables used in reading data back */
+ char buffer[CHUNK_SIZE];
+ long nchunks;
+ herr_t hrc;
+#if 0
+ MPI_Offset filesize, /* actual file size */
+ est_filesize; /* estimated file size */
+#endif
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Only MAINPROCESS should create the file. Others just wait. */
+ if (MAINPROCESS) {
+ nchunks = chunk_factor * mpi_size;
+ dims[0] = (hsize_t)(nchunks * CHUNK_SIZE);
+ /* Create the data space with unlimited dimensions. */
+ dataspace = H5Screate_simple(1, dims, maxdims);
+ VRFY((dataspace >= 0), "");
+
+ memspace = H5Screate_simple(1, chunk_dims, NULL);
+ VRFY((memspace >= 0), "");
+
+ /* Create a new file. If file exists its contents will be overwritten. */
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((file_id >= 0), "H5Fcreate");
+
+ /* Modify dataset creation properties, i.e. enable chunking */
+ cparms = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((cparms >= 0), "");
+
+ hrc = H5Pset_alloc_time(cparms, H5D_ALLOC_TIME_EARLY);
+ VRFY((hrc >= 0), "");
+
+ hrc = H5Pset_chunk(cparms, 1, chunk_dims);
+ VRFY((hrc >= 0), "");
+
+ /* Create a new dataset within the file using cparms creation properties. */
+ dataset =
+ H5Dcreate2(file_id, DSET_NAME, H5T_NATIVE_UCHAR, dataspace, H5P_DEFAULT, cparms, H5P_DEFAULT);
+ VRFY((dataset >= 0), "");
+
+ if (write_pattern == sec_last) {
+ HDmemset(buffer, 100, CHUNK_SIZE);
+
+ count[0] = 1;
+ stride[0] = 1;
+ block[0] = chunk_dims[0];
+ offset[0] = (hsize_t)(nchunks - 2) * chunk_dims[0];
+
+ hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
+ VRFY((hrc >= 0), "");
+
+ /* Write sec_last chunk */
+ hrc = H5Dwrite(dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer);
+ VRFY((hrc >= 0), "H5Dwrite");
+ } /* end if */
+
+ /* Close resources */
+ hrc = H5Dclose(dataset);
+ VRFY((hrc >= 0), "");
+ dataset = -1;
+
+ hrc = H5Sclose(dataspace);
+ VRFY((hrc >= 0), "");
+
+ hrc = H5Sclose(memspace);
+ VRFY((hrc >= 0), "");
+
+ hrc = H5Pclose(cparms);
+ VRFY((hrc >= 0), "");
+
+ hrc = H5Fclose(file_id);
+ VRFY((hrc >= 0), "");
+ file_id = -1;
+
+#if 0
+ /* verify file size */
+ filesize = get_filesize(filename);
+ est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char);
+ VRFY((filesize >= est_filesize), "file size check");
+#endif
+ }
+
+ /* Make sure all processes are done before exiting this routine. Otherwise,
+     * other tests may start and change the test data file while some processes
+     * of this test are still accessing it.
+ */
+
+ MPI_Barrier(MPI_COMM_WORLD);
+}
+
+/*
+ * This routine performs one of three types of parallel access, selected by the
+ * 'action' argument: it writes the entire dataset, extends the dataset to
+ * nchunks*CHUNK_SIZE, or only opens the dataset. At the end, it verifies that
+ * the size of the dataset is consistent with the 'chunk_factor' argument.
+ */
+static void
+parallel_access_dataset(const char *filename, int chunk_factor, access_type action, hid_t *file_id,
+ hid_t *dataset)
+{
+ /* HDF5 gubbins */
+ hid_t memspace, dataspace; /* HDF5 file identifier */
+ hid_t access_plist; /* HDF5 ID for file access property list */
+ herr_t hrc; /* HDF5 return code */
+ hsize_t size[1];
+
+ hsize_t chunk_dims[1] = {CHUNK_SIZE};
+ hsize_t count[1];
+ hsize_t stride[1];
+ hsize_t block[1];
+ hsize_t offset[1]; /* Selection offset within dataspace */
+ hsize_t dims[1];
+ hsize_t maxdims[1];
+
+ /* Variables used in reading data back */
+ char buffer[CHUNK_SIZE];
+ int i;
+ long nchunks;
+#if 0
+ /* MPI Gubbins */
+ MPI_Offset filesize, /* actual file size */
+ est_filesize; /* estimated file size */
+#endif
+
+ /* Initialize MPI */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ nchunks = chunk_factor * mpi_size;
+
+ /* Set up MPIO file access property lists */
+ access_plist = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((access_plist >= 0), "");
+
+ hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL);
+ VRFY((hrc >= 0), "");
+
+ /* Open the file */
+ if (*file_id < 0) {
+ *file_id = H5Fopen(filename, H5F_ACC_RDWR, access_plist);
+ VRFY((*file_id >= 0), "");
+ }
+
+ /* Open dataset*/
+ if (*dataset < 0) {
+ *dataset = H5Dopen2(*file_id, DSET_NAME, H5P_DEFAULT);
+ VRFY((*dataset >= 0), "");
+ }
+
+ /* Make sure all processes are done before continuing. Otherwise, one
+ * process could change the dataset extent before another finishes opening
+ * it, resulting in only some of the processes calling H5Dset_extent(). */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ memspace = H5Screate_simple(1, chunk_dims, NULL);
+ VRFY((memspace >= 0), "");
+
+ dataspace = H5Dget_space(*dataset);
+ VRFY((dataspace >= 0), "");
+
+ size[0] = (hsize_t)nchunks * CHUNK_SIZE;
+
+ switch (action) {
+
+ /* all chunks are written by all the processes in an interleaved way*/
+ case write_all:
+
+ HDmemset(buffer, mpi_rank + 1, CHUNK_SIZE);
+ count[0] = 1;
+ stride[0] = 1;
+ block[0] = chunk_dims[0];
+ for (i = 0; i < nchunks / mpi_size; i++) {
+ offset[0] = (hsize_t)(i * mpi_size + mpi_rank) * chunk_dims[0];
+
+ hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
+ VRFY((hrc >= 0), "");
+
+ /* Write the buffer out */
+ hrc = H5Dwrite(*dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer);
+ VRFY((hrc >= 0), "H5Dwrite");
+ }
+
+ break;
+
+ /* only extends the dataset */
+ case extend_only:
+ /* check if new size is larger than old size */
+ hrc = H5Sget_simple_extent_dims(dataspace, dims, maxdims);
+ VRFY((hrc >= 0), "");
+
+ /* Extend dataset*/
+ if (size[0] > dims[0]) {
+ hrc = H5Dset_extent(*dataset, size);
+ VRFY((hrc >= 0), "");
+ }
+ break;
+
+ /* only opens the *dataset */
+ case open_only:
+ break;
+ default:
+ HDassert(0);
+ }
+
+ /* Close up */
+ hrc = H5Dclose(*dataset);
+ VRFY((hrc >= 0), "");
+ *dataset = -1;
+
+ hrc = H5Sclose(dataspace);
+ VRFY((hrc >= 0), "");
+
+ hrc = H5Sclose(memspace);
+ VRFY((hrc >= 0), "");
+
+ hrc = H5Fclose(*file_id);
+ VRFY((hrc >= 0), "");
+ *file_id = -1;
+
+#if 0
+ /* verify file size */
+ filesize = get_filesize(filename);
+ est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char);
+ VRFY((filesize >= est_filesize), "file size check");
+#endif
+
+ /* Can close some plists */
+ hrc = H5Pclose(access_plist);
+ VRFY((hrc >= 0), "");
+
+ /* Make sure all processes are done before exiting this routine. Otherwise,
+     * other tests may start and change the test data file while some processes
+     * of this test are still accessing it.
+ */
+ MPI_Barrier(MPI_COMM_WORLD);
+}
+
+/*
+ * This routine verifies the data written to the dataset. It handles one of
+ * three cases according to the value of the `write_pattern' parameter:
+ * 1. the dataset has not been written at all, so every chunk should return the fill value;
+ * 2. only a small part has been written, so the rest should still return the fill value;
+ * 3. the whole dataset has been written in an interleaved pattern, so every value
+ *    should match what was written.
+ */
+static void
+verify_data(const char *filename, int chunk_factor, write_type write_pattern, int vclose, hid_t *file_id,
+ hid_t *dataset)
+{
+ /* HDF5 gubbins */
+ hid_t dataspace, memspace; /* HDF5 file identifier */
+ hid_t access_plist; /* HDF5 ID for file access property list */
+ herr_t hrc; /* HDF5 return code */
+
+ hsize_t chunk_dims[1] = {CHUNK_SIZE};
+ hsize_t count[1];
+ hsize_t stride[1];
+ hsize_t block[1];
+ hsize_t offset[1]; /* Selection offset within dataspace */
+ /* Variables used in reading data back */
+ char buffer[CHUNK_SIZE];
+ int value, i;
+ int index_l;
+ long nchunks;
+ /* Initialize MPI */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ nchunks = chunk_factor * mpi_size;
+
+ /* Set up MPIO file access property lists */
+ access_plist = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((access_plist >= 0), "");
+
+ hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL);
+ VRFY((hrc >= 0), "");
+
+ /* Open the file */
+ if (*file_id < 0) {
+ *file_id = H5Fopen(filename, H5F_ACC_RDWR, access_plist);
+ VRFY((*file_id >= 0), "");
+ }
+
+ /* Open dataset*/
+ if (*dataset < 0) {
+ *dataset = H5Dopen2(*file_id, DSET_NAME, H5P_DEFAULT);
+ VRFY((*dataset >= 0), "");
+ }
+
+ memspace = H5Screate_simple(1, chunk_dims, NULL);
+ VRFY((memspace >= 0), "");
+
+ dataspace = H5Dget_space(*dataset);
+ VRFY((dataspace >= 0), "");
+
+ /* all processes check all chunks. */
+ count[0] = 1;
+ stride[0] = 1;
+ block[0] = chunk_dims[0];
+ for (i = 0; i < nchunks; i++) {
+ /* reset buffer values */
+ HDmemset(buffer, -1, CHUNK_SIZE);
+
+ offset[0] = (hsize_t)i * chunk_dims[0];
+
+ hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
+ VRFY((hrc >= 0), "");
+
+ /* Read the chunk */
+ hrc = H5Dread(*dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer);
+ VRFY((hrc >= 0), "H5Dread");
+
+        /* set expected value according to the write pattern */
+ switch (write_pattern) {
+ case all:
+ value = i % mpi_size + 1;
+ break;
+ case none:
+ value = 0;
+ break;
+ case sec_last:
+ if (i == nchunks - 2)
+ value = 100;
+ else
+ value = 0;
+ break;
+ default:
+ HDassert(0);
+ }
+
+ /* verify content of the chunk */
+ for (index_l = 0; index_l < CHUNK_SIZE; index_l++)
+ VRFY((buffer[index_l] == value), "data verification");
+ }
+
+ hrc = H5Sclose(dataspace);
+ VRFY((hrc >= 0), "");
+
+ hrc = H5Sclose(memspace);
+ VRFY((hrc >= 0), "");
+
+ /* Can close some plists */
+ hrc = H5Pclose(access_plist);
+ VRFY((hrc >= 0), "");
+
+ /* Close up */
+ if (vclose) {
+ hrc = H5Dclose(*dataset);
+ VRFY((hrc >= 0), "");
+ *dataset = -1;
+
+ hrc = H5Fclose(*file_id);
+ VRFY((hrc >= 0), "");
+ *file_id = -1;
+ }
+
+ /* Make sure all processes are done before exiting this routine. Otherwise,
+     * other tests may start and change the test data file while some processes
+     * of this test are still accessing it.
+ */
+ MPI_Barrier(MPI_COMM_WORLD);
+}
+
+/*
+ * Test the following scenarios:
+ * Case 1:
+ * Sequentially create a file and a dataset with H5D_ALLOC_TIME_EARLY and a large
+ * size, write nothing, close, reopen in parallel, and read to verify that all
+ * chunks return the fill value.
+ * Case 2:
+ * Sequentially create a file and a dataset with H5D_ALLOC_TIME_EARLY but a small
+ * size, write nothing, close, reopen in parallel, extend to a large size, then
+ * close, then reopen in parallel and read to verify that all chunks return the
+ * fill value.
+ * Case 3:
+ * Sequentially create a file and a dataset with H5D_ALLOC_TIME_EARLY and a large
+ * size, write just a small part of the dataset (the second-to-last chunk), close,
+ * then reopen in parallel and read to verify that everything returns the fill
+ * value except the small portion that was written. Without closing, write all
+ * parts of the dataset in an interleaved pattern, close it, reopen it, and read
+ * to verify that all data are as written.
+ */
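+#if 0
+/* Editor's note: an illustrative sketch only, not part of the original test.
+ * All three cases above depend on a chunked dataset whose storage is allocated
+ * at creation time. A minimal sketch of that property setup, under the
+ * assumption that the hypothetical helper name below is ours (the test itself
+ * does this inside create_chunked_dataset()):
+ */
+static hid_t
+example_early_alloc_dcpl(void)
+{
+    hsize_t chunk_dims[1] = {CHUNK_SIZE};
+    hid_t   dcpl          = H5Pcreate(H5P_DATASET_CREATE);
+
+    /* Allocate space for every chunk when the dataset is created (serial phase) */
+    H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);
+    /* Store the 1-D dataset in chunks of CHUNK_SIZE elements */
+    H5Pset_chunk(dcpl, 1, chunk_dims);
+
+    return dcpl;
+}
+#endif
+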
+void
+test_chunk_alloc(void)
+{
+ const char *filename;
+ hid_t file_id, dataset;
+
+ file_id = dataset = -1;
+
+ /* Initialize MPI */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset, or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ filename = (const char *)PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Extend Chunked allocation test on file %s\n", filename);
+
+ /* Case 1 */
+ /* Create chunked dataset without writing anything.*/
+ create_chunked_dataset(filename, CHUNK_FACTOR, none);
+ /* reopen dataset in parallel and check for file size */
+ parallel_access_dataset(filename, CHUNK_FACTOR, open_only, &file_id, &dataset);
+ /* reopen dataset in parallel, read and verify the data */
+ verify_data(filename, CHUNK_FACTOR, none, CLOSE, &file_id, &dataset);
+
+ /* Case 2 */
+ /* Create chunked dataset without writing anything */
+ create_chunked_dataset(filename, 20, none);
+ /* reopen dataset in parallel and only extend it */
+ parallel_access_dataset(filename, CHUNK_FACTOR, extend_only, &file_id, &dataset);
+ /* reopen dataset in parallel, read and verify the data */
+ verify_data(filename, CHUNK_FACTOR, none, CLOSE, &file_id, &dataset);
+
+ /* Case 3 */
+ /* Create chunked dataset and write in the second to last chunk */
+ create_chunked_dataset(filename, CHUNK_FACTOR, sec_last);
+ /* Reopen dataset in parallel, read and verify the data. The file and dataset are not closed*/
+ verify_data(filename, CHUNK_FACTOR, sec_last, NO_CLOSE, &file_id, &dataset);
+ /* All processes write in all the chunks in a interleaved way */
+ parallel_access_dataset(filename, CHUNK_FACTOR, write_all, &file_id, &dataset);
+ /* reopen dataset in parallel, read and verify the data */
+ verify_data(filename, CHUNK_FACTOR, all, CLOSE, &file_id, &dataset);
+}
diff --git a/testpar/API/t_coll_chunk.c b/testpar/API/t_coll_chunk.c
new file mode 100644
index 0000000..57ee605
--- /dev/null
+++ b/testpar/API/t_coll_chunk.c
@@ -0,0 +1,1417 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+#define HYPER 1
+#define POINT 2
+#define ALL 3
+
+/* some commonly used routines for collective chunk IO tests*/
+
+static void ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[],
+ hsize_t block[], int mode);
+
+static void ccdataset_fill(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
+ DATATYPE *dataset, int mem_selection);
+
+static void ccdataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset);
+
+static int ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
+ DATATYPE *dataset, DATATYPE *original, int mem_selection);
+
+static void coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option,
+ int file_selection, int mem_selection, int mode);
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk1
+ *
+ * Purpose:     Wrapper to test the collective chunk IO for regular JOINT
+ *              selection with a single chunk
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: One big singular selection inside one chunk
+ * Two dimensions,
+ *
+ * dim1 = SPACE_DIM1(5760)*mpi_size
+ * dim2 = SPACE_DIM2(3)
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1(5760)
+ * count1 = SPACE_DIM2(3)
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ * ------------------------------------------------------------------------
+ */
+
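+#if 0
+/* Editor's note: an illustrative sketch only, not part of the original test.
+ * The BYROW_CONT geometry described above corresponds roughly to the
+ * following hyperslab selection (hypothetical helper name; the real setup
+ * is done by ccslab_set() further below):
+ */
+static void
+example_byrow_cont_selection(hid_t file_dataspace, int mpi_rank)
+{
+    hsize_t start[2]  = {(hsize_t)mpi_rank * SPACE_DIM1, 0};
+    hsize_t stride[2] = {1, 1};
+    hsize_t count[2]  = {SPACE_DIM1, SPACE_DIM2};
+    hsize_t block[2]  = {1, 1};
+
+    /* Each rank selects one contiguous band of rows inside the single chunk */
+    H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+}
+#endif
+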
+void
+coll_chunk1(void)
+{
+ const char *filename = PARATESTFILE /* GetTestParameters() */;
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk2
+ *
+ * Purpose:     Wrapper to test the collective chunk IO for regular DISJOINT
+ *              selection with a single chunk
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: many disjoint selections inside one chunk
+ * Two dimensions,
+ *
+ * dim1 = SPACE_DIM1*mpi_size(5760)
+ * dim2 = SPACE_DIM2(3)
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 3 for all dimensions
+ * count0 = SPACE_DIM1/stride0(5760/3)
+ * count1 = SPACE_DIM2/stride(3/3 = 1)
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
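+#if 0
+/* Editor's note: an illustrative sketch only, not part of the original test.
+ * The disjoint BYROW_DISCONT geometry described above (stride 3 in both
+ * dimensions) corresponds roughly to this hyperslab selection (hypothetical
+ * helper name; the real setup is done by ccslab_set() further below):
+ */
+static void
+example_byrow_discont_selection(hid_t file_dataspace, int mpi_rank)
+{
+    hsize_t start[2]  = {(hsize_t)mpi_rank * SPACE_DIM1, 0};
+    hsize_t stride[2] = {3, 3};
+    hsize_t count[2]  = {SPACE_DIM1 / 3, SPACE_DIM2 / 3};
+    hsize_t block[2]  = {1, 1};
+
+    /* Each rank selects every third element within its band of rows */
+    H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+}
+#endif
+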
+void
+coll_chunk2(void)
+{
+ const char *filename = PARATESTFILE /* GetTestParameters() */;
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk3
+ *
+ * Purpose:     Wrapper to test the collective chunk IO for regular JOINT
+ *              selection with at least 2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: one singular selection across many chunks
+ * Two dimensions, Num of chunks = 2* mpi_size
+ *
+ * dim1 = SPACE_DIM1*mpi_size
+ * dim2 = SPACE_DIM2(3)
+ * chunk_dim1 = SPACE_DIM1
+ * chunk_dim2 = dim2/2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1
+ * count1 = SPACE_DIM2(3)
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
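+#if 0
+/* Editor's note: an illustrative sketch only, not part of the original test.
+ * The 2*mpi_size chunk layout described above comes from using a chunk size
+ * of (SPACE_DIM1, SPACE_DIM2/2) against a (SPACE_DIM1*mpi_size, SPACE_DIM2)
+ * dataset, roughly as follows (hypothetical helper name; the real setup is
+ * done inside coll_chunktest()):
+ */
+static hid_t
+example_multi_chunk_dcpl(void)
+{
+    hsize_t chunk_dims[2] = {SPACE_DIM1, SPACE_DIM2 / 2};
+    hid_t   dcpl          = H5Pcreate(H5P_DATASET_CREATE);
+
+    /* (SPACE_DIM1*mpi_size x SPACE_DIM2) / (SPACE_DIM1 x SPACE_DIM2/2) = 2*mpi_size chunks */
+    H5Pset_chunk(dcpl, 2, chunk_dims);
+
+    return dcpl;
+}
+#endif
+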
+void
+coll_chunk3(void)
+{
+ const char *filename = PARATESTFILE /* GetTestParameters() */;
+ int mpi_size;
+ int mpi_rank;
+
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk4
+ *
+ * Purpose:     Wrapper to test the collective chunk IO for regular JOINT
+ *              selection with at least 2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: one singular selection across many chunks
+ * Two dimensions, Num of chunks = 2* mpi_size
+ *
+ * dim1 = SPACE_DIM1*mpi_size
+ * dim2 = SPACE_DIM2
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1
+ * count1 = SPACE_DIM2(3)
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk4(void)
+{
+ const char *filename = PARATESTFILE /* GetTestParameters() */;
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function:    coll_chunk5
+ *
+ * Purpose:     Wrapper to test the collective chunk IO for regular JOINT
+ *              selection with at least 2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: one singular selection across many chunks
+ * Two dimensions, Num of chunks = 2* mpi_size
+ *
+ * dim1 = SPACE_DIM1*mpi_size
+ * dim2 = SPACE_DIM2
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1
+ * count1 = SPACE_DIM2(3)
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk5(void)
+{
+ const char *filename = PARATESTFILE /* GetTestParameters() */;
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk6
+ *
+ * Purpose:     Test a direct request for multi-chunk I/O (see the illustrative
+ *              sketch just before this function). Wrapper to test the collective
+ *              chunk IO for regular JOINT selection with at least 2*mpi_size chunks.
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: one singular selection across many chunks
+ * Two dimensions, Num of chunks = 2* mpi_size
+ *
+ * dim1 = SPACE_DIM1*mpi_size
+ * dim2 = SPACE_DIM2
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1
+ * count1 = SPACE_DIM2(3)
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
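+#if 0
+/* Editor's note: an illustrative sketch only, not part of the original test.
+ * The "direct request for multi-chunk I/O" mentioned above is made on the
+ * dataset transfer property list, roughly as follows (hypothetical helper
+ * name; coll_chunktest() issues the real call for the API_MULTI_HARD case):
+ */
+static void
+example_force_multi_chunk_io(hid_t dxpl_id)
+{
+    /* Do collective I/O chunk by chunk instead of linking all chunks
+     * into a single collective transfer */
+    H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_MULTI_IO);
+}
+#endif
+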
+void
+coll_chunk6(void)
+{
+ const char *filename = PARATESTFILE /* GetTestParameters() */;
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk7
+ *
+ * Purpose:     Wrapper to test the collective chunk IO for regular JOINT
+ *              selection with at least 2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: one singular selection across many chunks
+ * Two dimensions, Num of chunks = 2* mpi_size
+ *
+ * dim1 = SPACE_DIM1*mpi_size
+ * dim2 = SPACE_DIM2
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1
+ * count1 = SPACE_DIM2(3)
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk7(void)
+{
+ const char *filename = PARATESTFILE /* GetTestParameters() */;
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk8
+ *
+ * Purpose:     Wrapper to test the collective chunk IO for regular JOINT
+ *              selection with at least 2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: one singular selection across many chunks
+ * Two dimensions, Num of chunks = 2* mpi_size
+ *
+ * dim1 = SPACE_DIM1*mpi_size
+ * dim2 = SPACE_DIM2
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1
+ * count1 = SPACE_DIM2(3)
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk8(void)
+{
+ const char *filename = PARATESTFILE /* GetTestParameters() */;
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk9
+ *
+ * Purpose:     Wrapper to test the collective chunk IO for regular JOINT
+ *              selection with at least 2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: one singular selection across many chunks
+ * Two dimensions, Num of chunks = 2* mpi_size
+ *
+ * dim1 = SPACE_DIM1*mpi_size
+ * dim2 = SPACE_DIM2
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1
+ * count1 = SPACE_DIM2(3)
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk9(void)
+{
+ const char *filename = PARATESTFILE /* GetTestParameters() */;
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk10
+ *
+ * Purpose:     Wrapper to test the collective chunk IO for regular JOINT
+ *              selection with at least 2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: one singular selection across many chunks
+ * Two dimensions, Num of chunks = 2* mpi_size
+ *
+ * dim1 = SPACE_DIM1*mpi_size
+ * dim2 = SPACE_DIM2
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1
+ * count1 = SPACE_DIM2(3)
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk10(void)
+{
+ const char *filename = PARATESTFILE /* GetTestParameters() */;
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunktest
+ *
+ * Purpose:     The real testing routine for regular selections with collective
+ *              chunked storage. Both write and read are exercised; if anything
+ *              fails, the failure may be in either the read or the write, since
+ *              there is no separate test for each.
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Modifications:
+ *              Removed invalid temporary property checks for the API_LINK_HARD
+ *              and API_LINK_TRUE cases.
+ * Programmer: Jonathan Kim
+ * Date: 2012-10-10
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option, int file_selection,
+ int mem_selection, int mode)
+{
+ hid_t file, dataset, file_dataspace, mem_dataspace;
+ hid_t acc_plist, xfer_plist, crp_plist;
+
+ hsize_t dims[RANK], chunk_dims[RANK];
+ int *data_array1 = NULL;
+ int *data_origin1 = NULL;
+
+ hsize_t start[RANK], count[RANK], stride[RANK], block[RANK];
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ unsigned prop_value;
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ int mpi_size, mpi_rank;
+
+ herr_t status;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ size_t num_points; /* for point selection */
+ hsize_t *coords = NULL; /* for point selection */
+ hsize_t current_dims; /* for point selection */
+
+ /* set up MPI parameters */
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /* Create the data space */
+
+ acc_plist = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_plist >= 0), "");
+
+ file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_plist);
+ VRFY((file >= 0), "H5Fcreate succeeded");
+
+ status = H5Pclose(acc_plist);
+ VRFY((status >= 0), "");
+
+ /* setup dimensionality object */
+ dims[0] = (hsize_t)(SPACE_DIM1 * mpi_size);
+ dims[1] = SPACE_DIM2;
+
+ /* allocate memory for data buffer */
+ data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ /* set up dimensions of the slab this process accesses */
+ ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor);
+
+ /* set up the coords array selection */
+ num_points = block[0] * block[1] * count[0] * count[1];
+ coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
+ VRFY((coords != NULL), "coords malloc succeeded");
+ point_set(start, count, stride, block, num_points, coords, mode);
+
+ file_dataspace = H5Screate_simple(2, dims, NULL);
+ VRFY((file_dataspace >= 0), "file dataspace created succeeded");
+
+ if (ALL != mem_selection) {
+ mem_dataspace = H5Screate_simple(2, dims, NULL);
+ VRFY((mem_dataspace >= 0), "mem dataspace created succeeded");
+ }
+ else {
+ current_dims = num_points;
+ mem_dataspace = H5Screate_simple(1, &current_dims, NULL);
+ VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
+ }
+
+ crp_plist = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((crp_plist >= 0), "");
+
+ /* Set up chunk information. */
+ chunk_dims[0] = dims[0] / (hsize_t)chunk_factor;
+
+ /* to decrease the testing time, maintain bigger chunk size */
+ (chunk_factor == 1) ? (chunk_dims[1] = SPACE_DIM2) : (chunk_dims[1] = SPACE_DIM2 / 2);
+ status = H5Pset_chunk(crp_plist, 2, chunk_dims);
+ VRFY((status >= 0), "chunk creation property list succeeded");
+
+ dataset = H5Dcreate2(file, DSET_COLLECTIVE_CHUNK_NAME, H5T_NATIVE_INT, file_dataspace, H5P_DEFAULT,
+ crp_plist, H5P_DEFAULT);
+ VRFY((dataset >= 0), "dataset created succeeded");
+
+ status = H5Pclose(crp_plist);
+ VRFY((status >= 0), "");
+
+    /* put some trivial data in the data array */
+ ccdataset_fill(start, stride, count, block, data_array1, mem_selection);
+
+ MESG("data_array initialized");
+
+ switch (file_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(file_dataspace);
+ VRFY((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(file_dataspace);
+ VRFY((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ switch (mem_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(mem_dataspace);
+ VRFY((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(mem_dataspace);
+ VRFY((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ /* set up the collective transfer property list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+
+ status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((status >= 0), "MPIO collective transfer property succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((status >= 0), "set independent IO collectively succeeded");
+ }
+
+ switch (api_option) {
+ case API_LINK_HARD:
+ status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_ONE_IO);
+ VRFY((status >= 0), "collective chunk optimization succeeded");
+ break;
+
+ case API_MULTI_HARD:
+ status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_MULTI_IO);
+ VRFY((status >= 0), "collective chunk optimization succeeded ");
+ break;
+
+ case API_LINK_TRUE:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 2);
+ VRFY((status >= 0), "collective chunk optimization set chunk number succeeded");
+ break;
+
+ case API_LINK_FALSE:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 6);
+ VRFY((status >= 0), "collective chunk optimization set chunk number succeeded");
+ break;
+
+ case API_MULTI_COLL:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */
+ VRFY((status >= 0), "collective chunk optimization set chunk number succeeded");
+ status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 50);
+ VRFY((status >= 0), "collective chunk optimization set chunk ratio succeeded");
+ break;
+
+ case API_MULTI_IND:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */
+ VRFY((status >= 0), "collective chunk optimization set chunk number succeeded");
+ status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 100);
+ VRFY((status >= 0), "collective chunk optimization set chunk ratio succeeded");
+ break;
+
+ default:;
+ }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ if (facc_type == FACC_MPIO) {
+ switch (api_option) {
+ case API_LINK_HARD:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ case API_MULTI_HARD:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ case API_LINK_TRUE:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ case API_LINK_FALSE:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ case API_MULTI_COLL:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME,
+ H5D_XFER_COLL_CHUNK_SIZE, &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ case API_MULTI_IND:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ default:;
+ }
+ }
+#endif
+
+ /* write data collectively */
+ status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((status >= 0), "dataset write succeeded");
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ /* Only check chunk optimization mode if selection I/O is not being used -
+ * selection I/O bypasses this IO mode decision - it's effectively always
+ * multi chunk currently */
+ if (facc_type == FACC_MPIO && /* !H5_use_selection_io_g */ TRUE) {
+ switch (api_option) {
+ case API_LINK_HARD:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &prop_value);
+ VRFY((status >= 0), "testing property list get succeeded");
+ VRFY((prop_value == 0), "API to set LINK COLLECTIVE IO directly succeeded");
+ break;
+
+ case API_MULTI_HARD:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, &prop_value);
+ VRFY((status >= 0), "testing property list get succeeded");
+ VRFY((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded");
+ break;
+
+ case API_LINK_TRUE:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, &prop_value);
+ VRFY((status >= 0), "testing property list get succeeded");
+ VRFY((prop_value == 0), "API to set LINK COLLECTIVE IO succeeded");
+ break;
+
+ case API_LINK_FALSE:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, &prop_value);
+ VRFY((status >= 0), "testing property list get succeeded");
+ VRFY((prop_value == 0), "API to set LINK IO transferring to multi-chunk IO succeeded");
+ break;
+
+ case API_MULTI_COLL:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, &prop_value);
+ VRFY((status >= 0), "testing property list get succeeded");
+ VRFY((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded");
+ break;
+
+ case API_MULTI_IND:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, &prop_value);
+ VRFY((status >= 0), "testing property list get succeeded");
+ VRFY((prop_value == 0),
+ "API to set MULTI-CHUNK IO transferring to independent IO succeeded");
+ break;
+
+ default:;
+ }
+ }
+#endif
+
+ status = H5Dclose(dataset);
+ VRFY((status >= 0), "");
+
+ status = H5Pclose(xfer_plist);
+ VRFY((status >= 0), "property list closed");
+
+ status = H5Sclose(file_dataspace);
+ VRFY((status >= 0), "");
+
+ status = H5Sclose(mem_dataspace);
+ VRFY((status >= 0), "");
+
+ status = H5Fclose(file);
+ VRFY((status >= 0), "");
+
+ if (data_array1)
+ HDfree(data_array1);
+
+ /* Use collective read to verify the correctness of collective write. */
+
+ /* allocate memory for data buffer */
+ data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ /* allocate memory for data buffer */
+ data_origin1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
+ VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
+
+ acc_plist = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_plist >= 0), "MPIO creation property list succeeded");
+
+ file = H5Fopen(filename, H5F_ACC_RDONLY, acc_plist);
+ VRFY((file >= 0), "H5Fcreate succeeded");
+
+ status = H5Pclose(acc_plist);
+ VRFY((status >= 0), "");
+
+ /* open the collective dataset*/
+ dataset = H5Dopen2(file, DSET_COLLECTIVE_CHUNK_NAME, H5P_DEFAULT);
+ VRFY((dataset >= 0), "");
+
+ /* set up dimensions of the slab this process accesses */
+ ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor);
+
+ /* obtain the file and mem dataspace*/
+ file_dataspace = H5Dget_space(dataset);
+ VRFY((file_dataspace >= 0), "");
+
+ if (ALL != mem_selection) {
+ mem_dataspace = H5Dget_space(dataset);
+ VRFY((mem_dataspace >= 0), "");
+ }
+ else {
+ current_dims = num_points;
+ mem_dataspace = H5Screate_simple(1, &current_dims, NULL);
+ VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
+ }
+
+ switch (file_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(file_dataspace);
+ VRFY((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(file_dataspace);
+ VRFY((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ switch (mem_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(mem_dataspace);
+ VRFY((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(mem_dataspace);
+ VRFY((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ /* fill dataset with test data */
+ ccdataset_fill(start, stride, count, block, data_origin1, mem_selection);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+
+ status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((status >= 0), "MPIO collective transfer property succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((status >= 0), "set independent IO collectively succeeded");
+ }
+
+ status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((status >= 0), "dataset read succeeded");
+
+ /* verify the read data with original expected data */
+ status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1, mem_selection);
+ if (status)
+ nerrors++;
+
+ status = H5Pclose(xfer_plist);
+ VRFY((status >= 0), "property list closed");
+
+ /* close dataset collectively */
+ status = H5Dclose(dataset);
+ VRFY((status >= 0), "H5Dclose");
+
+ /* release all IDs created */
+ status = H5Sclose(file_dataspace);
+ VRFY((status >= 0), "H5Sclose");
+
+ status = H5Sclose(mem_dataspace);
+ VRFY((status >= 0), "H5Sclose");
+
+ /* close the file collectively */
+ status = H5Fclose(file);
+ VRFY((status >= 0), "H5Fclose");
+
+ /* release data buffers */
+ if (coords)
+ HDfree(coords);
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_origin1)
+ HDfree(data_origin1);
+}
+
+/* Set up the selection */
+static void
+ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
+ int mode)
+{
+
+ switch (mode) {
+
+ case BYROW_CONT:
+            /* Each process takes a slab of rows. */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = SPACE_DIM1;
+ count[1] = SPACE_DIM2;
+ start[0] = (hsize_t)mpi_rank * count[0];
+ start[1] = 0;
+
+ break;
+
+ case BYROW_DISCONT:
+ /* Each process takes several disjoint blocks. */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 3;
+ stride[1] = 3;
+ count[0] = SPACE_DIM1 / (stride[0] * block[0]);
+ count[1] = (SPACE_DIM2) / (stride[1] * block[1]);
+ start[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_rank;
+ start[1] = 0;
+
+ break;
+
+ case BYROW_SELECTNONE:
+            /* Each process takes a slab of rows; there are
+               no selections for the last process. */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = ((mpi_rank >= MAX(1, (mpi_size - 2))) ? 0 : SPACE_DIM1);
+ count[1] = SPACE_DIM2;
+ start[0] = (hsize_t)mpi_rank * count[0];
+ start[1] = 0;
+
+ break;
+
+ case BYROW_SELECTUNBALANCE:
+            /* The first one-third of the processes select only the top half
+               of the domain; the rest select the bottom half of the domain. */
+
+ block[0] = 1;
+ count[0] = 2;
+ stride[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_size / 4 + 1;
+ block[1] = SPACE_DIM2;
+ count[1] = 1;
+ start[1] = 0;
+ stride[1] = 1;
+ if ((mpi_rank * 3) < (mpi_size * 2))
+ start[0] = (hsize_t)mpi_rank;
+ else
+ start[0] = (hsize_t)(1 + SPACE_DIM1 * mpi_size / 2 + (mpi_rank - 2 * mpi_size / 3));
+ break;
+
+ case BYROW_SELECTINCHUNK:
+ /* Each process will only select one chunk */
+
+ block[0] = 1;
+ count[0] = 1;
+ start[0] = (hsize_t)(mpi_rank * SPACE_DIM1);
+ stride[0] = 1;
+ block[1] = SPACE_DIM2;
+ count[1] = 1;
+ stride[1] = 1;
+ start[1] = 0;
+
+ break;
+
+ default:
+ /* Unknown mode. Set it to cover the whole dataset. */
+ block[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_size;
+ block[1] = SPACE_DIM2;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = 0;
+
+ break;
+ }
+ if (VERBOSE_MED) {
+ HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total "
+ "datapoints=%lu\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1],
+ (unsigned long)(block[0] * block[1] * count[0] * count[1]));
+ }
+}
+
+/*
+ * Fill the dataset with trivial data for testing.
+ * Assume dimension rank is 2.
+ */
+static void
+ccdataset_fill(hsize_t start[], hsize_t stride[], hsize_t count[], hsize_t block[], DATATYPE *dataset,
+ int mem_selection)
+{
+ DATATYPE *dataptr = dataset;
+ DATATYPE *tmptr;
+ hsize_t i, j, k1, k2, k = 0;
+ /* put some trivial data in the data_array */
+ tmptr = dataptr;
+
+    /* assign the disjoint block (two-dimensional) data array values
+       through the pointer */
+
+ for (k1 = 0; k1 < count[0]; k1++) {
+ for (i = 0; i < block[0]; i++) {
+ for (k2 = 0; k2 < count[1]; k2++) {
+ for (j = 0; j < block[1]; j++) {
+
+ if (ALL != mem_selection) {
+ dataptr = tmptr + ((start[0] + k1 * stride[0] + i) * SPACE_DIM2 + start[1] +
+ k2 * stride[1] + j);
+ }
+ else {
+ dataptr = tmptr + k;
+ k++;
+ }
+
+ *dataptr = (DATATYPE)(k1 + k2 + i + j);
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Print the first block of the content of the dataset.
+ */
+static void
+ccdataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset)
+
+{
+ DATATYPE *dataptr = dataset;
+ hsize_t i, j;
+
+ /* print the column heading */
+ HDprintf("Print only the first block of the dataset\n");
+ HDprintf("%-8s", "Cols:");
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%3lu ", (unsigned long)(start[1] + j));
+ }
+ HDprintf("\n");
+
+ /* print the slab data */
+ for (i = 0; i < block[0]; i++) {
+ HDprintf("Row %2lu: ", (unsigned long)(i + start[0]));
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%03d ", *dataptr++);
+ }
+ HDprintf("\n");
+ }
+}
+
+/*
+ * Verify the content of the dataset against the original data.
+ */
+static int
+ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset,
+ DATATYPE *original, int mem_selection)
+{
+ hsize_t i, j, k1, k2, k = 0;
+ int vrfyerrs;
+ DATATYPE *dataptr, *oriptr;
+
+ /* print it if VERBOSE_MED */
+ if (VERBOSE_MED) {
+ HDprintf("dataset_vrfy dumping:::\n");
+ HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1]);
+ HDprintf("original values:\n");
+ ccdataset_print(start, block, original);
+ HDprintf("compared values:\n");
+ ccdataset_print(start, block, dataset);
+ }
+
+ vrfyerrs = 0;
+
+ for (k1 = 0; k1 < count[0]; k1++) {
+ for (i = 0; i < block[0]; i++) {
+ for (k2 = 0; k2 < count[1]; k2++) {
+ for (j = 0; j < block[1]; j++) {
+ if (ALL != mem_selection) {
+ dataptr = dataset + ((start[0] + k1 * stride[0] + i) * SPACE_DIM2 + start[1] +
+ k2 * stride[1] + j);
+ oriptr = original + ((start[0] + k1 * stride[0] + i) * SPACE_DIM2 + start[1] +
+ k2 * stride[1] + j);
+ }
+ else {
+ dataptr = dataset + k;
+ oriptr = original + k;
+ k++;
+ }
+ if (*dataptr != *oriptr) {
+ if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
+ HDprintf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n",
+ (unsigned long)i, (unsigned long)j, *(oriptr), *(dataptr));
+ }
+ }
+ }
+ }
+ }
+ }
+ if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("[more errors ...]\n");
+ if (vrfyerrs)
+ HDprintf("%d errors found in ccdataset_vrfy\n", vrfyerrs);
+ return (vrfyerrs);
+}
diff --git a/testpar/API/t_coll_md_read.c b/testpar/API/t_coll_md_read.c
new file mode 100644
index 0000000..f6f99bf
--- /dev/null
+++ b/testpar/API/t_coll_md_read.c
@@ -0,0 +1,654 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * A test suite to test HDF5's collective metadata read and write capabilities,
+ * as enabled by making a call to H5Pset_all_coll_metadata_ops() and/or
+ * H5Pset_coll_metadata_write().
+ */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/*
+ * Define the non-participating process as the "last"
+ * rank to avoid any weirdness potentially caused by
+ * an if (mpi_rank == 0) check.
+ */
+#define PARTIAL_NO_SELECTION_NO_SEL_PROCESS (mpi_rank == mpi_size - 1)
+#define PARTIAL_NO_SELECTION_DATASET_NAME "partial_no_selection_dset"
+#define PARTIAL_NO_SELECTION_DATASET_NDIMS 2
+#define PARTIAL_NO_SELECTION_Y_DIM_SCALE 5
+#define PARTIAL_NO_SELECTION_X_DIM_SCALE 5
+
+#define MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS 2
+
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM 10000
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME "linked_chunk_io_sort_chunk_issue"
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS 1
+
+#define COLL_GHEAP_WRITE_ATTR_NELEMS 10
+#define COLL_GHEAP_WRITE_ATTR_NAME "coll_gheap_write_attr"
+#define COLL_GHEAP_WRITE_ATTR_DIMS 1
+
+/*
+ * A test for issue HDFFV-10501. A parallel hang was reported which occurred
+ * in linked-chunk I/O when collective metadata reads are enabled and some ranks
+ * do not have any selection in a dataset's dataspace, while others do. The ranks
+ * which have no selection during the read/write operation called H5D__chunk_addrmap()
+ * to retrieve the lowest chunk address, since we require that the read/write be done
+ * in strictly non-decreasing order of chunk address. For version 1 and 2 B-trees,
+ * this caused the non-participating ranks to issue a collective MPI_Bcast() call
+ * which the other ranks did not issue, thus causing a hang.
+ *
+ * However, since these ranks are not actually reading/writing anything, this call
+ * can simply be removed and the address used for the read/write can be set to an
+ * arbitrary number (0 was chosen).
+ */
+void
+test_partial_no_selection_coll_md_read(void)
+{
+ const char *filename;
+ hsize_t *dataset_dims = NULL;
+ hsize_t max_dataset_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS];
+ hsize_t sel_dims[1];
+ hsize_t chunk_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS] = {PARTIAL_NO_SELECTION_Y_DIM_SCALE,
+ PARTIAL_NO_SELECTION_X_DIM_SCALE};
+ hsize_t start[PARTIAL_NO_SELECTION_DATASET_NDIMS];
+ hsize_t stride[PARTIAL_NO_SELECTION_DATASET_NDIMS];
+ hsize_t count[PARTIAL_NO_SELECTION_DATASET_NDIMS];
+ hsize_t block[PARTIAL_NO_SELECTION_DATASET_NDIMS];
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ int mpi_rank, mpi_size;
+ void *data = NULL;
+ void *read_buf = NULL;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or file flush aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+
+ fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
+
+ /*
+ * Even though the testphdf5 framework currently sets collective metadata reads
+ * on the FAPL, we call it here just to be sure this is futureproof, since
+ * demonstrating this issue relies upon it.
+ */
+ VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded");
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+ dataset_dims = HDmalloc(PARTIAL_NO_SELECTION_DATASET_NDIMS * sizeof(*dataset_dims));
+ VRFY((dataset_dims != NULL), "malloc succeeded");
+
+ dataset_dims[0] = (hsize_t)PARTIAL_NO_SELECTION_Y_DIM_SCALE * (hsize_t)mpi_size;
+ dataset_dims[1] = (hsize_t)PARTIAL_NO_SELECTION_X_DIM_SCALE * (hsize_t)mpi_size;
+ max_dataset_dims[0] = H5S_UNLIMITED;
+ max_dataset_dims[1] = H5S_UNLIMITED;
+
+ fspace_id = H5Screate_simple(PARTIAL_NO_SELECTION_DATASET_NDIMS, dataset_dims, max_dataset_dims);
+ VRFY((fspace_id >= 0), "H5Screate_simple succeeded");
+
+ /*
+ * Set up chunking on the dataset in order to reproduce the problem.
+ */
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
+
+ VRFY((H5Pset_chunk(dcpl_id, PARTIAL_NO_SELECTION_DATASET_NDIMS, chunk_dims) >= 0),
+ "H5Pset_chunk succeeded");
+
+ dset_id = H5Dcreate2(file_id, PARTIAL_NO_SELECTION_DATASET_NAME, H5T_NATIVE_INT, fspace_id, H5P_DEFAULT,
+ dcpl_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
+
+ /*
+ * Setup hyperslab selection to split the dataset among the ranks.
+ *
+ * The ranks will write rows across the dataset.
+ */
+ start[0] = (hsize_t)PARTIAL_NO_SELECTION_Y_DIM_SCALE * (hsize_t)mpi_rank;
+ start[1] = 0;
+ stride[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE;
+ stride[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE;
+ count[0] = 1;
+ count[1] = (hsize_t)mpi_size;
+ block[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE;
+ block[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE;
+
+ VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "H5Sselect_hyperslab succeeded");
+
+ sel_dims[0] = count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE);
+
+ mspace_id = H5Screate_simple(1, sel_dims, NULL);
+ VRFY((mspace_id >= 0), "H5Screate_simple succeeded");
+
+ data = HDcalloc(1, count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) *
+ sizeof(int));
+ VRFY((data != NULL), "calloc succeeded");
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_id >= 0), "H5Pcreate succeeded");
+
+ /*
+ * Enable collective access for the data transfer.
+ */
+ VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, data) >= 0), "H5Dwrite succeeded");
+
+ VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded");
+
+ /*
+ * Ensure that linked-chunk I/O is performed since this is
+ * the particular code path where the issue lies and we don't
+ * want the library doing multi-chunk I/O behind our backs.
+ */
+ VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0),
+ "H5Pset_dxpl_mpio_chunk_opt succeeded");
+
+ read_buf = HDmalloc(count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) *
+ sizeof(int));
+ VRFY((read_buf != NULL), "malloc succeeded");
+
+ /*
+ * Make sure to call H5Sselect_none() on the non-participating process.
+ */
+ if (PARTIAL_NO_SELECTION_NO_SEL_PROCESS) {
+ VRFY((H5Sselect_none(fspace_id) >= 0), "H5Sselect_none succeeded");
+ VRFY((H5Sselect_none(mspace_id) >= 0), "H5Sselect_none succeeded");
+ }
+
+ /*
+ * Finally have each rank read their section of data back from the dataset.
+ */
+ VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0),
+ "H5Dread succeeded");
+
+ /*
+ * Check data integrity just to be sure.
+ */
+ if (!PARTIAL_NO_SELECTION_NO_SEL_PROCESS) {
+ VRFY((!HDmemcmp(data, read_buf,
+ count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) *
+ sizeof(int))),
+ "memcmp succeeded");
+ }
+
+ if (dataset_dims) {
+ HDfree(dataset_dims);
+ dataset_dims = NULL;
+ }
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded");
+ VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded");
+ VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded");
+ VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
+}
+
+/*
+ * A test for HDFFV-10562 which attempts to verify that using multi-chunk
+ * I/O with collective metadata reads enabled doesn't cause issues due to
+ * collective metadata reads being made only by process 0 in H5D__chunk_addrmap().
+ *
+ * Failure in this test may either cause a hang, or, due to how the MPI calls
+ * pertaining to this issue might mistakenly match up, may cause an MPI error
+ * message similar to:
+ *
+ * #008: H5Dmpio.c line 2546 in H5D__obtain_mpio_mode(): MPI_BCast failed
+ * major: Internal error (too specific to document in detail)
+ * minor: Some MPI function failed
+ * #009: H5Dmpio.c line 2546 in H5D__obtain_mpio_mode(): Message truncated, error stack:
+ *PMPI_Bcast(1600)..................: MPI_Bcast(buf=0x1df98e0, count=18, MPI_BYTE, root=0, comm=0x84000006)
+ *failed MPIR_Bcast_impl(1452).............: MPIR_Bcast(1476)..................:
+ *MPIR_Bcast_intra(1249)............:
+ *MPIR_SMP_Bcast(1088)..............:
+ *MPIR_Bcast_binomial(239)..........:
+ *MPIDI_CH3U_Receive_data_found(131): Message from rank 0 and tag 2 truncated; 2616 bytes received but buffer
+ *size is 18 major: Internal error (too specific to document in detail) minor: MPI Error String
+ *
+ */
+void
+test_multi_chunk_io_addrmap_issue(void)
+{
+ const char *filename;
+ hsize_t start[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
+ hsize_t stride[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
+ hsize_t count[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
+ hsize_t block[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
+ hsize_t dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {10, 5};
+ hsize_t chunk_dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {5, 5};
+ hsize_t max_dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {H5S_UNLIMITED, H5S_UNLIMITED};
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ void *read_buf = NULL;
+ int mpi_rank;
+ int data[5][5] = {{0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}};
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or file flush aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+
+ fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
+
+ /*
+ * Even though the testphdf5 framework currently sets collective metadata reads
+ * on the FAPL, we call it here just to be sure this is futureproof, since
+ * demonstrating this issue relies upon it.
+ */
+ VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded");
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+ space_id = H5Screate_simple(MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS, dims, max_dims);
+ VRFY((space_id >= 0), "H5Screate_simple succeeded");
+
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
+
+ VRFY((H5Pset_chunk(dcpl_id, MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS, chunk_dims) >= 0),
+ "H5Pset_chunk succeeded");
+
+ dset_id = H5Dcreate2(file_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_id >= 0), "H5Pcreate succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded");
+ VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_MULTI_IO) >= 0),
+ "H5Pset_dxpl_mpio_chunk_opt succeeded");
+
+ start[1] = 0;
+ stride[0] = stride[1] = 1;
+ count[0] = count[1] = 5;
+ block[0] = block[1] = 1;
+
+ if (mpi_rank == 0)
+ start[0] = 0;
+ else
+ start[0] = 5;
+
+ VRFY((H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "H5Sselect_hyperslab succeeded");
+ if (mpi_rank != 0)
+ VRFY((H5Sselect_none(space_id) >= 0), "H5Sselect_none succeeded");
+
+ VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, space_id, dxpl_id, data) >= 0), "H5Dwrite succeeded");
+
+ VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded");
+
+ read_buf = HDmalloc(50 * sizeof(int));
+ VRFY((read_buf != NULL), "malloc succeeded");
+
+ VRFY((H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "H5Dread succeeded");
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ VRFY((H5Sclose(space_id) >= 0), "H5Sclose succeeded");
+ VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded");
+ VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
+}
+
+/*
+ * A test for HDFFV-10562 which attempts to verify that using linked-chunk
+ * I/O with collective metadata reads enabled doesn't cause issues due to
+ * collective metadata reads being made only by process 0 in H5D__sort_chunk().
+ *
+ * NOTE: Due to the way that the threshold value which pertains to this test
+ * is currently calculated within HDF5, the following two conditions must be
+ * true to trigger the issue:
+ *
+ * Condition 1: A certain threshold ratio must be met in order to have HDF5
+ * obtain all chunk addresses collectively inside H5D__sort_chunk(). This is
+ * given by the following:
+ *
+ * (sum_chunk * 100) / (dataset_nchunks * mpi_size) >= 30%
+ *
+ * where:
+ * * `sum_chunk` is the combined sum of the number of chunks selected in
+ * the dataset by all ranks (chunks selected by more than one rank count
+ * individually toward the sum for each rank selecting that chunk)
+ * * `dataset_nchunks` is the number of chunks in the dataset (selected
+ * or not)
+ * * `mpi_size` is the size of the MPI Communicator
+ *
+ * Condition 2: `sum_chunk` divided by `mpi_size` must be greater than or equal
+ * to a certain threshold (as of this writing, 10000).
+ *
+ * To satisfy both these conditions, we #define a macro,
+ * LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM, which corresponds to the
+ * value of the H5D_ALL_CHUNK_ADDR_THRES_COL_NUM macro in H5Dmpio.c (the
+ * 10000 threshold from condition 2). We then create a dataset of that many
+ * chunks and have each MPI rank write to and read from a piece of every single
+ * chunk in the dataset. This ensures chunk utilization is the max possible
+ * and exceeds our 30% target ratio, while always exactly matching the numeric
+ * chunk threshold value of condition 2.
+ *
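+ * As a worked example of the two conditions (the concrete numbers here are
+ * for illustration only and assume mpi_size == 4): the dataset created below
+ * has 4 * 10000 elements and a chunk size of 4 elements, i.e. exactly 10000
+ * chunks, and every rank selects one element of every chunk, so
+ * sum_chunk = 4 * 10000 = 40000. Condition 1 then gives
+ * (40000 * 100) / (10000 * 4) = 100% >= 30%, and condition 2 gives
+ * 40000 / 4 = 10000 >= 10000, so both conditions hold by construction for
+ * any mpi_size.
+ *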
+ * Failure in this test may either cause a hang, or, due to how the MPI calls
+ * pertaining to this issue might mistakenly match up, may cause an MPI error
+ * message similar to:
+ *
+ * #008: H5Dmpio.c line 2338 in H5D__sort_chunk(): MPI_BCast failed
+ * major: Internal error (too specific to document in detail)
+ * minor: Some MPI function failed
+ * #009: H5Dmpio.c line 2338 in H5D__sort_chunk(): Other MPI error, error stack:
+ *PMPI_Bcast(1600)........: MPI_Bcast(buf=0x7eae610, count=320000, MPI_BYTE, root=0, comm=0x84000006) failed
+ *MPIR_Bcast_impl(1452)...:
+ *MPIR_Bcast(1476)........:
+ *MPIR_Bcast_intra(1249)..:
+ *MPIR_SMP_Bcast(1088)....:
+ *MPIR_Bcast_binomial(250): message sizes do not match across processes in the collective routine: Received
+ *2096 but expected 320000 major: Internal error (too specific to document in detail) minor: MPI Error String
+ */
+void
+test_link_chunk_io_sort_chunk_issue(void)
+{
+ const char *filename;
+ hsize_t dataset_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t sel_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t chunk_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t start[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t stride[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t count[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t block[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ int mpi_rank, mpi_size;
+ void *data = NULL;
+ void *read_buf = NULL;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or file flush aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+
+ fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
+
+ /*
+ * Even though the testphdf5 framework currently sets collective metadata reads
+ * on the FAPL, we call it here just to be sure this is futureproof, since
+ * demonstrating this issue relies upon it.
+ */
+ VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded");
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+ /*
+ * Create a one-dimensional dataset of exactly LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM
+ * chunks, where every rank writes to a piece of every single chunk to keep utilization high.
+ */
+ dataset_dims[0] = (hsize_t)mpi_size * (hsize_t)LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM;
+
+ fspace_id = H5Screate_simple(LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, dataset_dims, NULL);
+ VRFY((fspace_id >= 0), "H5Screate_simple succeeded");
+
+ /*
+ * Set up chunking on the dataset in order to reproduce the problem.
+ */
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
+
+ /* Chunk size is equal to MPI size since each rank writes to a piece of every chunk */
+ chunk_dims[0] = (hsize_t)mpi_size;
+
+ VRFY((H5Pset_chunk(dcpl_id, LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, chunk_dims) >= 0),
+ "H5Pset_chunk succeeded");
+
+ dset_id = H5Dcreate2(file_id, LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME, H5T_NATIVE_INT, fspace_id,
+ H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
+
+ /*
+ * Setup hyperslab selection to split the dataset among the ranks.
+ */
+ start[0] = (hsize_t)mpi_rank;
+ stride[0] = (hsize_t)mpi_size;
+ count[0] = LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM;
+ block[0] = 1;
+
+ VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "H5Sselect_hyperslab succeeded");
+
+ sel_dims[0] = count[0];
+
+ mspace_id = H5Screate_simple(1, sel_dims, NULL);
+ VRFY((mspace_id >= 0), "H5Screate_simple succeeded");
+
+ data = HDcalloc(1, count[0] * sizeof(int));
+ VRFY((data != NULL), "calloc succeeded");
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_id >= 0), "H5Pcreate succeeded");
+
+ /*
+ * Enable collective access for the data transfer.
+ */
+ VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, data) >= 0), "H5Dwrite succeeded");
+
+ VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded");
+
+ /*
+ * Ensure that linked-chunk I/O is performed since this is
+ * the particular code path where the issue lies and we don't
+ * want the library doing multi-chunk I/O behind our backs.
+ */
+ VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0),
+ "H5Pset_dxpl_mpio_chunk_opt succeeded");
+
+ read_buf = HDmalloc(count[0] * sizeof(int));
+ VRFY((read_buf != NULL), "malloc succeeded");
+
+ VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "H5Sselect_hyperslab succeeded");
+
+ sel_dims[0] = count[0];
+
+ VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded");
+
+ mspace_id = H5Screate_simple(1, sel_dims, NULL);
+ VRFY((mspace_id >= 0), "H5Screate_simple succeeded");
+
+ /*
+ * Finally have each rank read their section of data back from the dataset.
+ */
+ VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0),
+ "H5Dread succeeded");
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded");
+ VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded");
+ VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded");
+ VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
+}
+
+/*
+ * A test for GitHub issue #2433 which causes a collective metadata write
+ * of global heap data. This test is meant to ensure that global heap data
+ * gets correctly mapped as raw data during a collective metadata write
+ * using vector I/O.
+ *
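+ * As background (a sketch of HDF5's usual variable-length storage layout,
+ * not something verified by this test itself): the attribute message in the
+ * object header stores only a reference into a global heap collection, while
+ * the COLL_GHEAP_WRITE_ATTR_NELEMS integers pointed to by vl_data.p below are
+ * what get written into that heap collection. It is this heap collection that
+ * must be mapped as raw data when the metadata cache performs its collective
+ * vector write.
+ *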
+ * An assertion exists in the library that should be triggered if global
+ * heap data is not correctly mapped as raw data.
+ */
+void
+test_collective_global_heap_write(void)
+{
+ const char *filename;
+ hsize_t attr_dims[COLL_GHEAP_WRITE_ATTR_DIMS];
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t vl_type = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hvl_t vl_data;
+ int mpi_rank, mpi_size;
+ int data_buf[COLL_GHEAP_WRITE_ATTR_NELEMS];
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or file flush aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+
+ fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
+
+ /*
+ * Even though the testphdf5 framework currently sets collective metadata
+ * writes on the FAPL, we call it here just to be sure this is futureproof,
+ * since demonstrating this issue relies upon it.
+ */
+ VRFY((H5Pset_coll_metadata_write(fapl_id, true) >= 0), "Set collective metadata writes succeeded");
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+ attr_dims[0] = 1;
+
+ fspace_id = H5Screate_simple(COLL_GHEAP_WRITE_ATTR_DIMS, attr_dims, NULL);
+ VRFY((fspace_id >= 0), "H5Screate_simple succeeded");
+
+ vl_type = H5Tvlen_create(H5T_NATIVE_INT);
+ VRFY((vl_type >= 0), "H5Tvlen_create succeeded");
+
+ vl_data.len = COLL_GHEAP_WRITE_ATTR_NELEMS;
+ vl_data.p = data_buf;
+
+ /*
+ * Create a variable-length attribute that will get written to the global heap
+ */
+ attr_id = H5Acreate2(file_id, COLL_GHEAP_WRITE_ATTR_NAME, vl_type, fspace_id, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((attr_id >= 0), "H5Acreate2 succeeded");
+
+ for (size_t i = 0; i < COLL_GHEAP_WRITE_ATTR_NELEMS; i++)
+ data_buf[i] = (int)i;
+
+ VRFY((H5Awrite(attr_id, vl_type, &vl_data) >= 0), "H5Awrite succeeded");
+
+ VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded");
+    VRFY((H5Tclose(vl_type) >= 0), "H5Tclose succeeded");
+ VRFY((H5Aclose(attr_id) >= 0), "H5Aclose succeeded");
+ VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
+}
diff --git a/testpar/API/t_dset.c b/testpar/API/t_dset.c
new file mode 100644
index 0000000..d005243
--- /dev/null
+++ b/testpar/API/t_dset.c
@@ -0,0 +1,4335 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Parallel tests for datasets
+ */
+
+/*
+ * Example of using the parallel HDF5 library to access datasets.
+ *
+ * This program contains three major parts. Part 1 tests fixed dimension
+ * datasets, for both independent and collective transfer modes.
+ * Part 2 tests extendible datasets, for independent transfer mode
+ * only.
+ * Part 3 tests extendible datasets, for collective transfer mode
+ * only.
+ */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+/*
+ * The following are various utility routines used by the tests.
+ */
+
+/*
+ * Setup the dimensions of the hyperslab.
+ * Two modes--by rows or by columns.
+ * Assume dimension rank is 2.
+ * BYROW divide into slabs of rows
+ * BYCOL divide into blocks of columns
+ * ZROW same as BYROW except process 0 gets 0 rows
+ * ZCOL same as BYCOL except process 0 gets 0 columns
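+ *
+ * Illustrative example (the dimensions are assumed for the example only):
+ * with dim0 = 8, dim1 = 12 and mpi_size = 4, BYROW gives each rank a 2 x 12
+ * slab of rows starting at row 2 * mpi_rank, BYCOL gives each rank an 8 x 3
+ * block of columns starting at column 3 * mpi_rank, and ZROW/ZCOL behave the
+ * same except that rank 0 selects 0 rows/columns.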
+ */
+static void
+slab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
+ int mode)
+{
+ switch (mode) {
+ case BYROW:
+            /* Each process takes a slab of rows. */
+ block[0] = (hsize_t)(dim0 / mpi_size);
+ block[1] = (hsize_t)dim1;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set BYROW\n");
+ break;
+ case BYCOL:
+ /* Each process takes a block of columns. */
+ block[0] = (hsize_t)dim0;
+ block[1] = (hsize_t)(dim1 / mpi_size);
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = (hsize_t)mpi_rank * block[1];
+ if (VERBOSE_MED)
+ HDprintf("slab_set BYCOL\n");
+ break;
+ case ZROW:
+            /* Similar to BYROW except process 0 gets 0 rows */
+ block[0] = (hsize_t)(mpi_rank ? dim0 / mpi_size : 0);
+ block[1] = (hsize_t)dim1;
+ stride[0] = (mpi_rank ? block[0] : 1); /* avoid setting stride to 0 */
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (mpi_rank ? (hsize_t)mpi_rank * block[0] : 0);
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set ZROW\n");
+ break;
+ case ZCOL:
+            /* Similar to BYCOL except process 0 gets 0 columns */
+ block[0] = (hsize_t)dim0;
+ block[1] = (hsize_t)(mpi_rank ? dim1 / mpi_size : 0);
+ stride[0] = block[0];
+ stride[1] = (hsize_t)(mpi_rank ? block[1] : 1); /* avoid setting stride to 0 */
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = (mpi_rank ? (hsize_t)mpi_rank * block[1] : 0);
+ if (VERBOSE_MED)
+ HDprintf("slab_set ZCOL\n");
+ break;
+ default:
+ /* Unknown mode. Set it to cover the whole dataset. */
+ HDprintf("unknown slab_set mode (%d)\n", mode);
+ block[0] = (hsize_t)dim0;
+ block[1] = (hsize_t)dim1;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set wholeset\n");
+ break;
+ }
+ if (VERBOSE_MED) {
+ HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total "
+ "datapoints=%lu\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1],
+ (unsigned long)(block[0] * block[1] * count[0] * count[1]));
+ }
+}
+
+/*
+ * Setup the coordinates for point selection.
+ */
+void
+point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points,
+ hsize_t coords[], int order)
+{
+ hsize_t i, j, k = 0, m, n, s1, s2;
+
+ HDcompile_assert(RANK == 2);
+
+ if (OUT_OF_ORDER == order)
+ k = (num_points * RANK) - 1;
+ else if (IN_ORDER == order)
+ k = 0;
+
+ s1 = start[0];
+ s2 = start[1];
+
+ for (i = 0; i < count[0]; i++)
+ for (j = 0; j < count[1]; j++)
+ for (m = 0; m < block[0]; m++)
+ for (n = 0; n < block[1]; n++)
+ if (OUT_OF_ORDER == order) {
+ coords[k--] = s2 + (stride[1] * j) + n;
+ coords[k--] = s1 + (stride[0] * i) + m;
+ }
+ else if (IN_ORDER == order) {
+ coords[k++] = s1 + stride[0] * i + m;
+ coords[k++] = s2 + stride[1] * j + n;
+ }
+
+ if (VERBOSE_MED) {
+ HDprintf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total "
+ "datapoints=%lu\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1],
+ (unsigned long)(block[0] * block[1] * count[0] * count[1]));
+ k = 0;
+ for (i = 0; i < num_points; i++) {
+ HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
+ k += 2;
+ }
+ }
+}
+
+/*
+ * Fill the dataset with trivial data for testing.
+ * Assume dimension rank is 2 and data is stored contiguously.
+ */
+static void
+dataset_fill(hsize_t start[], hsize_t block[], DATATYPE *dataset)
+{
+ DATATYPE *dataptr = dataset;
+ hsize_t i, j;
+
+ /* put some trivial data in the data_array */
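+    /* e.g. (indices chosen for illustration only): with start = (0, 0), the
+     * local element at (row, col) = (2, 3) receives (2 + 0) * 100 + (3 + 0 + 1)
+     * = 204, so the pattern encodes row * 100 + (col + 1) */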
+ for (i = 0; i < block[0]; i++) {
+ for (j = 0; j < block[1]; j++) {
+ *dataptr = (DATATYPE)((i + start[0]) * 100 + (j + start[1] + 1));
+ dataptr++;
+ }
+ }
+}
+
+/*
+ * Print the content of the dataset.
+ */
+static void
+dataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset)
+{
+ DATATYPE *dataptr = dataset;
+ hsize_t i, j;
+
+ /* print the column heading */
+ HDprintf("%-8s", "Cols:");
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%3lu ", (unsigned long)(start[1] + j));
+ }
+ HDprintf("\n");
+
+ /* print the slab data */
+ for (i = 0; i < block[0]; i++) {
+ HDprintf("Row %2lu: ", (unsigned long)(i + start[0]));
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%03d ", *dataptr++);
+ }
+ HDprintf("\n");
+ }
+}
+
+/*
+ * Verify the content of the dataset against the original data.
+ */
+int
+dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset,
+ DATATYPE *original)
+{
+ hsize_t i, j;
+ int vrfyerrs;
+
+ /* print it if VERBOSE_MED */
+ if (VERBOSE_MED) {
+ HDprintf("dataset_vrfy dumping:::\n");
+ HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1]);
+ HDprintf("original values:\n");
+ dataset_print(start, block, original);
+ HDprintf("compared values:\n");
+ dataset_print(start, block, dataset);
+ }
+
+ vrfyerrs = 0;
+ for (i = 0; i < block[0]; i++) {
+ for (j = 0; j < block[1]; j++) {
+ if (*dataset != *original) {
+ if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
+ HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
+ (unsigned long)i, (unsigned long)j, (unsigned long)(i + start[0]),
+ (unsigned long)(j + start[1]), *(original), *(dataset));
+                }
+            }
+            dataset++;
+            original++;
+ }
+ }
+ if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("[more errors ...]\n");
+ if (vrfyerrs)
+ HDprintf("%d errors found in dataset_vrfy\n", vrfyerrs);
+ return (vrfyerrs);
+}
+
+/*
+ * Part 1.a--Independent read/write for fixed dimension datasets.
+ */
+
+/*
+ * Example of using the parallel HDF5 library to create two datasets
+ * in one HDF5 file with parallel MPIO access support.
+ * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within each
+ * dataset.
+ */
+
+void
+dataset_writeInd(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ const char *filename;
+
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Independent write test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+
+ /* ----------------------------------------
+ * CREATE AN HDF5 FILE WITH PARALLEL ACCESS
+ * ---------------------------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* ---------------------------------------------
+ * Define the dimensions of the overall datasets
+ * and the slabs local to the MPI process.
+ * ------------------------------------------- */
+ /* setup dimensionality object */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* create a dataset collectively */
+ dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
+
+ /* create another dataset collectively */
+ dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
+
+ /*
+ * To test the independent orders of writes between processes, all
+ * even number processes write to dataset1 first, then dataset2.
+ * All odd number processes write to dataset2 first, then dataset1.
+ */
+
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* put some trivial data in the data_array */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* write data independently */
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+ /* write data independently */
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
+
+ /* setup dimensions again to write with zero rows for process 0 */
+ if (VERBOSE_MED)
+ HDprintf("writeInd by some with zero row\n");
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+ /* need to make mem_dataspace to match for process 0 */
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+        VRFY((ret >= 0), "H5Sselect_hyperslab mem_dataspace succeeded");
+ }
+ MESG("writeInd by some with zero row");
+ if ((mpi_rank / 2) * 2 != mpi_rank) {
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded");
+ }
+#ifdef BARRIER_CHECKS
+ MPI_Barrier(MPI_COMM_WORLD);
+#endif /* BARRIER_CHECKS */
+
+ /* release dataspace ID */
+ H5Sclose(file_dataspace);
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose2 succeeded");
+
+ /* release all IDs created */
+ H5Sclose(sid);
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if (data_array1)
+ HDfree(data_array1);
+}
+
+/* Example of using the parallel HDF5 library to read a dataset */
+void
+dataset_readInd(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ const char *filename;
+
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Independent read test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
+
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* open the file collectively */
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
+ VRFY((fid >= 0), "");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* open the dataset1 collectively */
+ dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "");
+
+ /* open another dataset collectively */
+ dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "");
+
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ dataset_fill(start, block, data_origin1);
+
+ /* read data independently */
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if (ret)
+ nerrors++;
+
+ /* read data independently */
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if (ret)
+ nerrors++;
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "");
+
+ /* release all IDs created */
+ H5Sclose(file_dataspace);
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_origin1)
+ HDfree(data_origin1);
+}
+
+/*
+ * Part 1.b--Collective read/write for fixed dimension datasets.
+ */
+
+/*
+ * Example of using the parallel HDF5 library to create two datasets
+ * in one HDF5 file with collective parallel access support.
+ * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within each
+ * dataset. [Note: not so yet. Datasets are of size dim0 x dim1 and
+ * each process controls a hyperslab within.]
+ */
+
+void
+dataset_writeAll(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2, dataset3, dataset4; /* Dataset ID */
+ hid_t dataset5, dataset6, dataset7; /* Dataset ID */
+ hid_t datatype; /* Datatype ID */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ const char *filename;
+
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+
+ size_t num_points; /* for point selection */
+ hsize_t *coords = NULL; /* for point selection */
+ hsize_t current_dims; /* for point selection */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Collective write test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* set up the coords array selection */
+ num_points = (size_t)dim1;
+ coords = (hsize_t *)HDmalloc((size_t)dim1 * (size_t)RANK * sizeof(hsize_t));
+ VRFY((coords != NULL), "coords malloc succeeded");
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+
+ /* -------------------
+ * START AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* --------------------------
+ * Define the dimensions of the overall datasets
+ * and create the dataset
+ * ------------------------- */
+ /* setup 2-D dimensionality object */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* create a dataset collectively */
+ dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
+
+ /* create another dataset collectively */
+ datatype = H5Tcopy(H5T_NATIVE_INT);
+ ret = H5Tset_order(datatype, H5T_ORDER_LE);
+ VRFY((ret >= 0), "H5Tset_order succeeded");
+
+ dataset2 = H5Dcreate2(fid, DATASETNAME2, datatype, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dcreate2 2 succeeded");
+
+ /* create a third dataset collectively */
+ dataset3 = H5Dcreate2(fid, DATASETNAME3, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset3 >= 0), "H5Dcreate2 succeeded");
+
+ dataset5 = H5Dcreate2(fid, DATASETNAME7, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset5 >= 0), "H5Dcreate2 succeeded");
+ dataset6 = H5Dcreate2(fid, DATASETNAME8, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset6 >= 0), "H5Dcreate2 succeeded");
+ dataset7 = H5Dcreate2(fid, DATASETNAME9, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset7 >= 0), "H5Dcreate2 succeeded");
+
+ /* release 2-D space ID created */
+ H5Sclose(sid);
+
+ /* setup scalar dimensionality object */
+ sid = H5Screate(H5S_SCALAR);
+ VRFY((sid >= 0), "H5Screate succeeded");
+
+ /* create a fourth dataset collectively */
+ dataset4 = H5Dcreate2(fid, DATASETNAME4, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset4 >= 0), "H5Dcreate2 succeeded");
+
+ /* release scalar space ID created */
+ H5Sclose(sid);
+
+ /*
+ * Set up dimensions of the slab this process accesses.
+ */
+
+ /* Dataset1: each process takes a block of rows. */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill the local slab with some trivial data */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ MESG("writeAll by Row");
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+
+ /* setup dimensions again to writeAll with zero rows for process 0 */
+ if (VERBOSE_MED)
+ HDprintf("writeAll by some with zero row\n");
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+ /* need to make mem_dataspace to match for process 0 */
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+        VRFY((ret >= 0), "H5Sselect_hyperslab mem_dataspace succeeded");
+ }
+ MESG("writeAll by some with zero row");
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded");
+
+ /* release all temporary handles. */
+ /* Could have used them for dataset2 but it is cleaner */
+ /* to create them again.*/
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* Dataset2: each process takes a block of columns. */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+
+ /* put some trivial data in the data_array */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill the local slab with some trivial data */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* write data independently */
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
+
+ /* setup dimensions again to writeAll with zero columns for process 0 */
+ if (VERBOSE_MED)
+ HDprintf("writeAll by some with zero col\n");
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL);
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+ /* need to make mem_dataspace to match for process 0 */
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+        VRFY((ret >= 0), "H5Sselect_hyperslab mem_dataspace succeeded");
+ }
+ MESG("writeAll by some with zero col");
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset1 by ZCOL succeeded");
+
+ /* release all temporary handles. */
+ /* Could have used them for dataset3 but it is cleaner */
+ /* to create them again.*/
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* Dataset3: each process takes a block of rows, except process zero uses "none" selection. */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset3);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_none(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded");
+ } /* end if */
+ else {
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+ } /* end else */
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+ if (MAINPROCESS) {
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded");
+ } /* end if */
+
+ /* fill the local slab with some trivial data */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ } /* end if */
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ MESG("writeAll with none");
+ ret = H5Dwrite(dataset3, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset3 succeeded");
+
+ /* write data collectively (with datatype conversion) */
+ MESG("writeAll with none");
+ ret = H5Dwrite(dataset3, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset3 succeeded");
+
+ /* release all temporary handles. */
+ /* Could have used them for dataset4 but it is cleaner */
+ /* to create them again.*/
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* Dataset4: each process uses an "all" selection, except process zero writes no data ("none" selection). */
+ /* Additionally, these are in a scalar dataspace */
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset4);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_none(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_all file_dataspace succeeded");
+ } /* end if */
+ else {
+ ret = H5Sselect_all(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none succeeded");
+ } /* end else */
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate(H5S_SCALAR);
+ VRFY((mem_dataspace >= 0), "");
+ if (MAINPROCESS) {
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_all mem_dataspace succeeded");
+ } /* end if */
+ else {
+ ret = H5Sselect_all(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none succeeded");
+ } /* end else */
+
+ /* fill the local slab with some trivial data */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ } /* end if */
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ MESG("writeAll with scalar dataspace");
+ ret = H5Dwrite(dataset4, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset4 succeeded");
+
+ /* write data collectively (with datatype conversion) */
+ MESG("writeAll with scalar dataspace");
+ ret = H5Dwrite(dataset4, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset4 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ if (data_array1)
+ free(data_array1);
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ block[0] = 1;
+ block[1] = (hsize_t)dim1;
+ stride[0] = 1;
+ stride[1] = (hsize_t)dim1;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
+ start[1] = 0;
+
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* Dataset5: point selection in File - Hyperslab selection in Memory*/
+ /* create a file dataspace independently */
+ point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ file_dataspace = H5Dget_space(dataset5);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ start[0] = 0;
+ start[1] = 0;
+ mem_dataspace = H5Dget_space(dataset5);
+ VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset5 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* Dataset6: point selection in File - Point selection in Memory*/
+ /* create a file dataspace independently */
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
+ start[1] = 0;
+ point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ file_dataspace = H5Dget_space(dataset6);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ start[0] = 0;
+ start[1] = 0;
+ point_set(start, count, stride, block, num_points, coords, IN_ORDER);
+ mem_dataspace = H5Dget_space(dataset6);
+ VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset6 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* Dataset7: point selection in File - All selection in Memory*/
+ /* create a file dataspace independently */
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
+ start[1] = 0;
+ point_set(start, count, stride, block, num_points, coords, IN_ORDER);
+ file_dataspace = H5Dget_space(dataset7);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ current_dims = num_points;
+ mem_dataspace = H5Screate_simple(1, &current_dims, NULL);
+ VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
+
+ ret = H5Sselect_all(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_all succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset7 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /*
+ * All writes completed. Close datasets collectively
+ */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose2 succeeded");
+ ret = H5Dclose(dataset3);
+ VRFY((ret >= 0), "H5Dclose3 succeeded");
+ ret = H5Dclose(dataset4);
+ VRFY((ret >= 0), "H5Dclose4 succeeded");
+ ret = H5Dclose(dataset5);
+ VRFY((ret >= 0), "H5Dclose5 succeeded");
+ ret = H5Dclose(dataset6);
+ VRFY((ret >= 0), "H5Dclose6 succeeded");
+ ret = H5Dclose(dataset7);
+ VRFY((ret >= 0), "H5Dclose7 succeeded");
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if (coords)
+ HDfree(coords);
+ if (data_array1)
+ HDfree(data_array1);
+}
+
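+/*
+ * Illustrative sketch, not exercised by the test suite: the routine above
+ * repeats one core pattern many times -- get the file dataspace, select this
+ * rank's hyperslab, build a matching memory dataspace, switch the transfer
+ * property list to collective MPI-IO, then write. The helper below condenses
+ * just that pattern. The helper name is hypothetical; RANK and DATATYPE are
+ * assumed to come from this file's existing test headers.
+ */
+static herr_t
+sketch_collective_hyperslab_write(hid_t dset, const hsize_t start[RANK], const hsize_t block[RANK],
+ const DATATYPE *buf)
+{
+ hsize_t count[RANK] = {1, 1}; /* one block per dimension */
+ hid_t file_space = H5Dget_space(dset); /* selection lives in the file dataspace */
+ hid_t mem_space = H5Screate_simple(RANK, block, NULL); /* memory buffer matches the block */
+ hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
+ herr_t ret;
+
+ /* each rank selects only its own slab; NULL stride defaults to 1 */
+ H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, NULL, count, block);
+
+ /* request collective MPI-IO; every rank must still call H5Dwrite, even with an empty selection */
+ H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ ret = H5Dwrite(dset, H5T_NATIVE_INT, mem_space, file_space, dxpl, buf);
+
+ H5Pclose(dxpl);
+ H5Sclose(mem_space);
+ H5Sclose(file_space);
+ return ret;
+}
+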
+/*
+ * Example of using the parallel HDF5 library to read several datasets
+ * in one HDF5 file with collective parallel access support.
+ * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within each
+ * dataset. [Note: not so yet. Datasets are of size dim0 x dim1 and
+ * each process controls a hyperslab within.]
+ */
+
+void
+dataset_readAll(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2, dataset5, dataset6, dataset7; /* Dataset ID */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ const char *filename;
+
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+
+ size_t num_points; /* for point selection */
+ hsize_t *coords = NULL; /* for point selection */
+ int i, j, k;
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Collective read test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* set up the coords array selection */
+ num_points = (size_t)dim1;
+ coords = (hsize_t *)HDmalloc((size_t)dim0 * (size_t)dim1 * RANK * sizeof(hsize_t));
+ VRFY((coords != NULL), "coords malloc succeeded");
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
+
+ /* -------------------
+ * OPEN AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* open the file collectively */
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
+ VRFY((fid >= 0), "H5Fopen succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* --------------------------
+ * Open the datasets in it
+ * ------------------------- */
+ /* open the dataset1 collectively */
+ dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dopen2 succeeded");
+
+ /* open another dataset collectively */
+ dataset2 = H5Dopen2(fid, DATASETNAME2, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dopen2 2 succeeded");
+
+ /* open another dataset collectively */
+ dataset5 = H5Dopen2(fid, DATASETNAME7, H5P_DEFAULT);
+ VRFY((dataset5 >= 0), "H5Dopen2 5 succeeded");
+ dataset6 = H5Dopen2(fid, DATASETNAME8, H5P_DEFAULT);
+ VRFY((dataset6 >= 0), "H5Dopen2 6 succeeded");
+ dataset7 = H5Dopen2(fid, DATASETNAME9, H5P_DEFAULT);
+ VRFY((dataset7 >= 0), "H5Dopen2 7 succeeded");
+
+ /*
+ * Set up dimensions of the slab this process accesses.
+ */
+
+ /* Dataset1: each process takes a block of columns. */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ dataset_fill(start, block, data_origin1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset1 succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if (ret)
+ nerrors++;
+
+ /* setup dimensions again to readAll with zero columns for process 0 */
+ if (VERBOSE_MED)
+ HDprintf("readAll by some with zero col\n");
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL);
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ /* need to make mem_dataspace to match for process 0 */
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ }
+ MESG("readAll by some with zero col");
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset1 by ZCOL succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if (ret)
+ nerrors++;
+
+ /* release all temporary handles. */
+ /* Could have used them for dataset2 but it is cleaner */
+ /* to create them again.*/
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* Dataset2: each process takes a block of rows. */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ dataset_fill(start, block, data_origin1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset2 succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if (ret)
+ nerrors++;
+
+ /* setup dimensions again to readAll with zero rows for process 0 */
+ if (VERBOSE_MED)
+ HDprintf("readAll by some with zero row\n");
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ /* need to make mem_dataspace to match for process 0 */
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ }
+ MESG("readAll by some with zero row");
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset1 by ZROW succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if (ret)
+ nerrors++;
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ if (data_array1)
+ free(data_array1);
+ if (data_origin1)
+ free(data_origin1);
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
+
+ block[0] = 1;
+ block[1] = (hsize_t)dim1;
+ stride[0] = 1;
+ stride[1] = (hsize_t)dim1;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
+ start[1] = 0;
+
+ dataset_fill(start, block, data_origin1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
+ }
+
+ /* Dataset5: point selection in memory - Hyperslab selection in file*/
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset5);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ start[0] = 0;
+ start[1] = 0;
+ point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ mem_dataspace = H5Dget_space(dataset5);
+ VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset5 succeeded");
+
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if (ret)
+ nerrors++;
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ if (data_array1)
+ free(data_array1);
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ /* Dataset6: point selection in File - Point selection in Memory*/
+ /* create a file dataspace independently */
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
+ start[1] = 0;
+ point_set(start, count, stride, block, num_points, coords, IN_ORDER);
+ file_dataspace = H5Dget_space(dataset6);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ start[0] = 0;
+ start[1] = 0;
+ point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ mem_dataspace = H5Dget_space(dataset6);
+ VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset6 succeeded");
+
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if (ret)
+ nerrors++;
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ if (data_array1)
+ free(data_array1);
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ /* Dataset7: point selection in memory - All selection in file*/
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset7);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_all(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_all succeeded");
+
+ num_points = (size_t)(dim0 * dim1);
+ k = 0;
+ for (i = 0; i < dim0; i++) {
+ for (j = 0; j < dim1; j++) {
+ coords[k++] = (hsize_t)i;
+ coords[k++] = (hsize_t)j;
+ }
+ }
+ mem_dataspace = H5Dget_space(dataset7);
+ VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset7 succeeded");
+
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
+ start[1] = 0;
+ ret = dataset_vrfy(start, count, stride, block, data_array1 + (dim0 / mpi_size * dim1 * mpi_rank),
+ data_origin1);
+ if (ret)
+ nerrors++;
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /*
+ * All reads completed. Close datasets collectively
+ */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose2 succeeded");
+ ret = H5Dclose(dataset5);
+ VRFY((ret >= 0), "H5Dclose5 succeeded");
+ ret = H5Dclose(dataset6);
+ VRFY((ret >= 0), "H5Dclose6 succeeded");
+ ret = H5Dclose(dataset7);
+ VRFY((ret >= 0), "H5Dclose7 succeeded");
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if (coords)
+ HDfree(coords);
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_origin1)
+ HDfree(data_origin1);
+}
+
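+/*
+ * Illustrative sketch, not exercised by the test suite: datasets 5-7 above use
+ * element (point) selections. The hypothetical helper below reduces that
+ * pattern to its essentials -- coordinates are flattened (row, column) pairs
+ * handed to H5Sselect_elements on the file side, while the memory side is a
+ * plain 1-D buffer. RANK, DATATYPE, HDmalloc and HDfree are assumed to come
+ * from this file's existing test headers.
+ */
+static herr_t
+sketch_point_selection_read(hid_t dset, hsize_t row, hsize_t ncols, DATATYPE *buf)
+{
+ hid_t file_space = H5Dget_space(dset);
+ hid_t mem_space = H5Screate_simple(1, &ncols, NULL);
+ hsize_t *pts = (hsize_t *)HDmalloc((size_t)ncols * RANK * sizeof(hsize_t));
+ herr_t ret = -1;
+ hsize_t j;
+
+ if (pts) {
+ /* one (row, column) pair per selected element, flattened in rank order */
+ for (j = 0; j < ncols; j++) {
+ pts[RANK * j] = row;
+ pts[RANK * j + 1] = j;
+ }
+ H5Sselect_elements(file_space, H5S_SELECT_SET, (size_t)ncols, pts);
+
+ /* the 1-D memory dataspace already covers exactly ncols elements */
+ ret = H5Dread(dset, H5T_NATIVE_INT, mem_space, file_space, H5P_DEFAULT, buf);
+ HDfree(pts);
+ }
+
+ H5Sclose(mem_space);
+ H5Sclose(file_space);
+ return ret;
+}
+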
+/*
+ * Part 2--Independent read/write for extendible datasets.
+ */
+
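+/*
+ * Illustrative sketch, not exercised by the test suite: the tests in this part
+ * all follow the same recipe for an extendible dataset -- a chunked creation
+ * property list, a dataspace whose maximum dimensions are H5S_UNLIMITED, and a
+ * later H5Dset_extent() call once the real size is known. The hypothetical
+ * helper below shows just that recipe; RANK is assumed from this file's
+ * existing test headers.
+ */
+static hid_t
+sketch_create_extendible_dset(hid_t fid, const char *name, hsize_t chunk0, hsize_t chunk1)
+{
+ hsize_t dims[RANK] = {0, 0}; /* start empty; extend before writing */
+ hsize_t max_dims[RANK] = {H5S_UNLIMITED, H5S_UNLIMITED};
+ hsize_t chunks[RANK] = {chunk0, chunk1};
+ hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ hid_t sid, dset;
+
+ /* chunked storage is required when any dimension is unlimited */
+ H5Pset_chunk(dcpl, RANK, chunks);
+
+ sid = H5Screate_simple(RANK, dims, max_dims);
+ dset = H5Dcreate2(fid, name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+
+ H5Sclose(sid);
+ H5Pclose(dcpl);
+
+ /* callers grow the dataset later with H5Dset_extent() and must then
+ * re-fetch the file dataspace via H5Dget_space() before selecting in it */
+ return dset;
+}
+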
+/*
+ * Example of using the parallel HDF5 library to create two extendible
+ * datasets in one HDF5 file with independent parallel MPIO access support.
+ * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within each
+ * dataset.
+ */
+
+void
+extend_writeInd(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ const char *filename;
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t max_dims[RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ hsize_t chunk_dims[RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
+
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK]; /* for hyperslab setting */
+ hsize_t stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Extend independent write test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* setup chunk-size. Make sure sizes are > 0 */
+ chunk_dims[0] = (hsize_t)chunkdim0;
+ chunk_dims[1] = (hsize_t)chunkdim1;
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+
+ /* -------------------
+ * START AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* Reduce the number of metadata cache slots, so that there are cache
+ * collisions during the raw data I/O on the chunked dataset. This stresses
+ * the metadata cache and tests for cache bugs. -QAK
+ */
+ {
+ int mdc_nelmts;
+ size_t rdcc_nelmts;
+ size_t rdcc_nbytes;
+ double rdcc_w0;
+
+ ret = H5Pget_cache(acc_tpl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0);
+ VRFY((ret >= 0), "H5Pget_cache succeeded");
+ mdc_nelmts = 4;
+ ret = H5Pset_cache(acc_tpl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);
+ VRFY((ret >= 0), "H5Pset_cache succeeded");
+ }
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* --------------------------------------------------------------
+ * Define the dimensions of the overall datasets and create them.
+ * ------------------------------------------------------------- */
+
+ /* set up dataset storage chunk sizes and creation property list */
+ if (VERBOSE_MED)
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
+ ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+ /* setup dimensionality object */
+ /* start out with no rows, extend it later. */
+ dims[0] = dims[1] = 0;
+ sid = H5Screate_simple(RANK, dims, max_dims);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* create an extendible dataset collectively */
+ dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
+
+ /* create another extendible dataset collectively */
+ dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
+
+ /* release resource */
+ H5Sclose(sid);
+ H5Pclose(dataset_pl);
+
+ /* -------------------------
+ * Test writing to dataset1
+ * -------------------------*/
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* put some trivial data in the data_array */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* Extend its current dim sizes before writing */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ ret = H5Dset_extent(dataset1, dims);
+ VRFY((ret >= 0), "H5Dset_extent succeeded");
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* write data independently */
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* release resource */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+
+ /* -------------------------
+ * Test writing to dataset2
+ * -------------------------*/
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+
+ /* put some trivial data in the data_array */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* Try writing to dataset2 beyond its current dim sizes. Should fail. */
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset2);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* write data independently. Should fail. */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ }
+ H5E_END_TRY
+ VRFY((ret < 0), "H5Dwrite failed as expected");
+
+ H5Sclose(file_dataspace);
+
+ /* Extend dataset2 and try again. Should succeed. */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ ret = H5Dset_extent(dataset2, dims);
+ VRFY((ret >= 0), "H5Dset_extent succeeded");
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset2);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* write data independently */
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* release resource */
+ ret = H5Sclose(file_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Sclose(mem_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose2 succeeded");
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if (data_array1)
+ HDfree(data_array1);
+}
+
+/*
+ * Example of using the parallel HDF5 library to create an extendible dataset
+ * and perform I/O on it in a way that verifies that the chunk cache is
+ * bypassed for parallel I/O.
+ */
+
+void
+extend_writeInd2(void)
+{
+ const char *filename;
+ hid_t fid; /* HDF5 file ID */
+ hid_t fapl; /* File access templates */
+ hid_t fs; /* File dataspace ID */
+ hid_t ms; /* Memory dataspace ID */
+ hid_t dataset; /* Dataset ID */
+ hsize_t orig_size = 10; /* Original dataset dim size */
+ hsize_t new_size = 20; /* Extended dataset dim size */
+ hsize_t one = 1;
+ hsize_t max_size = H5S_UNLIMITED; /* dataset maximum dim size */
+ hsize_t chunk_size = 16384; /* chunk size */
+ hid_t dcpl; /* dataset create prop. list */
+ int written[10], /* Data to write */
+ retrieved[10]; /* Data read in */
+ int mpi_size, mpi_rank; /* MPI settings */
+ int i; /* Local index variable */
+ herr_t ret; /* Generic return value */
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Extend independent write test #2 on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* -------------------
+ * START AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl >= 0), "create_faccess_plist succeeded");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* --------------------------------------------------------------
+ * Define the dimensions of the overall datasets and create them.
+ * ------------------------------------------------------------- */
+
+ /* set up dataset storage chunk sizes and creation property list */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl >= 0), "H5Pcreate succeeded");
+ ret = H5Pset_chunk(dcpl, 1, &chunk_size);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+ /* setup dimensionality object */
+ fs = H5Screate_simple(1, &orig_size, &max_size);
+ VRFY((fs >= 0), "H5Screate_simple succeeded");
+
+ /* create an extendible dataset collectively */
+ dataset = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, fs, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreat2e succeeded");
+
+ /* release resource */
+ ret = H5Pclose(dcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* -------------------------
+ * Test writing to dataset
+ * -------------------------*/
+ /* create a memory dataspace independently */
+ ms = H5Screate_simple(1, &orig_size, &max_size);
+ VRFY((ms >= 0), "H5Screate_simple succeeded");
+
+ /* put some trivial data in the write buffer */
+ for (i = 0; i < (int)orig_size; i++)
+ written[i] = i;
+ MESG("data array initialized");
+ if (VERBOSE_MED) {
+ MESG("writing at offset zero: ");
+ for (i = 0; i < (int)orig_size; i++)
+ HDprintf("%s%d", i ? ", " : "", written[i]);
+ HDprintf("\n");
+ }
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* -------------------------
+ * Read initial data from dataset.
+ * -------------------------*/
+ ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved);
+ VRFY((ret >= 0), "H5Dread succeeded");
+ for (i = 0; i < (int)orig_size; i++)
+ if (written[i] != retrieved[i]) {
+ HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n", __LINE__, i,
+ written[i], i, retrieved[i]);
+ nerrors++;
+ }
+ if (VERBOSE_MED) {
+ MESG("read at offset zero: ");
+ for (i = 0; i < (int)orig_size; i++)
+ HDprintf("%s%d", i ? ", " : "", retrieved[i]);
+ HDprintf("\n");
+ }
+
+ /* -------------------------
+ * Extend the dataset & retrieve new dataspace
+ * -------------------------*/
+ ret = H5Dset_extent(dataset, &new_size);
+ VRFY((ret >= 0), "H5Dset_extent succeeded");
+ ret = H5Sclose(fs);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ fs = H5Dget_space(dataset);
+ VRFY((fs >= 0), "H5Dget_space succeeded");
+
+ /* -------------------------
+ * Write to the second half of the dataset
+ * -------------------------*/
+ for (i = 0; i < (int)orig_size; i++)
+ written[i] = (int)orig_size + i;
+ MESG("data array re-initialized");
+ if (VERBOSE_MED) {
+ MESG("writing at offset 10: ");
+ for (i = 0; i < (int)orig_size; i++)
+ HDprintf("%s%d", i ? ", " : "", written[i]);
+ HDprintf("\n");
+ }
+ ret = H5Sselect_hyperslab(fs, H5S_SELECT_SET, &orig_size, NULL, &one, &orig_size);
+ VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* -------------------------
+ * Read the new data
+ * -------------------------*/
+ ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved);
+ VRFY((ret >= 0), "H5Dread succeeded");
+ for (i = 0; i < (int)orig_size; i++)
+ if (written[i] != retrieved[i]) {
+ HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n", __LINE__, i,
+ written[i], i, retrieved[i]);
+ nerrors++;
+ }
+ if (VERBOSE_MED) {
+ MESG("read at offset 10: ");
+ for (i = 0; i < (int)orig_size; i++)
+ HDprintf("%s%d", i ? ", " : "", retrieved[i]);
+ HDprintf("\n");
+ }
+
+ /* Close dataset collectively */
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+
+ /* Close the file collectively */
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+}
+
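+/*
+ * Illustrative sketch, not exercised by the test suite: extend_writeInd2()
+ * above shows a subtle point -- after H5Dset_extent() the previously obtained
+ * file dataspace is stale and must be re-fetched before selecting the newly
+ * added region. The hypothetical helper below condenses that append pattern
+ * for a 1-D dataset of native ints.
+ */
+static herr_t
+sketch_extend_and_append(hid_t dset, hsize_t old_size, hsize_t new_size, const int *buf)
+{
+ hsize_t one = 1;
+ hsize_t nnew = new_size - old_size; /* number of appended elements */
+ hid_t file_space, mem_space;
+ herr_t ret;
+
+ /* grow the dataset, then refresh the file dataspace */
+ H5Dset_extent(dset, &new_size);
+ file_space = H5Dget_space(dset);
+
+ /* select only the tail region that was just added */
+ H5Sselect_hyperslab(file_space, H5S_SELECT_SET, &old_size, NULL, &one, &nnew);
+
+ mem_space = H5Screate_simple(1, &nnew, NULL);
+ ret = H5Dwrite(dset, H5T_NATIVE_INT, mem_space, file_space, H5P_DEFAULT, buf);
+
+ H5Sclose(mem_space);
+ H5Sclose(file_space);
+ return ret;
+}
+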
+/* Example of using the parallel HDF5 library to read an extendible dataset */
+void
+extend_readInd(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ DATATYPE *data_array2 = NULL; /* data buffer */
+ DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ const char *filename;
+
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Extend independent read test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+ data_array2 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded");
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
+
+ /* -------------------
+ * OPEN AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* open the file collectively */
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
+ VRFY((fid >= 0), "");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* open the dataset1 collectively */
+ dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "");
+
+ /* open another dataset collectively */
+ dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "");
+
+ /* Try to extend dataset1, which is open RDONLY. Should fail. */
+
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL);
+ VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded");
+ dims[0]++;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dset_extent(dataset1, dims);
+ }
+ H5E_END_TRY
+ VRFY((ret < 0), "H5Dset_extent failed as expected");
+
+ H5Sclose(file_dataspace);
+
+ /* Read dataset1 using BYROW pattern */
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ dataset_fill(start, block, data_origin1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
+ }
+
+ /* read data independently */
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ VRFY((ret == 0), "dataset1 read verified correct");
+ if (ret)
+ nerrors++;
+
+ H5Sclose(mem_dataspace);
+ H5Sclose(file_dataspace);
+
+ /* Read dataset2 using BYCOL pattern */
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset2);
+ VRFY((file_dataspace >= 0), "");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ dataset_fill(start, block, data_origin1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
+ }
+
+ /* read data independently */
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ VRFY((ret == 0), "dataset2 read verified correct");
+ if (ret)
+ nerrors++;
+
+ H5Sclose(mem_dataspace);
+ H5Sclose(file_dataspace);
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "");
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_array2)
+ HDfree(data_array2);
+ if (data_origin1)
+ HDfree(data_origin1);
+}
+
+/*
+ * Part 3--Collective read/write for extendible datasets.
+ */
+
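+/*
+ * Illustrative sketch, not exercised by the test suite: every collective test
+ * in this part builds the same kind of transfer property list -- collective
+ * MPI-IO, optionally switched to per-rank independent I/O inside the
+ * collective call when DXFER_INDEPENDENT_IO is being exercised. The
+ * hypothetical helper below shows just that setup.
+ */
+static hid_t
+sketch_collective_xfer_plist(hbool_t use_individual_io)
+{
+ hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
+
+ /* take the collective code path for the transfer... */
+ H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+
+ /* ...but optionally perform the low-level I/O independently on each rank */
+ if (use_individual_io)
+ H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
+
+ return dxpl;
+}
+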
+/*
+ * Example of using the parallel HDF5 library to create two extendible
+ * datasets in one HDF5 file with collective parallel MPIO access support.
+ * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within each
+ * dataset.
+ */
+
+void
+extend_writeAll(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ const char *filename;
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t max_dims[RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ hsize_t chunk_dims[RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
+
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK]; /* for hyperslab setting */
+ hsize_t stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Extend independent write test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* setup chunk-size. Make sure sizes are > 0 */
+ chunk_dims[0] = (hsize_t)chunkdim0;
+ chunk_dims[1] = (hsize_t)chunkdim1;
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+
+ /* -------------------
+ * START AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* Reduce the number of metadata cache slots, so that there are cache
+ * collisions during the raw data I/O on the chunked dataset. This stresses
+ * the metadata cache and tests for cache bugs. -QAK
+ */
+ {
+ int mdc_nelmts;
+ size_t rdcc_nelmts;
+ size_t rdcc_nbytes;
+ double rdcc_w0;
+
+ ret = H5Pget_cache(acc_tpl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0);
+ VRFY((ret >= 0), "H5Pget_cache succeeded");
+ mdc_nelmts = 4;
+ ret = H5Pset_cache(acc_tpl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);
+ VRFY((ret >= 0), "H5Pset_cache succeeded");
+ }
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* --------------------------------------------------------------
+ * Define the dimensions of the overall datasets and create them.
+ * ------------------------------------------------------------- */
+
+ /* set up dataset storage chunk sizes and creation property list */
+ if (VERBOSE_MED)
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
+ ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+ /* setup dimensionality object */
+ /* start out with no rows, extend it later. */
+ dims[0] = dims[1] = 0;
+ sid = H5Screate_simple(RANK, dims, max_dims);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* create an extendible dataset collectively */
+ dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
+
+ /* create another extendible dataset collectively */
+ dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
+
+ /* release resource */
+ H5Sclose(sid);
+ H5Pclose(dataset_pl);
+
+ /* -------------------------
+ * Test writing to dataset1
+ * -------------------------*/
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* put some trivial data in the data_array */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* Extend its current dim sizes before writing */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ ret = H5Dset_extent(dataset1, dims);
+ VRFY((ret >= 0), "H5Dset_extent succeeded");
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* release resource */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* -------------------------
+ * Test writing to dataset2
+ * -------------------------*/
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+
+ /* put some trivial data in the data_array */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* Try writing to dataset2 beyond its current dim sizes. Should fail. */
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset2);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* write data collectively. Should fail. */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ }
+ H5E_END_TRY
+ VRFY((ret < 0), "H5Dwrite failed as expected");
+
+ H5Sclose(file_dataspace);
+
+ /* Extend dataset2 and try again. Should succeed. */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ ret = H5Dset_extent(dataset2, dims);
+ VRFY((ret >= 0), "H5Dset_extent succeeded");
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset2);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* release resource */
+ ret = H5Sclose(file_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Sclose(mem_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Pclose(xfer_plist);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose2 succeeded");
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if (data_array1)
+ HDfree(data_array1);
+}
+
+/* Example of using the parallel HDF5 library to read an extendible dataset */
+void
+extend_readAll(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ const char *filename;
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ DATATYPE *data_array2 = NULL; /* data buffer */
+ DATATYPE *data_origin1 = NULL; /* expected data buffer */
+
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Extend independent read test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+ data_array2 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded");
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
+
+ /* -------------------
+ * OPEN AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* open the file collectively */
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
+ VRFY((fid >= 0), "");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* open the dataset1 collectively */
+ dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "");
+
+ /* open another dataset collectively */
+ dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "");
+
+ /* Try extend dataset1 which is open RDONLY. Should fail. */
+
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL);
+ VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded");
+ dims[0]++;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dset_extent(dataset1, dims);
+ }
+ H5E_END_TRY
+ VRFY((ret < 0), "H5Dset_extent failed as expected");
+
+ H5Sclose(file_dataspace);
+
+ /* Read dataset1 using BYROW pattern */
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ dataset_fill(start, block, data_origin1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ VRFY((ret == 0), "dataset1 read verified correct");
+ if (ret)
+ nerrors++;
+
+ H5Sclose(mem_dataspace);
+ H5Sclose(file_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* Read dataset2 using BYCOL pattern */
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset2);
+ VRFY((file_dataspace >= 0), "");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ dataset_fill(start, block, data_origin1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ VRFY((ret == 0), "dataset2 read verified correct");
+ if (ret)
+ nerrors++;
+
+ H5Sclose(mem_dataspace);
+ H5Sclose(file_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "");
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_array2)
+ HDfree(data_array2);
+ if (data_origin1)
+ HDfree(data_origin1);
+}
+
+#ifdef H5_HAVE_FILTER_DEFLATE
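+/*
+ * Strip any VFD prefix (e.g. "family:") from a test file name. Illustrative
+ * example with a hypothetical input: h5_rmprefix("multi:my_file.h5") returns
+ * a pointer to "my_file.h5"; names without a ':' are returned unchanged.
+ */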
+static const char *
+h5_rmprefix(const char *filename)
+{
+ const char *ret_ptr;
+
+ if ((ret_ptr = HDstrstr(filename, ":")) == NULL)
+ ret_ptr = filename;
+ else
+ ret_ptr++;
+
+ return (ret_ptr);
+}
+
+/*
+ * Example of using the parallel HDF5 library to read a compressed
+ * dataset in an HDF5 file with collective parallel access support.
+ */
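+/*
+ * Illustrative sketch (not part of the test): the minimal dcpl setup this
+ * test relies on -- a chunked layout plus the deflate filter -- assuming a
+ * rank-1 dataspace of `dim` elements and a chunk size of dim / 8:
+ *
+ *     hid_t   dcpl      = H5Pcreate(H5P_DATASET_CREATE);
+ *     hsize_t chunk_dim = dim / 8;
+ *     H5Pset_chunk(dcpl, 1, &chunk_dim);           // chunked layout, rank 1
+ *     H5Pset_deflate(dcpl, 9);                     // gzip, compression level 9
+ *     hid_t dset = H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT,
+ *                             dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ */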
+void
+compress_readAll(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t dataspace; /* Dataspace ID */
+ hid_t dataset; /* Dataset ID */
+ int rank = 1; /* Dataspace rank */
+ hsize_t dim = (hsize_t)dim0; /* Dataspace dimensions */
+ unsigned u; /* Local index variable */
+ unsigned chunk_opts; /* Chunk options */
+ unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
+ DATATYPE *data_read = NULL; /* data buffer */
+ DATATYPE *data_orig = NULL; /* expected data buffer */
+ const char *filename;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Collective chunked dataset read test on file %s\n", filename);
+
+ /* Retrieve MPI parameters */
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* Allocate data buffer */
+ data_orig = (DATATYPE *)HDmalloc((size_t)dim * sizeof(DATATYPE));
+ VRFY((data_orig != NULL), "data_origin1 HDmalloc succeeded");
+ data_read = (DATATYPE *)HDmalloc((size_t)dim * sizeof(DATATYPE));
+ VRFY((data_read != NULL), "data_array1 HDmalloc succeeded");
+
+ /* Initialize data buffers */
+ for (u = 0; u < dim; u++)
+ data_orig[u] = (DATATYPE)u;
+
+ /* Run test both with and without filters disabled on partial chunks */
+ for (disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1;
+ disable_partial_chunk_filters++) {
+ /* Process zero creates the file with a compressed, chunked dataset */
+ if (mpi_rank == 0) {
+ hsize_t chunk_dim; /* Chunk dimensions */
+
+ /* Create the file */
+ fid = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((fid > 0), "H5Fcreate succeeded");
+
+ /* Create property list for chunking and compression */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl > 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_layout(dcpl, H5D_CHUNKED);
+ VRFY((ret >= 0), "H5Pset_layout succeeded");
+
+ /* Use eight chunks */
+ chunk_dim = dim / 8;
+ ret = H5Pset_chunk(dcpl, rank, &chunk_dim);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+ /* Set chunk options appropriately */
+ if (disable_partial_chunk_filters) {
+ ret = H5Pget_chunk_opts(dcpl, &chunk_opts);
+ VRFY((ret >= 0), "H5Pget_chunk_opts succeeded");
+
+ chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
+
+ ret = H5Pset_chunk_opts(dcpl, chunk_opts);
+ VRFY((ret >= 0), "H5Pset_chunk_opts succeeded");
+ } /* end if */
+
+ ret = H5Pset_deflate(dcpl, 9);
+ VRFY((ret >= 0), "H5Pset_deflate succeeded");
+
+ /* Create dataspace */
+ dataspace = H5Screate_simple(rank, &dim, NULL);
+ VRFY((dataspace > 0), "H5Screate_simple succeeded");
+
+ /* Create dataset */
+ dataset =
+ H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dataset > 0), "H5Dcreate2 succeeded");
+
+ /* Write compressed data */
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_orig);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* Close objects */
+ ret = H5Pclose(dcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Sclose(dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ }
+
+ /* Wait for file to be created */
+ MPI_Barrier(comm);
+
+ /* -------------------
+ * OPEN AN HDF5 FILE
+ * -------------------*/
+
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* open the file collectively */
+ fid = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl);
+ VRFY((fid > 0), "H5Fopen succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* Open dataset with compressed chunks */
+ dataset = H5Dopen2(fid, "compressed_data", H5P_DEFAULT);
+ VRFY((dataset > 0), "H5Dopen2 succeeded");
+
+ /* Try reading & writing data */
+ if (dataset > 0) {
+ /* Create dataset transfer property list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist > 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* Try reading the data */
+ ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+ /* Verify data read */
+ for (u = 0; u < dim; u++)
+ if (data_orig[u] != data_read[u]) {
+ HDprintf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n", __LINE__,
+ (unsigned)u, data_orig[u], (unsigned)u, data_read[u]);
+ nerrors++;
+ }
+
+#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+#endif
+
+ ret = H5Pclose(xfer_plist);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ } /* end if */
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ } /* end for */
+
+ /* release data buffers */
+ if (data_read)
+ HDfree(data_read);
+ if (data_orig)
+ HDfree(data_orig);
+}
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+/*
+ * Part 4--Non-selection for chunked dataset
+ */
+
+/*
+ * Example of using the parallel HDF5 library to create chunked
+ * dataset in one HDF5 file with collective and independent parallel
+ * MPIO access support. The Datasets are of sizes dim0 x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within the
+ * dataset with the exception that one processor selects no element.
+ */
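+/*
+ * Illustrative sketch (assumptions: `mem_space` was created with this rank's
+ * block dimensions, `file_space` comes from H5Dget_space(), and `start`,
+ * `stride`, `count`, `block` describe this rank's slab): every rank takes
+ * part in the collective call, but rank 0 contributes zero elements by
+ * selecting "none" in both dataspaces:
+ *
+ *     if (mpi_rank == 0) {
+ *         H5Sselect_none(mem_space);
+ *         H5Sselect_none(file_space);
+ *     }
+ *     else
+ *         H5Sselect_hyperslab(file_space, H5S_SELECT_SET,
+ *                             start, stride, count, block);
+ *     H5Dwrite(dset, H5T_NATIVE_INT, mem_space, file_space, coll_dxpl, buf);
+ */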
+
+void
+none_selection_chunk(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ const char *filename;
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE *data_origin = NULL; /* data buffer */
+ DATATYPE *data_array = NULL; /* data buffer */
+ hsize_t chunk_dims[RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
+
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK]; /* for hyperslab setting */
+ hsize_t stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t mstart[RANK]; /* for data buffer in memory */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Extend independent write test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* setup chunk-size. Make sure sizes are > 0 */
+ chunk_dims[0] = (hsize_t)chunkdim0;
+ chunk_dims[1] = (hsize_t)chunkdim1;
+
+ /* -------------------
+ * START AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* --------------------------------------------------------------
+ * Define the dimensions of the overall datasets and create them.
+ * ------------------------------------------------------------- */
+
+ /* set up dataset storage chunk sizes and creation property list */
+ if (VERBOSE_MED)
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
+ ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+ /* setup dimensionality object */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* create an extendible dataset collectively */
+ dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
+
+ /* create another extendible dataset collectively */
+ dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
+
+ /* release resource */
+ H5Sclose(sid);
+ H5Pclose(dataset_pl);
+
+ /* -------------------------
+ * Test collective writing to dataset1
+ * -------------------------*/
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* allocate memory for data buffer. Only allocate enough buffer for
+ * each processor's data. */
+ if (mpi_rank) {
+ data_origin = (DATATYPE *)HDmalloc(block[0] * block[1] * sizeof(DATATYPE));
+ VRFY((data_origin != NULL), "data_origin HDmalloc succeeded");
+
+ data_array = (DATATYPE *)HDmalloc(block[0] * block[1] * sizeof(DATATYPE));
+ VRFY((data_array != NULL), "data_array HDmalloc succeeded");
+
+ /* put some trivial data in the data_array */
+ mstart[0] = mstart[1] = 0;
+ dataset_fill(mstart, block, data_origin);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(mstart, block, data_origin);
+ }
+ }
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* Process 0 has no selection */
+ if (!mpi_rank) {
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none succeeded");
+ }
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* Process 0 has no selection */
+ if (!mpi_rank) {
+ ret = H5Sselect_none(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none succeeded");
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_origin);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* read data independently */
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array);
+ VRFY((ret >= 0), "");
+
+ /* verify the read data with original expected data */
+ if (mpi_rank) {
+ ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin);
+ if (ret)
+ nerrors++;
+ }
+
+ /* -------------------------
+ * Test independent writing to dataset2
+ * -------------------------*/
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_INDEPENDENT);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+    /* write data independently */
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_origin);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* read data independently */
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array);
+ VRFY((ret >= 0), "");
+
+ /* verify the read data with original expected data */
+ if (mpi_rank) {
+ ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin);
+ if (ret)
+ nerrors++;
+ }
+
+ /* release resource */
+ ret = H5Sclose(file_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Sclose(mem_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Pclose(xfer_plist);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose2 succeeded");
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if (data_origin)
+ HDfree(data_origin);
+ if (data_array)
+ HDfree(data_array);
+}
+
+/* Function: test_actual_io_mode
+ *
+ * Purpose: tests one specific case of collective I/O and checks that the
+ * actual_chunk_opt_mode property and the actual_io_mode
+ * properties in the DXPL have the correct values.
+ *
+ * Input: selection_mode: changes the way processes select data from the space, as well
+ * as some dxpl flags to get collective I/O to break in different ways.
+ *
+ * The relevant I/O function and expected response for each mode:
+ * TEST_ACTUAL_IO_MULTI_CHUNK_IND:
+ * H5D_mpi_chunk_collective_io, each process reports independent I/O
+ *
+ * TEST_ACTUAL_IO_MULTI_CHUNK_COL:
+ * H5D_mpi_chunk_collective_io, each process reports collective I/O
+ *
+ * TEST_ACTUAL_IO_MULTI_CHUNK_MIX:
+ * H5D_mpi_chunk_collective_io, each process reports mixed I/O
+ *
+ * TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE:
+ * H5D_mpi_chunk_collective_io, processes disagree. The root reports
+ * collective, the rest report independent I/O
+ *
+ * TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND:
+ *       Same test as TEST_ACTUAL_IO_MULTI_CHUNK_IND, but set to go directly
+ *       to multi-chunk I/O without the process-number threshold calculation.
+ *     TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL:
+ *       Same test as TEST_ACTUAL_IO_MULTI_CHUNK_COL, but set to go directly
+ *       to multi-chunk I/O without the process-number threshold calculation.
+ *
+ * TEST_ACTUAL_IO_LINK_CHUNK:
+ * H5D_link_chunk_collective_io, processes report linked chunk I/O
+ *
+ * TEST_ACTUAL_IO_CONTIGUOUS:
+ * H5D__contig_collective_write or H5D__contig_collective_read
+ * each process reports contiguous collective I/O
+ *
+ * TEST_ACTUAL_IO_NO_COLLECTIVE:
+ * Simple independent I/O. This tests that the defaults are properly set.
+ *
+ * TEST_ACTUAL_IO_RESET:
+ *       Performs collective and then independent I/O with the same dxpl to
+ *       make sure the property is correctly reset to the default on each use.
+ *       Specifically, this test runs TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE
+ * (The most complex case that works on all builds) and then performs
+ * an independent read and write with the same dxpls.
+ *
+ * Note: DIRECT_MULTI_CHUNK_MIX and DIRECT_MULTI_CHUNK_MIX_DISAGREE
+ *       are not needed as they are covered by the DIRECT_CHUNK_MIX and
+ *       MULTI_CHUNK_MIX_DISAGREE cases. _DIRECT_ cases only test the
+ *       pathway to multi-chunk I/O via H5FD_MPIO_CHUNK_MULTI_IO instead of
+ *       the process-number threshold.
+ *
+ * Modification:
+ *     - Refactored to remove the multi-chunk-without-optimization test and
+ *       updated for testing the direct path to multi-chunk I/O.
+ * Programmer: Jonathan Kim
+ * Date: 2012-10-10
+ *
+ *
+ * Programmer: Jacob Gruber
+ * Date: 2011-04-06
+ */
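+/*
+ * Illustrative sketch (assumption: `dxpl` is the transfer property list that
+ * was just used in a collective H5Dwrite/H5Dread): the two query routines
+ * this test exercises are read back from the same dxpl after the I/O call
+ * returns:
+ *
+ *     H5D_mpio_actual_io_mode_t        io_mode;
+ *     H5D_mpio_actual_chunk_opt_mode_t opt_mode;
+ *     H5Pget_mpio_actual_io_mode(dxpl, &io_mode);
+ *     H5Pget_mpio_actual_chunk_opt_mode(dxpl, &opt_mode);
+ *     // e.g. io_mode == H5D_MPIO_CHUNK_COLLECTIVE on the collective chunk
+ *     // paths, or H5D_MPIO_NO_COLLECTIVE after independent I/O
+ */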
+static void
+test_actual_io_mode(int selection_mode)
+{
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
+ H5D_mpio_actual_io_mode_t actual_io_mode_write = H5D_MPIO_NO_COLLECTIVE;
+ H5D_mpio_actual_io_mode_t actual_io_mode_read = H5D_MPIO_NO_COLLECTIVE;
+ H5D_mpio_actual_io_mode_t actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
+ const char *filename;
+ const char *test_name;
+ hbool_t direct_multi_chunk_io;
+ hbool_t multi_chunk_io;
+ hbool_t is_chunked;
+ hbool_t is_collective;
+ int mpi_size = -1;
+ int mpi_rank = -1;
+ int length;
+ int *buffer;
+ int i;
+ MPI_Comm mpi_comm = MPI_COMM_NULL;
+ MPI_Info mpi_info = MPI_INFO_NULL;
+ hid_t fid = -1;
+ hid_t sid = -1;
+ hid_t dataset = -1;
+ hid_t data_type = H5T_NATIVE_INT;
+ hid_t fapl = -1;
+ hid_t mem_space = -1;
+ hid_t file_space = -1;
+ hid_t dcpl = -1;
+ hid_t dxpl_write = -1;
+ hid_t dxpl_read = -1;
+ hsize_t dims[RANK];
+ hsize_t chunk_dims[RANK];
+ hsize_t start[RANK];
+ hsize_t stride[RANK];
+ hsize_t count[RANK];
+ hsize_t block[RANK];
+ char message[256];
+ herr_t ret;
+
+ /* Set up some flags to make some future if statements slightly more readable */
+ direct_multi_chunk_io = (selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND ||
+ selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL);
+
+ /* Note: RESET performs the same tests as MULTI_CHUNK_MIX_DISAGREE and then
+ * tests independent I/O
+ */
+ multi_chunk_io =
+ (selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_IND ||
+ selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_COL ||
+ selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX ||
+ selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE || selection_mode == TEST_ACTUAL_IO_RESET);
+
+ is_chunked =
+ (selection_mode != TEST_ACTUAL_IO_CONTIGUOUS && selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE);
+
+ is_collective = selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE;
+
+ /* Set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ HDassert(mpi_size >= 1);
+
+ mpi_comm = MPI_COMM_WORLD;
+ mpi_info = MPI_INFO_NULL;
+
+ filename = (const char *)PARATESTFILE /* GetTestParameters() */;
+ HDassert(filename != NULL);
+
+ /* Setup the file access template */
+ fapl = create_faccess_plist(mpi_comm, mpi_info, facc_type);
+ VRFY((fapl >= 0), "create_faccess_plist() succeeded");
+
+ /* Create the file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Create the basic Space */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* Create the dataset creation plist */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl >= 0), "dataset creation plist created successfully");
+
+ /* If we are not testing contiguous datasets */
+ if (is_chunked) {
+ /* Set up chunk information. */
+ chunk_dims[0] = dims[0] / (hsize_t)mpi_size;
+ chunk_dims[1] = dims[1];
+ ret = H5Pset_chunk(dcpl, 2, chunk_dims);
+ VRFY((ret >= 0), "chunk creation property list succeeded");
+ }
+
+ /* Create the dataset */
+ dataset = H5Dcreate2(fid, "actual_io", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
+
+ /* Create the file dataspace */
+ file_space = H5Dget_space(dataset);
+ VRFY((file_space >= 0), "H5Dget_space succeeded");
+
+    /* Choose a selection method based on the type of I/O we want to occur,
+     * and also set up some selection-dependent test info. */
+ switch (selection_mode) {
+
+ /* Independent I/O with optimization */
+ case TEST_ACTUAL_IO_MULTI_CHUNK_IND:
+ case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND:
+ /* Since the dataset is chunked by row and each process selects a row,
+ * each process writes to a different chunk. This forces all I/O to be
+ * independent.
+ */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ test_name = "Multi Chunk - Independent";
+ actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
+ actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
+ break;
+
+ /* Collective I/O with optimization */
+ case TEST_ACTUAL_IO_MULTI_CHUNK_COL:
+ case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL:
+ /* The dataset is chunked by rows, so each process takes a column which
+ * spans all chunks. Since the processes write non-overlapping regular
+ * selections to each chunk, the operation is purely collective.
+ */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+
+ test_name = "Multi Chunk - Collective";
+ actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
+ if (mpi_size > 1)
+ actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
+ else
+ actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
+ break;
+
+ /* Mixed I/O with optimization */
+ case TEST_ACTUAL_IO_MULTI_CHUNK_MIX:
+ /* A chunk will be assigned collective I/O only if it is selected by each
+ * process. To get mixed I/O, have the root select all chunks and each
+ * subsequent process select the first and nth chunk. The first chunk,
+ * accessed by all, will be assigned collective I/O while each other chunk
+ * will be accessed only by the root and the nth process and will be
+ * assigned independent I/O. Each process will access one chunk collectively
+ * and at least one chunk independently, reporting mixed I/O.
+ */
+
+ if (mpi_rank == 0) {
+ /* Select the first column */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+ }
+ else {
+ /* Select the first and the nth chunk in the nth column */
+ block[0] = (hsize_t)(dim0 / mpi_size);
+ block[1] = (hsize_t)(dim1 / mpi_size);
+ count[0] = 2;
+ count[1] = 1;
+ stride[0] = (hsize_t)mpi_rank * block[0];
+ stride[1] = 1;
+ start[0] = 0;
+ start[1] = (hsize_t)mpi_rank * block[1];
+ }
+
+ test_name = "Multi Chunk - Mixed";
+ actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
+ actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED;
+ break;
+
+ /* RESET tests that the properties are properly reset to defaults each time I/O is
+ * performed. To achieve this, we have RESET perform collective I/O (which would change
+ * the values from the defaults) followed by independent I/O (which should report the
+ * default values). RESET doesn't need to have a unique selection, so we reuse
+         * MULTI_CHUNK_MIX_DISAGREE, which was chosen because it is a complex case that works
+ * on all builds. The independent section of RESET can be found at the end of this function.
+ */
+ case TEST_ACTUAL_IO_RESET:
+
+ /* Mixed I/O with optimization and internal disagreement */
+ case TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE:
+ /* A chunk will be assigned collective I/O only if it is selected by each
+ * process. To get mixed I/O with disagreement, assign process n to the
+ * first chunk and the nth chunk. The first chunk, selected by all, is
+             * assigned collective I/O, while each other process gets independent I/O.
+             * Since the root process will only access the first chunk, it will report
+ * collective I/O. The subsequent processes will access the first chunk
+ * collectively, and their other chunk independently, reporting mixed I/O.
+ */
+
+ if (mpi_rank == 0) {
+ /* Select the first chunk in the first column */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+ block[0] = block[0] / (hsize_t)mpi_size;
+ }
+ else {
+ /* Select the first and the nth chunk in the nth column */
+ block[0] = (hsize_t)(dim0 / mpi_size);
+ block[1] = (hsize_t)(dim1 / mpi_size);
+ count[0] = 2;
+ count[1] = 1;
+ stride[0] = (hsize_t)mpi_rank * block[0];
+ stride[1] = 1;
+ start[0] = 0;
+ start[1] = (hsize_t)mpi_rank * block[1];
+ }
+
+ /* If the testname was not already set by the RESET case */
+ if (selection_mode == TEST_ACTUAL_IO_RESET)
+ test_name = "RESET";
+ else
+ test_name = "Multi Chunk - Mixed (Disagreement)";
+
+ actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
+ if (mpi_size > 1) {
+ if (mpi_rank == 0)
+ actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
+ else
+ actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED;
+ }
+ else
+ actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
+
+ break;
+
+ /* Linked Chunk I/O */
+ case TEST_ACTUAL_IO_LINK_CHUNK:
+ /* Nothing special; link chunk I/O is forced in the dxpl settings. */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ test_name = "Link Chunk";
+ actual_chunk_opt_mode_expected = H5D_MPIO_LINK_CHUNK;
+ actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
+ break;
+
+ /* Contiguous Dataset */
+ case TEST_ACTUAL_IO_CONTIGUOUS:
+            /* A non-overlapping, regular selection in a contiguous dataset leads to
+             * collective I/O */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ test_name = "Contiguous";
+ actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
+ actual_io_mode_expected = H5D_MPIO_CONTIGUOUS_COLLECTIVE;
+ break;
+
+ case TEST_ACTUAL_IO_NO_COLLECTIVE:
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ test_name = "Independent";
+ actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
+ actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
+ break;
+
+ default:
+ test_name = "Undefined Selection Mode";
+ actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
+ actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
+ break;
+ }
+
+ ret = H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* Create a memory dataspace mirroring the dataset and select the same hyperslab
+ * as in the file space.
+ */
+ mem_space = H5Screate_simple(RANK, dims, NULL);
+ VRFY((mem_space >= 0), "mem_space created");
+
+ ret = H5Sselect_hyperslab(mem_space, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* Get the number of elements in the selection */
+ length = dim0 * dim1;
+
+ /* Allocate and initialize the buffer */
+ buffer = (int *)HDmalloc(sizeof(int) * (size_t)length);
+ VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
+ for (i = 0; i < length; i++)
+ buffer[i] = i;
+
+ /* Set up the dxpl for the write */
+ dxpl_write = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
+
+ /* Set collective I/O properties in the dxpl. */
+ if (is_collective) {
+ /* Request collective I/O */
+ ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* Set the threshold number of processes per chunk to twice mpi_size.
+ * This will prevent the threshold from ever being met, thus forcing
+         * multi-chunk I/O instead of link-chunk I/O.
+         * This exercises the default (threshold-based) code path.
+ */
+ if (multi_chunk_io) {
+ /* force multi-chunk-io by threshold */
+ ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl_write, (unsigned)mpi_size * 2);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded");
+
+ /* set this to manipulate testing scenario about allocating processes
+ * to chunks */
+ ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl_write, (unsigned)99);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded");
+ }
+
+ /* Set directly go to multi-chunk-io without threshold calc. */
+ if (direct_multi_chunk_io) {
+ /* set for multi chunk io by property*/
+ ret = H5Pset_dxpl_mpio_chunk_opt(dxpl_write, H5FD_MPIO_CHUNK_MULTI_IO);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ }
+ }
+
+ /* Make a copy of the dxpl to test the read operation */
+ dxpl_read = H5Pcopy(dxpl_write);
+ VRFY((dxpl_read >= 0), "H5Pcopy succeeded");
+
+ /* Write */
+ ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
+ VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
+
+ /* Retrieve Actual io values */
+ ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write);
+ VRFY((ret >= 0), "retrieving actual io mode succeeded");
+
+ ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write);
+ VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded");
+
+ /* Read */
+ ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
+ VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
+
+ /* Retrieve Actual io values */
+ ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read);
+ VRFY((ret >= 0), "retrieving actual io mode succeeded");
+
+ ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read);
+ VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded");
+
+ /* Check write vs read */
+ VRFY((actual_io_mode_read == actual_io_mode_write),
+ "reading and writing are the same for actual_io_mode");
+ VRFY((actual_chunk_opt_mode_read == actual_chunk_opt_mode_write),
+ "reading and writing are the same for actual_chunk_opt_mode");
+
+ /* Test values */
+ if (actual_chunk_opt_mode_expected != (H5D_mpio_actual_chunk_opt_mode_t)-1 &&
+ actual_io_mode_expected != (H5D_mpio_actual_io_mode_t)-1) {
+ HDsnprintf(message, sizeof(message), "Actual Chunk Opt Mode has the correct value for %s.\n",
+ test_name);
+ VRFY((actual_chunk_opt_mode_write == actual_chunk_opt_mode_expected), message);
+ HDsnprintf(message, sizeof(message), "Actual IO Mode has the correct value for %s.\n", test_name);
+ VRFY((actual_io_mode_write == actual_io_mode_expected), message);
+ }
+ else {
+ HDfprintf(stderr, "%s %d -> (%d,%d)\n", test_name, mpi_rank, actual_chunk_opt_mode_write,
+ actual_io_mode_write);
+ }
+
+ /* To test that the property is successfully reset to the default, we perform some
+ * independent I/O after the collective I/O
+ */
+ if (selection_mode == TEST_ACTUAL_IO_RESET) {
+ if (mpi_rank == 0) {
+ /* Switch to independent io */
+ ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ ret = H5Pset_dxpl_mpio(dxpl_read, H5FD_MPIO_INDEPENDENT);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* Write */
+ ret = H5Dwrite(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_write, buffer);
+ VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
+
+ /* Check Properties */
+ ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write);
+ VRFY((ret >= 0), "retrieving actual io mode succeeded");
+ ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write);
+ VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded");
+
+ VRFY(actual_chunk_opt_mode_write == H5D_MPIO_NO_CHUNK_OPTIMIZATION,
+ "actual_chunk_opt_mode has correct value for reset write (independent)");
+ VRFY(actual_io_mode_write == H5D_MPIO_NO_COLLECTIVE,
+ "actual_io_mode has correct value for reset write (independent)");
+
+ /* Read */
+ ret = H5Dread(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_read, buffer);
+ VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
+
+ /* Check Properties */
+ ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read);
+ VRFY((ret >= 0), "retrieving actual io mode succeeded");
+ ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read);
+ VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded");
+
+ VRFY(actual_chunk_opt_mode_read == H5D_MPIO_NO_CHUNK_OPTIMIZATION,
+ "actual_chunk_opt_mode has correct value for reset read (independent)");
+ VRFY(actual_io_mode_read == H5D_MPIO_NO_COLLECTIVE,
+ "actual_io_mode has correct value for reset read (independent)");
+ }
+ }
+
+ /* Release some resources */
+ ret = H5Sclose(sid);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Pclose(dcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Pclose(dxpl_write);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Pclose(dxpl_read);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Sclose(mem_space);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Sclose(file_space);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ HDfree(buffer);
+ return;
+}
+
+/* Function: actual_io_mode_tests
+ *
+ * Purpose: Tests all possible cases of the actual_io_mode property.
+ *
+ * Programmer: Jacob Gruber
+ * Date: 2011-04-06
+ */
+void
+actual_io_mode_tests(void)
+{
+ int mpi_size = -1;
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Only run these tests if selection I/O is not being used - selection I/O
+ * bypasses this IO mode decision - it's effectively always multi chunk
+ * currently */
+ if (/* !H5_use_selection_io_g */ TRUE) {
+ test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE);
+
+ /*
+ * Test multi-chunk-io via proc_num threshold
+ */
+ test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND);
+ test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL);
+
+ /* The Multi Chunk Mixed test requires at least three processes. */
+ if (mpi_size > 2)
+ test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX);
+ else
+ HDfprintf(stdout, "Multi Chunk Mixed test requires 3 processes minimum\n");
+
+ test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE);
+
+ /*
+ * Test multi-chunk-io via setting direct property
+ */
+ test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND);
+ test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL);
+
+ test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK);
+ test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS);
+
+ test_actual_io_mode(TEST_ACTUAL_IO_RESET);
+ }
+
+ return;
+}
+
+/*
+ * Function: test_no_collective_cause_mode
+ *
+ * Purpose:
+ * tests cases for broken collective I/O and checks that the
+ * H5Pget_mpio_no_collective_cause properties in the DXPL have the correct values.
+ *
+ * Input:
+ * selection_mode: various mode to cause broken collective I/O
+ * Note: Originally, each TEST case is supposed to be used alone.
+ * After some discussion, this is updated to take multiple TEST cases
+ *             with '|'.  However, there is no error checking for combined
+ *             test cases, so the tester is responsible for understanding and
+ *             providing a proper combination of TEST cases when needed.
+ *
+ *
+ * TEST_COLLECTIVE:
+ * Test for regular collective I/O without cause of breaking.
+ * Just to test normal behavior.
+ *
+ * TEST_SET_INDEPENDENT:
+ * Test for Independent I/O as the cause of breaking collective I/O.
+ *
+ * TEST_DATATYPE_CONVERSION:
+ * Test for Data Type Conversion as the cause of breaking collective I/O.
+ *
+ * TEST_DATA_TRANSFORMS:
+ * Test for Data Transform feature as the cause of breaking collective I/O.
+ *
+ * TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES:
+ * Test for NULL dataspace as the cause of breaking collective I/O.
+ *
+ * TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT:
+ * Test for Compact layout as the cause of breaking collective I/O.
+ *
+ * TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL:
+ *   Test for external-file storage as the cause of breaking collective I/O.
+ *
+ * Programmer: Jonathan Kim
+ * Date: Aug, 2012
+ */
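+/*
+ * Illustrative sketch (assumption: `dxpl` requested collective I/O and was
+ * just used for a write): the cause bitmasks checked by this test are
+ * retrieved with H5Pget_mpio_no_collective_cause and compared against the
+ * H5D_MPIO_* cause flags:
+ *
+ *     uint32_t local_cause, global_cause;
+ *     H5Pget_mpio_no_collective_cause(dxpl, &local_cause, &global_cause);
+ *     if (local_cause & H5D_MPIO_DATATYPE_CONVERSION)
+ *         printf("collective I/O was broken by a datatype conversion\n");
+ *     if (local_cause == H5D_MPIO_COLLECTIVE)
+ *         printf("collective I/O was performed as requested\n");
+ */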
+#ifdef LATER
+#define DSET_NOCOLCAUSE "nocolcause"
+#endif
+#define FILE_EXTERNAL "nocolcause_extern.data"
+static void
+test_no_collective_cause_mode(int selection_mode)
+{
+ uint32_t no_collective_cause_local_write = 0;
+ uint32_t no_collective_cause_local_read = 0;
+ uint32_t no_collective_cause_local_expected = 0;
+ uint32_t no_collective_cause_global_write = 0;
+ uint32_t no_collective_cause_global_read = 0;
+ uint32_t no_collective_cause_global_expected = 0;
+
+ const char *filename;
+ const char *test_name;
+ hbool_t is_chunked = 1;
+ hbool_t is_independent = 0;
+ int mpi_size = -1;
+ int mpi_rank = -1;
+ int length;
+ int *buffer;
+ int i;
+ MPI_Comm mpi_comm;
+ MPI_Info mpi_info;
+ hid_t fid = -1;
+ hid_t sid = -1;
+ hid_t dataset = -1;
+ hid_t data_type = H5T_NATIVE_INT;
+ hid_t fapl = -1;
+ hid_t dcpl = -1;
+ hid_t dxpl_write = -1;
+ hid_t dxpl_read = -1;
+ hsize_t dims[RANK];
+ hid_t mem_space = -1;
+ hid_t file_space = -1;
+ hsize_t chunk_dims[RANK];
+ herr_t ret;
+ /* set to global value as default */
+ int l_facc_type = facc_type;
+ char message[256];
+
+ /* Set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ HDassert(mpi_size >= 1);
+
+ mpi_comm = MPI_COMM_WORLD;
+ mpi_info = MPI_INFO_NULL;
+
+ /* Create the dataset creation plist */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl >= 0), "dataset creation plist created successfully");
+
+ if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT) {
+ ret = H5Pset_layout(dcpl, H5D_COMPACT);
+ VRFY((ret >= 0), "set COMPACT layout succeeded");
+ is_chunked = 0;
+ }
+
+ if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) {
+ ret = H5Pset_external(dcpl, FILE_EXTERNAL, (off_t)0, H5F_UNLIMITED);
+ VRFY((ret >= 0), "set EXTERNAL file layout succeeded");
+ is_chunked = 0;
+ }
+
+ if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) {
+ sid = H5Screate(H5S_NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+ is_chunked = 0;
+ }
+ else {
+ /* Create the basic Space */
+ /* if this is a compact dataset, create a small dataspace that does not exceed 64K */
+ if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT) {
+ dims[0] = ROW_FACTOR * 6;
+ dims[1] = COL_FACTOR * 6;
+ }
+ else {
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ }
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+ }
+
+ filename = (const char *)PARATESTFILE /* GetTestParameters() */;
+ HDassert(filename != NULL);
+
+ /* Setup the file access template */
+ fapl = create_faccess_plist(mpi_comm, mpi_info, l_facc_type);
+ VRFY((fapl >= 0), "create_faccess_plist() succeeded");
+
+ /* Create the file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* If we are not testing contiguous datasets */
+ if (is_chunked) {
+ /* Set up chunk information. */
+ chunk_dims[0] = dims[0] / (hsize_t)mpi_size;
+ chunk_dims[1] = dims[1];
+ ret = H5Pset_chunk(dcpl, 2, chunk_dims);
+ VRFY((ret >= 0), "chunk creation property list succeeded");
+ }
+
+ /* Create the dataset */
+ dataset = H5Dcreate2(fid, "nocolcause", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
+
+ /*
+ * Set expected causes and some tweaks based on the type of test
+ */
+ if (selection_mode & TEST_DATATYPE_CONVERSION) {
+ test_name = "Broken Collective I/O - Datatype Conversion";
+ no_collective_cause_local_expected |= H5D_MPIO_DATATYPE_CONVERSION;
+ no_collective_cause_global_expected |= H5D_MPIO_DATATYPE_CONVERSION;
+ /* set different sign to trigger type conversion */
+ data_type = H5T_NATIVE_UINT;
+ }
+
+ if (selection_mode & TEST_DATA_TRANSFORMS) {
+ test_name = "Broken Collective I/O - DATA Transforms";
+ no_collective_cause_local_expected |= H5D_MPIO_DATA_TRANSFORMS;
+ no_collective_cause_global_expected |= H5D_MPIO_DATA_TRANSFORMS;
+ }
+
+ if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) {
+ test_name = "Broken Collective I/O - No Simple or Scalar DataSpace";
+ no_collective_cause_local_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES;
+ no_collective_cause_global_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES;
+ }
+
+ if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT ||
+ selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) {
+ test_name = "Broken Collective I/O - No CONTI or CHUNKED Dataset";
+ no_collective_cause_local_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET;
+ no_collective_cause_global_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET;
+ }
+
+ if (selection_mode & TEST_COLLECTIVE) {
+ test_name = "Broken Collective I/O - Not Broken";
+ no_collective_cause_local_expected = H5D_MPIO_COLLECTIVE;
+ no_collective_cause_global_expected = H5D_MPIO_COLLECTIVE;
+ }
+
+ if (selection_mode & TEST_SET_INDEPENDENT) {
+ test_name = "Broken Collective I/O - Independent";
+ no_collective_cause_local_expected = H5D_MPIO_SET_INDEPENDENT;
+ no_collective_cause_global_expected = H5D_MPIO_SET_INDEPENDENT;
+ /* switch to independent io */
+ is_independent = 1;
+ }
+
+ /* use all spaces for certain tests */
+ if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES ||
+ selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) {
+ file_space = H5S_ALL;
+ mem_space = H5S_ALL;
+ }
+ else {
+ /* Get the file dataspace */
+ file_space = H5Dget_space(dataset);
+ VRFY((file_space >= 0), "H5Dget_space succeeded");
+
+ /* Create the memory dataspace */
+ mem_space = H5Screate_simple(RANK, dims, NULL);
+ VRFY((mem_space >= 0), "mem_space created");
+ }
+
+ /* Get the number of elements in the selection */
+ length = (int)(dims[0] * dims[1]);
+
+ /* Allocate and initialize the buffer */
+ buffer = (int *)HDmalloc(sizeof(int) * (size_t)length);
+ VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
+ for (i = 0; i < length; i++)
+ buffer[i] = i;
+
+ /* Set up the dxpl for the write */
+ dxpl_write = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
+
+ if (is_independent) {
+ /* Set Independent I/O */
+ ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ }
+ else {
+ /* Set Collective I/O */
+ ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ }
+
+ if (selection_mode & TEST_DATA_TRANSFORMS) {
+ ret = H5Pset_data_transform(dxpl_write, "x+1");
+ VRFY((ret >= 0), "H5Pset_data_transform succeeded");
+ }
+
+ /*---------------------
+ * Test Write access
+ *---------------------*/
+
+ /* Write */
+ ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
+ VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
+
+ /* Get the cause of broken collective I/O */
+ ret = H5Pget_mpio_no_collective_cause(dxpl_write, &no_collective_cause_local_write,
+ &no_collective_cause_global_write);
+ VRFY((ret >= 0), "retrieving no collective cause succeeded");
+
+ /*---------------------
+ * Test Read access
+ *---------------------*/
+
+ /* Make a copy of the dxpl to test the read operation */
+ dxpl_read = H5Pcopy(dxpl_write);
+ VRFY((dxpl_read >= 0), "H5Pcopy succeeded");
+
+ /* Read */
+ ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer);
+
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
+ VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
+
+ /* Get the cause of broken collective I/O */
+ ret = H5Pget_mpio_no_collective_cause(dxpl_read, &no_collective_cause_local_read,
+ &no_collective_cause_global_read);
+ VRFY((ret >= 0), "retrieving no collective cause succeeded");
+
+ /* Check write vs read */
+ VRFY((no_collective_cause_local_read == no_collective_cause_local_write),
+ "reading and writing are the same for local cause of Broken Collective I/O");
+ VRFY((no_collective_cause_global_read == no_collective_cause_global_write),
+ "reading and writing are the same for global cause of Broken Collective I/O");
+
+ /* Test values */
+ HDmemset(message, 0, sizeof(message));
+ HDsnprintf(message, sizeof(message),
+ "Local cause of Broken Collective I/O has the correct value for %s.\n", test_name);
+ VRFY((no_collective_cause_local_write == no_collective_cause_local_expected), message);
+ HDmemset(message, 0, sizeof(message));
+ HDsnprintf(message, sizeof(message),
+ "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name);
+ VRFY((no_collective_cause_global_write == no_collective_cause_global_expected), message);
+
+ /* Release some resources */
+ if (sid)
+ H5Sclose(sid);
+ if (dcpl)
+ H5Pclose(dcpl);
+ if (dxpl_write)
+ H5Pclose(dxpl_write);
+ if (dxpl_read)
+ H5Pclose(dxpl_read);
+ if (dataset)
+ H5Dclose(dataset);
+ if (mem_space)
+ H5Sclose(mem_space);
+ if (file_space)
+ H5Sclose(file_space);
+ if (fid)
+ H5Fclose(fid);
+ HDfree(buffer);
+
+ /* clean up external file */
+ if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL)
+ H5Fdelete(FILE_EXTERNAL, fapl);
+
+ if (fapl)
+ H5Pclose(fapl);
+
+ return;
+}
+
+/* Function: no_collective_cause_tests
+ *
+ * Purpose: Tests cases for broken collective IO.
+ *
+ * Programmer: Jonathan Kim
+ * Date: Aug, 2012
+ */
+void
+no_collective_cause_tests(void)
+{
+ /*
+ * Test individual cause
+ */
+ test_no_collective_cause_mode(TEST_COLLECTIVE);
+ test_no_collective_cause_mode(TEST_SET_INDEPENDENT);
+ test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION);
+ test_no_collective_cause_mode(TEST_DATA_TRANSFORMS);
+ test_no_collective_cause_mode(TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES);
+ test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT);
+ test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL);
+
+ /*
+ * Test combined causes
+ */
+ test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION);
+ test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS);
+ test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION |
+ TEST_DATA_TRANSFORMS);
+
+ return;
+}
+
+/*
+ * Test consistency semantics of atomic mode
+ */
+
+/*
+ * Example of using the parallel HDF5 library to create a dataset,
+ * where process 0 writes and the other processes read at the same
+ * time. If atomic mode is set correctly, the other processes should
+ * read the old values in the dataset or the new ones.
+ */
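+/*
+ * Illustrative sketch (assumption: `fid` was opened through the MPI-IO VFD
+ * on a communicator shared by all ranks): atomicity is toggled and queried
+ * on the file handle itself, and the call is collective:
+ *
+ *     hbool_t flag = FALSE;
+ *     H5Fset_mpi_atomicity(fid, TRUE);      // all ranks must make this call
+ *     H5Fget_mpi_atomicity(fid, &flag);     // flag is now TRUE
+ *     // readers and writers can then rely on MPI atomic-mode semantics,
+ *     // typically combined with an MPI_Barrier to order the accesses
+ */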
+
+void
+dataset_atomicity(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t dataset1; /* Dataset IDs */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ int *write_buf = NULL; /* data buffer */
+ int *read_buf = NULL; /* data buffer */
+ int buf_size;
+ hid_t dataset2;
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* Memory dataspace ID */
+ hsize_t start[RANK];
+ hsize_t stride[RANK];
+ hsize_t count[RANK];
+ hsize_t block[RANK];
+ const char *filename;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+ int i, j, k;
+ hbool_t atomicity = FALSE;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ dim0 = 64;
+ dim1 = 32;
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (facc_type != FACC_MPIO) {
+ HDprintf("Atomicity tests will not work without the MPIO VFD\n");
+ return;
+ }
+ if (VERBOSE_MED)
+ HDprintf("atomic writes to file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ buf_size = dim0 * dim1;
+ /* allocate memory for data buffer */
+ write_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int));
+ VRFY((write_buf != NULL), "write_buf HDcalloc succeeded");
+ /* allocate memory for data buffer */
+ read_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int));
+ VRFY((read_buf != NULL), "read_buf HDcalloc succeeded");
+
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* setup dimensionality object */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* create datasets */
+ dataset1 = H5Dcreate2(fid, DATASETNAME5, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
+
+ dataset2 = H5Dcreate2(fid, DATASETNAME6, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
+
+ /* initialize datasets to 0s */
+ if (0 == mpi_rank) {
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf);
+ VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf);
+ VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
+ }
+
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Sclose(sid);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+ MPI_Barrier(comm);
+
+ /* make sure setting atomicity fails on a serial file ID */
+ /* file locking allows only one file open (serial) for writing */
+ if (MAINPROCESS) {
+ fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
+ VRFY((fid >= 0), "H5Fopen succeeded");
+
+ /* should fail */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Fset_mpi_atomicity(fid, TRUE);
+ }
+ H5E_END_TRY
+ VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed");
+
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ }
+
+ MPI_Barrier(comm);
+
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* open the file collectively */
+ fid = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl);
+ VRFY((fid >= 0), "H5Fopen succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ ret = H5Fset_mpi_atomicity(fid, TRUE);
+ VRFY((ret >= 0), "H5Fset_mpi_atomicity succeeded");
+
+ /* open dataset1 (contiguous case) */
+ dataset1 = H5Dopen2(fid, DATASETNAME5, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dopen2 succeeded");
+
+ if (0 == mpi_rank) {
+ for (i = 0; i < buf_size; i++) {
+ write_buf[i] = 5;
+ }
+ }
+ else {
+ for (i = 0; i < buf_size; i++) {
+ read_buf[i] = 8;
+ }
+ }
+
+ /* check that the atomicity flag is set */
+ ret = H5Fget_mpi_atomicity(fid, &atomicity);
+ VRFY((ret >= 0), "atomcity get failed");
+ VRFY((atomicity == TRUE), "atomcity set failed");
+
+ MPI_Barrier(comm);
+
+ /* Process 0 writes contiguously to the entire dataset */
+ if (0 == mpi_rank) {
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf);
+ VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+ }
+ /* The other processes read the entire dataset */
+ else {
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf);
+ VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
+ }
+
+ if (VERBOSE_MED) {
+ i = 0;
+ j = 0;
+ k = 0;
+ for (i = 0; i < dim0; i++) {
+ HDprintf("\n");
+ for (j = 0; j < dim1; j++)
+ HDprintf("%d ", read_buf[k++]);
+ }
+ }
+
+ /* The processes that read the dataset must either read all values
+ as 0 (read happened before process 0 wrote to dataset 1), or 5
+ (read happened after process 0 wrote to dataset 1) */
+ if (0 != mpi_rank) {
+ int compare = read_buf[0];
+
+        VRFY((compare == 0 || compare == 5),
+             "Atomicity test failed: value read should be 0 or 5");
+ for (i = 1; i < buf_size; i++) {
+ if (read_buf[i] != compare) {
+ HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, i,
+ read_buf[i], compare);
+ nerrors++;
+ }
+ }
+ }
+
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5D close succeeded");
+
+ /* release data buffers */
+ if (write_buf)
+ HDfree(write_buf);
+ if (read_buf)
+ HDfree(read_buf);
+
+ /* open dataset2 (non-contiguous case) */
+ dataset2 = H5Dopen2(fid, DATASETNAME6, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dopen2 succeeded");
+
+ /* allocate memory for data buffer */
+ write_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int));
+ VRFY((write_buf != NULL), "write_buf HDcalloc succeeded");
+ /* allocate memory for data buffer */
+ read_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int));
+ VRFY((read_buf != NULL), "read_buf HDcalloc succeeded");
+
+ for (i = 0; i < buf_size; i++) {
+ write_buf[i] = 5;
+ }
+ for (i = 0; i < buf_size; i++) {
+ read_buf[i] = 8;
+ }
+
+ atomicity = FALSE;
+ /* check that the atomicity flag is set */
+ ret = H5Fget_mpi_atomicity(fid, &atomicity);
+ VRFY((ret >= 0), "atomcity get failed");
+ VRFY((atomicity == TRUE), "atomcity set failed");
+
+ block[0] = (hsize_t)(dim0 / mpi_size - 1);
+ block[1] = (hsize_t)(dim1 / mpi_size - 1);
+ stride[0] = block[0] + 1;
+ stride[1] = block[1] + 1;
+ count[0] = (hsize_t)mpi_size;
+ count[1] = (hsize_t)mpi_size;
+ start[0] = 0;
+ start[1] = 0;
+
+ /* create a file dataspace */
+ file_dataspace = H5Dget_space(dataset2);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace */
+ mem_dataspace = H5Screate_simple(RANK, dims, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ MPI_Barrier(comm);
+
+ /* Process 0 writes to the dataset */
+ if (0 == mpi_rank) {
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, write_buf);
+ VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
+ }
+ /* All processes wait for the write to finish. This works because
+ atomicity is set to true */
+ MPI_Barrier(comm);
+ /* The other processes read the entire dataset */
+ if (0 != mpi_rank) {
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, read_buf);
+ VRFY((ret >= 0), "H5Dread dataset2 succeeded");
+ }
+
+ if (VERBOSE_MED) {
+ if (mpi_rank == 1) {
+ i = 0;
+ j = 0;
+ k = 0;
+ for (i = 0; i < dim0; i++) {
+ HDprintf("\n");
+ for (j = 0; j < dim1; j++)
+ HDprintf("%d ", read_buf[k++]);
+ }
+ HDprintf("\n");
+ }
+ }
+
+    /* Because of the barrier above, the processes that read the dataset must
+       see all values as 5 (the read happened after process 0 wrote to dataset2) */
+ if (0 != mpi_rank) {
+ int compare;
+ i = 0;
+ j = 0;
+ k = 0;
+
+ compare = 5;
+
+ for (i = 0; i < dim0; i++) {
+ if (i >= mpi_rank * ((int)block[0] + 1)) {
+ break;
+ }
+ if ((i + 1) % ((int)block[0] + 1) == 0) {
+ k += dim1;
+ continue;
+ }
+ for (j = 0; j < dim1; j++) {
+ if (j >= mpi_rank * ((int)block[1] + 1)) {
+ k += dim1 - mpi_rank * ((int)block[1] + 1);
+ break;
+ }
+ if ((j + 1) % ((int)block[1] + 1) == 0) {
+ k++;
+ continue;
+ }
+ else if (compare != read_buf[k]) {
+ HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank,
+ k, read_buf[k], compare);
+ nerrors++;
+ }
+ k++;
+ }
+ }
+ }
+
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Sclose(file_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Sclose(mem_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+
+ /* release data buffers */
+ if (write_buf)
+ HDfree(write_buf);
+ if (read_buf)
+ HDfree(read_buf);
+
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+}
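+
+/* A minimal sketch of the MPI atomicity toggle exercised above, assuming a
+ * file opened through the MPI-IO VFD; `comm`, `info`, and `filename` are
+ * placeholder arguments. Kept under #if 0 so it stays out of the build.
+ */
+#if 0
+static void
+mpi_atomicity_sketch(MPI_Comm comm, MPI_Info info, const char *filename)
+{
+    hid_t   fapl_id;
+    hid_t   fid;
+    hbool_t atomicity = FALSE;
+
+    /* Atomic access only applies to files opened through the MPI-IO VFD */
+    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+    H5Pset_fapl_mpio(fapl_id, comm, info);
+    fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
+
+    /* Turn atomic access on, then read the flag back to confirm it */
+    H5Fset_mpi_atomicity(fid, TRUE);
+    H5Fget_mpi_atomicity(fid, &atomicity);
+
+    /* ... concurrent H5Dwrite()/H5Dread() calls now see all-or-nothing updates ... */
+
+    H5Fclose(fid);
+    H5Pclose(fapl_id);
+}
+#endif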
+
+/* Function: test_dense_attr
+ *
+ * Purpose: Test cases for writing dense attributes in parallel
+ *
+ * Programmer: Quincey Koziol
+ * Date: April, 2013
+ */
+void
+test_dense_attr(void)
+{
+ int mpi_size, mpi_rank;
+ hid_t fpid, fid;
+ hid_t gid, gpid;
+ hid_t atFileSpace, atid;
+ hsize_t atDims[1] = {10000};
+ herr_t status;
+ const char *filename;
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, group, dataset, or attribute aren't supported with "
+ "this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* get filename */
+ filename = (const char *)PARATESTFILE /* GetTestParameters() */;
+ HDassert(filename != NULL);
+
+ fpid = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fpid > 0), "H5Pcreate succeeded");
+ status = H5Pset_libver_bounds(fpid, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ VRFY((status >= 0), "H5Pset_libver_bounds succeeded");
+ status = H5Pset_fapl_mpio(fpid, MPI_COMM_WORLD, MPI_INFO_NULL);
+ VRFY((status >= 0), "H5Pset_fapl_mpio succeeded");
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fpid);
+ VRFY((fid > 0), "H5Fcreate succeeded");
+ status = H5Pclose(fpid);
+ VRFY((status >= 0), "H5Pclose succeeded");
+
+ gpid = H5Pcreate(H5P_GROUP_CREATE);
+ VRFY((gpid > 0), "H5Pcreate succeeded");
+ status = H5Pset_attr_phase_change(gpid, 0, 0);
+ VRFY((status >= 0), "H5Pset_attr_phase_change succeeded");
+ gid = H5Gcreate2(fid, "foo", H5P_DEFAULT, gpid, H5P_DEFAULT);
+ VRFY((gid > 0), "H5Gcreate2 succeeded");
+ status = H5Pclose(gpid);
+ VRFY((status >= 0), "H5Pclose succeeded");
+
+ atFileSpace = H5Screate_simple(1, atDims, NULL);
+ VRFY((atFileSpace > 0), "H5Screate_simple succeeded");
+ atid = H5Acreate2(gid, "bar", H5T_STD_U64LE, atFileSpace, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((atid > 0), "H5Acreate succeeded");
+ status = H5Sclose(atFileSpace);
+ VRFY((status >= 0), "H5Sclose succeeded");
+
+ status = H5Aclose(atid);
+ VRFY((status >= 0), "H5Aclose succeeded");
+
+ status = H5Gclose(gid);
+ VRFY((status >= 0), "H5Gclose succeeded");
+ status = H5Fclose(fid);
+ VRFY((status >= 0), "H5Fclose succeeded");
+
+ return;
+}
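+
+/* A minimal sketch of the H5Pset_attr_phase_change() setting used by
+ * test_dense_attr() above: max_compact = 0 and min_dense = 0 push every
+ * attribute straight into dense storage. `fid` is a placeholder for an open
+ * file; the block is kept under #if 0 as a reference only.
+ */
+#if 0
+static hid_t
+dense_attr_group_sketch(hid_t fid)
+{
+    hid_t gcpl_id;
+    hid_t gid;
+
+    gcpl_id = H5Pcreate(H5P_GROUP_CREATE);
+
+    /* 0 compact slots and a 0 demotion threshold: attributes are always dense */
+    H5Pset_attr_phase_change(gcpl_id, 0, 0);
+
+    gid = H5Gcreate2(fid, "dense_attr_group", H5P_DEFAULT, gcpl_id, H5P_DEFAULT);
+    H5Pclose(gcpl_id);
+
+    return gid; /* caller closes the group with H5Gclose() */
+}
+#endif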
diff --git a/testpar/API/t_file.c b/testpar/API/t_file.c
new file mode 100644
index 0000000..936454a
--- /dev/null
+++ b/testpar/API/t_file.c
@@ -0,0 +1,1032 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Parallel tests for file operations
+ */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+#if 0
+#include "H5CXprivate.h" /* API Contexts */
+#include "H5Iprivate.h"
+#include "H5PBprivate.h"
+
+/*
+ * This file needs to access private information from the H5F package.
+ */
+#define H5AC_FRIEND /*suppress error about including H5ACpkg */
+#include "H5ACpkg.h"
+#define H5C_FRIEND /*suppress error about including H5Cpkg */
+#include "H5Cpkg.h"
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#define H5F_TESTING
+#include "H5Fpkg.h"
+#define H5MF_FRIEND /*suppress error about including H5MFpkg */
+#include "H5MFpkg.h"
+#endif
+
+#define NUM_DSETS 5
+
+int mpi_size, mpi_rank;
+
+#if 0
+static int create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_strategy);
+static int open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t page_size,
+ size_t page_buffer_size);
+#endif
+
+/*
+ * Test file access with a communicator other than COMM_WORLD.
+ * Split COMM_WORLD into two: one communicator (even_comm) contains the
+ * original processes of even rank, the other (odd_comm) contains the
+ * original processes of odd rank. Processes in even_comm create a file and
+ * then close it, using even_comm. Processes in odd_comm just do a barrier
+ * using odd_comm. Then they all do a barrier using COMM_WORLD.
+ * If the file creation and close do not perform the correct collective
+ * actions according to the communicator argument, the processes will sooner
+ * or later freeze up due to mismatched barriers.
+ */
+void
+test_split_comm_access(void)
+{
+ MPI_Comm comm;
+ MPI_Info info = MPI_INFO_NULL;
+ int is_old, mrc;
+ int newrank, newprocs;
+ hid_t fid; /* file IDs */
+ hid_t acc_tpl; /* File access properties */
+ herr_t ret; /* generic return value */
+ const char *filename;
+
+ filename = (const char *)PARATESTFILE /* GetTestParameters()*/;
+ if (VERBOSE_MED)
+ HDprintf("Split Communicator access test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ is_old = mpi_rank % 2;
+ mrc = MPI_Comm_split(MPI_COMM_WORLD, is_old, mpi_rank, &comm);
+ VRFY((mrc == MPI_SUCCESS), "");
+ MPI_Comm_size(comm, &newprocs);
+ MPI_Comm_rank(comm, &newrank);
+
+ if (is_old) {
+ /* odd-rank processes */
+ mrc = MPI_Barrier(comm);
+ VRFY((mrc == MPI_SUCCESS), "");
+ }
+ else {
+ /* even-rank processes */
+ int sub_mpi_rank; /* rank in the sub-comm */
+ MPI_Comm_rank(comm, &sub_mpi_rank);
+
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* close the file */
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "");
+
+ /* delete the test file */
+ ret = H5Fdelete(filename, acc_tpl);
+ VRFY((ret >= 0), "H5Fdelete succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+ }
+ mrc = MPI_Comm_free(&comm);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free succeeded");
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "final MPI_Barrier succeeded");
+}
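+
+/* A minimal sketch of the even/odd split described above, assuming MPI is
+ * already initialized and `filename` is a placeholder: each sub-communicator
+ * performs its own collective operation, so a mix-up would eventually hang at
+ * the world barrier. Inactive reference block only.
+ */
+#if 0
+static void
+split_comm_sketch(const char *filename)
+{
+    MPI_Comm sub_comm;
+    int      world_rank;
+    int      is_odd;
+
+    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
+    is_odd = world_rank % 2;
+
+    /* Color by parity: even ranks share one communicator, odd ranks the other */
+    MPI_Comm_split(MPI_COMM_WORLD, is_odd, world_rank, &sub_comm);
+
+    if (!is_odd) {
+        /* Even ranks create and close a file collectively over sub_comm */
+        hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+        hid_t fid;
+
+        H5Pset_fapl_mpio(fapl_id, sub_comm, MPI_INFO_NULL);
+        fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+        H5Fclose(fid);
+        H5Pclose(fapl_id);
+    }
+    else {
+        /* Odd ranks only synchronize among themselves */
+        MPI_Barrier(sub_comm);
+    }
+
+    MPI_Comm_free(&sub_comm);
+    MPI_Barrier(MPI_COMM_WORLD);
+}
+#endif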
+
+#if 0
+void
+test_page_buffer_access(void)
+{
+ hid_t file_id = -1; /* File ID */
+ hid_t fcpl, fapl;
+ size_t page_count = 0;
+ int i, num_elements = 200;
+ haddr_t raw_addr, meta_addr;
+ int *data;
+ H5F_t *f = NULL;
+ herr_t ret; /* generic return value */
+ const char *filename;
+ hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ filename = (const char *)GetTestParameters();
+
+ if (VERBOSE_MED)
+ HDprintf("Page Buffer Usage in Parallel %s\n", filename);
+
+ fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl >= 0), "create_faccess_plist succeeded");
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ VRFY((fcpl >= 0), "");
+
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)0);
+ VRFY((ret == 0), "");
+ ret = H5Pset_file_space_page_size(fcpl, sizeof(int) * 128);
+ VRFY((ret == 0), "");
+ ret = H5Pset_page_buffer_size(fapl, sizeof(int) * 100000, 0, 0);
+ VRFY((ret == 0), "");
+
+ /* This should fail because collective metadata writes are not supported with page buffering */
+ H5E_BEGIN_TRY
+ {
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
+ }
+ H5E_END_TRY;
+ VRFY((file_id < 0), "H5Fcreate failed");
+
+ /* disable collective metadata writes for page buffering to work */
+ ret = H5Pset_coll_metadata_write(fapl, FALSE);
+ VRFY((ret >= 0), "");
+
+ ret = create_file(filename, fcpl, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
+ VRFY((ret == 0), "");
+ ret = open_file(filename, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, sizeof(int) * 100,
+ sizeof(int) * 100000);
+ VRFY((ret == 0), "");
+
+ ret = create_file(filename, fcpl, fapl, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
+ VRFY((ret == 0), "");
+ ret = open_file(filename, fapl, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, sizeof(int) * 100,
+ sizeof(int) * 100000);
+ VRFY((ret == 0), "");
+
+ ret = H5Pset_file_space_page_size(fcpl, sizeof(int) * 100);
+ VRFY((ret == 0), "");
+
+ data = (int *)HDmalloc(sizeof(int) * (size_t)num_elements);
+
+ /* initialize all the elements to have a value of -1 */
+ for (i = 0; i < num_elements; i++)
+ data[i] = -1;
+ if (MAINPROCESS) {
+ hid_t fapl_self = H5I_INVALID_HID;
+ fapl_self = create_faccess_plist(MPI_COMM_SELF, MPI_INFO_NULL, facc_type);
+
+ ret = H5Pset_page_buffer_size(fapl_self, sizeof(int) * 1000, 0, 0);
+ VRFY((ret == 0), "");
+ /* collective metadata writes do not work with page buffering */
+ ret = H5Pset_coll_metadata_write(fapl_self, FALSE);
+ VRFY((ret >= 0), "");
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl_self);
+ VRFY((file_id >= 0), "");
+
+ /* Push API context */
+ ret = H5CX_push();
+ VRFY((ret == 0), "H5CX_push()");
+ api_ctx_pushed = TRUE;
+
+ /* Get a pointer to the internal file object */
+ f = (H5F_t *)H5I_object(file_id);
+
+ VRFY((f->shared->page_buf != NULL), "Page Buffer created with 1 process");
+
+ /* allocate space for 200 raw elements */
+ raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int) * (size_t)num_elements);
+ VRFY((raw_addr != HADDR_UNDEF), "");
+
+ /* allocate space for 200 metadata elements */
+ meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, sizeof(int) * (size_t)num_elements);
+ VRFY((meta_addr != HADDR_UNDEF), "");
+
+ page_count = 0;
+
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data);
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * (size_t)num_elements, data);
+ VRFY((ret == 0), "");
+
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ /* update the first 50 elements */
+ for (i = 0; i < 50; i++)
+ data[i] = i;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
+ H5Eprint2(H5E_DEFAULT, stderr);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ page_count += 2;
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ /* update the second 50 elements */
+ for (i = 0; i < 50; i++)
+ data[i] = i + 50;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 50), sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 50), sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ /* update 100 - 200 */
+ for (i = 0; i < 100; i++)
+ data[i] = i + 100;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 100), sizeof(int) * 100, data);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 100), sizeof(int) * 100, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ ret = H5PB_flush(f->shared);
+ VRFY((ret == 0), "");
+
+ /* read elements 0 - 200 */
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 200, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i = 0; i < 200; i++)
+ VRFY((data[i] == i), "Read different values than written");
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 200, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i = 0; i < 200; i++)
+ VRFY((data[i] == i), "Read different values than written");
+
+ /* read elements 0 - 50 */
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i = 0; i < 50; i++)
+ VRFY((data[i] == i), "Read different values than written");
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i = 0; i < 50; i++)
+ VRFY((data[i] == i), "Read different values than written");
+
+ /* close the file */
+ ret = H5Fclose(file_id);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ ret = H5Pclose(fapl_self);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* Pop API context */
+ if (api_ctx_pushed) {
+ ret = H5CX_pop(FALSE);
+ VRFY((ret == 0), "H5CX_pop()");
+ api_ctx_pushed = FALSE;
+ }
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if (mpi_size > 1) {
+ ret = H5Pset_page_buffer_size(fapl, sizeof(int) * 1000, 0, 0);
+ VRFY((ret == 0), "");
+ /* collective metadata writes do not work with page buffering */
+ ret = H5Pset_coll_metadata_write(fapl, FALSE);
+ VRFY((ret >= 0), "");
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
+ VRFY((file_id >= 0), "");
+
+ /* Push API context */
+ ret = H5CX_push();
+ VRFY((ret == 0), "H5CX_push()");
+ api_ctx_pushed = TRUE;
+
+ /* Get a pointer to the internal file object */
+ f = (H5F_t *)H5I_object(file_id);
+
+ VRFY((f->shared->page_buf != NULL), "Page Buffer created with 1 process");
+
+ /* allocate space for 200 raw elements */
+ raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int) * (size_t)num_elements);
+ VRFY((raw_addr != HADDR_UNDEF), "");
+ /* allocate space for 200 metadata elements */
+ meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, sizeof(int) * (size_t)num_elements);
+ VRFY((meta_addr != HADDR_UNDEF), "");
+
+ page_count = 0;
+
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * (size_t)num_elements, data);
+ VRFY((ret == 0), "");
+
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ /* update the first 50 elements */
+ for (i = 0; i < 50; i++)
+ data[i] = i;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ /* update the second 50 elements */
+ for (i = 0; i < 50; i++)
+ data[i] = i + 50;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 50), sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 50), sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ /* update 100 - 200 */
+ for (i = 0; i < 100; i++)
+ data[i] = i + 100;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 100), sizeof(int) * 100, data);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 100), sizeof(int) * 100, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((ret == 0), "");
+
+ /* read elements 0 - 200 */
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 200, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i = 0; i < 200; i++)
+ VRFY((data[i] == i), "Read different values than written");
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 200, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i = 0; i < 200; i++)
+ VRFY((data[i] == i), "Read different values than written");
+
+ /* read elements 0 - 50 */
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i = 0; i < 50; i++)
+ VRFY((data[i] == i), "Read different values than written");
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ page_count += 1;
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i = 0; i < 50; i++)
+ VRFY((data[i] == i), "Read different values than written");
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ /* reset the first 50 elements to -1*/
+ for (i = 0; i < 50; i++)
+ data[i] = -1;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ /* read elements 0 - 50 */
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i = 0; i < 50; i++)
+ VRFY((data[i] == -1), "Read different values than written");
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i = 0; i < 50; i++)
+ VRFY((data[i] == -1), "Read different values than written");
+
+ /* close the file */
+ ret = H5Fclose(file_id);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ }
+
+ ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Pclose(fcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* Pop API context */
+ if (api_ctx_pushed) {
+ ret = H5CX_pop(FALSE);
+ VRFY((ret == 0), "H5CX_pop()");
+ api_ctx_pushed = FALSE;
+ }
+
+ HDfree(data);
+ data = NULL;
+ MPI_Barrier(MPI_COMM_WORLD);
+}
+
+static int
+create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_strategy)
+{
+ hid_t file_id, dset_id, grp_id;
+ hid_t sid, mem_dataspace;
+ hsize_t start[RANK];
+ hsize_t count[RANK];
+ hsize_t stride[RANK];
+ hsize_t block[RANK];
+ DATATYPE *data_array = NULL;
+ hsize_t dims[RANK], i;
+ hsize_t num_elements;
+ int k;
+ char dset_name[20];
+ H5F_t *f = NULL;
+ H5C_t *cache_ptr = NULL;
+ H5AC_cache_config_t config;
+ hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
+ herr_t ret;
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
+ VRFY((file_id >= 0), "");
+
+ ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((ret == 0), "");
+
+ /* Push API context */
+ ret = H5CX_push();
+ VRFY((ret == 0), "H5CX_push()");
+ api_ctx_pushed = TRUE;
+
+ f = (H5F_t *)H5I_object(file_id);
+ VRFY((f != NULL), "");
+
+ cache_ptr = f->shared->cache;
+ VRFY((cache_ptr->magic == H5C__H5C_T_MAGIC), "");
+
+ cache_ptr->ignore_tags = TRUE;
+ H5C_stats__reset(cache_ptr);
+ config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+
+ ret = H5AC_get_cache_auto_resize_config(cache_ptr, &config);
+ VRFY((ret == 0), "");
+
+ config.metadata_write_strategy = metadata_write_strategy;
+
+ ret = H5AC_set_cache_auto_resize_config(cache_ptr, &config);
+ VRFY((ret == 0), "");
+
+ grp_id = H5Gcreate2(file_id, "GROUP", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((grp_id >= 0), "");
+
+ dims[0] = (hsize_t)(ROW_FACTOR * mpi_size);
+ dims[1] = (hsize_t)(COL_FACTOR * mpi_size);
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* Each process takes a slabs of rows. */
+ block[0] = dims[0] / (hsize_t)mpi_size;
+ block[1] = dims[1];
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
+
+ num_elements = block[0] * block[1];
+ /* allocate memory for data buffer */
+ data_array = (DATATYPE *)HDmalloc(num_elements * sizeof(DATATYPE));
+ VRFY((data_array != NULL), "data_array HDmalloc succeeded");
+ /* put some trivial data in the data_array */
+ for (i = 0; i < num_elements; i++)
+ data_array[i] = mpi_rank + 1;
+
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(1, &num_elements, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ for (k = 0; k < NUM_DSETS; k++) {
+ HDsnprintf(dset_name, sizeof(dset_name), "D1dset%d", k);
+ dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "");
+ ret = H5Dclose(dset_id);
+ VRFY((ret == 0), "");
+
+ HDsnprintf(dset_name, sizeof(dset_name), "D2dset%d", k);
+ dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "");
+ ret = H5Dclose(dset_id);
+ VRFY((ret == 0), "");
+
+ HDsnprintf(dset_name, sizeof(dset_name), "D3dset%d", k);
+ dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "");
+ ret = H5Dclose(dset_id);
+ VRFY((ret == 0), "");
+
+ HDsnprintf(dset_name, sizeof(dset_name), "dset%d", k);
+ dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "");
+
+ ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
+ VRFY((ret == 0), "");
+
+ ret = H5Dclose(dset_id);
+ VRFY((ret == 0), "");
+
+ HDmemset(data_array, 0, num_elements * sizeof(DATATYPE));
+ dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "");
+
+ ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
+ VRFY((ret == 0), "");
+
+ ret = H5Dclose(dset_id);
+ VRFY((ret == 0), "");
+
+ for (i = 0; i < num_elements; i++)
+ VRFY((data_array[i] == mpi_rank + 1), "Dataset Verify failed");
+
+ HDsnprintf(dset_name, sizeof(dset_name), "D1dset%d", k);
+ ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
+ VRFY((ret == 0), "");
+ HDsnprintf(dset_name, sizeof(dset_name), "D2dset%d", k);
+ ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
+ VRFY((ret == 0), "");
+ HDsnprintf(dset_name, sizeof(dset_name), "D3dset%d", k);
+ ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
+ VRFY((ret == 0), "");
+ }
+
+ ret = H5Gclose(grp_id);
+ VRFY((ret == 0), "");
+ ret = H5Fclose(file_id);
+ VRFY((ret == 0), "");
+ ret = H5Sclose(sid);
+ VRFY((ret == 0), "");
+ ret = H5Sclose(mem_dataspace);
+ VRFY((ret == 0), "");
+
+ /* Pop API context */
+ if (api_ctx_pushed) {
+ ret = H5CX_pop(FALSE);
+ VRFY((ret == 0), "H5CX_pop()");
+ api_ctx_pushed = FALSE;
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ HDfree(data_array);
+ return 0;
+} /* create_file */
+
+static int
+open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t page_size,
+ size_t page_buffer_size)
+{
+ hid_t file_id, dset_id, grp_id, grp_id2;
+ hid_t sid, mem_dataspace;
+ DATATYPE *data_array = NULL;
+ hsize_t dims[RANK];
+ hsize_t start[RANK];
+ hsize_t count[RANK];
+ hsize_t stride[RANK];
+ hsize_t block[RANK];
+ int i, k, ndims;
+ hsize_t num_elements;
+ char dset_name[20];
+ H5F_t *f = NULL;
+ H5C_t *cache_ptr = NULL;
+ H5AC_cache_config_t config;
+ hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
+ herr_t ret;
+
+ config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ ret = H5Pget_mdc_config(fapl, &config);
+ VRFY((ret == 0), "");
+
+ config.metadata_write_strategy = metadata_write_strategy;
+
+    ret = H5Pset_mdc_config(fapl, &config);
+ VRFY((ret == 0), "");
+
+ file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl);
+ H5Eprint2(H5E_DEFAULT, stderr);
+ VRFY((file_id >= 0), "");
+
+ /* Push API context */
+ ret = H5CX_push();
+ VRFY((ret == 0), "H5CX_push()");
+ api_ctx_pushed = TRUE;
+
+ ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((ret == 0), "");
+
+ f = (H5F_t *)H5I_object(file_id);
+ VRFY((f != NULL), "");
+
+ cache_ptr = f->shared->cache;
+ VRFY((cache_ptr->magic == H5C__H5C_T_MAGIC), "");
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ VRFY((f->shared->page_buf != NULL), "");
+ VRFY((f->shared->page_buf->page_size == page_size), "");
+ VRFY((f->shared->page_buf->max_size == page_buffer_size), "");
+
+ grp_id = H5Gopen2(file_id, "GROUP", H5P_DEFAULT);
+ VRFY((grp_id >= 0), "");
+
+ dims[0] = (hsize_t)(ROW_FACTOR * mpi_size);
+ dims[1] = (hsize_t)(COL_FACTOR * mpi_size);
+
+ /* Each process takes a slabs of rows. */
+ block[0] = dims[0] / (hsize_t)mpi_size;
+ block[1] = dims[1];
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
+
+ num_elements = block[0] * block[1];
+ /* allocate memory for data buffer */
+ data_array = (DATATYPE *)HDmalloc(num_elements * sizeof(DATATYPE));
+ VRFY((data_array != NULL), "data_array HDmalloc succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(1, &num_elements, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ for (k = 0; k < NUM_DSETS; k++) {
+ HDsnprintf(dset_name, sizeof(dset_name), "dset%d", k);
+ dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "");
+
+ sid = H5Dget_space(dset_id);
+ VRFY((dset_id >= 0), "H5Dget_space succeeded");
+
+ ndims = H5Sget_simple_extent_dims(sid, dims, NULL);
+ VRFY((ndims == 2), "H5Sget_simple_extent_dims succeeded");
+ VRFY(dims[0] == (hsize_t)(ROW_FACTOR * mpi_size), "Wrong dataset dimensions");
+ VRFY(dims[1] == (hsize_t)(COL_FACTOR * mpi_size), "Wrong dataset dimensions");
+
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
+ VRFY((ret >= 0), "");
+
+ ret = H5Dclose(dset_id);
+ VRFY((ret >= 0), "");
+ ret = H5Sclose(sid);
+ VRFY((ret == 0), "");
+
+ for (i = 0; i < (int)num_elements; i++)
+ VRFY((data_array[i] == mpi_rank + 1), "Dataset Verify failed");
+ }
+
+ grp_id2 = H5Gcreate2(file_id, "GROUP/GROUP2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((grp_id2 >= 0), "");
+ ret = H5Gclose(grp_id2);
+ VRFY((ret == 0), "");
+
+ ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((ret == 0), "");
+
+ MPI_Barrier(MPI_COMM_WORLD);
+    /* Walk the metadata cache hash table and expunge every clean entry that
+     * is neither pinned nor protected.
+     */
+ for (i = 0; i < H5C__HASH_TABLE_LEN; i++) {
+ H5C_cache_entry_t *entry_ptr = NULL;
+
+ entry_ptr = cache_ptr->index[i];
+
+ while (entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->is_dirty == FALSE);
+
+ if (!entry_ptr->is_pinned && !entry_ptr->is_protected) {
+ ret = H5AC_expunge_entry(f, entry_ptr->type, entry_ptr->addr, 0);
+ VRFY((ret == 0), "");
+ }
+
+ entry_ptr = entry_ptr->ht_next;
+ }
+ }
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ grp_id2 = H5Gopen2(file_id, "GROUP/GROUP2", H5P_DEFAULT);
+ H5Eprint2(H5E_DEFAULT, stderr);
+ VRFY((grp_id2 >= 0), "");
+ ret = H5Gclose(grp_id2);
+ H5Eprint2(H5E_DEFAULT, stderr);
+ VRFY((ret == 0), "");
+
+ ret = H5Gclose(grp_id);
+ VRFY((ret == 0), "");
+ ret = H5Fclose(file_id);
+ VRFY((ret == 0), "");
+ ret = H5Sclose(mem_dataspace);
+ VRFY((ret == 0), "");
+
+ /* Pop API context */
+ if (api_ctx_pushed) {
+ ret = H5CX_pop(FALSE);
+ VRFY((ret == 0), "H5CX_pop()");
+ api_ctx_pushed = FALSE;
+ }
+
+ HDfree(data_array);
+
+ return nerrors;
+}
+#endif
+
+/*
+ * NOTE: See HDFFV-10894 and add tests later to verify MPI-specific properties in the
+ * incoming fapl that could conflict with the existing values in H5F_shared_t on
+ * multiple opens of the same file.
+ */
+void
+test_file_properties(void)
+{
+ hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */
+ hid_t fapl_id = H5I_INVALID_HID; /* File access plist */
+ hid_t fapl_copy_id = H5I_INVALID_HID; /* File access plist */
+ hbool_t is_coll;
+ htri_t are_equal;
+ const char *filename;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ MPI_Comm comm_out = MPI_COMM_NULL;
+ MPI_Info info_out = MPI_INFO_NULL;
+ herr_t ret; /* Generic return value */
+ int mpi_ret; /* MPI return value */
+ int cmp; /* Compare value */
+
+ /* set up MPI parameters */
+ mpi_ret = MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ VRFY((mpi_ret >= 0), "MPI_Comm_size succeeded");
+ mpi_ret = MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ VRFY((mpi_ret >= 0), "MPI_Comm_rank succeeded");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ filename = (const char *)PARATESTFILE /* GetTestParameters() */;
+
+ mpi_ret = MPI_Info_create(&info);
+ VRFY((mpi_ret >= 0), "MPI_Info_create succeeded");
+ mpi_ret = MPI_Info_set(info, "hdf_info_prop1", "xyz");
+ VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set");
+
+ /* setup file access plist */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id != H5I_INVALID_HID), "H5Pcreate");
+ ret = H5Pset_fapl_mpio(fapl_id, comm, info);
+ VRFY((ret >= 0), "H5Pset_fapl_mpio");
+
+ /* Check getting and setting MPI properties
+ * (for use in VOL connectors, not the MPI-I/O VFD)
+ */
+ ret = H5Pset_mpi_params(fapl_id, comm, info);
+ VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
+ ret = H5Pget_mpi_params(fapl_id, &comm_out, &info_out);
+ VRFY((ret >= 0), "H5Pget_mpi_params succeeded");
+
+ /* Check the communicator */
+ VRFY((comm != comm_out), "Communicators should not be bitwise identical");
+ cmp = MPI_UNEQUAL;
+ mpi_ret = MPI_Comm_compare(comm, comm_out, &cmp);
+ VRFY((ret >= 0), "MPI_Comm_compare succeeded");
+ VRFY((cmp == MPI_CONGRUENT), "Communicators should be congruent via MPI_Comm_compare");
+
+ /* Check the info object */
+ VRFY((info != info_out), "Info objects should not be bitwise identical");
+
+ /* Free the obtained comm and info object */
+ mpi_ret = MPI_Comm_free(&comm_out);
+ VRFY((mpi_ret >= 0), "MPI_Comm_free succeeded");
+ mpi_ret = MPI_Info_free(&info_out);
+ VRFY((mpi_ret >= 0), "MPI_Info_free succeeded");
+
+ /* Copy the fapl and ensure it's equal to the original */
+ fapl_copy_id = H5Pcopy(fapl_id);
+ VRFY((fapl_copy_id != H5I_INVALID_HID), "H5Pcopy");
+ are_equal = H5Pequal(fapl_id, fapl_copy_id);
+ VRFY((TRUE == are_equal), "H5Pequal");
+
+ /* Add a property to the copy and ensure it's different now */
+ mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "abc");
+ VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set");
+ ret = H5Pset_mpi_params(fapl_copy_id, comm, info);
+ VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
+ are_equal = H5Pequal(fapl_id, fapl_copy_id);
+ VRFY((FALSE == are_equal), "H5Pequal");
+
+ /* Add a property with the same key but a different value to the original
+ * and ensure they are still different.
+ */
+ mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "ijk");
+ VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set");
+ ret = H5Pset_mpi_params(fapl_id, comm, info);
+ VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
+ are_equal = H5Pequal(fapl_id, fapl_copy_id);
+ VRFY((FALSE == are_equal), "H5Pequal");
+
+ /* Set the second property in the original to the same
+ * value as the copy and ensure they are the same now.
+ */
+ mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "abc");
+ VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set");
+ ret = H5Pset_mpi_params(fapl_id, comm, info);
+ VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
+ are_equal = H5Pequal(fapl_id, fapl_copy_id);
+ VRFY((TRUE == are_equal), "H5Pequal");
+
+ /* create the file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded");
+
+ /* verify settings for file access properties */
+
+ /* Collective metadata writes */
+ ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
+ VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
+ VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata writes");
+
+ /* Collective metadata read API calling requirement */
+ ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
+ VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
+ VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata API calls requirement");
+
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+ /* Open the file with the MPI-IO driver */
+ ret = H5Pset_fapl_mpio(fapl_id, comm, info);
+ VRFY((ret >= 0), "H5Pset_fapl_mpio failed");
+ fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
+ VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded");
+
+ /* verify settings for file access properties */
+
+ /* Collective metadata writes */
+ ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
+ VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
+ VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata writes");
+
+ /* Collective metadata read API calling requirement */
+ ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
+ VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
+ VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata API calls requirement");
+
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+ /* Open the file with the MPI-IO driver w/ collective settings */
+ ret = H5Pset_fapl_mpio(fapl_id, comm, info);
+ VRFY((ret >= 0), "H5Pset_fapl_mpio failed");
+ /* Collective metadata writes */
+ ret = H5Pset_coll_metadata_write(fapl_id, TRUE);
+ VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
+ /* Collective metadata read API calling requirement */
+ ret = H5Pset_all_coll_metadata_ops(fapl_id, TRUE);
+ VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
+ fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
+ VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded");
+
+ /* verify settings for file access properties */
+
+ /* Collective metadata writes */
+ ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
+ VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
+ VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata writes");
+
+ /* Collective metadata read API calling requirement */
+ ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
+ VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
+ VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata API calls requirement");
+
+ /* close fapl and retrieve it from file */
+ ret = H5Pclose(fapl_id);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ fapl_id = H5I_INVALID_HID;
+
+ fapl_id = H5Fget_access_plist(fid);
+ VRFY((fapl_id != H5I_INVALID_HID), "H5P_FILE_ACCESS");
+
+ /* verify settings for file access properties */
+
+ /* Collective metadata writes */
+ ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
+ VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
+ VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata writes");
+
+ /* Collective metadata read API calling requirement */
+ ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
+ VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
+ VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata API calls requirement");
+
+ /* close file */
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+ /* Release file-access plist */
+ ret = H5Pclose(fapl_id);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Pclose(fapl_copy_id);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* Free the MPI info object */
+ mpi_ret = MPI_Info_free(&info);
+ VRFY((mpi_ret >= 0), "MPI_Info_free succeeded");
+
+} /* end test_file_properties() */
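+
+/* A minimal sketch of the H5Pset_mpi_params()/H5Pget_mpi_params() round trip
+ * verified above, assuming `fapl_id` is an existing file access plist: the
+ * getter returns duplicated handles, so they compare as MPI_CONGRUENT rather
+ * than MPI_IDENT, and the caller must free them. Kept under #if 0.
+ */
+#if 0
+static void
+mpi_params_sketch(hid_t fapl_id)
+{
+    MPI_Comm comm_out = MPI_COMM_NULL;
+    MPI_Info info_out = MPI_INFO_NULL;
+    int      cmp      = MPI_UNEQUAL;
+
+    H5Pset_mpi_params(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL);
+    H5Pget_mpi_params(fapl_id, &comm_out, &info_out);
+
+    /* The plist stores a duplicate, so the handles are congruent, not identical */
+    MPI_Comm_compare(MPI_COMM_WORLD, comm_out, &cmp);
+    VRFY((cmp == MPI_CONGRUENT), "returned communicator is congruent");
+
+    /* Handles returned by the getter belong to the caller */
+    MPI_Comm_free(&comm_out);
+    if (info_out != MPI_INFO_NULL)
+        MPI_Info_free(&info_out);
+}
+#endif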
+
+void
+test_delete(void)
+{
+ hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */
+ hid_t fapl_id = H5I_INVALID_HID; /* File access plist */
+ const char *filename = NULL;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ htri_t is_hdf5 = FAIL; /* Whether a file is an HDF5 file */
+ herr_t ret; /* Generic return value */
+
+ filename = (const char *)PARATESTFILE /* GetTestParameters() */;
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* setup file access plist */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id != H5I_INVALID_HID), "H5Pcreate");
+ ret = H5Pset_fapl_mpio(fapl_id, comm, info);
+ VRFY((SUCCEED == ret), "H5Pset_fapl_mpio");
+
+ /* create the file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((fid != H5I_INVALID_HID), "H5Fcreate");
+
+ /* close the file */
+ ret = H5Fclose(fid);
+ VRFY((SUCCEED == ret), "H5Fclose");
+
+ /* Verify that the file is an HDF5 file */
+ is_hdf5 = H5Fis_accessible(filename, fapl_id);
+ VRFY((TRUE == is_hdf5), "H5Fis_accessible");
+
+ /* Delete the file */
+ ret = H5Fdelete(filename, fapl_id);
+ VRFY((SUCCEED == ret), "H5Fdelete");
+
+ /* Verify that the file is NO LONGER an HDF5 file */
+ /* This should fail since there is no file */
+ H5E_BEGIN_TRY
+ {
+ is_hdf5 = H5Fis_accessible(filename, fapl_id);
+ }
+ H5E_END_TRY;
+    VRFY((is_hdf5 != TRUE), "H5Fis_accessible");
+
+ /* Release file-access plist */
+ ret = H5Pclose(fapl_id);
+ VRFY((SUCCEED == ret), "H5Pclose");
+
+} /* end test_delete() */
diff --git a/testpar/API/t_file_image.c b/testpar/API/t_file_image.c
new file mode 100644
index 0000000..4f4fa96
--- /dev/null
+++ b/testpar/API/t_file_image.c
@@ -0,0 +1,371 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Parallel tests for file image operations
+ */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+/* file_image_daisy_chain_test
+ *
+ * Process zero:
+ *
+ * 1) Creates a core file with an integer vector data set of
+ * length n (= mpi_size),
+ *
+ * 2) Initializes the vector to zero in location 0, and to -1
+ * everywhere else.
+ *
+ * 3) Flushes the core file, and gets an image of it. Closes
+ * the core file.
+ *
+ * 4) Sends the image to process 1.
+ *
+ * 5) Awaits receipt on a file image from process n-1.
+ *
+ * 6) opens the image received from process n-1, verifies that
+ * it contains a vector of length equal to mpi_size, and
+ * that the vector contains (0, 1, 2, ... n-1)
+ *
+ * 7) closes the core file and exits.
+ *
+ * Process i (0 < i < n)
+ *
+ * 1) Await receipt of file image from process (i - 1).
+ *
+ * 2) Open the image with the core file driver, verify that it
+ * contains a vector v of length n, and that v[j] == j for
+ * 0 <= j < i, and that v[j] == -1 for i <= j < n.
+ *
+ * 3) Set v[i] = i in the core file.
+ *
+ * 4) Flush the core file and send it to process (i + 1) % n.
+ *
+ * 5) close the core file and exit.
+ *
+ * Test fails on a hang (if an image is not received), or on invalid data.
+ *
+ * JRM -- 11/28/11
+ */
+void
+file_image_daisy_chain_test(void)
+{
+ char file_name[1024] = "\0";
+ int mpi_size, mpi_rank;
+ int mpi_result;
+ int i;
+ int space_ndims;
+ MPI_Status rcvstat;
+ int *vector_ptr = NULL;
+ hid_t fapl_id = -1;
+ hid_t file_id; /* file IDs */
+ hid_t dset_id = -1;
+ hid_t dset_type_id = -1;
+ hid_t space_id = -1;
+ herr_t err;
+ hsize_t dims[1];
+ void *image_ptr = NULL;
+ ssize_t bytes_read;
+ ssize_t image_len;
+ hbool_t vector_ok = TRUE;
+ htri_t tri_result;
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* setup file name */
+ HDsnprintf(file_name, 1024, "file_image_daisy_chain_test_%05d.h5", (int)mpi_rank);
+
+ if (mpi_rank == 0) {
+
+ /* 1) Creates a core file with an integer vector data set
+ * of length mpi_size,
+ */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id >= 0), "creating fapl");
+
+ err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE);
+ VRFY((err >= 0), "setting core file driver in fapl.");
+
+ file_id = H5Fcreate(file_name, 0, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "created core file");
+
+ dims[0] = (hsize_t)mpi_size;
+ space_id = H5Screate_simple(1, dims, dims);
+ VRFY((space_id >= 0), "created data space");
+
+ dset_id = H5Dcreate2(file_id, "v", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "created data set");
+
+ /* 2) Initialize the vector to zero in location 0, and
+ * to -1 everywhere else.
+ */
+
+ vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
+ VRFY((vector_ptr != NULL), "allocated in memory representation of vector");
+
+ vector_ptr[0] = 0;
+ for (i = 1; i < mpi_size; i++)
+ vector_ptr[i] = -1;
+
+ err = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr);
+ VRFY((err >= 0), "wrote initial data to vector.");
+
+ HDfree(vector_ptr);
+ vector_ptr = NULL;
+
+ /* 3) Flush the core file, and get an image of it. Close
+ * the core file.
+ */
+ err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((err >= 0), "flushed core file.");
+
+ image_len = H5Fget_file_image(file_id, NULL, (size_t)0);
+ VRFY((image_len > 0), "got image file size");
+
+ image_ptr = (void *)HDmalloc((size_t)image_len);
+ VRFY(image_ptr != NULL, "allocated file image buffer.");
+
+ bytes_read = H5Fget_file_image(file_id, image_ptr, (size_t)image_len);
+ VRFY(bytes_read == image_len, "wrote file into image buffer");
+
+ err = H5Sclose(space_id);
+ VRFY((err >= 0), "closed data space.");
+
+ err = H5Dclose(dset_id);
+ VRFY((err >= 0), "closed data set.");
+
+ err = H5Fclose(file_id);
+ VRFY((err >= 0), "closed core file(1).");
+
+ err = H5Pclose(fapl_id);
+ VRFY((err >= 0), "closed fapl(1).");
+
+ /* 4) Send the image to process 1. */
+
+ mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, 1, 0, MPI_COMM_WORLD);
+ VRFY((mpi_result == MPI_SUCCESS), "sent image size to process 1");
+
+ mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len, MPI_BYTE, 1, 0, MPI_COMM_WORLD);
+ VRFY((mpi_result == MPI_SUCCESS), "sent image to process 1");
+
+ HDfree(image_ptr);
+ image_ptr = NULL;
+ image_len = 0;
+
+ /* 5) Await receipt on a file image from process n-1. */
+
+ mpi_result = MPI_Recv((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, mpi_size - 1, 0,
+ MPI_COMM_WORLD, &rcvstat);
+ VRFY((mpi_result == MPI_SUCCESS), "received image len from process n-1");
+
+ image_ptr = (void *)HDmalloc((size_t)image_len);
+ VRFY(image_ptr != NULL, "allocated file image receive buffer.");
+
+ mpi_result =
+ MPI_Recv((void *)image_ptr, (int)image_len, MPI_BYTE, mpi_size - 1, 0, MPI_COMM_WORLD, &rcvstat);
+ VRFY((mpi_result == MPI_SUCCESS), "received file image from process n-1");
+
+ /* 6) open the image received from process n-1, verify that
+ * it contains a vector of length equal to mpi_size, and
+ * that the vector contains (0, 1, 2, ... n-1).
+ */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id >= 0), "creating fapl");
+
+ err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE);
+ VRFY((err >= 0), "setting core file driver in fapl.");
+
+ err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len);
+ VRFY((err >= 0), "set file image in fapl.");
+
+ file_id = H5Fopen(file_name, H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "opened received file image file");
+
+ dset_id = H5Dopen2(file_id, "v", H5P_DEFAULT);
+ VRFY((dset_id >= 0), "opened data set");
+
+ dset_type_id = H5Dget_type(dset_id);
+ VRFY((dset_type_id >= 0), "obtained data set type");
+
+ tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT);
+ VRFY((tri_result == TRUE), "verified data set type");
+
+ space_id = H5Dget_space(dset_id);
+ VRFY((space_id >= 0), "opened data space");
+
+ space_ndims = H5Sget_simple_extent_ndims(space_id);
+ VRFY((space_ndims == 1), "verified data space num dims(1)");
+
+ space_ndims = H5Sget_simple_extent_dims(space_id, dims, NULL);
+ VRFY((space_ndims == 1), "verified data space num dims(2)");
+ VRFY((dims[0] == (hsize_t)mpi_size), "verified data space dims");
+
+ vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
+ VRFY((vector_ptr != NULL), "allocated in memory rep of vector");
+
+ err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr);
+ VRFY((err >= 0), "read received vector.");
+
+ vector_ok = TRUE;
+ for (i = 0; i < mpi_size; i++)
+ if (vector_ptr[i] != i)
+ vector_ok = FALSE;
+ VRFY((vector_ok), "verified received vector.");
+
+ HDfree(vector_ptr);
+ vector_ptr = NULL;
+
+ /* 7) closes the core file and exit. */
+
+ err = H5Sclose(space_id);
+ VRFY((err >= 0), "closed data space.");
+
+ err = H5Dclose(dset_id);
+ VRFY((err >= 0), "closed data set.");
+
+ err = H5Fclose(file_id);
+ VRFY((err >= 0), "closed core file(1).");
+
+ err = H5Pclose(fapl_id);
+ VRFY((err >= 0), "closed fapl(1).");
+
+ HDfree(image_ptr);
+ image_ptr = NULL;
+ image_len = 0;
+ }
+ else {
+ /* 1) Await receipt of file image from process (i - 1). */
+
+ mpi_result = MPI_Recv((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, mpi_rank - 1, 0,
+ MPI_COMM_WORLD, &rcvstat);
+ VRFY((mpi_result == MPI_SUCCESS), "received image size from process mpi_rank-1");
+
+ image_ptr = (void *)HDmalloc((size_t)image_len);
+ VRFY(image_ptr != NULL, "allocated file image receive buffer.");
+
+ mpi_result =
+ MPI_Recv((void *)image_ptr, (int)image_len, MPI_BYTE, mpi_rank - 1, 0, MPI_COMM_WORLD, &rcvstat);
+ VRFY((mpi_result == MPI_SUCCESS), "received file image from process mpi_rank-1");
+
+ /* 2) Open the image with the core file driver, verify that it
+         * contains a vector v of length n, and that v[j] == j for
+ * 0 <= j < i, and that v[j] == -1 for i <= j < n
+ */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id >= 0), "creating fapl");
+
+ err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE);
+ VRFY((err >= 0), "setting core file driver in fapl.");
+
+ err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len);
+ VRFY((err >= 0), "set file image in fapl.");
+
+ file_id = H5Fopen(file_name, H5F_ACC_RDWR, fapl_id);
+        H5Eprint2(H5E_DEFAULT, stderr);
+ VRFY((file_id >= 0), "opened received file image file");
+
+ dset_id = H5Dopen2(file_id, "v", H5P_DEFAULT);
+ VRFY((dset_id >= 0), "opened data set");
+
+ dset_type_id = H5Dget_type(dset_id);
+ VRFY((dset_type_id >= 0), "obtained data set type");
+
+ tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT);
+ VRFY((tri_result == TRUE), "verified data set type");
+
+ space_id = H5Dget_space(dset_id);
+ VRFY((space_id >= 0), "opened data space");
+
+ space_ndims = H5Sget_simple_extent_ndims(space_id);
+ VRFY((space_ndims == 1), "verified data space num dims(1)");
+
+ space_ndims = H5Sget_simple_extent_dims(space_id, dims, NULL);
+ VRFY((space_ndims == 1), "verified data space num dims(2)");
+ VRFY((dims[0] == (hsize_t)mpi_size), "verified data space dims");
+
+ vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
+ VRFY((vector_ptr != NULL), "allocated in memory rep of vector");
+
+ err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr);
+ VRFY((err >= 0), "read received vector.");
+
+ vector_ok = TRUE;
+ for (i = 0; i < mpi_size; i++) {
+ if (i < mpi_rank) {
+ if (vector_ptr[i] != i)
+ vector_ok = FALSE;
+ }
+ else {
+ if (vector_ptr[i] != -1)
+ vector_ok = FALSE;
+ }
+ }
+ VRFY((vector_ok), "verified received vector.");
+
+ /* 3) Set v[i] = i in the core file. */
+
+ vector_ptr[mpi_rank] = mpi_rank;
+
+ err = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr);
+ VRFY((err >= 0), "wrote modified data to vector.");
+
+ HDfree(vector_ptr);
+ vector_ptr = NULL;
+
+ /* 4) Flush the core file and send it to process (mpi_rank + 1) % n. */
+
+ err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((err >= 0), "flushed core file.");
+
+ image_len = H5Fget_file_image(file_id, NULL, (size_t)0);
+ VRFY((image_len > 0), "got (possibly modified) image file len");
+
+ image_ptr = (void *)HDrealloc((void *)image_ptr, (size_t)image_len);
+ VRFY(image_ptr != NULL, "re-allocated file image buffer.");
+
+ bytes_read = H5Fget_file_image(file_id, image_ptr, (size_t)image_len);
+ VRFY(bytes_read == image_len, "wrote file into image buffer");
+
+ mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE,
+ (mpi_rank + 1) % mpi_size, 0, MPI_COMM_WORLD);
+ VRFY((mpi_result == MPI_SUCCESS), "sent image size to process (mpi_rank + 1) % mpi_size");
+
+ mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len, MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
+ MPI_COMM_WORLD);
+ VRFY((mpi_result == MPI_SUCCESS), "sent image to process (mpi_rank + 1) % mpi_size");
+
+ HDfree(image_ptr);
+ image_ptr = NULL;
+ image_len = 0;
+
+ /* 5) close the core file and exit. */
+
+ err = H5Sclose(space_id);
+ VRFY((err >= 0), "closed data space.");
+
+ err = H5Dclose(dset_id);
+ VRFY((err >= 0), "closed data set.");
+
+ err = H5Fclose(file_id);
+ VRFY((err >= 0), "closed core file(1).");
+
+ err = H5Pclose(fapl_id);
+ VRFY((err >= 0), "closed fapl(1).");
+ }
+
+ return;
+
+} /* file_image_daisy_chain_test() */
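+
+/* A minimal sketch of the two-call H5Fget_file_image() pattern used by the
+ * daisy-chain test above, assuming `file_id` is an open file: the first call
+ * (NULL buffer) only reports the required size, the second copies the image
+ * into a caller-owned buffer. Kept under #if 0 as a reference only.
+ */
+#if 0
+static void *
+get_file_image_sketch(hid_t file_id, ssize_t *image_len_out)
+{
+    ssize_t image_len;
+    void   *image_ptr;
+
+    /* Flush so the image reflects all cached metadata and raw data */
+    H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+
+    /* First call: query the image size only */
+    image_len = H5Fget_file_image(file_id, NULL, (size_t)0);
+
+    /* Second call: copy the image */
+    image_ptr = HDmalloc((size_t)image_len);
+    H5Fget_file_image(file_id, image_ptr, (size_t)image_len);
+
+    *image_len_out = image_len;
+    return image_ptr; /* caller releases the buffer with HDfree() */
+}
+#endif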
diff --git a/testpar/API/t_filter_read.c b/testpar/API/t_filter_read.c
new file mode 100644
index 0000000..f32c21b
--- /dev/null
+++ b/testpar/API/t_filter_read.c
@@ -0,0 +1,564 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * This verifies the correctness of parallel reading of a dataset that has been
+ * written serially using filters.
+ *
+ * Created by: Christian Chilan
+ * Date: 2007/05/15
+ */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+#ifdef H5_HAVE_SZLIB_H
+#include "szlib.h"
+#endif
+
+static int mpi_size, mpi_rank;
+
+/* Chunk sizes */
+#define CHUNK_DIM1 7
+#define CHUNK_DIM2 27
+
+/* Sizes of the vertical hyperslabs. Total dataset size is
+ {HS_DIM1, HS_DIM2 * mpi_size } */
+#define HS_DIM1 200
+#define HS_DIM2 100
+
+const char *
+h5_rmprefix(const char *filename)
+{
+ const char *ret_ptr;
+
+ if ((ret_ptr = HDstrstr(filename, ":")) == NULL)
+ ret_ptr = filename;
+ else
+ ret_ptr++;
+
+ return (ret_ptr);
+}
+
+#ifdef H5_HAVE_FILTER_SZIP
+
+/*-------------------------------------------------------------------------
+ * Function: h5_szip_can_encode
+ *
+ * Purpose: Retrieve the filter config flags for szip, tell if
+ * encoder is available.
+ *
+ * Return: 1: decode+encode is enabled
+ * 0: only decode is enabled
+ * -1: other
+ *
+ * Programmer:
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+h5_szip_can_encode(void)
+{
+ unsigned int filter_config_flags;
+
+ H5Zget_filter_info(H5Z_FILTER_SZIP, &filter_config_flags);
+ if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) == 0) {
+ /* filter present but neither encode nor decode is supported (???) */
+ return -1;
+ }
+ else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) ==
+ H5Z_FILTER_CONFIG_DECODE_ENABLED) {
+ /* decoder only: read but not write */
+ return 0;
+ }
+ else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) ==
+ H5Z_FILTER_CONFIG_ENCODE_ENABLED) {
+ /* encoder only: write but not read (???) */
+ return -1;
+ }
+ else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) ==
+ (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) {
+ return 1;
+ }
+ return (-1);
+}
+#endif /* H5_HAVE_FILTER_SZIP */
+
+/*-------------------------------------------------------------------------
+ * Function: filter_read_internal
+ *
+ * Purpose: Tests parallel reading of a 2D dataset written serially using
+ * filters. During the parallel reading phase, the dataset is
+ * divided evenly among the processors in vertical hyperslabs.
+ *
+ * Programmer: Christian Chilan
+ * Tuesday, May 15, 2007
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size)
+{
+ hid_t file, dataset; /* HDF5 IDs */
+ hid_t access_plist; /* Access property list ID */
+ hid_t sid, memspace; /* Dataspace IDs */
+ hsize_t size[2]; /* Dataspace dimensions */
+ hsize_t hs_offset[2]; /* Hyperslab offset */
+ hsize_t hs_size[2]; /* Hyperslab size */
+ size_t i, j; /* Local index variables */
+ char name[32] = "dataset";
+ herr_t hrc; /* Error status */
+ int *points = NULL; /* Writing buffer for entire dataset */
+ int *check = NULL; /* Reading buffer for selected hyperslab */
+
+ (void)dset_size; /* silence compiler */
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* set sizes for dataset and hyperslabs */
+ hs_size[0] = size[0] = HS_DIM1;
+ hs_size[1] = HS_DIM2;
+
+ size[1] = hs_size[1] * (hsize_t)mpi_size;
+
+ hs_offset[0] = 0;
+ hs_offset[1] = hs_size[1] * (hsize_t)mpi_rank;
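+    /* Each rank reads a vertical strip HS_DIM1 rows tall and HS_DIM2 columns
+     * wide; e.g. with 4 ranks, rank 2 reads columns 200-299. */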
+
+ /* Create the data space */
+ sid = H5Screate_simple(2, size, NULL);
+ VRFY(sid >= 0, "H5Screate_simple");
+
+ /* Create buffers */
+ points = (int *)HDmalloc(size[0] * size[1] * sizeof(int));
+ VRFY(points != NULL, "HDmalloc");
+
+ check = (int *)HDmalloc(hs_size[0] * hs_size[1] * sizeof(int));
+ VRFY(check != NULL, "HDmalloc");
+
+    /* Initialize writing buffer with deterministic data */
+ for (i = 0; i < size[0]; i++)
+ for (j = 0; j < size[1]; j++)
+ points[i * size[1] + j] = (int)(i + j + 7);
+
+ VRFY(H5Pall_filters_avail(dcpl), "Incorrect filter availability");
+
+ /* Serial write phase */
+ if (MAINPROCESS) {
+
+ file = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY(file >= 0, "H5Fcreate");
+
+ /* Create the dataset */
+ dataset = H5Dcreate2(file, name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY(dataset >= 0, "H5Dcreate2");
+
+ hrc = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, points);
+ VRFY(hrc >= 0, "H5Dwrite");
+#if 0
+ *dset_size = H5Dget_storage_size(dataset);
+ VRFY(*dset_size > 0, "H5Dget_storage_size");
+#endif
+
+ hrc = H5Dclose(dataset);
+ VRFY(hrc >= 0, "H5Dclose");
+
+ hrc = H5Fclose(file);
+ VRFY(hrc >= 0, "H5Fclose");
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /* Parallel read phase */
+ /* Set up MPIO file access property lists */
+ access_plist = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((access_plist >= 0), "H5Pcreate");
+
+ hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL);
+ VRFY((hrc >= 0), "H5Pset_fapl_mpio");
+
+ /* Open the file */
+ file = H5Fopen(filename, H5F_ACC_RDWR, access_plist);
+ VRFY((file >= 0), "H5Fopen");
+
+ dataset = H5Dopen2(file, name, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dopen2");
+
+ hrc = H5Sselect_hyperslab(sid, H5S_SELECT_SET, hs_offset, NULL, hs_size, NULL);
+ VRFY(hrc >= 0, "H5Sselect_hyperslab");
+
+ memspace = H5Screate_simple(2, hs_size, NULL);
+ VRFY(memspace >= 0, "H5Screate_simple");
+
+ hrc = H5Dread(dataset, H5T_NATIVE_INT, memspace, sid, H5P_DEFAULT, check);
+ VRFY(hrc >= 0, "H5Dread");
+
+ /* Check that the values read are the same as the values written */
+ for (i = 0; i < hs_size[0]; i++) {
+ for (j = 0; j < hs_size[1]; j++) {
+ if (points[i * size[1] + (size_t)hs_offset[1] + j] != check[i * hs_size[1] + j]) {
+ HDfprintf(stderr, " Read different values than written.\n");
+ HDfprintf(stderr, " At index %lu,%lu\n", (unsigned long)(i),
+ (unsigned long)(hs_offset[1] + j));
+ HDfprintf(stderr, " At original: %d\n",
+ (int)points[i * size[1] + (size_t)hs_offset[1] + j]);
+ HDfprintf(stderr, " At returned: %d\n", (int)check[i * hs_size[1] + j]);
+ VRFY(FALSE, "");
+ }
+ }
+ }
+#if 0
+ /* Get the storage size of the dataset */
+ *dset_size = H5Dget_storage_size(dataset);
+ VRFY(*dset_size != 0, "H5Dget_storage_size");
+#endif
+
+ /* Clean up objects used for this test */
+ hrc = H5Dclose(dataset);
+ VRFY(hrc >= 0, "H5Dclose");
+
+ hrc = H5Sclose(sid);
+ VRFY(hrc >= 0, "H5Sclose");
+
+ hrc = H5Sclose(memspace);
+ VRFY(hrc >= 0, "H5Sclose");
+
+ hrc = H5Pclose(access_plist);
+ VRFY(hrc >= 0, "H5Pclose");
+
+ hrc = H5Fclose(file);
+ VRFY(hrc >= 0, "H5Fclose");
+
+ HDfree(points);
+ HDfree(check);
+
+ MPI_Barrier(MPI_COMM_WORLD);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: test_filter_read
+ *
+ * Purpose: Tests parallel reading of datasets written serially using
+ * several (combinations of) filters.
+ *
+ * Programmer: Christian Chilan
+ * Tuesday, May 15, 2007
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+test_filter_read(void)
+{
+ hid_t dc; /* HDF5 IDs */
+ const hsize_t chunk_size[2] = {CHUNK_DIM1, CHUNK_DIM2}; /* Chunk dimensions */
+#if 0
+ hsize_t null_size; /* Size of dataset without filters */
+#endif
+ unsigned chunk_opts; /* Chunk options */
+ unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
+ herr_t hrc;
+ const char *filename;
+#ifdef H5_HAVE_FILTER_FLETCHER32
+ hsize_t fletcher32_size; /* Size of dataset with Fletcher32 checksum */
+#endif
+
+#ifdef H5_HAVE_FILTER_DEFLATE
+ hsize_t deflate_size; /* Size of dataset with deflate filter */
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+#ifdef H5_HAVE_FILTER_SZIP
+ hsize_t szip_size; /* Size of dataset with szip filter */
+ unsigned szip_options_mask = H5_SZIP_NN_OPTION_MASK;
+ unsigned szip_pixels_per_block = 4;
+#endif /* H5_HAVE_FILTER_SZIP */
+
+#if 0
+ hsize_t shuffle_size; /* Size of dataset with shuffle filter */
+#endif
+
+#if (defined H5_HAVE_FILTER_DEFLATE || defined H5_HAVE_FILTER_SZIP)
+ hsize_t combo_size; /* Size of dataset with multiple filters */
+#endif /* H5_HAVE_FILTER_DEFLATE || H5_HAVE_FILTER_SZIP */
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+
+ if (VERBOSE_MED)
+ HDprintf("Parallel reading of dataset written with filters %s\n", filename);
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FILTERS)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(
+ " API functions for basic file, dataset or filter aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /*----------------------------------------------------------
+ * STEP 0: Test without filters.
+ *----------------------------------------------------------
+ */
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc >= 0, "H5Pcreate");
+
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
+
+ filter_read_internal(filename, dc, /* &null_size */ NULL);
+
+ /* Clean up objects used for this test */
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
+
+ /* Run steps 1-3 both with and without filters disabled on partial chunks */
+ for (disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1;
+ disable_partial_chunk_filters++) {
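+        /* First pass: keep the default chunk options.  Second pass: add
+         * H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS so that edge chunks bypass
+         * the filter pipeline. */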
+ /* Set chunk options appropriately */
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc >= 0, "H5Pcreate");
+
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_filter");
+
+ hrc = H5Pget_chunk_opts(dc, &chunk_opts);
+ VRFY(hrc >= 0, "H5Pget_chunk_opts");
+
+ if (disable_partial_chunk_filters)
+ chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
+
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
+
+ /*----------------------------------------------------------
+ * STEP 1: Test Fletcher32 Checksum by itself.
+ *----------------------------------------------------------
+ */
+#ifdef H5_HAVE_FILTER_FLETCHER32
+
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc >= 0, "H5Pset_filter");
+
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_filter");
+
+ hrc = H5Pset_chunk_opts(dc, chunk_opts);
+ VRFY(hrc >= 0, "H5Pset_chunk_opts");
+
+ hrc = H5Pset_filter(dc, H5Z_FILTER_FLETCHER32, 0, 0, NULL);
+ VRFY(hrc >= 0, "H5Pset_filter");
+
+ filter_read_internal(filename, dc, &fletcher32_size);
+#if 0
+        VRFY(fletcher32_size > null_size, "Size after checksumming is incorrect.");
+#endif
+
+ /* Clean up objects used for this test */
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
+
+#endif /* H5_HAVE_FILTER_FLETCHER32 */
+
+ /*----------------------------------------------------------
+ * STEP 2: Test deflation by itself.
+ *----------------------------------------------------------
+ */
+#ifdef H5_HAVE_FILTER_DEFLATE
+
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc >= 0, "H5Pcreate");
+
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
+
+ hrc = H5Pset_chunk_opts(dc, chunk_opts);
+ VRFY(hrc >= 0, "H5Pset_chunk_opts");
+
+ hrc = H5Pset_deflate(dc, 6);
+ VRFY(hrc >= 0, "H5Pset_deflate");
+
+ filter_read_internal(filename, dc, &deflate_size);
+
+ /* Clean up objects used for this test */
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
+
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+ /*----------------------------------------------------------
+ * STEP 3: Test szip compression by itself.
+ *----------------------------------------------------------
+ */
+#ifdef H5_HAVE_FILTER_SZIP
+ if (h5_szip_can_encode() == 1) {
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc >= 0, "H5Pcreate");
+
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
+
+ hrc = H5Pset_chunk_opts(dc, chunk_opts);
+ VRFY(hrc >= 0, "H5Pset_chunk_opts");
+
+ hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
+ VRFY(hrc >= 0, "H5Pset_szip");
+
+ filter_read_internal(filename, dc, &szip_size);
+
+ /* Clean up objects used for this test */
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
+ }
+#endif /* H5_HAVE_FILTER_SZIP */
+ } /* end for */
+
+ /*----------------------------------------------------------
+ * STEP 4: Test shuffling by itself.
+ *----------------------------------------------------------
+ */
+
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc >= 0, "H5Pcreate");
+
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
+
+ hrc = H5Pset_shuffle(dc);
+ VRFY(hrc >= 0, "H5Pset_shuffle");
+
+ filter_read_internal(filename, dc, /* &shuffle_size */ NULL);
+#if 0
+ VRFY(shuffle_size == null_size, "Shuffled size not the same as uncompressed size.");
+#endif
+
+ /* Clean up objects used for this test */
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
+
+ /*----------------------------------------------------------
+ * STEP 5: Test shuffle + deflate + checksum in any order.
+ *----------------------------------------------------------
+ */
+#ifdef H5_HAVE_FILTER_DEFLATE
+ /* Testing shuffle+deflate+checksum filters (checksum first) */
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc >= 0, "H5Pcreate");
+
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
+
+ hrc = H5Pset_fletcher32(dc);
+ VRFY(hrc >= 0, "H5Pset_fletcher32");
+
+ hrc = H5Pset_shuffle(dc);
+ VRFY(hrc >= 0, "H5Pset_shuffle");
+
+ hrc = H5Pset_deflate(dc, 6);
+ VRFY(hrc >= 0, "H5Pset_deflate");
+
+ filter_read_internal(filename, dc, &combo_size);
+
+ /* Clean up objects used for this test */
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
+
+ /* Testing shuffle+deflate+checksum filters (checksum last) */
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc >= 0, "H5Pcreate");
+
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
+
+ hrc = H5Pset_shuffle(dc);
+ VRFY(hrc >= 0, "H5Pset_shuffle");
+
+ hrc = H5Pset_deflate(dc, 6);
+ VRFY(hrc >= 0, "H5Pset_deflate");
+
+ hrc = H5Pset_fletcher32(dc);
+ VRFY(hrc >= 0, "H5Pset_fletcher32");
+
+ filter_read_internal(filename, dc, &combo_size);
+
+ /* Clean up objects used for this test */
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
+
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+ /*----------------------------------------------------------
+ * STEP 6: Test shuffle + szip + checksum in any order.
+ *----------------------------------------------------------
+ */
+#ifdef H5_HAVE_FILTER_SZIP
+
+ /* Testing shuffle+szip(with encoder)+checksum filters(checksum first) */
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc >= 0, "H5Pcreate");
+
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
+
+ hrc = H5Pset_fletcher32(dc);
+ VRFY(hrc >= 0, "H5Pset_fletcher32");
+
+ hrc = H5Pset_shuffle(dc);
+ VRFY(hrc >= 0, "H5Pset_shuffle");
+
+ /* Make sure encoding is enabled */
+ if (h5_szip_can_encode() == 1) {
+ hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
+ VRFY(hrc >= 0, "H5Pset_szip");
+
+ filter_read_internal(filename, dc, &combo_size);
+ }
+
+ /* Clean up objects used for this test */
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
+
+ /* Testing shuffle+szip(with encoder)+checksum filters(checksum last) */
+ /* Make sure encoding is enabled */
+ if (h5_szip_can_encode() == 1) {
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc >= 0, "H5Pcreate");
+
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
+
+ hrc = H5Pset_shuffle(dc);
+ VRFY(hrc >= 0, "H5Pset_shuffle");
+
+ hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
+ VRFY(hrc >= 0, "H5Pset_szip");
+
+ hrc = H5Pset_fletcher32(dc);
+ VRFY(hrc >= 0, "H5Pset_fletcher32");
+
+ filter_read_internal(filename, dc, &combo_size);
+
+ /* Clean up objects used for this test */
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
+ }
+
+#endif /* H5_HAVE_FILTER_SZIP */
+}
diff --git a/testpar/API/t_mdset.c b/testpar/API/t_mdset.c
new file mode 100644
index 0000000..e11818f
--- /dev/null
+++ b/testpar/API/t_mdset.c
@@ -0,0 +1,2814 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+#if 0
+#include "H5Dprivate.h"
+#include "H5private.h"
+#endif
+
+#define DIM 2
+#define SIZE 32
+#define NDATASET 4
+#define GROUP_DEPTH 32
+enum obj_type { is_group, is_dset };
+
+static int get_size(void);
+static void write_dataset(hid_t, hid_t, hid_t);
+static int read_dataset(hid_t, hid_t, hid_t);
+static void create_group_recursive(hid_t, hid_t, hid_t, int);
+static void recursive_read_group(hid_t, hid_t, hid_t, int);
+static void group_dataset_read(hid_t fid, int mpi_rank, int m);
+static void write_attribute(hid_t, int, int);
+static int read_attribute(hid_t, int, int);
+static int check_value(DATATYPE *, DATATYPE *, int);
+static void get_slab(hsize_t[], hsize_t[], hsize_t[], hsize_t[], int);
+
+/*
+ * The size value computed by this function is used extensively in
+ * configuring tests for the current number of processes.
+ *
+ * This function was created as part of an effort to allow the
+ * test functions in this file to run on an arbitrary number of
+ * processors.
+ * JRM - 8/11/04
+ */
+
+static int
+get_size(void)
+{
+ int mpi_rank;
+ int mpi_size;
+ int size = SIZE;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); /* needed for VRFY */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ if (mpi_size > size) {
+ if ((mpi_size % 2) == 0) {
+ size = mpi_size;
+ }
+ else {
+ size = mpi_size + 1;
+ }
+ }
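+    /* e.g. with 33 ranks, size is bumped from 32 to 34 so that it is never
+     * smaller than the number of ranks and always stays even. */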
+
+ VRFY((mpi_size <= size), "mpi_size <= size");
+ VRFY(((size % 2) == 0), "size isn't even");
+
+ return (size);
+
+} /* get_size() */
+
+/*
+ * Example of using PHDF5 to create a zero sized dataset.
+ *
+ */
+void
+zero_dim_dset(void)
+{
+ int mpi_size, mpi_rank;
+ const char *filename;
+ hid_t fid, plist, dcpl, dsid, sid;
+ hsize_t dim, chunk_dim;
+ herr_t ret;
+ int data[1];
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((plist >= 0), "create_faccess_plist succeeded");
+
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+ ret = H5Pclose(plist);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl >= 0), "failed H5Pcreate");
+
+ /* Set 1 chunk size */
+ chunk_dim = 1;
+ ret = H5Pset_chunk(dcpl, 1, &chunk_dim);
+ VRFY((ret >= 0), "failed H5Pset_chunk");
+
+ /* Create 1D dataspace with 0 dim size */
+ dim = 0;
+ sid = H5Screate_simple(1, &dim, NULL);
+ VRFY((sid >= 0), "failed H5Screate_simple");
+
+ /* Create chunked dataset */
+ dsid = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dsid >= 0), "failed H5Dcreate2");
+
+    /* Write 0 elements to the dataset */
+ ret = H5Dwrite(dsid, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data);
+ VRFY((ret >= 0), "failed H5Dwrite");
+
+ /* Read 0 elements from dataset */
+ ret = H5Dread(dsid, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data);
+ VRFY((ret >= 0), "failed H5Dread");
+
+ H5Pclose(dcpl);
+ H5Dclose(dsid);
+ H5Sclose(sid);
+ H5Fclose(fid);
+}
+
+/*
+ * Example of using PHDF5 to create ndatasets datasets. Each process writes
+ * a slab of the array to the file.
+ */
+void
+multiple_dset_write(void)
+{
+ int i, j, n, mpi_size, mpi_rank, size;
+ hid_t iof, plist, dataset, memspace, filespace;
+ hid_t dcpl; /* Dataset creation property list */
+ hsize_t chunk_origin[DIM];
+ hsize_t chunk_dims[DIM], file_dims[DIM];
+ hsize_t count[DIM] = {1, 1};
+ double *outme = NULL;
+ double fill = 1.0; /* Fill value */
+ char dname[100];
+ herr_t ret;
+#if 0
+ const H5Ptest_param_t *pt;
+#endif
+ char *filename;
+ int ndatasets;
+
+#if 0
+ pt = GetTestParameters();
+#endif
+ /* filename = pt->name; */ filename = PARATESTFILE;
+ /* ndatasets = pt->count; */ ndatasets = NDATASETS;
+
+ size = get_size();
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ outme = HDmalloc((size_t)size * (size_t)size * sizeof(double));
+ VRFY((outme != NULL), "HDmalloc succeeded for outme");
+
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((plist >= 0), "create_faccess_plist succeeded");
+ iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
+ VRFY((iof >= 0), "H5Fcreate succeeded");
+ ret = H5Pclose(plist);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* decide the hyperslab according to process number. */
+ get_slab(chunk_origin, chunk_dims, count, file_dims, size);
+
+ memspace = H5Screate_simple(DIM, chunk_dims, NULL);
+ filespace = H5Screate_simple(DIM, file_dims, NULL);
+ ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
+ VRFY((ret >= 0), "mdata hyperslab selection");
+
+ /* Create a dataset creation property list */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl >= 0), "dataset creation property list succeeded");
+
+ ret = H5Pset_fill_value(dcpl, H5T_NATIVE_DOUBLE, &fill);
+ VRFY((ret >= 0), "set fill-value succeeded");
+
+ for (n = 0; n < ndatasets; n++) {
+ HDsnprintf(dname, sizeof(dname), "dataset %d", n);
+ dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dataset > 0), dname);
+
+ /* calculate data to write */
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++)
+ outme[(i * size) + j] = n * 1000 + mpi_rank;
+
+ H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, filespace, H5P_DEFAULT, outme);
+
+ H5Dclose(dataset);
+#ifdef BARRIER_CHECKS
+ if (!((n + 1) % 10)) {
+ HDprintf("created %d datasets\n", n + 1);
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+#endif /* BARRIER_CHECKS */
+ }
+
+ H5Sclose(filespace);
+ H5Sclose(memspace);
+ H5Pclose(dcpl);
+ H5Fclose(iof);
+
+ HDfree(outme);
+}
+
+/* Example of using PHDF5 to create, write, and read a compact dataset.
+ */
+void
+compact_dataset(void)
+{
+ int i, j, mpi_size, mpi_rank, size, err_num = 0;
+ hid_t iof, plist, dcpl, dxpl, dataset, filespace;
+ hsize_t file_dims[DIM];
+ double *outme;
+ double *inme;
+ char dname[] = "dataset";
+ herr_t ret;
+ const char *filename;
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ hbool_t prop_value;
+#endif
+
+ size = get_size();
+
+ for (i = 0; i < DIM; i++)
+ file_dims[i] = (hsize_t)size;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ outme = HDmalloc((size_t)((size_t)size * (size_t)size * sizeof(double)));
+ VRFY((outme != NULL), "HDmalloc succeeded for outme");
+
+ inme = HDmalloc((size_t)size * (size_t)size * sizeof(double));
+ VRFY((outme != NULL), "HDmalloc succeeded for inme");
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ VRFY((mpi_size <= size), "mpi_size <= size");
+
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
+
+ /* Define data space */
+ filespace = H5Screate_simple(DIM, file_dims, NULL);
+
+ /* Create a compact dataset */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl >= 0), "dataset creation property list succeeded");
+ ret = H5Pset_layout(dcpl, H5D_COMPACT);
+ VRFY((dcpl >= 0), "set property list for compact dataset");
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);
+ VRFY((ret >= 0), "set space allocation time for compact dataset");
+
+ dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+
+ /* set up the collective transfer properties list */
+ dxpl = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl >= 0), "");
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* Recalculate data to write. Each process writes the same data. */
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++)
+ outme[(i * size) + j] = (i + j) * 1000;
+
+ ret = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, outme);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ H5Pclose(dcpl);
+ H5Pclose(plist);
+ H5Dclose(dataset);
+ H5Sclose(filespace);
+ H5Fclose(iof);
+
+ /* Open the file and dataset, read and compare the data. */
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ iof = H5Fopen(filename, H5F_ACC_RDONLY, plist);
+ VRFY((iof >= 0), "H5Fopen succeeded");
+
+ /* set up the collective transfer properties list */
+ dxpl = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl >= 0), "");
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ dataset = H5Dopen2(iof, dname, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dopen2 succeeded");
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
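+    /* With an instrumented library, insert the internal transfer property
+     * used to detect whether the rank 0 broadcast read optimization was
+     * performed. */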
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value, NULL,
+ NULL, NULL, NULL, NULL, NULL);
+ VRFY((ret >= 0), "H5Pinsert2() succeeded");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ ret = H5Dread(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, inme);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ prop_value = FALSE;
+ ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
+ VRFY((ret >= 0), "H5Pget succeeded");
+ VRFY((prop_value == FALSE && dxfer_coll_type == DXFER_COLLECTIVE_IO),
+ "rank 0 Bcast optimization was performed for a compact dataset");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ /* Verify data value */
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++)
+ if (!H5_DBL_ABS_EQUAL(inme[(i * size) + j], outme[(i * size) + j]))
+ if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ HDprintf("Dataset Verify failed at [%d][%d]: expect %f, got %f\n", i, j,
+ outme[(i * size) + j], inme[(i * size) + j]);
+
+ H5Pclose(plist);
+ H5Pclose(dxpl);
+ H5Dclose(dataset);
+ H5Fclose(iof);
+ HDfree(inme);
+ HDfree(outme);
+}
+
+/*
+ * Example of using PHDF5 to create, write, and read a dataset and an
+ * attribute with a null dataspace.
+ */
+void
+null_dataset(void)
+{
+ int mpi_size, mpi_rank;
+ hid_t iof, plist, dxpl, dataset, attr, sid;
+ unsigned uval = 2; /* Buffer for writing to dataset */
+ int val = 1; /* Buffer for writing to attribute */
+ hssize_t nelem;
+ char dname[] = "dataset";
+ char attr_name[] = "attribute";
+ herr_t ret;
+ const char *filename;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset, or attribute aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
+
+ /* Define data space */
+ sid = H5Screate(H5S_NULL);
+
+ /* Check that the null dataspace actually has 0 elements */
+ nelem = H5Sget_simple_extent_npoints(sid);
+ VRFY((nelem == 0), "H5Sget_simple_extent_npoints");
+
+    /* Create a dataset with the null dataspace */
+ dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+
+ /* set up the collective transfer properties list */
+ dxpl = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl >= 0), "");
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* Write "nothing" to the dataset(with type conversion) */
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, &uval);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+    /* Create an attribute for the dataset */
+ attr = H5Acreate2(dataset, attr_name, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((attr >= 0), "H5Acreate2");
+
+ /* Write "nothing" to the attribute(with type conversion) */
+ ret = H5Awrite(attr, H5T_NATIVE_INT, &val);
+ VRFY((ret >= 0), "H5Awrite");
+
+ H5Aclose(attr);
+ H5Dclose(dataset);
+ H5Pclose(plist);
+ H5Sclose(sid);
+ H5Fclose(iof);
+
+ /* Open the file and dataset, read and compare the data. */
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ iof = H5Fopen(filename, H5F_ACC_RDONLY, plist);
+ VRFY((iof >= 0), "H5Fopen succeeded");
+
+ /* set up the collective transfer properties list */
+ dxpl = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl >= 0), "");
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ dataset = H5Dopen2(iof, dname, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dopen2 succeeded");
+
+ /* Try reading from the dataset(make certain our buffer is unmodified) */
+ ret = H5Dread(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, dxpl, &uval);
+ VRFY((ret >= 0), "H5Dread");
+ VRFY((uval == 2), "H5Dread");
+
+ /* Open the attribute for the dataset */
+ attr = H5Aopen(dataset, attr_name, H5P_DEFAULT);
+ VRFY((attr >= 0), "H5Aopen");
+
+    /* Try reading from the attribute(make certain our buffer is unmodified) */
+    ret = H5Aread(attr, H5T_NATIVE_INT, &val);
+ VRFY((ret >= 0), "H5Aread");
+ VRFY((val == 1), "H5Aread");
+
+ H5Pclose(plist);
+ H5Pclose(dxpl);
+ H5Aclose(attr);
+ H5Dclose(dataset);
+ H5Fclose(iof);
+}
+
+/* Example of using PHDF5 to create "large" datasets. (>2GB, >4GB, >8GB)
+ * Actual data is _not_ written to these datasets. Dataspaces are exact
+ * sizes(2GB, 4GB, etc.), but the metadata for the file pushes the file over
+ * the boundary of interest.
+ */
+void
+big_dataset(void)
+{
+ int mpi_size, mpi_rank; /* MPI info */
+ hid_t iof, /* File ID */
+ fapl, /* File access property list ID */
+ dataset, /* Dataset ID */
+ filespace; /* Dataset's dataspace ID */
+ hsize_t file_dims[4]; /* Dimensions of dataspace */
+ char dname[] = "dataset"; /* Name of dataset */
+#if 0
+ MPI_Offset file_size; /* Size of file on disk */
+#endif
+ herr_t ret; /* Generic return value */
+ const char *filename;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* Verify MPI_Offset can handle larger than 2GB sizes */
+ VRFY((sizeof(MPI_Offset) > 4), "sizeof(MPI_Offset)>4");
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+
+ fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl >= 0), "create_faccess_plist succeeded");
+
+ /*
+ * Create >2GB HDF5 file
+ */
+ iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ VRFY((iof >= 0), "H5Fcreate succeeded");
+
+ /* Define dataspace for 2GB dataspace */
+ file_dims[0] = 2;
+ file_dims[1] = 1024;
+ file_dims[2] = 1024;
+ file_dims[3] = 1024;
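+    /* 2 * 1024 * 1024 * 1024 one-byte elements = 2 GiB of raw data; the
+     * file metadata pushes the total just past the 2 GB boundary. */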
+ filespace = H5Screate_simple(4, file_dims, NULL);
+ VRFY((filespace >= 0), "H5Screate_simple succeeded");
+
+ dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+
+ /* Close all file objects */
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Sclose(filespace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Fclose(iof);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+#if 0
+ /* Check that file of the correct size was created */
+ file_size = h5_get_file_size(filename, fapl);
+ VRFY((file_size == 2147485696ULL), "File is correct size(~2GB)");
+#endif
+
+ /*
+ * Create >4GB HDF5 file
+ */
+ iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ VRFY((iof >= 0), "H5Fcreate succeeded");
+
+ /* Define dataspace for 4GB dataspace */
+ file_dims[0] = 4;
+ file_dims[1] = 1024;
+ file_dims[2] = 1024;
+ file_dims[3] = 1024;
+ filespace = H5Screate_simple(4, file_dims, NULL);
+ VRFY((filespace >= 0), "H5Screate_simple succeeded");
+
+ dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+
+ /* Close all file objects */
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Sclose(filespace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Fclose(iof);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+#if 0
+ /* Check that file of the correct size was created */
+ file_size = h5_get_file_size(filename, fapl);
+ VRFY((file_size == 4294969344ULL), "File is correct size(~4GB)");
+#endif
+
+ /*
+ * Create >8GB HDF5 file
+ */
+ iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ VRFY((iof >= 0), "H5Fcreate succeeded");
+
+ /* Define dataspace for 8GB dataspace */
+ file_dims[0] = 8;
+ file_dims[1] = 1024;
+ file_dims[2] = 1024;
+ file_dims[3] = 1024;
+ filespace = H5Screate_simple(4, file_dims, NULL);
+ VRFY((filespace >= 0), "H5Screate_simple succeeded");
+
+ dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+
+ /* Close all file objects */
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Sclose(filespace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Fclose(iof);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+#if 0
+ /* Check that file of the correct size was created */
+ file_size = h5_get_file_size(filename, fapl);
+ VRFY((file_size == 8589936640ULL), "File is correct size(~8GB)");
+#endif
+
+ /* Close fapl */
+ ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+}
+
+/* Example of using PHDF5 to read a partially written dataset. The dataset does
+ * not have actual data written to the entire raw data area and relies on the
+ * default fill value of zeros to work correctly.
+ */
+void
+dataset_fillvalue(void)
+{
+ int mpi_size, mpi_rank; /* MPI info */
+ int err_num; /* Number of errors */
+ hid_t iof, /* File ID */
+ fapl, /* File access property list ID */
+ dxpl, /* Data transfer property list ID */
+ dataset, /* Dataset ID */
+ memspace, /* Memory dataspace ID */
+ filespace; /* Dataset's dataspace ID */
+ char dname[] = "dataset"; /* Name of dataset */
+ hsize_t dset_dims[4] = {0, 6, 7, 8};
+ hsize_t req_start[4] = {0, 0, 0, 0};
+ hsize_t req_count[4] = {1, 6, 7, 8};
+ hsize_t dset_size; /* Dataset size */
+ int *rdata, *wdata; /* Buffers for data to read and write */
+ int *twdata, *trdata; /* Temporary pointer into buffer */
+ int acc, i, ii, j, k, l; /* Local index variables */
+ herr_t ret; /* Generic return value */
+ const char *filename;
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ hbool_t prop_value;
+#endif
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+
+ /* Set the dataset dimension to be one row more than number of processes */
+ /* and calculate the actual dataset size. */
+ dset_dims[0] = (hsize_t)(mpi_size + 1);
+ dset_size = dset_dims[0] * dset_dims[1] * dset_dims[2] * dset_dims[3];
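+    /* e.g. with 4 ranks the dataset is 5 x 6 x 7 x 8 = 1680 ints; each rank
+     * later writes one 6 x 7 x 8 "row" and the extra row is left at the
+     * default fill value of 0. */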
+
+ /* Allocate space for the buffers */
+ rdata = HDmalloc((size_t)(dset_size * sizeof(int)));
+ VRFY((rdata != NULL), "HDcalloc succeeded for read buffer");
+ wdata = HDmalloc((size_t)(dset_size * sizeof(int)));
+ VRFY((wdata != NULL), "HDmalloc succeeded for write buffer");
+
+ fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl >= 0), "create_faccess_plist succeeded");
+
+ /*
+ * Create HDF5 file
+ */
+ iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ VRFY((iof >= 0), "H5Fcreate succeeded");
+
+ filespace = H5Screate_simple(4, dset_dims, NULL);
+ VRFY((filespace >= 0), "File H5Screate_simple succeeded");
+
+ dataset = H5Dcreate2(iof, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+
+ memspace = H5Screate_simple(4, dset_dims, NULL);
+ VRFY((memspace >= 0), "Memory H5Screate_simple succeeded");
+
+ /*
+ * Read dataset before any data is written.
+ */
+
+ /* Create DXPL for I/O */
+ dxpl = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl >= 0), "H5Pcreate succeeded");
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value, NULL,
+ NULL, NULL, NULL, NULL, NULL);
+ VRFY((ret >= 0), "testing property list inserted succeeded");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ for (ii = 0; ii < 2; ii++) {
+
+ if (ii == 0)
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
+ else
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* set entire read buffer with the constant 2 */
+ HDmemset(rdata, 2, (size_t)(dset_size * sizeof(int)));
+
+ /* Read the entire dataset back */
+ ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rdata);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ prop_value = FALSE;
+ ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
+ VRFY((ret >= 0), "testing property list get succeeded");
+ if (ii == 0)
+ VRFY((prop_value == FALSE), "correctly handled rank 0 Bcast");
+ else
+ VRFY((prop_value == TRUE), "correctly handled rank 0 Bcast");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ /* Verify all data read are the fill value 0 */
+ trdata = rdata;
+ err_num = 0;
+ for (i = 0; i < (int)dset_dims[0]; i++)
+ for (j = 0; j < (int)dset_dims[1]; j++)
+ for (k = 0; k < (int)dset_dims[2]; k++)
+ for (l = 0; l < (int)dset_dims[3]; l++, trdata++)
+ if (*trdata != 0)
+ if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ HDprintf(
+ "Rank %d: Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n",
+ mpi_rank, i, j, k, l, *trdata);
+ if (err_num > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("Rank %d: [more errors ...]\n", mpi_rank);
+ if (err_num) {
+ HDprintf("Rank %d: %d errors found in check_value\n", mpi_rank, err_num);
+ nerrors++;
+ }
+ }
+
+ /* Barrier to ensure all processes have completed the above test. */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /*
+     * Each process writes 1 row of data. Thus the last row is not written.
+ */
+ /* Create hyperslabs in memory and file dataspaces */
+ req_start[0] = (hsize_t)mpi_rank;
+ ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, req_start, NULL, req_count, NULL);
+ VRFY((ret >= 0), "H5Sselect_hyperslab succeeded on memory dataspace");
+ ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, req_start, NULL, req_count, NULL);
+ VRFY((ret >= 0), "H5Sselect_hyperslab succeeded on memory dataspace");
+
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* Fill write buffer with some values */
+ twdata = wdata;
+ for (i = 0, acc = 0; i < (int)dset_dims[0]; i++)
+ for (j = 0; j < (int)dset_dims[1]; j++)
+ for (k = 0; k < (int)dset_dims[2]; k++)
+ for (l = 0; l < (int)dset_dims[3]; l++)
+ *twdata++ = acc++;
+
+ /* Collectively write a hyperslab of data to the dataset */
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, memspace, filespace, dxpl, wdata);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* Barrier here, to allow processes to sync */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /*
+ * Read dataset after partial write.
+ */
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ ret = H5Pset(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
+ VRFY((ret >= 0), " H5Pset succeeded");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ for (ii = 0; ii < 2; ii++) {
+
+ if (ii == 0)
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
+ else
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* set entire read buffer with the constant 2 */
+ HDmemset(rdata, 2, (size_t)(dset_size * sizeof(int)));
+
+ /* Read the entire dataset back */
+ ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rdata);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ prop_value = FALSE;
+ ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
+ VRFY((ret >= 0), "testing property list get succeeded");
+ if (ii == 0)
+ VRFY((prop_value == FALSE), "correctly handled rank 0 Bcast");
+ else
+ VRFY((prop_value == TRUE), "correctly handled rank 0 Bcast");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ /* Verify correct data read */
+ twdata = wdata;
+ trdata = rdata;
+ err_num = 0;
+ for (i = 0; i < (int)dset_dims[0]; i++)
+ for (j = 0; j < (int)dset_dims[1]; j++)
+ for (k = 0; k < (int)dset_dims[2]; k++)
+ for (l = 0; l < (int)dset_dims[3]; l++, twdata++, trdata++)
+ if (i < mpi_size) {
+ if (*twdata != *trdata)
+ if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect %d, got %d\n",
+ i, j, k, l, *twdata, *trdata);
+ } /* end if */
+ else {
+ if (*trdata != 0)
+ if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n",
+ i, j, k, l, *trdata);
+ } /* end else */
+ if (err_num > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("[more errors ...]\n");
+ if (err_num) {
+ HDprintf("%d errors found in check_value\n", err_num);
+ nerrors++;
+ }
+ }
+
+ /* Close all file objects */
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Sclose(filespace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Fclose(iof);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(memspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+
+ /* Close dxpl */
+ ret = H5Pclose(dxpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* Close fapl */
+ ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* free the buffers */
+ HDfree(rdata);
+ HDfree(wdata);
+}
+
+/* Combined the cngrpw and ingrpr tests because ingrpr reads the file created by cngrpw. */
+void
+collective_group_write_independent_group_read(void)
+{
+ collective_group_write();
+ independent_group_read();
+}
+
+/* Write multiple groups with a chunked dataset in each group collectively.
+ * These groups and datasets are for testing independent read later.
+ */
+void
+collective_group_write(void)
+{
+ int mpi_rank, mpi_size, size;
+ int i, j, m;
+ char gname[64], dname[32];
+ hid_t fid, gid, did, plist, dcpl, memspace, filespace;
+ DATATYPE *outme = NULL;
+ hsize_t chunk_origin[DIM];
+ hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
+ hsize_t chunk_size[2]; /* Chunk dimensions - computed shortly */
+ herr_t ret1, ret2;
+#if 0
+ const H5Ptest_param_t *pt;
+#endif
+ char *filename;
+ int ngroups;
+
+#if 0
+ pt = GetTestParameters();
+#endif
+ /* filename = pt->name; */ filename = PARATESTFILE;
+ /* ngroups = pt->count; */ ngroups = NGROUPS;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ size = get_size();
+
+ chunk_size[0] = (hsize_t)(size / 2);
+ chunk_size[1] = (hsize_t)(size / 2);
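+    /* Each chunk covers a quarter of the size x size dataset. */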
+
+ outme = HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
+ VRFY((outme != NULL), "HDmalloc succeeded for outme");
+
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
+ VRFY((fid >= 0), "H5Fcreate");
+ H5Pclose(plist);
+
+ /* decide the hyperslab according to process number. */
+ get_slab(chunk_origin, chunk_dims, count, file_dims, size);
+
+    /* select hyperslab in memory and file spaces. These two selections are
+     * identical since the two dataspaces have the same dimensions. */
+ memspace = H5Screate_simple(DIM, file_dims, NULL);
+ ret1 = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
+ filespace = H5Screate_simple(DIM, file_dims, NULL);
+ ret2 = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
+ VRFY((memspace >= 0), "memspace");
+ VRFY((filespace >= 0), "filespace");
+ VRFY((ret1 == 0), "mgroup memspace selection");
+ VRFY((ret2 == 0), "mgroup filespace selection");
+
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ ret1 = H5Pset_chunk(dcpl, 2, chunk_size);
+ VRFY((dcpl >= 0), "dataset creation property");
+ VRFY((ret1 == 0), "set chunk for dataset creation property");
+
+ /* creates ngroups groups under the root group, writes chunked
+ * datasets in parallel. */
+ for (m = 0; m < ngroups; m++) {
+ HDsnprintf(gname, sizeof(gname), "group%d", m);
+ gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((gid > 0), gname);
+
+ HDsnprintf(dname, sizeof(dname), "dataset%d", m);
+ did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((did > 0), dname);
+
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++)
+ outme[(i * size) + j] = (i + j) * 1000 + mpi_rank;
+
+ ret1 = H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme);
+ VRFY((ret1 == 0), "H5Dwrite");
+
+ ret1 = H5Dclose(did);
+ VRFY((ret1 == 0), "H5Dclose");
+
+ ret1 = H5Gclose(gid);
+ VRFY((ret1 == 0), "H5Gclose");
+
+#ifdef BARRIER_CHECKS
+ if (!((m + 1) % 10)) {
+ HDprintf("created %d groups\n", m + 1);
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+#endif /* BARRIER_CHECKS */
+ }
+
+ H5Pclose(dcpl);
+ H5Sclose(filespace);
+ H5Sclose(memspace);
+
+ ret1 = H5Fclose(fid);
+ VRFY((ret1 == 0), "H5Fclose");
+
+ HDfree(outme);
+}
+
+/* Let two sets of processes open and read different groups and chunked
+ * datasets independently.
+ */
+void
+independent_group_read(void)
+{
+ int mpi_rank, m;
+ hid_t plist, fid;
+#if 0
+ const H5Ptest_param_t *pt;
+#endif
+ char *filename;
+ int ngroups;
+ herr_t ret;
+
+#if 0
+ pt = GetTestParameters();
+#endif
+ /* filename = pt->name; */ filename = PARATESTFILE;
+ /* ngroups = pt->count; */ ngroups = NGROUPS;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ H5Pset_all_coll_metadata_ops(plist, FALSE);
+
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, plist);
+ VRFY((fid > 0), "H5Fopen");
+ H5Pclose(plist);
+
+    /* open groups and read datasets. Even-rank processes read every other
+     * group starting from the last one; odd-rank processes read the
+     * even-numbered groups from the beginning. */
+    if (mpi_rank % 2 == 0) {
+        for (m = ngroups - 1; m >= 0; m -= 2)
+ group_dataset_read(fid, mpi_rank, m);
+ }
+ else {
+ for (m = 0; m < ngroups; m += 2)
+ group_dataset_read(fid, mpi_rank, m);
+ }
+
+ ret = H5Fclose(fid);
+ VRFY((ret == 0), "H5Fclose");
+}
+
+/* Open and read datasets and compare data
+ */
+static void
+group_dataset_read(hid_t fid, int mpi_rank, int m)
+{
+ int ret, i, j, size;
+ char gname[64], dname[32];
+ hid_t gid, did;
+ DATATYPE *outdata = NULL;
+ DATATYPE *indata = NULL;
+
+ size = get_size();
+
+ indata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
+ VRFY((indata != NULL), "HDmalloc succeeded for indata");
+
+ outdata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
+ VRFY((outdata != NULL), "HDmalloc succeeded for outdata");
+
+    /* open the specified group under the root group. */
+ HDsnprintf(gname, sizeof(gname), "group%d", m);
+ gid = H5Gopen2(fid, gname, H5P_DEFAULT);
+ VRFY((gid > 0), gname);
+
+ /* check the data. */
+ HDsnprintf(dname, sizeof(dname), "dataset%d", m);
+ did = H5Dopen2(gid, dname, H5P_DEFAULT);
+ VRFY((did > 0), dname);
+
+ H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, indata);
+
+ /* this is the original value */
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++)
+ outdata[(i * size) + j] = (i + j) * 1000 + mpi_rank;
+
+ /* compare the original value(outdata) to the value in file(indata).*/
+ ret = check_value(indata, outdata, size);
+ VRFY((ret == 0), "check the data");
+
+ ret = H5Dclose(did);
+ VRFY((ret == 0), "H5Dclose");
+ ret = H5Gclose(gid);
+ VRFY((ret == 0), "H5Gclose");
+
+ HDfree(indata);
+ HDfree(outdata);
+}
+
+/*
+ * Example of using PHDF5 to create multiple groups. Under the root group,
+ * it creates ngroups groups. Under the first group just created, it creates
+ * recursive subgroups of depth GROUP_DEPTH. In each created group, it
+ * generates NDATASETS datasets. Each process writes a hyperslab of an array
+ * into the file. The structure is like
+ *
+ * root group
+ * |
+ * ---------------------------- ... ... ------------------------
+ * | | | ... ... | |
+ * group0*+' group1*+' group2*+' ... ... group ngroups*+'
+ * |
+ * 1st_child_group*'
+ * |
+ * 2nd_child_group*'
+ * |
+ * :
+ * :
+ * |
+ * GROUP_DEPTHth_child_group*'
+ *
+ * * means the group has dataset(s).
+ * + means the group has attribute(s).
+ * ' means the datasets in the groups have attribute(s).
+ *
+ */
+void
+multiple_group_write(void)
+{
+ int mpi_rank, mpi_size, size;
+ int m;
+ char gname[64];
+ hid_t fid, gid, plist, memspace, filespace;
+ hsize_t chunk_origin[DIM];
+ hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
+ herr_t ret;
+#if 0
+ const H5Ptest_param_t *pt;
+#endif
+ char *filename;
+ int ngroups;
+
+#if 0
+ pt = GetTestParameters();
+#endif
+ /* filename = pt->name; */ filename = PARATESTFILE;
+ /* ngroups = pt->count; */ ngroups = NGROUPS;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, group, dataset, or attribute aren't supported with "
+ "this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ size = get_size();
+
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
+ H5Pclose(plist);
+
+ /* decide the hyperslab according to process number. */
+ get_slab(chunk_origin, chunk_dims, count, file_dims, size);
+
+    /* select hyperslab in memory and file spaces. These two selections are
+     * identical since the two dataspaces have the same dimensions. */
+ memspace = H5Screate_simple(DIM, file_dims, NULL);
+ VRFY((memspace >= 0), "memspace");
+ ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
+ VRFY((ret >= 0), "mgroup memspace selection");
+
+ filespace = H5Screate_simple(DIM, file_dims, NULL);
+ VRFY((filespace >= 0), "filespace");
+ ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
+ VRFY((ret >= 0), "mgroup filespace selection");
+
+ /* creates ngroups groups under the root group, writes datasets in
+ * parallel. */
+ for (m = 0; m < ngroups; m++) {
+ HDsnprintf(gname, sizeof(gname), "group%d", m);
+ gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((gid > 0), gname);
+
+ /* create attribute for these groups. */
+ write_attribute(gid, is_group, m);
+
+ if (m != 0)
+ write_dataset(memspace, filespace, gid);
+
+ H5Gclose(gid);
+
+#ifdef BARRIER_CHECKS
+ if (!((m + 1) % 10)) {
+ HDprintf("created %d groups\n", m + 1);
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+#endif /* BARRIER_CHECKS */
+ }
+
+ /* recursively creates subgroups under the first group. */
+ gid = H5Gopen2(fid, "group0", H5P_DEFAULT);
+ create_group_recursive(memspace, filespace, gid, 0);
+ ret = H5Gclose(gid);
+ VRFY((ret >= 0), "H5Gclose");
+
+ ret = H5Sclose(filespace);
+ VRFY((ret >= 0), "H5Sclose");
+ ret = H5Sclose(memspace);
+ VRFY((ret >= 0), "H5Sclose");
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose");
+}
+
+/*
+ * In a group, creates NDATASETS datasets. Each process writes a hyperslab
+ * of a data array to the file.
+ */
+static void
+write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
+{
+ int i, j, n, size;
+ int mpi_rank, mpi_size;
+ char dname[32];
+ DATATYPE *outme = NULL;
+ hid_t did;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ size = get_size();
+
+ outme = HDmalloc((size_t)size * (size_t)size * sizeof(double));
+ VRFY((outme != NULL), "HDmalloc succeeded for outme");
+
+ for (n = 0; n < NDATASET; n++) {
+ HDsnprintf(dname, sizeof(dname), "dataset%d", n);
+ did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((did > 0), dname);
+
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++)
+ outme[(i * size) + j] = n * 1000 + mpi_rank;
+
+ H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme);
+
+ /* create attribute for these datasets.*/
+ write_attribute(did, is_dset, n);
+
+ H5Dclose(did);
+ }
+ HDfree(outme);
+}
+
+/*
+ * Creates subgroups of depth GROUP_DEPTH recursively. Also writes datasets
+ * in parallel in each group.
+ */
+static void
+create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid, int counter)
+{
+ hid_t child_gid;
+ int mpi_rank;
+ char gname[64];
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+#ifdef BARRIER_CHECKS
+ if (!((counter + 1) % 10)) {
+ HDprintf("created %dth child groups\n", counter + 1);
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+#endif /* BARRIER_CHECKS */
+
+ HDsnprintf(gname, sizeof(gname), "%dth_child_group", counter + 1);
+ child_gid = H5Gcreate2(gid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((child_gid > 0), gname);
+
+ /* write datasets in parallel. */
+ write_dataset(memspace, filespace, gid);
+
+ if (counter < GROUP_DEPTH)
+ create_group_recursive(memspace, filespace, child_gid, counter + 1);
+
+ H5Gclose(child_gid);
+}
+
+/*
+ * This function verifies the data from the multiple group tests. It opens
+ * every dataset in every group and checks its correctness.
+ */
+void
+multiple_group_read(void)
+{
+ int mpi_rank, mpi_size, error_num, size;
+ int m;
+ char gname[64];
+ hid_t plist, fid, gid, memspace, filespace;
+ hsize_t chunk_origin[DIM];
+ hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
+#if 0
+ const H5Ptest_param_t *pt;
+#endif
+ char *filename;
+ int ngroups;
+
+#if 0
+ pt = GetTestParameters();
+#endif
+ /* filename = pt->name; */ filename = PARATESTFILE;
+ /* ngroups = pt->count; */ ngroups = NGROUPS;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, group, dataset, or attribute aren't supported with "
+ "this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ size = get_size();
+
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, plist);
+ H5Pclose(plist);
+
+ /* decide hyperslab for each process */
+ get_slab(chunk_origin, chunk_dims, count, file_dims, size);
+
+ /* select hyperslab for memory and file space */
+ memspace = H5Screate_simple(DIM, file_dims, NULL);
+ H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
+ filespace = H5Screate_simple(DIM, file_dims, NULL);
+ H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
+
+ /* open every group under root group. */
+ for (m = 0; m < ngroups; m++) {
+ HDsnprintf(gname, sizeof(gname), "group%d", m);
+ gid = H5Gopen2(fid, gname, H5P_DEFAULT);
+ VRFY((gid > 0), gname);
+
+ /* check the data. */
+ if (m != 0)
+ if ((error_num = read_dataset(memspace, filespace, gid)) > 0)
+ nerrors += error_num;
+
+ /* check attribute.*/
+ error_num = 0;
+ if ((error_num = read_attribute(gid, is_group, m)) > 0)
+ nerrors += error_num;
+
+ H5Gclose(gid);
+
+#ifdef BARRIER_CHECKS
+ if (!((m + 1) % 10))
+ MPI_Barrier(MPI_COMM_WORLD);
+#endif /* BARRIER_CHECKS */
+ }
+
+ /* open all the groups in vertical direction. */
+ gid = H5Gopen2(fid, "group0", H5P_DEFAULT);
+ VRFY((gid > 0), "group0");
+ recursive_read_group(memspace, filespace, gid, 0);
+ H5Gclose(gid);
+
+ H5Sclose(filespace);
+ H5Sclose(memspace);
+ H5Fclose(fid);
+}
+
+/*
+ * This function opens all the datasets in a certain group and checks that
+ * their data are correct.
+ */
+static int
+read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
+{
+ int i, j, n, mpi_rank, mpi_size, size, attr_errors = 0, vrfy_errors = 0;
+ char dname[32];
+ DATATYPE *outdata = NULL, *indata = NULL;
+ hid_t did;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ size = get_size();
+
+ indata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
+ VRFY((indata != NULL), "HDmalloc succeeded for indata");
+
+ outdata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
+ VRFY((outdata != NULL), "HDmalloc succeeded for outdata");
+
+ for (n = 0; n < NDATASET; n++) {
+ HDsnprintf(dname, sizeof(dname), "dataset%d", n);
+ did = H5Dopen2(gid, dname, H5P_DEFAULT);
+ VRFY((did > 0), dname);
+
+ H5Dread(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, indata);
+
+ /* this is the original value */
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++) {
+ *outdata = n * 1000 + mpi_rank;
+ outdata++;
+ }
+ outdata -= size * size;
+
+ /* compare the original value(outdata) to the value in file(indata).*/
+ vrfy_errors = check_value(indata, outdata, size);
+
+ /* check attribute.*/
+ if ((attr_errors = read_attribute(did, is_dset, n)) > 0)
+ vrfy_errors += attr_errors;
+
+ H5Dclose(did);
+ }
+
+ HDfree(indata);
+ HDfree(outdata);
+
+ return vrfy_errors;
+}
+
+/*
+ * This recursive function opens all the groups in vertical direction and
+ * checks the data.
+ */
+static void
+recursive_read_group(hid_t memspace, hid_t filespace, hid_t gid, int counter)
+{
+ hid_t child_gid;
+ int mpi_rank, err_num = 0;
+ char gname[64];
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+#ifdef BARRIER_CHECKS
+ if ((counter + 1) % 10)
+ MPI_Barrier(MPI_COMM_WORLD);
+#endif /* BARRIER_CHECKS */
+
+ if ((err_num = read_dataset(memspace, filespace, gid)))
+ nerrors += err_num;
+
+ if (counter < GROUP_DEPTH) {
+ HDsnprintf(gname, sizeof(gname), "%dth_child_group", counter + 1);
+ child_gid = H5Gopen2(gid, gname, H5P_DEFAULT);
+ VRFY((child_gid > 0), gname);
+ recursive_read_group(memspace, filespace, child_gid, counter + 1);
+ H5Gclose(child_gid);
+ }
+}
+
+/* Create and write an attribute for a group or a dataset. For a group, the
+ * attribute is a scalar datum; for a dataset, it is a one-dimensional array.
+ */
+static void
+write_attribute(hid_t obj_id, int this_type, int num)
+{
+ hid_t sid, aid;
+ hsize_t dspace_dims[1] = {8};
+ int i, mpi_rank, attr_data[8], dspace_rank = 1;
+ char attr_name[32];
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ if (this_type == is_group) {
+ HDsnprintf(attr_name, sizeof(attr_name), "Group Attribute %d", num);
+ sid = H5Screate(H5S_SCALAR);
+ aid = H5Acreate2(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ H5Awrite(aid, H5T_NATIVE_INT, &num);
+ H5Aclose(aid);
+ H5Sclose(sid);
+ } /* end if */
+ else if (this_type == is_dset) {
+ HDsnprintf(attr_name, sizeof(attr_name), "Dataset Attribute %d", num);
+ for (i = 0; i < 8; i++)
+ attr_data[i] = i;
+ sid = H5Screate_simple(dspace_rank, dspace_dims, NULL);
+ aid = H5Acreate2(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ H5Awrite(aid, H5T_NATIVE_INT, attr_data);
+ H5Aclose(aid);
+ H5Sclose(sid);
+ } /* end else-if */
+}
+
+/* Read and verify attribute for group or dataset. */
+static int
+read_attribute(hid_t obj_id, int this_type, int num)
+{
+ hid_t aid;
+ hsize_t group_block[2] = {1, 1}, dset_block[2] = {1, 8};
+ int i, mpi_rank, in_num, in_data[8], out_data[8], vrfy_errors = 0;
+ char attr_name[32];
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ if (this_type == is_group) {
+ HDsnprintf(attr_name, sizeof(attr_name), "Group Attribute %d", num);
+ aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT);
+ H5Aread(aid, H5T_NATIVE_INT, &in_num);
+ vrfy_errors = dataset_vrfy(NULL, NULL, NULL, group_block, &in_num, &num);
+ H5Aclose(aid);
+ }
+ else if (this_type == is_dset) {
+ HDsnprintf(attr_name, sizeof(attr_name), "Dataset Attribute %d", num);
+ for (i = 0; i < 8; i++)
+ out_data[i] = i;
+ aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT);
+ H5Aread(aid, H5T_NATIVE_INT, in_data);
+ vrfy_errors = dataset_vrfy(NULL, NULL, NULL, dset_block, in_data, out_data);
+ H5Aclose(aid);
+ }
+
+ return vrfy_errors;
+}
+
+/* This function compares the original data with the read-in data, but only
+ * for the hyperslab portion owned by this process (selected by process ID).
+ */
+static int
+check_value(DATATYPE *indata, DATATYPE *outdata, int size)
+{
+ int mpi_rank, mpi_size, err_num = 0;
+ hsize_t i, j;
+ hsize_t chunk_origin[DIM];
+ hsize_t chunk_dims[DIM], count[DIM];
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ get_slab(chunk_origin, chunk_dims, count, NULL, size);
+
+ indata += chunk_origin[0] * (hsize_t)size;
+ outdata += chunk_origin[0] * (hsize_t)size;
+ for (i = chunk_origin[0]; i < (chunk_origin[0] + chunk_dims[0]); i++)
+ for (j = chunk_origin[1]; j < (chunk_origin[1] + chunk_dims[1]); j++) {
+ if (*indata != *outdata)
+ if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col%lu): expect %d, got %d\n",
+ (unsigned long)i, (unsigned long)j, (unsigned long)i, (unsigned long)j, *outdata,
+ *indata);
+ }
+ if (err_num > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("[more errors ...]\n");
+ if (err_num)
+ HDprintf("%d errors found in check_value\n", err_num);
+ return err_num;
+}
+
+/* Decide which portion of the data chunk in the dataset belongs to this
+ * process, based on its process ID.
+ */
+
+static void
+get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[], hsize_t file_dims[], int size)
+{
+ int mpi_rank, mpi_size;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ if (chunk_origin != NULL) {
+ chunk_origin[0] = (hsize_t)mpi_rank * (hsize_t)(size / mpi_size);
+ chunk_origin[1] = 0;
+ }
+ if (chunk_dims != NULL) {
+ chunk_dims[0] = (hsize_t)(size / mpi_size);
+ chunk_dims[1] = (hsize_t)size;
+ }
+ if (file_dims != NULL)
+ file_dims[0] = file_dims[1] = (hsize_t)size;
+ if (count != NULL)
+ count[0] = count[1] = 1;
+}
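+
+/* Illustrative worked example (not part of the test): with size = 8 and
+ * mpi_size = 4, rank 2 gets chunk_origin = {4, 0}, chunk_dims = {2, 8},
+ * count = {1, 1}, and file_dims = {8, 8}; i.e. each process owns a
+ * contiguous band of size/mpi_size rows of the size x size dataset.
+ */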
+
+/*
+ * This function is based on bug demonstration code provided by Thomas
+ * Guignon (thomas.guignon@ifp.fr), and is intended to verify the
+ * correctness of my fix for that bug.
+ *
+ * In essence, the bug appeared when at least one process attempted to
+ * write a point selection -- for which collective I/O is not supported --
+ * and at least one other process attempted to write some other type of
+ * selection for which collective I/O is supported.
+ *
+ * Since the processes did not compare notes before performing the I/O,
+ * some would attempt collective I/O while others performed independent
+ * I/O. A hang resulted.
+ *
+ * This function reproduces this situation. At present the test hangs
+ * on failure.
+ * JRM - 9/13/04
+ */
+
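+/* The failure pattern described above, distilled from the body of
+ * io_mode_confusion() below (illustrative sketch only, using the identifiers
+ * that the function sets up):
+ *
+ *     if (mpi_rank == 0) {
+ *         H5Sselect_all(memspace);                                  // full memory selection
+ *         H5Sselect_elements(filespace, H5S_SELECT_SET, N, coord);  // point selection in file
+ *     }
+ *     else {
+ *         H5Sselect_none(memspace);
+ *         H5Sselect_none(filespace);                                // nothing to write
+ *     }
+ *     // Every rank then calls the collective write. Before the fix, ranks
+ *     // whose selection could not be handled collectively fell back to
+ *     // independent I/O without agreement from the others, and a hang resulted.
+ *     H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace, plist_id, data);
+ */
+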
+#define N 4
+
+void
+io_mode_confusion(void)
+{
+ /*
+ * HDF5 APIs definitions
+ */
+
+ const int rank = 1;
+ const char *dataset_name = "IntArray";
+
+ hid_t file_id, dset_id; /* file and dataset identifiers */
+ hid_t filespace, memspace; /* file and memory dataspace */
+ /* identifiers */
+ hsize_t dimsf[1]; /* dataset dimensions */
+ int data[N] = {1}; /* data buffer to write */
+ hsize_t coord[N] = {0L, 1L, 2L, 3L};
+ hid_t plist_id; /* property list identifier */
+ herr_t status;
+
+ /*
+ * MPI variables
+ */
+
+ int mpi_size, mpi_rank;
+
+ /*
+ * test bed related variables
+ */
+
+ const char *fcn_name = "io_mode_confusion";
+ const hbool_t verbose = FALSE;
+#if 0
+ const H5Ptest_param_t *pt;
+#endif
+ char *filename;
+
+#if 0
+ pt = GetTestParameters();
+#endif
+ /* filename = pt->name; */ filename = PARATESTFILE;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset, or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /*
+ * Set up file access property list with parallel I/O access
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Setting up property list.\n", mpi_rank, fcn_name);
+
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id != -1), "H5Pcreate() failed");
+
+ status = H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL);
+ VRFY((status >= 0), "H5Pset_fapl_mpio() failed");
+
+ /*
+ * Create a new file collectively and release property list identifier.
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Creating new file.\n", mpi_rank, fcn_name);
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
+ VRFY((file_id >= 0), "H5Fcreate() failed");
+
+ status = H5Pclose(plist_id);
+ VRFY((status >= 0), "H5Pclose() failed");
+
+ /*
+ * Create the dataspace for the dataset.
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Creating the dataspace for the dataset.\n", mpi_rank, fcn_name);
+
+ dimsf[0] = N;
+ filespace = H5Screate_simple(rank, dimsf, NULL);
+ VRFY((filespace >= 0), "H5Screate_simple() failed.");
+
+ /*
+ * Create the dataset with default properties and close filespace.
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Creating the dataset, and closing filespace.\n", mpi_rank, fcn_name);
+
+ dset_id =
+ H5Dcreate2(file_id, dataset_name, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate2() failed");
+
+ status = H5Sclose(filespace);
+ VRFY((status >= 0), "H5Sclose() failed");
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Screate_simple().\n", mpi_rank, fcn_name);
+
+ memspace = H5Screate_simple(rank, dimsf, NULL);
+ VRFY((memspace >= 0), "H5Screate_simple() failed.");
+
+ if (mpi_rank == 0) {
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Sselect_all(memspace).\n", mpi_rank, fcn_name);
+
+ status = H5Sselect_all(memspace);
+ VRFY((status >= 0), "H5Sselect_all() failed");
+ }
+ else {
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Sselect_none(memspace).\n", mpi_rank, fcn_name);
+
+ status = H5Sselect_none(memspace);
+ VRFY((status >= 0), "H5Sselect_none() failed");
+ }
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n", mpi_rank, fcn_name);
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Dget_space().\n", mpi_rank, fcn_name);
+
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "H5Dget_space() failed");
+
+ /* select all */
+ if (mpi_rank == 0) {
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Sselect_elements() -- set up hang?\n", mpi_rank, fcn_name);
+
+ status = H5Sselect_elements(filespace, H5S_SELECT_SET, N, (const hsize_t *)&coord);
+ VRFY((status >= 0), "H5Sselect_elements() failed");
+ }
+ else { /* select nothing */
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Sselect_none().\n", mpi_rank, fcn_name);
+
+ status = H5Sselect_none(filespace);
+ VRFY((status >= 0), "H5Sselect_none() failed");
+ }
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n", mpi_rank, fcn_name);
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Pcreate().\n", mpi_rank, fcn_name);
+
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id != -1), "H5Pcreate() failed");
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Pset_dxpl_mpio().\n", mpi_rank, fcn_name);
+
+ status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+ VRFY((status >= 0), "H5Pset_dxpl_mpio() failed");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ status = H5Pset_dxpl_mpio_collective_opt(plist_id, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((status >= 0), "set independent IO collectively succeeded");
+ }
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Dwrite() -- hang here?.\n", mpi_rank, fcn_name);
+
+ status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace, plist_id, data);
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Returned from H5Dwrite(), status=%d.\n", mpi_rank, fcn_name, status);
+ VRFY((status >= 0), "H5Dwrite() failed");
+
+ /*
+ * Close/release resources.
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Cleaning up from test.\n", mpi_rank, fcn_name);
+
+ status = H5Dclose(dset_id);
+ VRFY((status >= 0), "H5Dclose() failed");
+
+ status = H5Sclose(filespace);
+ VRFY((status >= 0), "H5Dclose() failed");
+
+ status = H5Sclose(memspace);
+ VRFY((status >= 0), "H5Sclose() failed");
+
+ status = H5Pclose(plist_id);
+ VRFY((status >= 0), "H5Pclose() failed");
+
+ status = H5Fclose(file_id);
+ VRFY((status >= 0), "H5Fclose() failed");
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name);
+
+ return;
+
+} /* io_mode_confusion() */
+
+#undef N
+
+/*
+ * At present, the object header code maintains an image of its on disk
+ * representation, which is updated as necessary instead of generated on
+ * request.
+ *
+ * Prior to the fix that this test is designed to verify, the image of the
+ * on disk representation was only updated on flush -- not when the object
+ * header was marked clean.
+ *
+ * This worked perfectly well as long as all writes of a given object
+ * header were performed by a single process. However, with the implementation
+ * of round robin metadata writes in parallel HDF5, this is no longer
+ * the case -- it is possible for a given object header to be flushed from
+ * several different processes, with the object header simply being marked
+ * clean in all other processes on each flush. This resulted in NULL or
+ * out of date object header information being written to disk.
+ *
+ * To repair this, I modified the object header code to update its
+ * on disk image both on flush and when marked clean.
+ *
+ * This test is directed at verifying that the fix performs as expected.
+ *
+ * The test functions by creating an HDF5 file with several small datasets,
+ * and then flushing the file. This should result in at least one of
+ * the associated object headers being flushed by a process other than
+ * process 0.
+ *
+ * Then, for each dataset, an attribute is added and the file is flushed again.
+ *
+ * The file is then closed and re-opened.
+ *
+ * Finally, each of the datasets is opened in turn. If all opens are
+ * successful, the test passes. Otherwise the test fails.
+ *
+ * Note that this test will probably become irrelevant shortly, when we
+ * land the journaling modifications on the trunk -- at which point all
+ * cache clients will have to construct on disk images on demand.
+ *
+ * JRM -- 10/13/10
+ */
+
+#define NUM_DATA_SETS 4
+#define LOCAL_DATA_SIZE 4
+#define LARGE_ATTR_SIZE 256
+/* Since the even and odd ranked processes are split into the writer and
+ * reader communicators respectively, processes 0 and 1 of MPI_COMM_WORLD
+ * become the root processes of the writer and reader communicators
+ * respectively.
+ */
+#define Writer_Root 0
+#define Reader_Root 1
+#define Reader_wait(mpi_err, xsteps) mpi_err = MPI_Bcast(&xsteps, 1, MPI_INT, Writer_Root, MPI_COMM_WORLD)
+#define Reader_result(mpi_err, xsteps_done) \
+ mpi_err = MPI_Bcast(&xsteps_done, 1, MPI_INT, Reader_Root, MPI_COMM_WORLD)
+#define Reader_check(mpi_err, xsteps, xsteps_done) \
+ { \
+ Reader_wait(mpi_err, xsteps); \
+ Reader_result(mpi_err, xsteps_done); \
+ }
+
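+/* How the writer/reader handshake built from these macros works, in outline
+ * (illustrative sketch; `steps` and `steps_done` mirror the variables used in
+ * the writer and reader functions below):
+ *
+ *     // writer side (even ranks), after each writing step and flush:
+ *     steps++;
+ *     Reader_check(mrc, steps, steps_done);  // broadcast steps, then wait for the readers' result
+ *
+ *     // reader side (odd ranks), looping until steps == 0:
+ *     Reader_wait(mrc, steps);               // learn how many steps should now be verifiable
+ *     ... re-open the file and verify the first `steps` writing steps ...
+ *     Reader_result(mrc, steps_done);        // report how many steps were actually verified
+ */
+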
+/* object names used by both rr_obj_hdr_flush_confusion and
+ * rr_obj_hdr_flush_confusion_reader.
+ */
+const char *dataset_name[NUM_DATA_SETS] = {"dataset_0", "dataset_1", "dataset_2", "dataset_3"};
+const char *att_name[NUM_DATA_SETS] = {"attribute_0", "attribute_1", "attribute_2", "attribute_3"};
+const char *lg_att_name[NUM_DATA_SETS] = {"large_attribute_0", "large_attribute_1", "large_attribute_2",
+ "large_attribute_3"};
+
+void
+rr_obj_hdr_flush_confusion(void)
+{
+ /* MPI variables */
+ /* private communicator size and rank */
+ int mpi_size;
+ int mpi_rank;
+ int mrc; /* mpi error code */
+ int is_reader; /* 1 for reader process; 0 for writer process. */
+ MPI_Comm comm;
+
+ /* test bed related variables */
+ const char *fcn_name = "rr_obj_hdr_flush_confusion";
+ const hbool_t verbose = FALSE;
+
+ /* Create two new private communicators from MPI_COMM_WORLD.
+ * Even and odd ranked processes go to comm_writers and comm_readers
+ * respectively.
+ */
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset, attribute, dataset more, attribute more, or "
+ "file flush aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ HDassert(mpi_size > 2);
+
+ is_reader = mpi_rank % 2;
+ mrc = MPI_Comm_split(MPI_COMM_WORLD, is_reader, mpi_rank, &comm);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_split");
+
+ /* The reader processes branch off to do the reading
+ * while the writer processes continue to do the writing.
+ * Whenever the writers finish one writing step, including an H5Fflush,
+ * they inform the readers, via MPI_COMM_WORLD, to verify.
+ * They wait for the result from the readers before doing the next
+ * step. When all steps are done, they inform the readers to end.
+ */
+ if (is_reader)
+ rr_obj_hdr_flush_confusion_reader(comm);
+ else
+ rr_obj_hdr_flush_confusion_writer(comm);
+
+ MPI_Comm_free(&comm);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name);
+
+ return;
+
+} /* rr_obj_hdr_flush_confusion() */
+
+void
+rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
+{
+ int i;
+ int j;
+ hid_t file_id = -1;
+ hid_t fapl_id = -1;
+ hid_t dxpl_id = -1;
+ hid_t att_id[NUM_DATA_SETS];
+ hid_t att_space[NUM_DATA_SETS];
+ hid_t lg_att_id[NUM_DATA_SETS];
+ hid_t lg_att_space[NUM_DATA_SETS];
+ hid_t disk_space[NUM_DATA_SETS];
+ hid_t mem_space[NUM_DATA_SETS];
+ hid_t dataset[NUM_DATA_SETS];
+ hsize_t att_size[1];
+ hsize_t lg_att_size[1];
+ hsize_t disk_count[1];
+ hsize_t disk_size[1];
+ hsize_t disk_start[1];
+ hsize_t mem_count[1];
+ hsize_t mem_size[1];
+ hsize_t mem_start[1];
+ herr_t err;
+ double data[LOCAL_DATA_SIZE];
+ double att[LOCAL_DATA_SIZE];
+ double lg_att[LARGE_ATTR_SIZE];
+
+ /* MPI variables */
+ /* world communication size and rank */
+ int mpi_world_size;
+ int mpi_world_rank;
+ /* private communicator size and rank */
+ int mpi_size;
+ int mpi_rank;
+ int mrc; /* mpi error code */
+ /* steps to verify and have been verified */
+ int steps = 0;
+ int steps_done = 0;
+
+ /* test bed related variables */
+ const char *fcn_name = "rr_obj_hdr_flush_confusion_writer";
+ const hbool_t verbose = FALSE;
+#if 0
+ const H5Ptest_param_t *pt;
+#endif
+ char *filename;
+
+ /*
+ * setup test bed related variables:
+ */
+
+#if 0
+ pt = (const H5Ptest_param_t *)GetTestParameters();
+#endif
+ /* filename = pt->name; */ filename = PARATESTFILE;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_world_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+ MPI_Comm_size(comm, &mpi_size);
+
+ /*
+ * Set up file access property list with parallel I/O access
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Setting up property list.\n", mpi_rank, fcn_name);
+
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed");
+
+ err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL);
+ VRFY((err >= 0), "H5Pset_fapl_mpio() failed");
+
+ /*
+ * Create a new file collectively and release property list identifier.
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Creating new file \"%s\".\n", mpi_rank, fcn_name, filename);
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "H5Fcreate() failed");
+
+ err = H5Pclose(fapl_id);
+ VRFY((err >= 0), "H5Pclose(fapl_id) failed");
+
+ /*
+ * Step 1: create the data sets and write data.
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Creating the datasets.\n", mpi_rank, fcn_name);
+
+ disk_size[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_size);
+ mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE);
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+
+ disk_space[i] = H5Screate_simple(1, disk_size, NULL);
+ VRFY((disk_space[i] >= 0), "H5Screate_simple(1) failed.\n");
+
+ dataset[i] = H5Dcreate2(file_id, dataset_name[i], H5T_NATIVE_DOUBLE, disk_space[i], H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
+
+ VRFY((dataset[i] >= 0), "H5Dcreate(1) failed.\n");
+ }
+
+ /*
+ * setup data transfer property list
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name);
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n");
+
+ err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
+ VRFY((err >= 0), "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n");
+
+ /*
+ * write data to the data sets
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Writing datasets.\n", mpi_rank, fcn_name);
+
+ disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
+ disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank);
+ mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
+ mem_start[0] = (hsize_t)(0);
+
+ for (j = 0; j < LOCAL_DATA_SIZE; j++) {
+ data[j] = (double)(mpi_rank + 1);
+ }
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start, NULL, disk_count, NULL);
+ VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n");
+ mem_space[i] = H5Screate_simple(1, mem_size, NULL);
+ VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n");
+ err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET, mem_start, NULL, mem_count, NULL);
+ VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n");
+ err = H5Dwrite(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i], disk_space[i], dxpl_id, data);
+ VRFY((err >= 0), "H5Dwrite(1) failed.\n");
+ for (j = 0; j < LOCAL_DATA_SIZE; j++)
+ data[j] *= 10.0;
+ }
+
+ /*
+ * close the data spaces
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name);
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ err = H5Sclose(disk_space[i]);
+ VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n");
+ err = H5Sclose(mem_space[i]);
+ VRFY((err >= 0), "H5Sclose(mem_space[i]) failed.\n");
+ }
+
+ /* End of Step 1: create the data sets and write data. */
+
+ /*
+ * flush the metadata cache
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name);
+ err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((err >= 0), "H5Fflush(1) failed.\n");
+
+ /* Tell the reader to check the file up to steps. */
+ steps++;
+ Reader_check(mrc, steps, steps_done);
+ VRFY((MPI_SUCCESS == mrc), "Reader_check failed");
+
+ /*
+ * Step 2: write attributes to each dataset
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: writing attributes.\n", mpi_rank, fcn_name);
+
+ att_size[0] = (hsize_t)(LOCAL_DATA_SIZE);
+ for (j = 0; j < LOCAL_DATA_SIZE; j++) {
+ att[j] = (double)(j + 1);
+ }
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ att_space[i] = H5Screate_simple(1, att_size, NULL);
+ VRFY((att_space[i] >= 0), "H5Screate_simple(3) failed.\n");
+ att_id[i] =
+ H5Acreate2(dataset[i], att_name[i], H5T_NATIVE_DOUBLE, att_space[i], H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((att_id[i] >= 0), "H5Acreate(1) failed.\n");
+ err = H5Awrite(att_id[i], H5T_NATIVE_DOUBLE, att);
+ VRFY((err >= 0), "H5Awrite(1) failed.\n");
+ for (j = 0; j < LOCAL_DATA_SIZE; j++) {
+ att[j] /= 10.0;
+ }
+ }
+
+ /*
+ * close attribute IDs and spaces
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing attr ids and spaces .\n", mpi_rank, fcn_name);
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ err = H5Sclose(att_space[i]);
+ VRFY((err >= 0), "H5Sclose(att_space[i]) failed.\n");
+ err = H5Aclose(att_id[i]);
+ VRFY((err >= 0), "H5Aclose(att_id[i]) failed.\n");
+ }
+
+ /* End of Step 2: write attributes to each dataset */
+
+ /*
+ * flush the metadata cache again
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name);
+ err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((err >= 0), "H5Fflush(2) failed.\n");
+
+ /* Tell the reader to check the file up to steps. */
+ steps++;
+ Reader_check(mrc, steps, steps_done);
+ VRFY((MPI_SUCCESS == mrc), "Reader_check failed");
+
+ /*
+ * Step 3: write large attributes to each dataset
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: writing large attributes.\n", mpi_rank, fcn_name);
+
+ lg_att_size[0] = (hsize_t)(LARGE_ATTR_SIZE);
+
+ for (j = 0; j < LARGE_ATTR_SIZE; j++) {
+ lg_att[j] = (double)(j + 1);
+ }
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ lg_att_space[i] = H5Screate_simple(1, lg_att_size, NULL);
+ VRFY((lg_att_space[i] >= 0), "H5Screate_simple(4) failed.\n");
+ lg_att_id[i] = H5Acreate2(dataset[i], lg_att_name[i], H5T_NATIVE_DOUBLE, lg_att_space[i], H5P_DEFAULT,
+ H5P_DEFAULT);
+ VRFY((lg_att_id[i] >= 0), "H5Acreate(2) failed.\n");
+ err = H5Awrite(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att);
+ VRFY((err >= 0), "H5Awrite(2) failed.\n");
+ for (j = 0; j < LARGE_ATTR_SIZE; j++) {
+ lg_att[j] /= 10.0;
+ }
+ }
+
+ /* End of Step 3: write large attributes to each dataset */
+
+ /*
+ * flush the metadata cache yet again to clean the object headers.
+ *
+ * This is an attempt to create a situation where we have dirty
+ * object header continuation chunks, but clean object headers
+ * to verify a speculative bug fix -- it doesn't seem to work,
+ * but I will leave the code in anyway, as the object header
+ * code is going to change a lot in the near future.
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name);
+ err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((err >= 0), "H5Fflush(3) failed.\n");
+
+ /* Tell the reader to check the file up to steps. */
+ steps++;
+ Reader_check(mrc, steps, steps_done);
+ VRFY((MPI_SUCCESS == mrc), "Reader_check failed");
+
+ /*
+ * Step 4: write different large attributes to each dataset
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: writing different large attributes.\n", mpi_rank, fcn_name);
+
+ for (j = 0; j < LARGE_ATTR_SIZE; j++) {
+ lg_att[j] = (double)(j + 2);
+ }
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ err = H5Awrite(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att);
+ VRFY((err >= 0), "H5Awrite(2) failed.\n");
+ for (j = 0; j < LARGE_ATTR_SIZE; j++) {
+ lg_att[j] /= 10.0;
+ }
+ }
+
+ /* End of Step 4: write different large attributes to each dataset */
+
+ /*
+ * flush the metadata cache again
+ */
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name);
+ err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((err >= 0), "H5Fflush(3) failed.\n");
+
+ /* Tell the reader to check the file up to steps. */
+ steps++;
+ Reader_check(mrc, steps, steps_done);
+ VRFY((MPI_SUCCESS == mrc), "Reader_check failed");
+
+ /* Step 5: Close all objects and the file */
+
+ /*
+ * close large attribute IDs and spaces
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing large attr ids and spaces .\n", mpi_rank, fcn_name);
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+
+ err = H5Sclose(lg_att_space[i]);
+ VRFY((err >= 0), "H5Sclose(lg_att_space[i]) failed.\n");
+ err = H5Aclose(lg_att_id[i]);
+ VRFY((err >= 0), "H5Aclose(lg_att_id[i]) failed.\n");
+ }
+
+ /*
+ * close the data sets
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing datasets .\n", mpi_rank, fcn_name);
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ err = H5Dclose(dataset[i]);
+ VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n");
+ }
+
+ /*
+ * close the data transfer property list.
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name);
+
+ err = H5Pclose(dxpl_id);
+ VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n");
+
+ /*
+ * Close file.
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing file.\n", mpi_rank, fcn_name);
+
+ err = H5Fclose(file_id);
+ VRFY((err >= 0), "H5Fclose(1) failed");
+
+ /* End of Step 5: Close all objects and the file */
+ /* Tell the reader to check the file up to steps. */
+ steps++;
+ Reader_check(mrc, steps, steps_done);
+ VRFY((MPI_SUCCESS == mrc), "Reader_check failed");
+
+ /* All done. Inform reader to end. */
+ steps = 0;
+ Reader_check(mrc, steps, steps_done);
+ VRFY((MPI_SUCCESS == mrc), "Reader_check failed");
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name);
+
+ return;
+
+} /* rr_obj_hdr_flush_confusion_writer() */
+
+void
+rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
+{
+ int i;
+ int j;
+ hid_t file_id = -1;
+ hid_t fapl_id = -1;
+ hid_t dxpl_id = -1;
+ hid_t lg_att_id[NUM_DATA_SETS];
+ hid_t lg_att_type[NUM_DATA_SETS];
+ hid_t disk_space[NUM_DATA_SETS];
+ hid_t mem_space[NUM_DATA_SETS];
+ hid_t dataset[NUM_DATA_SETS];
+ hsize_t disk_count[1];
+ hsize_t disk_start[1];
+ hsize_t mem_count[1];
+ hsize_t mem_size[1];
+ hsize_t mem_start[1];
+ herr_t err;
+ htri_t tri_err;
+ double data[LOCAL_DATA_SIZE];
+ double data_read[LOCAL_DATA_SIZE];
+ double att[LOCAL_DATA_SIZE];
+ double att_read[LOCAL_DATA_SIZE];
+ double lg_att[LARGE_ATTR_SIZE];
+ double lg_att_read[LARGE_ATTR_SIZE];
+
+ /* MPI variables */
+ /* world communication size and rank */
+ int mpi_world_size;
+ int mpi_world_rank;
+ /* private communicator size and rank */
+ int mpi_size;
+ int mpi_rank;
+ int mrc; /* mpi error code */
+ int steps = -1; /* How far (steps) to verify the file */
+ int steps_done = -1; /* How far (steps) have been verified */
+
+ /* test bed related variables */
+ const char *fcn_name = "rr_obj_hdr_flush_confusion_reader";
+ const hbool_t verbose = FALSE;
+#if 0
+ const H5Ptest_param_t *pt;
+#endif
+ char *filename;
+
+ /*
+ * setup test bed related variables:
+ */
+
+#if 0
+ pt = (const H5Ptest_param_t *)GetTestParameters();
+#endif
+ /* filename = pt->name; */ filename = PARATESTFILE;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_world_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+ MPI_Comm_size(comm, &mpi_size);
+
+ /* Repeatedly re-open the file and verify its contents until it is */
+ /* told to end (when steps=0). */
+ while (steps_done != 0) {
+ Reader_wait(mrc, steps);
+ VRFY((mrc >= 0), "Reader_wait failed");
+ steps_done = 0;
+
+ if (steps > 0) {
+ /*
+ * Set up file access property list with parallel I/O access
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Setting up property list.\n", mpi_rank, fcn_name);
+
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed");
+ err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL);
+ VRFY((err >= 0), "H5Pset_fapl_mpio() failed");
+
+ /*
+ * Re-open the file collectively and release the property list identifier.
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Re-open file \"%s\".\n", mpi_rank, fcn_name, filename);
+
+ file_id = H5Fopen(filename, H5F_ACC_RDONLY, fapl_id);
+ VRFY((file_id >= 0), "H5Fopen() failed");
+ err = H5Pclose(fapl_id);
+ VRFY((err >= 0), "H5Pclose(fapl_id) failed");
+
+#if 1
+ if (steps >= 1) {
+ /*=====================================================*
+ * Step 1: open the data sets and read data.
+ *=====================================================*/
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: opening the datasets.\n", mpi_rank, fcn_name);
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ dataset[i] = -1;
+ }
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ dataset[i] = H5Dopen2(file_id, dataset_name[i], H5P_DEFAULT);
+ VRFY((dataset[i] >= 0), "H5Dopen(1) failed.\n");
+ disk_space[i] = H5Dget_space(dataset[i]);
+ VRFY((disk_space[i] >= 0), "H5Dget_space failed.\n");
+ }
+
+ /*
+ * setup data transfer property list
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name);
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n");
+ err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
+ VRFY((err >= 0), "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n");
+
+ /*
+ * read data from the data sets
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Reading datasets.\n", mpi_rank, fcn_name);
+
+ disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
+ disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank);
+
+ mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE);
+
+ mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
+ mem_start[0] = (hsize_t)(0);
+
+ /* set up expected data for verification */
+ for (j = 0; j < LOCAL_DATA_SIZE; j++) {
+ data[j] = (double)(mpi_rank + 1);
+ }
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start, NULL, disk_count,
+ NULL);
+ VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n");
+ mem_space[i] = H5Screate_simple(1, mem_size, NULL);
+ VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n");
+ err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET, mem_start, NULL, mem_count, NULL);
+ VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n");
+ err = H5Dread(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i], disk_space[i], dxpl_id,
+ data_read);
+ VRFY((err >= 0), "H5Dread(1) failed.\n");
+
+ /* compare read data with expected data */
+ for (j = 0; j < LOCAL_DATA_SIZE; j++)
+ if (!H5_DBL_ABS_EQUAL(data_read[j], data[j])) {
+ HDfprintf(stdout,
+ "%0d:%s: Reading datasets value failed in "
+ "Dataset %d, at position %d: expect %f, got %f.\n",
+ mpi_rank, fcn_name, i, j, data[j], data_read[j]);
+ nerrors++;
+ }
+ for (j = 0; j < LOCAL_DATA_SIZE; j++)
+ data[j] *= 10.0;
+ }
+
+ /*
+ * close the data spaces
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name);
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ err = H5Sclose(disk_space[i]);
+ VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n");
+ err = H5Sclose(mem_space[i]);
+ VRFY((err >= 0), "H5Sclose(mem_space[i]) failed.\n");
+ }
+ steps_done++;
+ }
+ /* End of Step 1: open the data sets and read data. */
+#endif
+
+#if 1
+ /*=====================================================*
+ * Step 2: reading attributes from each dataset
+ *=====================================================*/
+
+ if (steps >= 2) {
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: reading attributes.\n", mpi_rank, fcn_name);
+
+ for (j = 0; j < LOCAL_DATA_SIZE; j++) {
+ att[j] = (double)(j + 1);
+ }
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ hid_t att_id, att_type;
+
+ att_id = H5Aopen(dataset[i], att_name[i], H5P_DEFAULT);
+ VRFY((att_id >= 0), "H5Aopen failed.\n");
+ att_type = H5Aget_type(att_id);
+ VRFY((att_type >= 0), "H5Aget_type failed.\n");
+ tri_err = H5Tequal(att_type, H5T_NATIVE_DOUBLE);
+ VRFY((tri_err >= 0), "H5Tequal failed.\n");
+ if (tri_err == 0) {
+ HDfprintf(stdout, "%0d:%s: Mismatched Attribute type of Dataset %d.\n", mpi_rank,
+ fcn_name, i);
+ nerrors++;
+ }
+ else {
+ /* should verify attribute size before H5Aread */
+ err = H5Aread(att_id, H5T_NATIVE_DOUBLE, att_read);
+ VRFY((err >= 0), "H5Aread failed.\n");
+ /* compare read attribute data with expected data */
+ for (j = 0; j < LOCAL_DATA_SIZE; j++)
+ if (!H5_DBL_ABS_EQUAL(att_read[j], att[j])) {
+ HDfprintf(stdout,
+ "%0d:%s: Mismatched attribute data read in Dataset %d, at position "
+ "%d: expect %f, got %f.\n",
+ mpi_rank, fcn_name, i, j, att[j], att_read[j]);
+ nerrors++;
+ }
+ for (j = 0; j < LOCAL_DATA_SIZE; j++) {
+ att[j] /= 10.0;
+ }
+ }
+ err = H5Aclose(att_id);
+ VRFY((err >= 0), "H5Aclose failed.\n");
+ }
+ steps_done++;
+ }
+ /* End of Step 2: reading attributes from each dataset */
+#endif
+
+#if 1
+ /*=====================================================*
+ * Step 3 or 4: read large attributes from each dataset.
+ * Step 4 has different attribute value from step 3.
+ *=====================================================*/
+
+ if (steps >= 3) {
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: reading large attributes.\n", mpi_rank, fcn_name);
+
+ for (j = 0; j < LARGE_ATTR_SIZE; j++) {
+ lg_att[j] = (steps == 3) ? (double)(j + 1) : (double)(j + 2);
+ }
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ lg_att_id[i] = H5Aopen(dataset[i], lg_att_name[i], H5P_DEFAULT);
+ VRFY((lg_att_id[i] >= 0), "H5Aopen(2) failed.\n");
+ lg_att_type[i] = H5Aget_type(lg_att_id[i]);
+ VRFY((err >= 0), "H5Aget_type failed.\n");
+ tri_err = H5Tequal(lg_att_type[i], H5T_NATIVE_DOUBLE);
+ VRFY((tri_err >= 0), "H5Tequal failed.\n");
+ if (tri_err == 0) {
+ HDfprintf(stdout, "%0d:%s: Mismatched Large attribute type of Dataset %d.\n",
+ mpi_rank, fcn_name, i);
+ nerrors++;
+ }
+ else {
+ /* should verify large attribute size before H5Aread */
+ err = H5Aread(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att_read);
+ VRFY((err >= 0), "H5Aread failed.\n");
+ /* compare read attribute data with expected data */
+ for (j = 0; j < LARGE_ATTR_SIZE; j++)
+ if (!H5_DBL_ABS_EQUAL(lg_att_read[j], lg_att[j])) {
+ HDfprintf(stdout,
+ "%0d:%s: Mismatched large attribute data read in Dataset %d, at "
+ "position %d: expect %f, got %f.\n",
+ mpi_rank, fcn_name, i, j, lg_att[j], lg_att_read[j]);
+ nerrors++;
+ }
+ for (j = 0; j < LARGE_ATTR_SIZE; j++) {
+
+ lg_att[j] /= 10.0;
+ }
+ }
+ err = H5Tclose(lg_att_type[i]);
+ VRFY((err >= 0), "H5Tclose failed.\n");
+ err = H5Aclose(lg_att_id[i]);
+ VRFY((err >= 0), "H5Aclose failed.\n");
+ }
+ /* Both step 3 and 4 use this same read checking code. */
+ steps_done = (steps == 3) ? 3 : 4;
+ }
+
+ /* End of Step 3 or 4: read large attributes from each dataset */
+#endif
+
+ /*=====================================================*
+ * Step 5: read all objects from the file
+ *=====================================================*/
+ if (steps >= 5) {
+ /* nothing extra to verify. The file is closed normally. */
+ /* Just increment steps_done */
+ steps_done++;
+ }
+
+ /*
+ * Close the data sets
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing datasets again.\n", mpi_rank, fcn_name);
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ if (dataset[i] >= 0) {
+ err = H5Dclose(dataset[i]);
+ VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n");
+ }
+ }
+
+ /*
+ * close the data transfer property list.
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name);
+
+ err = H5Pclose(dxpl_id);
+ VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n");
+
+ /*
+ * Close the file
+ */
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing file again.\n", mpi_rank, fcn_name);
+ err = H5Fclose(file_id);
+ VRFY((err >= 0), "H5Fclose(1) failed");
+
+ } /* end if (steps > 0) */
+ Reader_result(mrc, steps_done);
+ } /* end while (steps_done != 0) */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name);
+
+ return;
+} /* rr_obj_hdr_flush_confusion_reader() */
+
+#undef NUM_DATA_SETS
+#undef LOCAL_DATA_SIZE
+#undef LARGE_ATTR_SIZE
+#undef Reader_check
+#undef Reader_wait
+#undef Reader_result
+#undef Writer_Root
+#undef Reader_Root
+
+/*
+ * Test creating a chunked dataset in parallel in a file with an alignment set
+ * and an alignment threshold large enough to avoid aligning the chunks but
+ * small enough that the raw data aggregator will be aligned if it is treated as
+ * an object that must be aligned by the library
+ */
+#define CHUNK_SIZE 72
+#define NCHUNKS 32
+#define AGGR_SIZE 2048
+#define EXTRA_ALIGN 100
+
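+/* Background for the constants above (a sketch of the documented
+ * H5Pset_alignment semantics, not part of the test): H5Pset_alignment(fapl,
+ * threshold, alignment) makes any file object whose size is >= threshold
+ * start at a file address that is a multiple of alignment. With a threshold
+ * of CHUNK_SIZE + 1 = 73 bytes, the 72-byte chunks escape alignment, while
+ * the 2048-byte raw data aggregator block exceeds the threshold and would be
+ * aligned if the library (incorrectly) treated it as an object subject to
+ * alignment. Note that the H5Pset_alignment call itself is disabled (#if 0)
+ * in this API-test copy of the test.
+ */
+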
+void
+chunk_align_bug_1(void)
+{
+ int mpi_rank;
+ hid_t file_id, dset_id, fapl_id, dcpl_id, space_id;
+ hsize_t dims = CHUNK_SIZE * NCHUNKS, cdims = CHUNK_SIZE;
+#if 0
+ h5_stat_size_t file_size;
+ hsize_t align;
+#endif
+ herr_t ret;
+ const char *filename;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ filename = (const char *)PARATESTFILE /* GetTestParameters() */;
+
+ /* Create file without alignment */
+ fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+ /* Close file */
+ ret = H5Fclose(file_id);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+#if 0
+ /* Get file size */
+ file_size = h5_get_file_size(filename, fapl_id);
+ VRFY((file_size >= 0), "h5_get_file_size succeeded");
+
+ /* Calculate alignment value, set to allow a chunk to squeak in between the
+ * original EOF and the aligned location of the aggregator. Add some space
+ * for the dataset metadata */
+ align = (hsize_t)file_size + CHUNK_SIZE + EXTRA_ALIGN;
+#endif
+
+ /* Set aggregator size and alignment, disable metadata aggregator */
+ HDassert(AGGR_SIZE > CHUNK_SIZE);
+ ret = H5Pset_small_data_block_size(fapl_id, AGGR_SIZE);
+ VRFY((ret >= 0), "H5Pset_small_data_block_size succeeded");
+ ret = H5Pset_meta_block_size(fapl_id, 0);
+ VRFY((ret >= 0), "H5Pset_meta_block_size succeeded");
+#if 0
+ ret = H5Pset_alignment(fapl_id, CHUNK_SIZE + 1, align);
+ VRFY((ret >= 0), "H5Pset_small_data_block_size succeeded");
+#endif
+
+ /* Reopen file with new settings */
+ file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "H5Fopen succeeded");
+
+ /* Create dataset */
+ space_id = H5Screate_simple(1, &dims, NULL);
+ VRFY((space_id >= 0), "H5Screate_simple succeeded");
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
+ ret = H5Pset_chunk(dcpl_id, 1, &cdims);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+ dset_id = H5Dcreate2(file_id, "dset", H5T_NATIVE_CHAR, space_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
+
+ /* Close ids */
+ ret = H5Dclose(dset_id);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Sclose(space_id);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Pclose(dcpl_id);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Pclose(fapl_id);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* Close file */
+ ret = H5Fclose(file_id);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+ return;
+} /* end chunk_align_bug_1() */
+
+/*=============================================================================
+ * End of t_mdset.c
+ *===========================================================================*/
diff --git a/testpar/API/t_ph5basic.c b/testpar/API/t_ph5basic.c
new file mode 100644
index 0000000..1639aff
--- /dev/null
+++ b/testpar/API/t_ph5basic.c
@@ -0,0 +1,192 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Test parallel HDF5 basic components
+ */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+/*-------------------------------------------------------------------------
+ * Function: test_fapl_mpio_dup
+ *
+ * Purpose: Test whether the fapl_mpio property list keeps duplicates of
+ * the communicator and INFO objects given when it is set, and
+ * returns duplicates of those components when H5Pget_fapl_mpio
+ * is called.
+ *
+ * Return: Success: None
+ * Failure: Abort
+ *
+ * Programmer: Albert Cheng
+ * January 9, 2003
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+test_fapl_mpio_dup(void)
+{
+ int mpi_size, mpi_rank;
+ MPI_Comm comm, comm_tmp;
+ int mpi_size_old, mpi_rank_old;
+ int mpi_size_tmp, mpi_rank_tmp;
+ MPI_Info info = MPI_INFO_NULL;
+ MPI_Info info_tmp = MPI_INFO_NULL;
+ int mrc; /* MPI return value */
+ hid_t acc_pl; /* File access properties */
+ herr_t ret; /* HDF5 return value */
+ int nkeys, nkeys_tmp;
+
+ if (VERBOSE_MED)
+ HDprintf("Verify fapl_mpio duplicates communicator and INFO objects\n");
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ if (VERBOSE_MED)
+ HDprintf("rank/size of MPI_COMM_WORLD are %d/%d\n", mpi_rank, mpi_size);
+
+ /* Create a new communicator that has the same processes as MPI_COMM_WORLD.
+ * Use MPI_Comm_split because it is simpler than MPI_Comm_create
+ */
+ mrc = MPI_Comm_split(MPI_COMM_WORLD, 0, 0, &comm);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_split");
+ MPI_Comm_size(comm, &mpi_size_old);
+ MPI_Comm_rank(comm, &mpi_rank_old);
+ if (VERBOSE_MED)
+ HDprintf("rank/size of comm are %d/%d\n", mpi_rank_old, mpi_size_old);
+
+ /* create a new INFO object with some trivial information. */
+ mrc = MPI_Info_create(&info);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_create");
+ mrc = MPI_Info_set(info, "hdf_info_name", "XYZ");
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_set");
+ if (MPI_INFO_NULL != info) {
+ mrc = MPI_Info_get_nkeys(info, &nkeys);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys");
+ }
+#if 0
+ if (VERBOSE_MED)
+ h5_dump_info_object(info);
+#endif
+
+ acc_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((acc_pl >= 0), "H5P_FILE_ACCESS");
+
+ ret = H5Pset_fapl_mpio(acc_pl, comm, info);
+ VRFY((ret >= 0), "");
+
+ /* Case 1:
+ * Free the created communicator and INFO object.
+ * Check if the access property list is still valid and can return
+ * valid communicator and INFO object.
+ */
+ mrc = MPI_Comm_free(&comm);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free");
+ if (MPI_INFO_NULL != info) {
+ mrc = MPI_Info_free(&info);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_free");
+ }
+
+ ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, &info_tmp);
+ VRFY((ret >= 0), "H5Pget_fapl_mpio");
+ MPI_Comm_size(comm_tmp, &mpi_size_tmp);
+ MPI_Comm_rank(comm_tmp, &mpi_rank_tmp);
+ if (VERBOSE_MED)
+ HDprintf("After H5Pget_fapl_mpio: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp);
+ VRFY((mpi_size_tmp == mpi_size), "MPI_Comm_size");
+ VRFY((mpi_rank_tmp == mpi_rank), "MPI_Comm_rank");
+ if (MPI_INFO_NULL != info_tmp) {
+ mrc = MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys");
+ VRFY((nkeys_tmp == nkeys), "new and old nkeys equal");
+ }
+#if 0
+ if (VERBOSE_MED)
+ h5_dump_info_object(info_tmp);
+#endif
+
+ /* Case 2:
+ * Free the retrieved communicator and INFO object.
+ * Check if the access property list is still valid and can return
+ * valid communicator and INFO object.
+ * Also verify the NULL argument option.
+ */
+ mrc = MPI_Comm_free(&comm_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free");
+ if (MPI_INFO_NULL != info_tmp) {
+ mrc = MPI_Info_free(&info_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_free");
+ }
+
+ /* check NULL argument options. */
+ ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, NULL);
+ VRFY((ret >= 0), "H5Pget_fapl_mpio Comm only");
+ mrc = MPI_Comm_free(&comm_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free");
+
+ ret = H5Pget_fapl_mpio(acc_pl, NULL, &info_tmp);
+ VRFY((ret >= 0), "H5Pget_fapl_mpio Info only");
+ if (MPI_INFO_NULL != info_tmp) {
+ mrc = MPI_Info_free(&info_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_free");
+ }
+
+ ret = H5Pget_fapl_mpio(acc_pl, NULL, NULL);
+ VRFY((ret >= 0), "H5Pget_fapl_mpio neither");
+
+ /* now get both and check validity too. */
+ /* Do not free the returned objects which are used in the next case. */
+ ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, &info_tmp);
+ VRFY((ret >= 0), "H5Pget_fapl_mpio");
+ MPI_Comm_size(comm_tmp, &mpi_size_tmp);
+ MPI_Comm_rank(comm_tmp, &mpi_rank_tmp);
+ if (VERBOSE_MED)
+ HDprintf("After second H5Pget_fapl_mpio: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp);
+ VRFY((mpi_size_tmp == mpi_size), "MPI_Comm_size");
+ VRFY((mpi_rank_tmp == mpi_rank), "MPI_Comm_rank");
+ if (MPI_INFO_NULL != info_tmp) {
+ mrc = MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys");
+ VRFY((nkeys_tmp == nkeys), "new and old nkeys equal");
+ }
+#if 0
+ if (VERBOSE_MED)
+ h5_dump_info_object(info_tmp);
+#endif
+
+ /* Case 3:
+ * Close the property list and verify the retrieved communicator and INFO
+ * object are still valid.
+ */
+ H5Pclose(acc_pl);
+ MPI_Comm_size(comm_tmp, &mpi_size_tmp);
+ MPI_Comm_rank(comm_tmp, &mpi_rank_tmp);
+ if (VERBOSE_MED)
+ HDprintf("After Property list closed: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp);
+ if (MPI_INFO_NULL != info_tmp) {
+ mrc = MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys");
+ }
+#if 0
+ if (VERBOSE_MED)
+ h5_dump_info_object(info_tmp);
+#endif
+
+ /* clean up */
+ mrc = MPI_Comm_free(&comm_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free");
+ if (MPI_INFO_NULL != info_tmp) {
+ mrc = MPI_Info_free(&info_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_free");
+ }
+} /* end test_fapl_mpio_dup() */
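+
+/* The duplication semantics exercised above, distilled (illustrative sketch,
+ * not part of the test; names such as comm_out/info_out are placeholders and
+ * error checks are omitted):
+ *
+ *     hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
+ *     H5Pset_fapl_mpio(fapl, comm, info);
+ *     MPI_Comm_free(&comm);                          // safe: the fapl holds duplicates
+ *     MPI_Info_free(&info);
+ *
+ *     MPI_Comm comm_out;
+ *     MPI_Info info_out;
+ *     H5Pget_fapl_mpio(fapl, &comm_out, &info_out);  // returns fresh duplicates
+ *     ... use comm_out / info_out ...
+ *     MPI_Comm_free(&comm_out);                      // the caller frees the returned copies
+ *     MPI_Info_free(&info_out);
+ *     H5Pclose(fapl);
+ */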
diff --git a/testpar/API/t_prop.c b/testpar/API/t_prop.c
new file mode 100644
index 0000000..3659501
--- /dev/null
+++ b/testpar/API/t_prop.c
@@ -0,0 +1,646 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Parallel tests for encoding/decoding plists sent between processes
+ */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+#if 0
+#include "H5ACprivate.h"
+#include "H5Pprivate.h"
+#endif
+
+static int
+test_encode_decode(hid_t orig_pl, int mpi_rank, int recv_proc)
+{
+ MPI_Request req[2];
+ MPI_Status status;
+ hid_t pl; /* Decoded property list */
+ size_t buf_size = 0;
+ void *sbuf = NULL;
+ herr_t ret; /* Generic return value */
+
+ if (mpi_rank == 0) {
+ int send_size = 0;
+
+ /* first call to encode returns only the size of the buffer needed */
+ ret = H5Pencode2(orig_pl, NULL, &buf_size, H5P_DEFAULT);
+ VRFY((ret >= 0), "H5Pencode succeeded");
+
+ sbuf = (uint8_t *)HDmalloc(buf_size);
+
+ ret = H5Pencode2(orig_pl, sbuf, &buf_size, H5P_DEFAULT);
+ VRFY((ret >= 0), "H5Pencode succeeded");
+
+ /* this is a temp fix to send this size_t */
+ send_size = (int)buf_size;
+
+ MPI_Isend(&send_size, 1, MPI_INT, recv_proc, 123, MPI_COMM_WORLD, &req[0]);
+ MPI_Isend(sbuf, send_size, MPI_BYTE, recv_proc, 124, MPI_COMM_WORLD, &req[1]);
+ } /* end if */
+
+ if (mpi_rank == recv_proc) {
+ int recv_size;
+ void *rbuf;
+
+ MPI_Recv(&recv_size, 1, MPI_INT, 0, 123, MPI_COMM_WORLD, &status);
+ VRFY((recv_size >= 0), "MPI_Recv succeeded");
+ buf_size = (size_t)recv_size;
+ rbuf = (uint8_t *)HDmalloc(buf_size);
+ MPI_Recv(rbuf, recv_size, MPI_BYTE, 0, 124, MPI_COMM_WORLD, &status);
+
+ pl = H5Pdecode(rbuf);
+ VRFY((pl >= 0), "H5Pdecode succeeded");
+
+ VRFY(H5Pequal(orig_pl, pl), "Property List Equal Succeeded");
+
+ ret = H5Pclose(pl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ if (NULL != rbuf)
+ HDfree(rbuf);
+ } /* end if */
+
+ if (0 == mpi_rank) {
+ /* gcc 11 complains about passing MPI_STATUSES_IGNORE as an MPI_Status
+ * array. See the discussion here:
+ *
+ * https://github.com/pmodels/mpich/issues/5687
+ */
+ /* H5_GCC_DIAG_OFF("stringop-overflow") */
+ MPI_Waitall(2, req, MPI_STATUSES_IGNORE);
+ /* H5_GCC_DIAG_ON("stringop-overflow") */
+ }
+
+ if (NULL != sbuf)
+ HDfree(sbuf);
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ return 0;
+}
+
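+/* The encode/decode round trip used above, without the MPI transfer
+ * (illustrative sketch; `plist` is a placeholder for any property list and
+ * error checks are omitted):
+ *
+ *     size_t size = 0;
+ *     H5Pencode2(plist, NULL, &size, H5P_DEFAULT);  // first call: query the buffer size
+ *     void *buf = HDmalloc(size);
+ *     H5Pencode2(plist, buf, &size, H5P_DEFAULT);   // second call: fill the buffer
+ *     hid_t copy = H5Pdecode(buf);                  // reconstruct the property list
+ *     VRFY(H5Pequal(plist, copy), "round-tripped list equals the original");
+ *     H5Pclose(copy);
+ *     HDfree(buf);
+ */
+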
+void
+test_plist_ed(void)
+{
+ hid_t dcpl; /* dataset create prop. list */
+ hid_t dapl; /* dataset access prop. list */
+ hid_t dxpl; /* dataset transfer prop. list */
+ hid_t gcpl; /* group create prop. list */
+ hid_t lcpl; /* link create prop. list */
+ hid_t lapl; /* link access prop. list */
+ hid_t ocpypl; /* object copy prop. list */
+ hid_t ocpl; /* object create prop. list */
+ hid_t fapl; /* file access prop. list */
+ hid_t fcpl; /* file create prop. list */
+ hid_t strcpl; /* string create prop. list */
+ hid_t acpl; /* attribute create prop. list */
+
+ int mpi_size, mpi_rank, recv_proc;
+
+ hsize_t chunk_size = 16384; /* chunk size */
+ double fill = 2.7; /* Fill value */
+ size_t nslots = 521 * 2;
+ size_t nbytes = 1048576 * 10;
+ double w0 = 0.5;
+ unsigned max_compact;
+ unsigned min_dense;
+ hsize_t max_size[1]; /*data space maximum size */
+ const char *c_to_f = "x+32";
+ H5AC_cache_config_t my_cache_config = {H5AC__CURR_CACHE_CONFIG_VERSION,
+ TRUE,
+ FALSE,
+ FALSE,
+ "temp",
+ TRUE,
+ FALSE,
+ (2 * 2048 * 1024),
+ 0.3,
+ (64 * 1024 * 1024),
+ (4 * 1024 * 1024),
+ 60000,
+ H5C_incr__threshold,
+ 0.8,
+ 3.0,
+ TRUE,
+ (8 * 1024 * 1024),
+ H5C_flash_incr__add_space,
+ 2.0,
+ 0.25,
+ H5C_decr__age_out_with_threshold,
+ 0.997,
+ 0.8,
+ TRUE,
+ (3 * 1024 * 1024),
+ 3,
+ FALSE,
+ 0.2,
+ (256 * 2048),
+ 1 /* H5AC__DEFAULT_METADATA_WRITE_STRATEGY */};
+
+ herr_t ret; /* Generic return value */
+
+ if (VERBOSE_MED)
+ HDprintf("Encode/Decode DCPLs\n");
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ if (mpi_size == 1)
+ recv_proc = 0;
+ else
+ recv_proc = 1;
+
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_chunk(dcpl, 1, &chunk_size);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE);
+ VRFY((ret >= 0), "H5Pset_alloc_time succeeded");
+
+ ret = H5Pset_fill_value(dcpl, H5T_NATIVE_DOUBLE, &fill);
+ VRFY((ret >= 0), "set fill-value succeeded");
+
+ max_size[0] = 100;
+ ret = H5Pset_external(dcpl, "ext1.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4));
+ VRFY((ret >= 0), "set external succeeded");
+ ret = H5Pset_external(dcpl, "ext2.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4));
+ VRFY((ret >= 0), "set external succeeded");
+ ret = H5Pset_external(dcpl, "ext3.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4));
+ VRFY((ret >= 0), "set external succeeded");
+ ret = H5Pset_external(dcpl, "ext4.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4));
+ VRFY((ret >= 0), "set external succeeded");
+
+ ret = test_encode_decode(dcpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(dcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /******* ENCODE/DECODE DAPLS *****/
+ dapl = H5Pcreate(H5P_DATASET_ACCESS);
+ VRFY((dapl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_chunk_cache(dapl, nslots, nbytes, w0);
+ VRFY((ret >= 0), "H5Pset_chunk_cache succeeded");
+
+ ret = test_encode_decode(dapl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(dapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /******* ENCODE/DECODE OCPLS *****/
+ ocpl = H5Pcreate(H5P_OBJECT_CREATE);
+ VRFY((ocpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_attr_creation_order(ocpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED));
+ VRFY((ret >= 0), "H5Pset_attr_creation_order succeeded");
+
+ ret = H5Pset_attr_phase_change(ocpl, 110, 105);
+ VRFY((ret >= 0), "H5Pset_attr_phase_change succeeded");
+
+ ret = H5Pset_filter(ocpl, H5Z_FILTER_FLETCHER32, 0, (size_t)0, NULL);
+ VRFY((ret >= 0), "H5Pset_filter succeeded");
+
+ ret = test_encode_decode(ocpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(ocpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /******* ENCODE/DECODE DXPLS *****/
+ dxpl = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_btree_ratios(dxpl, 0.2, 0.6, 0.2);
+ VRFY((ret >= 0), "H5Pset_btree_ratios succeeded");
+
+ ret = H5Pset_hyper_vector_size(dxpl, 5);
+ VRFY((ret >= 0), "H5Pset_hyper_vector_size succeeded");
+
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio_collective_opt succeeded");
+
+ ret = H5Pset_dxpl_mpio_chunk_opt(dxpl, H5FD_MPIO_CHUNK_MULTI_IO);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt succeeded");
+
+ ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl, 30);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded");
+
+ ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl, 40);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded");
+
+ ret = H5Pset_edc_check(dxpl, H5Z_DISABLE_EDC);
+ VRFY((ret >= 0), "H5Pset_edc_check succeeded");
+
+ ret = H5Pset_data_transform(dxpl, c_to_f);
+ VRFY((ret >= 0), "H5Pset_data_transform succeeded");
+
+ ret = test_encode_decode(dxpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(dxpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /******* ENCODE/DECODE GCPLS *****/
+ gcpl = H5Pcreate(H5P_GROUP_CREATE);
+ VRFY((gcpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_local_heap_size_hint(gcpl, 256);
+ VRFY((ret >= 0), "H5Pset_local_heap_size_hint succeeded");
+
+ ret = H5Pset_link_phase_change(gcpl, 2, 2);
+ VRFY((ret >= 0), "H5Pset_link_phase_change succeeded");
+
+ /* Query the group creation properties */
+ ret = H5Pget_link_phase_change(gcpl, &max_compact, &min_dense);
+ VRFY((ret >= 0), "H5Pget_est_link_info succeeded");
+
+ ret = H5Pset_est_link_info(gcpl, 3, 9);
+ VRFY((ret >= 0), "H5Pset_est_link_info succeeded");
+
+ ret = H5Pset_link_creation_order(gcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED));
+ VRFY((ret >= 0), "H5Pset_link_creation_order succeeded");
+
+ ret = test_encode_decode(gcpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(gcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /******* ENCODE/DECODE LCPLS *****/
+ lcpl = H5Pcreate(H5P_LINK_CREATE);
+ VRFY((lcpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_create_intermediate_group(lcpl, TRUE);
+ VRFY((ret >= 0), "H5Pset_create_intermediate_group succeeded");
+
+ ret = test_encode_decode(lcpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(lcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /******* ENCODE/DECODE LAPLS *****/
+ lapl = H5Pcreate(H5P_LINK_ACCESS);
+ VRFY((lapl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_nlinks(lapl, (size_t)134);
+ VRFY((ret >= 0), "H5Pset_nlinks succeeded");
+
+ ret = H5Pset_elink_acc_flags(lapl, H5F_ACC_RDONLY);
+ VRFY((ret >= 0), "H5Pset_elink_acc_flags succeeded");
+
+ ret = H5Pset_elink_prefix(lapl, "/tmpasodiasod");
+ VRFY((ret >= 0), "H5Pset_nlinks succeeded");
+
+ /* Create FAPL for the elink FAPL */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl >= 0), "H5Pcreate succeeded");
+ ret = H5Pset_alignment(fapl, 2, 1024);
+ VRFY((ret >= 0), "H5Pset_alignment succeeded");
+
+ ret = H5Pset_elink_fapl(lapl, fapl);
+ VRFY((ret >= 0), "H5Pset_elink_fapl succeeded");
+
+ /* Close the elink's FAPL */
+ ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ ret = test_encode_decode(lapl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(lapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /******* ENCODE/DECODE OCPYPLS *****/
+ ocpypl = H5Pcreate(H5P_OBJECT_COPY);
+ VRFY((ocpypl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_copy_object(ocpypl, H5O_COPY_EXPAND_EXT_LINK_FLAG);
+ VRFY((ret >= 0), "H5Pset_copy_object succeeded");
+
+ ret = H5Padd_merge_committed_dtype_path(ocpypl, "foo");
+ VRFY((ret >= 0), "H5Padd_merge_committed_dtype_path succeeded");
+
+ ret = H5Padd_merge_committed_dtype_path(ocpypl, "bar");
+ VRFY((ret >= 0), "H5Padd_merge_committed_dtype_path succeeded");
+
+ ret = test_encode_decode(ocpypl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(ocpypl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /******* ENCODE/DECODE FAPLS *****/
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_family_offset(fapl, 1024);
+ VRFY((ret >= 0), "H5Pset_family_offset succeeded");
+
+ ret = H5Pset_meta_block_size(fapl, 2098452);
+ VRFY((ret >= 0), "H5Pset_meta_block_size succeeded");
+
+ ret = H5Pset_sieve_buf_size(fapl, 1048576);
+ VRFY((ret >= 0), "H5Pset_sieve_buf_size succeeded");
+
+ ret = H5Pset_alignment(fapl, 2, 1024);
+ VRFY((ret >= 0), "H5Pset_alignment succeeded");
+
+ ret = H5Pset_cache(fapl, 1024, 128, 10485760, 0.3);
+ VRFY((ret >= 0), "H5Pset_cache succeeded");
+
+ ret = H5Pset_elink_file_cache_size(fapl, 10485760);
+ VRFY((ret >= 0), "H5Pset_elink_file_cache_size succeeded");
+
+ ret = H5Pset_gc_references(fapl, 1);
+ VRFY((ret >= 0), "H5Pset_gc_references succeeded");
+
+ ret = H5Pset_small_data_block_size(fapl, 2048);
+ VRFY((ret >= 0), "H5Pset_small_data_block_size succeeded");
+
+ ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ VRFY((ret >= 0), "H5Pset_libver_bounds succeeded");
+
+ ret = H5Pset_fclose_degree(fapl, H5F_CLOSE_WEAK);
+ VRFY((ret >= 0), "H5Pset_fclose_degree succeeded");
+
+ ret = H5Pset_multi_type(fapl, H5FD_MEM_GHEAP);
+ VRFY((ret >= 0), "H5Pset_multi_type succeeded");
+
+ ret = H5Pset_mdc_config(fapl, &my_cache_config);
+ VRFY((ret >= 0), "H5Pset_mdc_config succeeded");
+
+ ret = test_encode_decode(fapl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /******* ENCODE/DECODE FCPLS *****/
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ VRFY((fcpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_userblock(fcpl, 1024);
+ VRFY((ret >= 0), "H5Pset_userblock succeeded");
+
+ ret = H5Pset_istore_k(fcpl, 3);
+ VRFY((ret >= 0), "H5Pset_istore_k succeeded");
+
+ ret = H5Pset_sym_k(fcpl, 4, 5);
+ VRFY((ret >= 0), "H5Pset_sym_k succeeded");
+
+ ret = H5Pset_shared_mesg_nindexes(fcpl, 8);
+ VRFY((ret >= 0), "H5Pset_shared_mesg_nindexes succeeded");
+
+ ret = H5Pset_shared_mesg_index(fcpl, 1, H5O_SHMESG_SDSPACE_FLAG, 32);
+ VRFY((ret >= 0), "H5Pset_shared_mesg_index succeeded");
+
+ ret = H5Pset_shared_mesg_phase_change(fcpl, 60, 20);
+ VRFY((ret >= 0), "H5Pset_shared_mesg_phase_change succeeded");
+
+ ret = H5Pset_sizes(fcpl, 8, 4);
+ VRFY((ret >= 0), "H5Pset_sizes succeeded");
+
+ ret = test_encode_decode(fcpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(fcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /******* ENCODE/DECODE STRCPLS *****/
+ strcpl = H5Pcreate(H5P_STRING_CREATE);
+ VRFY((strcpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_char_encoding(strcpl, H5T_CSET_UTF8);
+ VRFY((ret >= 0), "H5Pset_char_encoding succeeded");
+
+ ret = test_encode_decode(strcpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(strcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /******* ENCODE/DECODE ACPLS *****/
+ acpl = H5Pcreate(H5P_ATTRIBUTE_CREATE);
+ VRFY((acpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_char_encoding(acpl, H5T_CSET_UTF8);
+ VRFY((ret >= 0), "H5Pset_char_encoding succeeded");
+
+ ret = test_encode_decode(acpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(acpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+}
+
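+/* Illustrative sketch (not part of the test suite): the property list
+ * encode/decode round trip that test_encode_decode() above is presumed to
+ * perform for each property list.  The MPI exchange of the encoded buffer
+ * between mpi_rank 0 and recv_proc, and the VRFY-style error reporting, are
+ * omitted here; H5Pencode2()/H5Pdecode()/H5Pequal() are standard HDF5
+ * public API calls.
+ */
+#if 0
+static herr_t
+encode_decode_roundtrip_sketch(hid_t plist)
+{
+    void  *buf  = NULL;
+    size_t size = 0;
+    hid_t  copy = H5I_INVALID_HID;
+
+    /* First call with a NULL buffer queries the required encoding size */
+    if (H5Pencode2(plist, NULL, &size, H5P_DEFAULT) < 0)
+        return FAIL;
+
+    if (NULL == (buf = HDmalloc(size)))
+        return FAIL;
+
+    /* Second call actually encodes the property list into the buffer */
+    if (H5Pencode2(plist, buf, &size, H5P_DEFAULT) < 0) {
+        HDfree(buf);
+        return FAIL;
+    }
+
+    /* Decode the buffer and verify the copy matches the original */
+    copy = H5Pdecode(buf);
+    HDfree(buf);
+    if (copy < 0)
+        return FAIL;
+
+    if (H5Pequal(plist, copy) != TRUE) {
+        H5Pclose(copy);
+        return FAIL;
+    }
+
+    return H5Pclose(copy);
+}
+#endif
+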
+#if 0
+void
+external_links(void)
+{
+ hid_t lcpl = H5I_INVALID_HID; /* link create prop. list */
+ hid_t lapl = H5I_INVALID_HID; /* link access prop. list */
+ hid_t fapl = H5I_INVALID_HID; /* file access prop. list */
+ hid_t gapl = H5I_INVALID_HID; /* group access prop. list */
+ hid_t fid = H5I_INVALID_HID; /* file id */
+ hid_t group = H5I_INVALID_HID; /* group id */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm;
+ int doIO;
+ int i, mrc;
+
+ herr_t ret; /* Generic return value */
+ htri_t tri_status; /* tri return value */
+
+ const char *filename = "HDF5test.h5";
+ const char *filename_ext = "HDF5test_ext.h5";
+ const char *group_path = "/Base/Block/Step";
+ const char *link_name = "link"; /* external link */
+ char link_path[50];
+
+ if (VERBOSE_MED)
+ HDprintf("Check external links\n");
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+    /* Check that MPI communicator access properties are passed to
+       linked external files */
+
+ if (mpi_rank == 0) {
+
+ lcpl = H5Pcreate(H5P_LINK_CREATE);
+ VRFY((lcpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_create_intermediate_group(lcpl, 1);
+ VRFY((ret >= 0), "H5Pset_create_intermediate_group succeeded");
+
+ /* Create file to serve as target for external link.*/
+ fid = H5Fcreate(filename_ext, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ group = H5Gcreate2(fid, group_path, lcpl, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((group >= 0), "H5Gcreate succeeded");
+
+ ret = H5Gclose(group);
+ VRFY((ret >= 0), "H5Gclose succeeded");
+
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl >= 0), "H5Pcreate succeeded");
+
+ /* Create a new file using the file access property list. */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ group = H5Gcreate2(fid, group_path, lcpl, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((group >= 0), "H5Gcreate succeeded");
+
+ /* Create external links to the target files. */
+ ret = H5Lcreate_external(filename_ext, group_path, group, link_name, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((ret >= 0), "H5Lcreate_external succeeded");
+
+ /* Close and release resources. */
+ ret = H5Pclose(lcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Gclose(group);
+ VRFY((ret >= 0), "H5Gclose succeeded");
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /*
+ * For the first case, use all the processes. For the second case
+ * use a sub-communicator to verify the correct communicator is
+ * being used for the externally linked files.
+ * There is no way to determine if MPI info is being used for the
+ * externally linked files.
+ */
+
+ for (i = 0; i < 2; i++) {
+
+ comm = MPI_COMM_WORLD;
+
+ if (i == 0)
+ doIO = 1;
+ else {
+ doIO = mpi_rank % 2;
+ mrc = MPI_Comm_split(MPI_COMM_WORLD, doIO, mpi_rank, &comm);
+ VRFY((mrc == MPI_SUCCESS), "");
+ }
+
+ if (doIO) {
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl >= 0), "H5Pcreate succeeded");
+ ret = H5Pset_fapl_mpio(fapl, comm, MPI_INFO_NULL);
+            VRFY((ret >= 0), "H5Pset_fapl_mpio succeeded");
+
+ fid = H5Fopen(filename, H5F_ACC_RDWR, fapl);
+ VRFY((fid >= 0), "H5Fopen succeeded");
+
+            /* test opening a group through an external link; the externally
+               linked file should inherit the source file's access properties */
+ HDsnprintf(link_path, sizeof(link_path), "%s%s%s", group_path, "/", link_name);
+ group = H5Gopen2(fid, link_path, H5P_DEFAULT);
+ VRFY((group >= 0), "H5Gopen succeeded");
+ ret = H5Gclose(group);
+ VRFY((ret >= 0), "H5Gclose succeeded");
+
+            /* test opening a group through an external link, this time using
+               a group access property list that carries the elink FAPL */
+ gapl = H5Pcreate(H5P_GROUP_ACCESS);
+ VRFY((gapl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_elink_fapl(gapl, fapl);
+ VRFY((ret >= 0), "H5Pset_elink_fapl succeeded");
+
+ group = H5Gopen2(fid, link_path, gapl);
+ VRFY((group >= 0), "H5Gopen succeeded");
+
+ ret = H5Gclose(group);
+ VRFY((ret >= 0), "H5Gclose succeeded");
+
+ ret = H5Pclose(gapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* test link APIs */
+ lapl = H5Pcreate(H5P_LINK_ACCESS);
+ VRFY((lapl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_elink_fapl(lapl, fapl);
+ VRFY((ret >= 0), "H5Pset_elink_fapl succeeded");
+
+ tri_status = H5Lexists(fid, link_path, H5P_DEFAULT);
+ VRFY((tri_status == TRUE), "H5Lexists succeeded");
+
+ tri_status = H5Lexists(fid, link_path, lapl);
+ VRFY((tri_status == TRUE), "H5Lexists succeeded");
+
+ group = H5Oopen(fid, link_path, H5P_DEFAULT);
+ VRFY((group >= 0), "H5Oopen succeeded");
+
+ ret = H5Oclose(group);
+ VRFY((ret >= 0), "H5Oclose succeeded");
+
+ group = H5Oopen(fid, link_path, lapl);
+ VRFY((group >= 0), "H5Oopen succeeded");
+
+ ret = H5Oclose(group);
+ VRFY((ret >= 0), "H5Oclose succeeded");
+
+ ret = H5Pclose(lapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* close the remaining resources */
+
+ ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ }
+
+ if (comm != MPI_COMM_WORLD) {
+ mrc = MPI_Comm_free(&comm);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free succeeded");
+ }
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /* delete the test files */
+ if (mpi_rank == 0) {
+ MPI_File_delete(filename, MPI_INFO_NULL);
+ MPI_File_delete(filename_ext, MPI_INFO_NULL);
+ }
+}
+#endif
diff --git a/testpar/API/t_pshutdown.c b/testpar/API/t_pshutdown.c
new file mode 100644
index 0000000..48a8005
--- /dev/null
+++ b/testpar/API/t_pshutdown.c
@@ -0,0 +1,150 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Programmer: Mohamad Chaarawi
+ * February 2015
+ *
+ * Purpose: This test creates a file and a bunch of objects in the
+ * file and then calls MPI_Finalize without closing anything. The
+ *              library should exercise the attribute destroy callback attached to
+ *              MPI_COMM_SELF (sketched after the includes below) and shut down the
+ *              HDF5 library, closing all open objects. The t_prestart test will
+ *              read back the file and make sure all created objects are there.
+ */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
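+/* Illustrative sketch (not part of this test): roughly how a library can hook
+ * MPI_Finalize() through an attribute delete callback on MPI_COMM_SELF, which
+ * is the mechanism this test relies on.  MPI guarantees that attributes on
+ * MPI_COMM_SELF are deleted -- invoking their delete callbacks -- at the start
+ * of MPI_Finalize().  This is only an approximation of what the HDF5 library
+ * does internally, not its actual implementation.
+ */
+#if 0
+static int
+shutdown_callback_sketch(MPI_Comm comm, int keyval, void *attr_val, void *extra)
+{
+    (void)comm;
+    (void)keyval;
+    (void)attr_val;
+    (void)extra;
+
+    /* Close all open HDF5 objects and shut the library down */
+    (void)H5close();
+
+    return MPI_SUCCESS;
+}
+
+static void
+register_shutdown_hook_sketch(void)
+{
+    int keyval = MPI_KEYVAL_INVALID;
+
+    /* The delete callback of this keyval runs during MPI_Finalize() */
+    MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, shutdown_callback_sketch, &keyval, NULL);
+
+    /* Attaching an attribute to MPI_COMM_SELF arms the callback */
+    MPI_Comm_set_attr(MPI_COMM_SELF, keyval, NULL);
+}
+#endif
+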
+int nerrors = 0; /* errors count */
+
+const char *FILENAME[] = {"shutdown.h5", NULL};
+
+int
+main(int argc, char **argv)
+{
+ hid_t file_id, dset_id, grp_id;
+ hid_t fapl, sid, mem_dataspace;
+ hsize_t dims[RANK], i;
+ herr_t ret;
+#if 0
+ char filename[1024];
+#endif
+ int mpi_size, mpi_rank;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ hsize_t start[RANK];
+ hsize_t count[RANK];
+ hsize_t stride[RANK];
+ hsize_t block[RANK];
+ DATATYPE *data_array = NULL; /* data buffer */
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ if (MAINPROCESS) {
+ printf("Testing %-62s", "proper shutdown of HDF5 library");
+ fflush(stdout);
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl >= 0), "H5Pcreate succeeded");
+
+ /* Get the capability flag of the VOL connector being used */
+ ret = H5Pget_vol_cap_flags(fapl, &vol_cap_flags_g);
+ VRFY((ret >= 0), "H5Pget_vol_cap_flags succeeded");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ MPI_Finalize();
+ return 0;
+ }
+
+ ret = H5Pset_fapl_mpio(fapl, comm, info);
+ VRFY((ret >= 0), "");
+
+#if 0
+ h5_fixname(FILENAME[0], fapl, filename, sizeof filename);
+#endif
+ file_id = H5Fcreate(FILENAME[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+ grp_id = H5Gcreate2(file_id, "Group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((grp_id >= 0), "H5Gcreate succeeded");
+
+ dims[0] = (hsize_t)ROW_FACTOR * (hsize_t)mpi_size;
+ dims[1] = (hsize_t)COL_FACTOR * (hsize_t)mpi_size;
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ dset_id = H5Dcreate2(grp_id, "Dataset", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate succeeded");
+
+ /* allocate memory for data buffer */
+ data_array = (DATATYPE *)HDmalloc(dims[0] * dims[1] * sizeof(DATATYPE));
+ VRFY((data_array != NULL), "data_array HDmalloc succeeded");
+
+    /* Each process takes a slab of rows. */
+ block[0] = dims[0] / (hsize_t)mpi_size;
+ block[1] = dims[1];
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
+
+ /* put some trivial data in the data_array */
+ for (i = 0; i < dims[0] * dims[1]; i++)
+ data_array[i] = mpi_rank + 1;
+
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* write data independently */
+ ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* release data buffers */
+ if (data_array)
+ HDfree(data_array);
+
+ MPI_Finalize();
+
+ /* nerrors += GetTestNumErrs(); */
+
+ if (MAINPROCESS) {
+ if (0 == nerrors) {
+ puts(" PASSED");
+ fflush(stdout);
+ }
+ else {
+ puts("*FAILED*");
+ fflush(stdout);
+ }
+ }
+
+ return (nerrors != 0);
+}
diff --git a/testpar/API/t_shapesame.c b/testpar/API/t_shapesame.c
new file mode 100644
index 0000000..340e89e
--- /dev/null
+++ b/testpar/API/t_shapesame.c
@@ -0,0 +1,4516 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ This program will test independent and collective reads and writes between
+   selections of different rank that are nonetheless deemed as having the
+   same shape by H5Sselect_shape_same() (see the illustrative sketch after
+   the includes below).
+ */
+
+#define H5S_FRIEND /*suppress error about including H5Spkg */
+
+/* Define this macro to indicate that the testing APIs should be available */
+#define H5S_TESTING
+
+#if 0
+#include "H5Spkg.h" /* Dataspaces */
+#endif
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
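+/* Illustrative sketch (not part of the test): what "same shape across
+ * different rank" means to H5Sselect_shape_same().  A 1 x 10 hyperslab
+ * selected in a 2-D dataspace and the full extent of a 10-element 1-D
+ * dataspace are expected to compare as having the same shape, which is the
+ * property the tests below rely on.  The dimension sizes are arbitrary.
+ */
+#if 0
+static htri_t
+shape_same_sketch(void)
+{
+    hsize_t dims2[2] = {4, 10};
+    hsize_t dims1[1] = {10};
+    hsize_t start[2] = {2, 0};
+    hsize_t count[2] = {1, 10};
+    hid_t   sid2     = H5Screate_simple(2, dims2, NULL);
+    hid_t   sid1     = H5Screate_simple(1, dims1, NULL);
+    htri_t  same;
+
+    /* Select a single row (1 x 10) of the 2-D dataspace */
+    H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL);
+
+    /* The 1-D dataspace defaults to an "all" selection of 10 elements */
+    same = H5Sselect_shape_same(sid1, sid2); /* expected to be TRUE */
+
+    H5Sclose(sid1);
+    H5Sclose(sid2);
+
+    return same;
+}
+#endif
+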
+/* FILENAME and filenames must have the same number of names.
+ * Use PARATESTFILE in general and use a separated filename only if the file
+ * created in one test is accessed by a different test.
+ * filenames[0] is reserved as the file name for PARATESTFILE.
+ */
+#define NFILENAME 2
+const char *FILENAME[NFILENAME] = {"ShapeSameTest.h5", NULL};
+char filenames[NFILENAME][PATH_MAX];
+hid_t fapl; /* file access property list */
+
+/* On Lustre (and perhaps other parallel file systems?), we have severe
+ * slowdowns if two or more processes attempt to access the same file system
+ * block. To minimize this problem, we set alignment in the shape same tests
+ * to the default Lustre block size -- which greatly reduces contention in
+ * the chunked dataset case.
+ */
+
+#define SHAPE_SAME_TEST_ALIGNMENT ((hsize_t)(4 * 1024 * 1024))
+
+#define PAR_SS_DR_MAX_RANK 5 /* must update code if this changes */
+
+struct hs_dr_pio_test_vars_t {
+ int mpi_size;
+ int mpi_rank;
+ MPI_Comm mpi_comm;
+ MPI_Info mpi_info;
+ int test_num;
+ int edge_size;
+ int checker_edge_size;
+ int chunk_edge_size;
+ int small_rank;
+ int large_rank;
+ hid_t dset_type;
+ uint32_t *small_ds_buf_0;
+ uint32_t *small_ds_buf_1;
+ uint32_t *small_ds_buf_2;
+ uint32_t *small_ds_slice_buf;
+ uint32_t *large_ds_buf_0;
+ uint32_t *large_ds_buf_1;
+ uint32_t *large_ds_buf_2;
+ uint32_t *large_ds_slice_buf;
+ int small_ds_offset;
+ int large_ds_offset;
+ hid_t fid; /* HDF5 file ID */
+ hid_t xfer_plist;
+ hid_t full_mem_small_ds_sid;
+ hid_t full_file_small_ds_sid;
+ hid_t mem_small_ds_sid;
+ hid_t file_small_ds_sid_0;
+ hid_t file_small_ds_sid_1;
+ hid_t small_ds_slice_sid;
+ hid_t full_mem_large_ds_sid;
+ hid_t full_file_large_ds_sid;
+ hid_t mem_large_ds_sid;
+ hid_t file_large_ds_sid_0;
+ hid_t file_large_ds_sid_1;
+ hid_t file_large_ds_process_slice_sid;
+ hid_t mem_large_ds_process_slice_sid;
+ hid_t large_ds_slice_sid;
+ hid_t small_dataset; /* Dataset ID */
+ hid_t large_dataset; /* Dataset ID */
+ size_t small_ds_size;
+ size_t small_ds_slice_size;
+ size_t large_ds_size;
+ size_t large_ds_slice_size;
+ hsize_t dims[PAR_SS_DR_MAX_RANK];
+ hsize_t chunk_dims[PAR_SS_DR_MAX_RANK];
+ hsize_t start[PAR_SS_DR_MAX_RANK];
+ hsize_t stride[PAR_SS_DR_MAX_RANK];
+ hsize_t count[PAR_SS_DR_MAX_RANK];
+ hsize_t block[PAR_SS_DR_MAX_RANK];
+ hsize_t *start_ptr;
+ hsize_t *stride_ptr;
+ hsize_t *count_ptr;
+ hsize_t *block_ptr;
+ int skips;
+ int max_skips;
+ int64_t total_tests;
+ int64_t tests_run;
+ int64_t tests_skipped;
+};
+
+/*-------------------------------------------------------------------------
+ * Function: hs_dr_pio_test__setup()
+ *
+ * Purpose: Do setup for tests of I/O to/from hyperslab selections of
+ * different rank in the parallel case.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 8/9/11
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG 0
+
+static void
+hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker_edge_size,
+ const int chunk_edge_size, const int small_rank, const int large_rank,
+ const hbool_t use_collective_io, const hid_t dset_type, const int express_test,
+ struct hs_dr_pio_test_vars_t *tv_ptr)
+{
+#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG
+ const char *fcnName = "hs_dr_pio_test__setup()";
+#endif /* CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG */
+ const char *filename;
+ hbool_t mis_match = FALSE;
+ int i;
+ int mrc;
+ int mpi_rank; /* needed by the VRFY macro */
+ uint32_t expected_value;
+ uint32_t *ptr_0;
+ uint32_t *ptr_1;
+ hid_t acc_tpl; /* File access templates */
+ hid_t small_ds_dcpl_id = H5P_DEFAULT;
+ hid_t large_ds_dcpl_id = H5P_DEFAULT;
+ herr_t ret; /* Generic return value */
+
+ HDassert(edge_size >= 6);
+ HDassert(edge_size >= chunk_edge_size);
+ HDassert((chunk_edge_size == 0) || (chunk_edge_size >= 3));
+ HDassert(1 < small_rank);
+ HDassert(small_rank < large_rank);
+ HDassert(large_rank <= PAR_SS_DR_MAX_RANK);
+
+ tv_ptr->test_num = test_num;
+ tv_ptr->edge_size = edge_size;
+ tv_ptr->checker_edge_size = checker_edge_size;
+ tv_ptr->chunk_edge_size = chunk_edge_size;
+ tv_ptr->small_rank = small_rank;
+ tv_ptr->large_rank = large_rank;
+ tv_ptr->dset_type = dset_type;
+
+ MPI_Comm_size(MPI_COMM_WORLD, &(tv_ptr->mpi_size));
+ MPI_Comm_rank(MPI_COMM_WORLD, &(tv_ptr->mpi_rank));
+ /* the VRFY() macro needs the local variable mpi_rank -- set it up now */
+ mpi_rank = tv_ptr->mpi_rank;
+
+ HDassert(tv_ptr->mpi_size >= 1);
+
+ tv_ptr->mpi_comm = MPI_COMM_WORLD;
+ tv_ptr->mpi_info = MPI_INFO_NULL;
+
+ for (i = 0; i < tv_ptr->small_rank - 1; i++) {
+ tv_ptr->small_ds_size *= (size_t)(tv_ptr->edge_size);
+ tv_ptr->small_ds_slice_size *= (size_t)(tv_ptr->edge_size);
+ }
+ tv_ptr->small_ds_size *= (size_t)(tv_ptr->mpi_size + 1);
+
+ /* used by checker board tests only */
+ tv_ptr->small_ds_offset = PAR_SS_DR_MAX_RANK - tv_ptr->small_rank;
+
+ HDassert(0 < tv_ptr->small_ds_offset);
+ HDassert(tv_ptr->small_ds_offset < PAR_SS_DR_MAX_RANK);
+
+ for (i = 0; i < tv_ptr->large_rank - 1; i++) {
+
+ tv_ptr->large_ds_size *= (size_t)(tv_ptr->edge_size);
+ tv_ptr->large_ds_slice_size *= (size_t)(tv_ptr->edge_size);
+ }
+ tv_ptr->large_ds_size *= (size_t)(tv_ptr->mpi_size + 1);
+
+ /* used by checker board tests only */
+ tv_ptr->large_ds_offset = PAR_SS_DR_MAX_RANK - tv_ptr->large_rank;
+
+ HDassert(0 <= tv_ptr->large_ds_offset);
+ HDassert(tv_ptr->large_ds_offset < PAR_SS_DR_MAX_RANK);
+
+ /* set up the start, stride, count, and block pointers */
+ /* used by contiguous tests only */
+ tv_ptr->start_ptr = &(tv_ptr->start[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]);
+ tv_ptr->stride_ptr = &(tv_ptr->stride[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]);
+ tv_ptr->count_ptr = &(tv_ptr->count[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]);
+ tv_ptr->block_ptr = &(tv_ptr->block[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]);
+
+ /* Allocate buffers */
+ tv_ptr->small_ds_buf_0 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_size);
+ VRFY((tv_ptr->small_ds_buf_0 != NULL), "malloc of small_ds_buf_0 succeeded");
+
+ tv_ptr->small_ds_buf_1 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_size);
+ VRFY((tv_ptr->small_ds_buf_1 != NULL), "malloc of small_ds_buf_1 succeeded");
+
+ tv_ptr->small_ds_buf_2 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_size);
+ VRFY((tv_ptr->small_ds_buf_2 != NULL), "malloc of small_ds_buf_2 succeeded");
+
+ tv_ptr->small_ds_slice_buf = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
+ VRFY((tv_ptr->small_ds_slice_buf != NULL), "malloc of small_ds_slice_buf succeeded");
+
+ tv_ptr->large_ds_buf_0 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_size);
+ VRFY((tv_ptr->large_ds_buf_0 != NULL), "malloc of large_ds_buf_0 succeeded");
+
+ tv_ptr->large_ds_buf_1 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_size);
+ VRFY((tv_ptr->large_ds_buf_1 != NULL), "malloc of large_ds_buf_1 succeeded");
+
+ tv_ptr->large_ds_buf_2 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_size);
+ VRFY((tv_ptr->large_ds_buf_2 != NULL), "malloc of large_ds_buf_2 succeeded");
+
+ tv_ptr->large_ds_slice_buf = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_slice_size);
+ VRFY((tv_ptr->large_ds_slice_buf != NULL), "malloc of large_ds_slice_buf succeeded");
+
+ /* initialize the buffers */
+
+ ptr_0 = tv_ptr->small_ds_buf_0;
+ for (i = 0; i < (int)(tv_ptr->small_ds_size); i++)
+ *ptr_0++ = (uint32_t)i;
+ HDmemset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size);
+ HDmemset(tv_ptr->small_ds_buf_2, 0, sizeof(uint32_t) * tv_ptr->small_ds_size);
+
+ HDmemset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
+
+ ptr_0 = tv_ptr->large_ds_buf_0;
+ for (i = 0; i < (int)(tv_ptr->large_ds_size); i++)
+ *ptr_0++ = (uint32_t)i;
+ HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
+ HDmemset(tv_ptr->large_ds_buf_2, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
+
+ HDmemset(tv_ptr->large_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->large_ds_slice_size);
+
+ filename = filenames[0]; /* (const char *)GetTestParameters(); */
+ HDassert(filename != NULL);
+#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG
+ if (MAINPROCESS) {
+
+ HDfprintf(stdout, "%d: test num = %d.\n", tv_ptr->mpi_rank, tv_ptr->test_num);
+ HDfprintf(stdout, "%d: mpi_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->mpi_size);
+ HDfprintf(stdout, "%d: small/large rank = %d/%d, use_collective_io = %d.\n", tv_ptr->mpi_rank,
+ tv_ptr->small_rank, tv_ptr->large_rank, (int)use_collective_io);
+ HDfprintf(stdout, "%d: edge_size = %d, chunk_edge_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->edge_size,
+ tv_ptr->chunk_edge_size);
+ HDfprintf(stdout, "%d: checker_edge_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->checker_edge_size);
+ HDfprintf(stdout, "%d: small_ds_size = %d, large_ds_size = %d.\n", tv_ptr->mpi_rank,
+ (int)(tv_ptr->small_ds_size), (int)(tv_ptr->large_ds_size));
+ HDfprintf(stdout, "%d: filename = %s.\n", tv_ptr->mpi_rank, filename);
+ }
+#endif /* CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG */
+ /* ----------------------------------------
+ * CREATE AN HDF5 FILE WITH PARALLEL ACCESS
+ * ---------------------------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(tv_ptr->mpi_comm, tv_ptr->mpi_info, facc_type);
+ VRFY((acc_tpl >= 0), "create_faccess_plist() succeeded");
+
+    /* set the alignment -- need it large so that we aren't always hitting
+     * the same file system block.  Do this only if express_test is greater
+ * than zero.
+ */
+ if (express_test > 0) {
+
+ ret = H5Pset_alignment(acc_tpl, (hsize_t)0, SHAPE_SAME_TEST_ALIGNMENT);
+ VRFY((ret != FAIL), "H5Pset_alignment() succeeded");
+ }
+
+ /* create the file collectively */
+ tv_ptr->fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((tv_ptr->fid >= 0), "H5Fcreate succeeded");
+
+ MESG("File opened.");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded");
+
+ /* setup dims: */
+ tv_ptr->dims[0] = (hsize_t)(tv_ptr->mpi_size + 1);
+ tv_ptr->dims[1] = tv_ptr->dims[2] = tv_ptr->dims[3] = tv_ptr->dims[4] = (hsize_t)(tv_ptr->edge_size);
+
+ /* Create small ds dataspaces */
+ tv_ptr->full_mem_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->full_mem_small_ds_sid != 0), "H5Screate_simple() full_mem_small_ds_sid succeeded");
+
+ tv_ptr->full_file_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->full_file_small_ds_sid != 0), "H5Screate_simple() full_file_small_ds_sid succeeded");
+
+ tv_ptr->mem_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->mem_small_ds_sid != 0), "H5Screate_simple() mem_small_ds_sid succeeded");
+
+ tv_ptr->file_small_ds_sid_0 = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->file_small_ds_sid_0 != 0), "H5Screate_simple() file_small_ds_sid_0 succeeded");
+
+ /* used by checker board tests only */
+ tv_ptr->file_small_ds_sid_1 = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->file_small_ds_sid_1 != 0), "H5Screate_simple() file_small_ds_sid_1 succeeded");
+
+ tv_ptr->small_ds_slice_sid = H5Screate_simple(tv_ptr->small_rank - 1, &(tv_ptr->dims[1]), NULL);
+ VRFY((tv_ptr->small_ds_slice_sid != 0), "H5Screate_simple() small_ds_slice_sid succeeded");
+
+ /* Create large ds dataspaces */
+ tv_ptr->full_mem_large_ds_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->full_mem_large_ds_sid != 0), "H5Screate_simple() full_mem_large_ds_sid succeeded");
+
+ tv_ptr->full_file_large_ds_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->full_file_large_ds_sid != FAIL), "H5Screate_simple() full_file_large_ds_sid succeeded");
+
+ tv_ptr->mem_large_ds_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->mem_large_ds_sid != FAIL), "H5Screate_simple() mem_large_ds_sid succeeded");
+
+ tv_ptr->file_large_ds_sid_0 = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->file_large_ds_sid_0 != FAIL), "H5Screate_simple() file_large_ds_sid_0 succeeded");
+
+ /* used by checker board tests only */
+ tv_ptr->file_large_ds_sid_1 = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->file_large_ds_sid_1 != FAIL), "H5Screate_simple() file_large_ds_sid_1 succeeded");
+
+ tv_ptr->mem_large_ds_process_slice_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->mem_large_ds_process_slice_sid != FAIL),
+ "H5Screate_simple() mem_large_ds_process_slice_sid succeeded");
+
+ tv_ptr->file_large_ds_process_slice_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->file_large_ds_process_slice_sid != FAIL),
+ "H5Screate_simple() file_large_ds_process_slice_sid succeeded");
+
+ tv_ptr->large_ds_slice_sid = H5Screate_simple(tv_ptr->large_rank - 1, &(tv_ptr->dims[1]), NULL);
+ VRFY((tv_ptr->large_ds_slice_sid != 0), "H5Screate_simple() large_ds_slice_sid succeeded");
+
+ /* if chunk edge size is greater than zero, set up the small and
+ * large data set creation property lists to specify chunked
+ * datasets.
+ */
+ if (tv_ptr->chunk_edge_size > 0) {
+
+ /* Under Lustre (and perhaps other parallel file systems?) we get
+ * locking delays when two or more processes attempt to access the
+ * same file system block.
+ *
+ * To minimize this problem, I have changed chunk_dims[0]
+     * from (mpi_size + 1) to just 1 when any sort of express test is
+ * selected. Given the structure of the test, and assuming we
+ * set the alignment large enough, this avoids the contention
+ * issue by seeing to it that each chunk is only accessed by one
+ * process.
+ *
+ * One can argue as to whether this is a good thing to do in our
+ * tests, but for now it is necessary if we want the test to complete
+ * in a reasonable amount of time.
+ *
+ * JRM -- 9/16/10
+ */
+
+ tv_ptr->chunk_dims[0] = 1;
+
+ tv_ptr->chunk_dims[1] = tv_ptr->chunk_dims[2] = tv_ptr->chunk_dims[3] = tv_ptr->chunk_dims[4] =
+ (hsize_t)(tv_ptr->chunk_edge_size);
+
+ small_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((ret != FAIL), "H5Pcreate() small_ds_dcpl_id succeeded");
+
+ ret = H5Pset_layout(small_ds_dcpl_id, H5D_CHUNKED);
+ VRFY((ret != FAIL), "H5Pset_layout() small_ds_dcpl_id succeeded");
+
+ ret = H5Pset_chunk(small_ds_dcpl_id, tv_ptr->small_rank, tv_ptr->chunk_dims);
+ VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded");
+
+ large_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((ret != FAIL), "H5Pcreate() large_ds_dcpl_id succeeded");
+
+ ret = H5Pset_layout(large_ds_dcpl_id, H5D_CHUNKED);
+ VRFY((ret != FAIL), "H5Pset_layout() large_ds_dcpl_id succeeded");
+
+ ret = H5Pset_chunk(large_ds_dcpl_id, tv_ptr->large_rank, tv_ptr->chunk_dims);
+ VRFY((ret != FAIL), "H5Pset_chunk() large_ds_dcpl_id succeeded");
+ }
+
+ /* create the small dataset */
+ tv_ptr->small_dataset =
+ H5Dcreate2(tv_ptr->fid, "small_dataset", tv_ptr->dset_type, tv_ptr->file_small_ds_sid_0, H5P_DEFAULT,
+ small_ds_dcpl_id, H5P_DEFAULT);
+ VRFY((ret != FAIL), "H5Dcreate2() small_dataset succeeded");
+
+ /* create the large dataset */
+ tv_ptr->large_dataset =
+ H5Dcreate2(tv_ptr->fid, "large_dataset", tv_ptr->dset_type, tv_ptr->file_large_ds_sid_0, H5P_DEFAULT,
+ large_ds_dcpl_id, H5P_DEFAULT);
+ VRFY((ret != FAIL), "H5Dcreate2() large_dataset succeeded");
+
+ /* setup xfer property list */
+ tv_ptr->xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((tv_ptr->xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
+
+ if (use_collective_io) {
+ ret = H5Pset_dxpl_mpio(tv_ptr->xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ }
+
+ /* setup selection to write initial data to the small and large data sets */
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
+ tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
+ tv_ptr->count[0] = 1;
+ tv_ptr->block[0] = 1;
+
+ for (i = 1; i < tv_ptr->large_rank; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+
+ /* setup selections for writing initial data to the small data set */
+ ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
+
+ ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded");
+
+ if (MAINPROCESS) { /* add an additional slice to the selections */
+
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_size);
+
+ ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) succeeded");
+
+ ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, or) succeeded");
+ }
+
+ /* write the initial value of the small data set to file */
+ ret = H5Dwrite(tv_ptr->small_dataset, tv_ptr->dset_type, tv_ptr->mem_small_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_0);
+
+ VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded");
+
+ /* sync with the other processes before checking data */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes");
+
+ /* read the small data set back to verify that it contains the
+ * expected data. Note that each process reads in the entire
+ * data set and verifies it.
+ */
+ ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->full_mem_small_ds_sid,
+ tv_ptr->full_file_small_ds_sid, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() small_dataset initial read succeeded");
+
+ /* verify that the correct data was written to the small data set */
+ expected_value = 0;
+ mis_match = FALSE;
+ ptr_1 = tv_ptr->small_ds_buf_1;
+
+ i = 0;
+ for (i = 0; i < (int)(tv_ptr->small_ds_size); i++) {
+
+ if (*ptr_1 != expected_value) {
+
+ mis_match = TRUE;
+ }
+ ptr_1++;
+ expected_value++;
+ }
+ VRFY((mis_match == FALSE), "small ds init data good.");
+
+ /* setup selections for writing initial data to the large data set */
+
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
+
+ ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) succeeded");
+
+ ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) succeeded");
+
+ /* In passing, setup the process slice dataspaces as well */
+
+ ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_process_slice_sid, H5S_SELECT_SET, tv_ptr->start,
+ tv_ptr->stride, tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_process_slice_sid, set) succeeded");
+
+ ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_process_slice_sid, H5S_SELECT_SET, tv_ptr->start,
+ tv_ptr->stride, tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_process_slice_sid, set) succeeded");
+
+ if (MAINPROCESS) { /* add an additional slice to the selections */
+
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_size);
+
+ ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) succeeded");
+
+ ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, or) succeeded");
+ }
+
+ /* write the initial value of the large data set to file */
+ ret = H5Dwrite(tv_ptr->large_dataset, tv_ptr->dset_type, tv_ptr->mem_large_ds_sid,
+ tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stderr);
+ VRFY((ret >= 0), "H5Dwrite() large_dataset initial write succeeded");
+
+ /* sync with the other processes before checking data */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync after large dataset writes");
+
+ /* read the large data set back to verify that it contains the
+ * expected data. Note that each process reads in the entire
+ * data set.
+ */
+ ret = H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->full_mem_large_ds_sid,
+ tv_ptr->full_file_large_ds_sid, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() large_dataset initial read succeeded");
+
+ /* verify that the correct data was written to the large data set */
+ expected_value = 0;
+ mis_match = FALSE;
+ ptr_1 = tv_ptr->large_ds_buf_1;
+
+ i = 0;
+ for (i = 0; i < (int)(tv_ptr->large_ds_size); i++) {
+
+ if (*ptr_1 != expected_value) {
+
+ mis_match = TRUE;
+ }
+ ptr_1++;
+ expected_value++;
+ }
+ VRFY((mis_match == FALSE), "large ds init data good.");
+
+ /* sync with the other processes before changing data */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync initial values check");
+
+ return;
+
+} /* hs_dr_pio_test__setup() */
+
+/*-------------------------------------------------------------------------
+ * Function: hs_dr_pio_test__takedown()
+ *
+ * Purpose: Do takedown after tests of I/O to/from hyperslab selections
+ * of different rank in the parallel case.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 9/18/09
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define HS_DR_PIO_TEST__TAKEDOWN__DEBUG 0
+
+static void
+hs_dr_pio_test__takedown(struct hs_dr_pio_test_vars_t *tv_ptr)
+{
+#if HS_DR_PIO_TEST__TAKEDOWN__DEBUG
+ const char *fcnName = "hs_dr_pio_test__takedown()";
+#endif /* HS_DR_PIO_TEST__TAKEDOWN__DEBUG */
+ int mpi_rank; /* needed by the VRFY macro */
+ herr_t ret; /* Generic return value */
+
+ /* initialize the local copy of mpi_rank */
+ mpi_rank = tv_ptr->mpi_rank;
+
+ /* Close property lists */
+ if (tv_ptr->xfer_plist != H5P_DEFAULT) {
+ ret = H5Pclose(tv_ptr->xfer_plist);
+ VRFY((ret != FAIL), "H5Pclose(xfer_plist) succeeded");
+ }
+
+ /* Close dataspaces */
+ ret = H5Sclose(tv_ptr->full_mem_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_mem_small_ds_sid) succeeded");
+
+ ret = H5Sclose(tv_ptr->full_file_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_file_small_ds_sid) succeeded");
+
+ ret = H5Sclose(tv_ptr->mem_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(mem_small_ds_sid) succeeded");
+
+ ret = H5Sclose(tv_ptr->file_small_ds_sid_0);
+ VRFY((ret != FAIL), "H5Sclose(file_small_ds_sid_0) succeeded");
+
+ ret = H5Sclose(tv_ptr->file_small_ds_sid_1);
+ VRFY((ret != FAIL), "H5Sclose(file_small_ds_sid_1) succeeded");
+
+ ret = H5Sclose(tv_ptr->small_ds_slice_sid);
+ VRFY((ret != FAIL), "H5Sclose(small_ds_slice_sid) succeeded");
+
+ ret = H5Sclose(tv_ptr->full_mem_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_mem_large_ds_sid) succeeded");
+
+ ret = H5Sclose(tv_ptr->full_file_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_file_large_ds_sid) succeeded");
+
+ ret = H5Sclose(tv_ptr->mem_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(mem_large_ds_sid) succeeded");
+
+ ret = H5Sclose(tv_ptr->file_large_ds_sid_0);
+ VRFY((ret != FAIL), "H5Sclose(file_large_ds_sid_0) succeeded");
+
+ ret = H5Sclose(tv_ptr->file_large_ds_sid_1);
+ VRFY((ret != FAIL), "H5Sclose(file_large_ds_sid_1) succeeded");
+
+ ret = H5Sclose(tv_ptr->mem_large_ds_process_slice_sid);
+ VRFY((ret != FAIL), "H5Sclose(mem_large_ds_process_slice_sid) succeeded");
+
+ ret = H5Sclose(tv_ptr->file_large_ds_process_slice_sid);
+ VRFY((ret != FAIL), "H5Sclose(file_large_ds_process_slice_sid) succeeded");
+
+ ret = H5Sclose(tv_ptr->large_ds_slice_sid);
+ VRFY((ret != FAIL), "H5Sclose(large_ds_slice_sid) succeeded");
+
+ /* Close Datasets */
+ ret = H5Dclose(tv_ptr->small_dataset);
+ VRFY((ret != FAIL), "H5Dclose(small_dataset) succeeded");
+
+ ret = H5Dclose(tv_ptr->large_dataset);
+ VRFY((ret != FAIL), "H5Dclose(large_dataset) succeeded");
+
+ /* close the file collectively */
+ MESG("about to close file.");
+ ret = H5Fclose(tv_ptr->fid);
+ VRFY((ret != FAIL), "file close succeeded");
+
+ /* Free memory buffers */
+
+ if (tv_ptr->small_ds_buf_0 != NULL)
+ HDfree(tv_ptr->small_ds_buf_0);
+ if (tv_ptr->small_ds_buf_1 != NULL)
+ HDfree(tv_ptr->small_ds_buf_1);
+ if (tv_ptr->small_ds_buf_2 != NULL)
+ HDfree(tv_ptr->small_ds_buf_2);
+ if (tv_ptr->small_ds_slice_buf != NULL)
+ HDfree(tv_ptr->small_ds_slice_buf);
+
+ if (tv_ptr->large_ds_buf_0 != NULL)
+ HDfree(tv_ptr->large_ds_buf_0);
+ if (tv_ptr->large_ds_buf_1 != NULL)
+ HDfree(tv_ptr->large_ds_buf_1);
+ if (tv_ptr->large_ds_buf_2 != NULL)
+ HDfree(tv_ptr->large_ds_buf_2);
+ if (tv_ptr->large_ds_slice_buf != NULL)
+ HDfree(tv_ptr->large_ds_slice_buf);
+
+ return;
+
+} /* hs_dr_pio_test__takedown() */
+
+/*-------------------------------------------------------------------------
+ * Function: contig_hs_dr_pio_test__d2m_l2s()
+ *
+ * Purpose: Part one of a series of tests of I/O to/from hyperslab
+ *              selections of different rank in the parallel case.
+ *
+ * Verify that we can read from disk correctly using
+ * selections of different rank that H5Sselect_shape_same()
+ * views as being of the same shape.
+ *
+ * In this function, we test this by reading small_rank - 1
+ *              dimensional slices from the on disk large cube, and verifying that the
+ * data read is correct. Verify that H5Sselect_shape_same()
+ * returns true on the memory and file selections.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 9/10/11
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG 0
+
+static void
+contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
+{
+#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ const char *fcnName = "contig_hs_dr_pio_test__run_test()";
+#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
+ hbool_t mis_match = FALSE;
+ int i, j, k, l;
+ size_t n;
+ int mpi_rank; /* needed by the VRFY macro */
+ uint32_t expected_value;
+ uint32_t *ptr_1;
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ /* initialize the local copy of mpi_rank */
+ mpi_rank = tv_ptr->mpi_rank;
+
+ /* We have already done a H5Sselect_all() on the dataspace
+ * small_ds_slice_sid in the initialization phase, so no need to
+ * call H5Sselect_all() again.
+ */
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to read slices of the large cube.
+ */
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
+
+ tv_ptr->block[i] = 1;
+ }
+ else {
+
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+ }
+
+ /* zero out the buffer we will be reading into */
+ HDmemset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
+
+#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ HDfprintf(stdout, "%s reading slices from big cube on disk into small cube slice.\n", fcnName);
+#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
+
+ /* in serial versions of this test, we loop through all the dimensions
+ * of the large data set. However, in the parallel version, each
+ * process only works with that slice of the large cube indicated
+ * by its rank -- hence we set the most slowly changing index to
+ * mpi_rank, and don't iterate over it.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
+
+ i = tv_ptr->mpi_rank;
+ }
+ else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
+
+ j = tv_ptr->mpi_rank;
+ }
+ else {
+
+ j = 0;
+ }
+
+ do {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
+
+ k = tv_ptr->mpi_rank;
+ }
+ else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+             * (barring major re-organization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
+
+ (tv_ptr->tests_skipped)++;
+ }
+ else { /* run the test */
+
+ tv_ptr->skips = 0; /* reset the skips counter */
+
+ /* we know that small_rank - 1 >= 1 and that
+ * large_rank > small_rank by the assertions at the head
+ * of this function. Thus no need for another inner loop.
+ */
+ tv_ptr->start[0] = (hsize_t)i;
+ tv_ptr->start[1] = (hsize_t)j;
+ tv_ptr->start[2] = (hsize_t)k;
+ tv_ptr->start[3] = (hsize_t)l;
+ tv_ptr->start[4] = 0;
+
+ ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start_ptr,
+ tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr);
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(file_large_cube_sid) succeeded");
+
+ /* verify that H5Sselect_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5Sselect_shape_same(tv_ptr->small_ds_slice_sid, tv_ptr->file_large_ds_sid_0);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
+
+ /* Read selection from disk */
+#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank),
+ (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]),
+ (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4]));
+ HDfprintf(stdout, "%s slice/file extent dims = %d/%d.\n", fcnName,
+ H5Sget_simple_extent_ndims(tv_ptr->small_ds_slice_sid),
+ H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0));
+#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
+ ret =
+ H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->small_ds_slice_sid,
+ tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_slice_buf);
+ VRFY((ret >= 0), "H5Dread() slice from large ds succeeded.");
+
+ /* verify that expected data is retrieved */
+
+ mis_match = FALSE;
+ ptr_1 = tv_ptr->small_ds_slice_buf;
+ expected_value =
+ (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
+ tv_ptr->edge_size) +
+ (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
+
+ for (n = 0; n < tv_ptr->small_ds_slice_size; n++) {
+
+ if (*ptr_1 != expected_value) {
+
+ mis_match = TRUE;
+ }
+
+ *ptr_1 = 0; /* zero data for next use */
+
+ ptr_1++;
+ expected_value++;
+ }
+
+ VRFY((mis_match == FALSE), "small slice read from large ds data good.");
+
+ (tv_ptr->tests_run)++;
+ }
+
+ l++;
+
+ (tv_ptr->total_tests)++;
+
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
+ k++;
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
+ j++;
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
+
+ return;
+
+} /* contig_hs_dr_pio_test__d2m_l2s() */
+
+/*-------------------------------------------------------------------------
+ * Function: contig_hs_dr_pio_test__d2m_s2l()
+ *
+ * Purpose: Part two of a series of tests of I/O to/from hyperslab
+ *              selections of different rank in the parallel case.
+ *
+ * Verify that we can read from disk correctly using
+ * selections of different rank that H5Sselect_shape_same()
+ * views as being of the same shape.
+ *
+ * In this function, we test this by reading slices of the
+ * on disk small data set into slices through the in memory
+ * large data set, and verify that the correct data (and
+ * only the correct data) is read.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 8/10/11
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG 0
+
+static void
+contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
+{
+#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+ const char *fcnName = "contig_hs_dr_pio_test__d2m_s2l()";
+#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
+ hbool_t mis_match = FALSE;
+ int i, j, k, l;
+ size_t n;
+ int mpi_rank; /* needed by the VRFY macro */
+ size_t start_index;
+ size_t stop_index;
+ uint32_t expected_value;
+ uint32_t *ptr_1;
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ /* initialize the local copy of mpi_rank */
+ mpi_rank = tv_ptr->mpi_rank;
+
+ /* Read slices of the on disk small data set into slices
+ * through the in memory large data set, and verify that the correct
+ * data (and only the correct data) is read.
+ */
+
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
+ tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
+ tv_ptr->count[0] = 1;
+ tv_ptr->block[0] = 1;
+
+ for (i = 1; i < tv_ptr->large_rank; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+
+ ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded");
+
+#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+ HDfprintf(stdout, "%s reading slices of on disk small data set into slices of big data set.\n", fcnName);
+#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
+
+ /* zero out the in memory large ds */
+ HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to read slices of the large cube.
+ */
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
+
+ tv_ptr->block[i] = 1;
+ }
+ else {
+
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+ }
+
+ /* in serial versions of this test, we loop through all the dimensions
+ * of the large data set that don't appear in the small data set.
+ *
+ * However, in the parallel version, each process only works with that
+ * slice of the large (and small) data set indicated by its rank -- hence
+ * we set the most slowly changing index to mpi_rank, and don't iterate
+ * over it.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
+
+ i = tv_ptr->mpi_rank;
+ }
+ else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
+
+ j = tv_ptr->mpi_rank;
+ }
+ else {
+
+ j = 0;
+ }
+
+ do {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
+
+ k = tv_ptr->mpi_rank;
+ }
+ else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+             * (barring major re-organization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
+
+ (tv_ptr->tests_skipped)++;
+ }
+ else { /* run the test */
+
+ tv_ptr->skips = 0; /* reset the skips counter */
+
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+ tv_ptr->start[0] = (hsize_t)i;
+ tv_ptr->start[1] = (hsize_t)j;
+ tv_ptr->start[2] = (hsize_t)k;
+ tv_ptr->start[3] = (hsize_t)l;
+ tv_ptr->start[4] = 0;
+
+ ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start_ptr,
+ tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr);
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(mem_large_ds_sid) succeeded");
+
+ /* verify that H5Sselect_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
+
+ /* Read selection from disk */
+#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank),
+ (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]),
+ (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4]));
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
+ H5Sget_simple_extent_ndims(tv_ptr->mem_large_ds_sid),
+ H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_0));
+#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
+ ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
+
+ /* verify that the expected data and only the
+ * expected data was read.
+ */
+ ptr_1 = tv_ptr->large_ds_buf_1;
+ expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
+ start_index =
+ (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
+ tv_ptr->edge_size) +
+ (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
+ stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
+
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= tv_ptr->large_ds_size);
+
+ for (n = 0; n < tv_ptr->large_ds_size; n++) {
+
+ if ((n >= start_index) && (n <= stop_index)) {
+
+ if (*ptr_1 != expected_value) {
+
+ mis_match = TRUE;
+ }
+ expected_value++;
+ }
+ else {
+
+ if (*ptr_1 != 0) {
+
+ mis_match = TRUE;
+ }
+ }
+ /* zero out the value for the next pass */
+ *ptr_1 = 0;
+
+ ptr_1++;
+ }
+
+ VRFY((mis_match == FALSE), "small slice read from large ds data good.");
+
+ (tv_ptr->tests_run)++;
+ }
+
+ l++;
+
+ (tv_ptr->total_tests)++;
+
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
+ k++;
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
+ j++;
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
+
+ return;
+
+} /* contig_hs_dr_pio_test__d2m_s2l() */
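+
+/* The verification loops above (and in the companion sub-tests below)
+ * locate a slice in the flattened data set buffers by computing a linear
+ * offset from the slow indices (i, j, k, l).  The following sketch is not
+ * compiled and is not used by the tests -- the helper name and parameters
+ * are illustrative only -- but it isolates that arithmetic for a rank 5
+ * data set of dimensions (mpi_size + 1) x edge_size x edge_size x
+ * edge_size x edge_size.
+ */
+#if 0
+static size_t
+slice_origin_offset(int i, int j, int k, int l, int edge_size)
+{
+    size_t e = (size_t)edge_size;
+
+    /* row-major offset of element (i, j, k, l, 0) */
+    return ((size_t)i * e * e * e * e) + ((size_t)j * e * e * e) + ((size_t)k * e * e) + ((size_t)l * e);
+}
+#endif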
+
+/*-------------------------------------------------------------------------
+ * Function: contig_hs_dr_pio_test__m2d_l2s()
+ *
+ * Purpose: Part three of a series of tests of I/O to/from hyperslab
+ *              selections of different rank in the parallel case.
+ *
+ * Verify that we can write from memory to file using
+ * selections of different rank that H5Sselect_shape_same()
+ * views as being of the same shape.
+ *
+ * Do this by writing small_rank - 1 dimensional slices from
+ * the in memory large data set to the on disk small cube
+ * dataset. After each write, read the slice of the small
+ * dataset back from disk, and verify that it contains
+ * the expected data. Verify that H5Sselect_shape_same()
+ * returns true on the memory and file selections.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 8/10/11
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG 0
+
+static void
+contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
+{
+#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+ const char *fcnName = "contig_hs_dr_pio_test__m2d_l2s()";
+#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
+ hbool_t mis_match = FALSE;
+ int i, j, k, l;
+ size_t n;
+ int mpi_rank; /* needed by the VRFY macro */
+ size_t start_index;
+ size_t stop_index;
+ uint32_t expected_value;
+ uint32_t *ptr_1;
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ /* initialize the local copy of mpi_rank */
+ mpi_rank = tv_ptr->mpi_rank;
+
+ /* now we go in the opposite direction, verifying that we can write
+ * from memory to file using selections of different rank that
+ * H5Sselect_shape_same() views as being of the same shape.
+ *
+ * Start by writing small_rank - 1 dimensional slices from the in memory large
+ * data set to the on disk small cube dataset. After each write, read the
+ * slice of the small dataset back from disk, and verify that it contains
+ * the expected data. Verify that H5Sselect_shape_same() returns true on
+ * the memory and file selections.
+ */
+
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
+ tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
+ tv_ptr->count[0] = 1;
+ tv_ptr->block[0] = 1;
+
+ for (i = 1; i < tv_ptr->large_rank; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+
+ ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded");
+
+ ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to read slices of the large cube.
+ */
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
+
+ tv_ptr->block[i] = 1;
+ }
+ else {
+
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+ }
+
+ /* zero out the in memory small ds */
+ HDmemset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size);
+
+#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+ HDfprintf(stdout, "%s writing slices from big ds to slices of small ds on disk.\n", fcnName);
+#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
+
+ /* in serial versions of this test, we loop through all the dimensions
+ * of the large data set that don't appear in the small data set.
+ *
+ * However, in the parallel version, each process only works with that
+ * slice of the large (and small) data set indicated by its rank -- hence
+ * we set the most slowly changing index to mpi_rank, and don't iterate
+ * over it.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
+
+ i = tv_ptr->mpi_rank;
+ }
+ else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
+
+ j = tv_ptr->mpi_rank;
+ }
+ else {
+
+ j = 0;
+ }
+
+ do {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
+
+ k = tv_ptr->mpi_rank;
+ }
+ else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+             * (barring major re-organization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
+
+ (tv_ptr->tests_skipped)++;
+ }
+ else { /* run the test */
+
+ tv_ptr->skips = 0; /* reset the skips counter */
+
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+ /* zero out this rank's slice of the on disk small data set */
+ ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_2);
+ VRFY((ret >= 0), "H5Dwrite() zero slice to small ds succeeded.");
+
+ /* select the portion of the in memory large cube from which we
+ * are going to write data.
+ */
+ tv_ptr->start[0] = (hsize_t)i;
+ tv_ptr->start[1] = (hsize_t)j;
+ tv_ptr->start[2] = (hsize_t)k;
+ tv_ptr->start[3] = (hsize_t)l;
+ tv_ptr->start[4] = 0;
+
+ ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start_ptr,
+ tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr);
+ VRFY((ret >= 0), "H5Sselect_hyperslab() mem_large_ds_sid succeeded.");
+
+ /* verify that H5Sselect_shape_same() reports the in
+ * memory slice through the cube selection and the
+ * on disk full square selections as having the same shape.
+ */
+ check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed.");
+
+ /* write the slice from the in memory large data set to the
+ * slice of the on disk small dataset. */
+#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank),
+ (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]),
+ (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4]));
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
+ H5Sget_simple_extent_ndims(tv_ptr->mem_large_ds_sid),
+ H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_0));
+#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
+ ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0);
+ VRFY((ret >= 0), "H5Dwrite() slice to large ds succeeded.");
+
+ /* read the on disk square into memory */
+ ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
+
+ /* verify that expected data is retrieved */
+
+ mis_match = FALSE;
+ ptr_1 = tv_ptr->small_ds_buf_1;
+
+ expected_value =
+ (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
+ tv_ptr->edge_size) +
+ (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
+
+ start_index = (size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size;
+ stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
+
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= tv_ptr->small_ds_size);
+
+ for (n = 0; n < tv_ptr->small_ds_size; n++) {
+
+ if ((n >= start_index) && (n <= stop_index)) {
+
+ if (*ptr_1 != expected_value) {
+
+ mis_match = TRUE;
+ }
+ expected_value++;
+ }
+ else {
+
+ if (*ptr_1 != 0) {
+
+ mis_match = TRUE;
+ }
+ }
+ /* zero out the value for the next pass */
+ *ptr_1 = 0;
+
+ ptr_1++;
+ }
+
+ VRFY((mis_match == FALSE), "small slice write from large ds data good.");
+
+ (tv_ptr->tests_run)++;
+ }
+
+ l++;
+
+ (tv_ptr->total_tests)++;
+
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
+ k++;
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
+ j++;
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
+
+ return;
+
+} /* contig_hs_dr_pio_test__m2d_l2s() */
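+
+/* The read and write sub-tests in this series all follow the same basic
+ * pattern: select a slice in both the memory and file dataspaces, confirm
+ * that H5Sselect_shape_same() regards the two selections as having the
+ * same shape, and only then perform the transfer.  A minimal sketch of
+ * that pattern follows; it is not compiled, and the function name and
+ * handles are illustrative only (supplied by a hypothetical caller).
+ */
+#if 0
+static herr_t
+shape_same_write(hid_t dset, hid_t mem_sid, hid_t file_sid, hid_t dxpl, const uint32_t *buf)
+{
+    /* the memory and file dataspaces may have different ranks, but the
+     * selections within them must have the same shape
+     */
+    if (H5Sselect_shape_same(mem_sid, file_sid) != TRUE)
+        return FAIL;
+
+    return H5Dwrite(dset, H5T_NATIVE_UINT32, mem_sid, file_sid, dxpl, buf);
+}
+#endif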
+
+/*-------------------------------------------------------------------------
+ * Function: contig_hs_dr_pio_test__m2d_s2l()
+ *
+ * Purpose: Part four of a series of tests of I/O to/from hyperslab
+ *              selections of different rank in the parallel case.
+ *
+ * Verify that we can write from memory to file using
+ * selections of different rank that H5Sselect_shape_same()
+ * views as being of the same shape.
+ *
+ * Do this by writing the contents of the process's slice of
+ * the in memory small data set to slices of the on disk
+ * large data set. After each write, read the process's
+ * slice of the large data set back into memory, and verify
+ * that it contains the expected data.
+ *
+ * Verify that H5Sselect_shape_same() returns true on the
+ * memory and file selections.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 8/10/11
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG 0
+
+static void
+contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
+{
+#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+ const char *fcnName = "contig_hs_dr_pio_test__m2d_s2l()";
+#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
+ hbool_t mis_match = FALSE;
+ int i, j, k, l;
+ size_t n;
+ int mpi_rank; /* needed by the VRFY macro */
+ size_t start_index;
+ size_t stop_index;
+ uint32_t expected_value;
+ uint32_t *ptr_1;
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ /* initialize the local copy of mpi_rank */
+ mpi_rank = tv_ptr->mpi_rank;
+
+ /* Now write the contents of the process's slice of the in memory
+ * small data set to slices of the on disk large data set. After
+ * each write, read the process's slice of the large data set back
+ * into memory, and verify that it contains the expected data.
+ * Verify that H5Sselect_shape_same() returns true on the memory
+ * and file selections.
+ */
+
+ /* select the slice of the in memory small data set associated with
+ * the process's mpi rank.
+ */
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
+ tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
+ tv_ptr->count[0] = 1;
+ tv_ptr->block[0] = 1;
+
+ for (i = 1; i < tv_ptr->large_rank; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+
+ ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to write slices of the small data set to
+ * slices of the large data set.
+ */
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
+
+ tv_ptr->block[i] = 1;
+ }
+ else {
+
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+ }
+
+ /* zero out the in memory large ds */
+ HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
+
+#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+ HDfprintf(stdout, "%s writing process slices of small ds to slices of large ds on disk.\n", fcnName);
+#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
+
+ i = tv_ptr->mpi_rank;
+ }
+ else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
+
+ j = tv_ptr->mpi_rank;
+ }
+ else {
+
+ j = 0;
+ }
+
+ do {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
+
+ k = tv_ptr->mpi_rank;
+ }
+ else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+             * (barring major re-organization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
+
+ (tv_ptr->tests_skipped)++;
+
+#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+ tv_ptr->start[0] = (hsize_t)i;
+ tv_ptr->start[1] = (hsize_t)j;
+ tv_ptr->start[2] = (hsize_t)k;
+ tv_ptr->start[3] = (hsize_t)l;
+ tv_ptr->start[4] = 0;
+
+ HDfprintf(stdout, "%s:%d: skipping test with start = %d %d %d %d %d.\n", fcnName,
+ (int)(tv_ptr->mpi_rank), (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
+ (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4]));
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
+ H5Sget_simple_extent_ndims(tv_ptr->mem_small_ds_sid),
+ H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0));
+#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
+ }
+ else { /* run the test */
+
+ tv_ptr->skips = 0; /* reset the skips counter */
+
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+                    /* Zero out this process's slice of the on disk large data set.
+ * Note that this will leave one slice with its original data
+ * as there is one more slice than processes.
+ */
+ ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->large_ds_slice_sid,
+ tv_ptr->file_large_ds_process_slice_sid, tv_ptr->xfer_plist,
+ tv_ptr->large_ds_buf_2);
+ VRFY((ret != FAIL), "H5Dwrite() to zero large ds succeeded");
+
+ /* select the portion of the in memory large cube to which we
+ * are going to write data.
+ */
+ tv_ptr->start[0] = (hsize_t)i;
+ tv_ptr->start[1] = (hsize_t)j;
+ tv_ptr->start[2] = (hsize_t)k;
+ tv_ptr->start[3] = (hsize_t)l;
+ tv_ptr->start[4] = 0;
+
+ ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start_ptr,
+ tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr);
+ VRFY((ret != FAIL), "H5Sselect_hyperslab() target large ds slice succeeded");
+
+ /* verify that H5Sselect_shape_same() reports the in
+ * memory small data set slice selection and the
+ * on disk slice through the large data set selection
+ * as having the same shape.
+ */
+ check = H5Sselect_shape_same(tv_ptr->mem_small_ds_sid, tv_ptr->file_large_ds_sid_0);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
+
+ /* write the small data set slice from memory to the
+ * target slice of the disk data set
+ */
+#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank),
+ (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]),
+ (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4]));
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
+ H5Sget_simple_extent_ndims(tv_ptr->mem_small_ds_sid),
+ H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0));
+#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
+ ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
+ tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_0);
+ VRFY((ret != FAIL), "H5Dwrite of small ds slice to large ds succeeded");
+
+                    /* read this process's slice of the on disk large
+ * data set into memory.
+ */
+
+ ret = H5Dread(
+ tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_process_slice_sid,
+ tv_ptr->file_large_ds_process_slice_sid, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1);
+ VRFY((ret != FAIL), "H5Dread() of process slice of large ds succeeded");
+
+ /* verify that the expected data and only the
+ * expected data was read.
+ */
+ ptr_1 = tv_ptr->large_ds_buf_1;
+ expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
+
+ start_index =
+ (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
+ tv_ptr->edge_size) +
+ (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
+ stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
+
+ HDassert(start_index < stop_index);
+ HDassert(stop_index < tv_ptr->large_ds_size);
+
+ for (n = 0; n < tv_ptr->large_ds_size; n++) {
+
+ if ((n >= start_index) && (n <= stop_index)) {
+
+ if (*ptr_1 != expected_value) {
+
+ mis_match = TRUE;
+ }
+
+ expected_value++;
+ }
+ else {
+
+ if (*ptr_1 != 0) {
+
+ mis_match = TRUE;
+ }
+ }
+ /* zero out buffer for next test */
+ *ptr_1 = 0;
+ ptr_1++;
+ }
+
+ VRFY((mis_match == FALSE), "small ds slice write to large ds slice data good.");
+
+ (tv_ptr->tests_run)++;
+ }
+
+ l++;
+
+ (tv_ptr->total_tests)++;
+
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
+ k++;
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
+ j++;
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
+
+ return;
+
+} /* contig_hs_dr_pio_test__m2d_s2l() */
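+
+/* Each verification loop above scans the full (pre-zeroed) read buffer and
+ * checks that the band of elements [start_index, stop_index] holds a run
+ * of consecutive expected values while everything outside the band is
+ * still zero.  A condensed sketch of that check (not compiled, helper name
+ * illustrative only):
+ */
+#if 0
+static hbool_t
+band_is_expected(const uint32_t *buf, size_t buf_size, size_t start_index, size_t stop_index,
+                 uint32_t first_expected_value)
+{
+    hbool_t  good_data = TRUE;
+    uint32_t expected_value = first_expected_value;
+    size_t   n;
+
+    for (n = 0; n < buf_size; n++) {
+        if ((n >= start_index) && (n <= stop_index)) {
+            /* inside the band: values must increase consecutively */
+            if (buf[n] != expected_value++)
+                good_data = FALSE;
+        }
+        else if (buf[n] != 0) {
+            /* outside the band: the buffer must be untouched */
+            good_data = FALSE;
+        }
+    }
+
+    return good_data;
+}
+#endif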
+
+/*-------------------------------------------------------------------------
+ * Function: contig_hs_dr_pio_test__run_test()
+ *
+ * Purpose: Test I/O to/from hyperslab selections of different rank in
+ *              the parallel case.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 9/18/09
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG 0
+
+static void
+contig_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const int chunk_edge_size,
+ const int small_rank, const int large_rank, const hbool_t use_collective_io,
+ const hid_t dset_type, int express_test, int *skips_ptr, int max_skips,
+ int64_t *total_tests_ptr, int64_t *tests_run_ptr, int64_t *tests_skipped_ptr,
+ int mpi_rank)
+{
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ const char *fcnName = "contig_hs_dr_pio_test__run_test()";
+#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+ struct hs_dr_pio_test_vars_t test_vars = {
+ /* int mpi_size = */ -1,
+ /* int mpi_rank = */ -1,
+ /* MPI_Comm mpi_comm = */ MPI_COMM_NULL,
+        /* MPI_Info mpi_info = */ MPI_INFO_NULL,
+ /* int test_num = */ -1,
+ /* int edge_size = */ -1,
+ /* int checker_edge_size = */ -1,
+ /* int chunk_edge_size = */ -1,
+ /* int small_rank = */ -1,
+ /* int large_rank = */ -1,
+ /* hid_t dset_type = */ -1,
+ /* uint32_t * small_ds_buf_0 = */ NULL,
+ /* uint32_t * small_ds_buf_1 = */ NULL,
+ /* uint32_t * small_ds_buf_2 = */ NULL,
+ /* uint32_t * small_ds_slice_buf = */ NULL,
+ /* uint32_t * large_ds_buf_0 = */ NULL,
+ /* uint32_t * large_ds_buf_1 = */ NULL,
+ /* uint32_t * large_ds_buf_2 = */ NULL,
+ /* uint32_t * large_ds_slice_buf = */ NULL,
+ /* int small_ds_offset = */ -1,
+ /* int large_ds_offset = */ -1,
+ /* hid_t fid = */ -1, /* HDF5 file ID */
+ /* hid_t xfer_plist = */ H5P_DEFAULT,
+ /* hid_t full_mem_small_ds_sid = */ -1,
+ /* hid_t full_file_small_ds_sid = */ -1,
+ /* hid_t mem_small_ds_sid = */ -1,
+ /* hid_t file_small_ds_sid_0 = */ -1,
+ /* hid_t file_small_ds_sid_1 = */ -1,
+ /* hid_t small_ds_slice_sid = */ -1,
+ /* hid_t full_mem_large_ds_sid = */ -1,
+ /* hid_t full_file_large_ds_sid = */ -1,
+ /* hid_t mem_large_ds_sid = */ -1,
+ /* hid_t file_large_ds_sid_0 = */ -1,
+ /* hid_t file_large_ds_sid_1 = */ -1,
+ /* hid_t file_large_ds_process_slice_sid = */ -1,
+ /* hid_t mem_large_ds_process_slice_sid = */ -1,
+ /* hid_t large_ds_slice_sid = */ -1,
+ /* hid_t small_dataset = */ -1, /* Dataset ID */
+ /* hid_t large_dataset = */ -1, /* Dataset ID */
+ /* size_t small_ds_size = */ 1,
+ /* size_t small_ds_slice_size = */ 1,
+ /* size_t large_ds_size = */ 1,
+ /* size_t large_ds_slice_size = */ 1,
+ /* hsize_t dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t chunk_dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t start[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t stride[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t count[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t block[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t * start_ptr = */ NULL,
+ /* hsize_t * stride_ptr = */ NULL,
+ /* hsize_t * count_ptr = */ NULL,
+ /* hsize_t * block_ptr = */ NULL,
+ /* int skips = */ 0,
+ /* int max_skips = */ 0,
+ /* int64_t total_tests = */ 0,
+ /* int64_t tests_run = */ 0,
+ /* int64_t tests_skipped = */ 0};
+ struct hs_dr_pio_test_vars_t *tv_ptr = &test_vars;
+
+ if (MAINPROCESS)
+ printf("\r - running test #%lld: small rank = %d, large rank = %d", (long long)(test_num + 1),
+ small_rank, large_rank);
+
+ hs_dr_pio_test__setup(test_num, edge_size, -1, chunk_edge_size, small_rank, large_rank, use_collective_io,
+ dset_type, express_test, tv_ptr);
+
+ /* initialize skips & max_skips */
+ tv_ptr->skips = *skips_ptr;
+ tv_ptr->max_skips = max_skips;
+
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: small rank = %d, large rank = %d.\n", test_num, small_rank, large_rank);
+ HDfprintf(stdout, "test %d: Initialization complete.\n", test_num);
+ }
+#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+
+ /* first, verify that we can read from disk correctly using selections
+ * of different rank that H5Sselect_shape_same() views as being of the
+ * same shape.
+ *
+ * Start by reading small_rank - 1 dimensional slice from the on disk
+ * large cube, and verifying that the data read is correct. Verify that
+ * H5Sselect_shape_same() returns true on the memory and file selections.
+ */
+
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__d2m_l2s.\n", test_num);
+ }
+#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+ contig_hs_dr_pio_test__d2m_l2s(tv_ptr);
+
+ /* Second, read slices of the on disk small data set into slices
+ * through the in memory large data set, and verify that the correct
+ * data (and only the correct data) is read.
+ */
+
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__d2m_s2l.\n", test_num);
+ }
+#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+ contig_hs_dr_pio_test__d2m_s2l(tv_ptr);
+
+ /* now we go in the opposite direction, verifying that we can write
+ * from memory to file using selections of different rank that
+ * H5Sselect_shape_same() views as being of the same shape.
+ *
+ * Start by writing small_rank - 1 D slices from the in memory large data
+ * set to the on disk small cube dataset. After each write, read the
+ * slice of the small dataset back from disk, and verify that it contains
+ * the expected data. Verify that H5Sselect_shape_same() returns true on
+ * the memory and file selections.
+ */
+
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__m2d_l2s.\n", test_num);
+ }
+#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+ contig_hs_dr_pio_test__m2d_l2s(tv_ptr);
+
+ /* Now write the contents of the process's slice of the in memory
+ * small data set to slices of the on disk large data set. After
+ * each write, read the process's slice of the large data set back
+ * into memory, and verify that it contains the expected data.
+ * Verify that H5Sselect_shape_same() returns true on the memory
+ * and file selections.
+ */
+
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__m2d_s2l.\n", test_num);
+ }
+#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+ contig_hs_dr_pio_test__m2d_s2l(tv_ptr);
+
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n",
+ test_num, (long long)(tv_ptr->tests_run), (long long)(tv_ptr->tests_skipped),
+ (long long)(tv_ptr->total_tests));
+ }
+#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+
+ hs_dr_pio_test__takedown(tv_ptr);
+
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: Takedown complete.\n", test_num);
+ }
+#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+
+ *skips_ptr = tv_ptr->skips;
+ *total_tests_ptr += tv_ptr->total_tests;
+ *tests_run_ptr += tv_ptr->tests_run;
+ *tests_skipped_ptr += tv_ptr->tests_skipped;
+
+ return;
+
+} /* contig_hs_dr_pio_test__run_test() */
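+
+/* Sub-tests are throttled by the skips / max_skips counters that
+ * contig_hs_dr_pio_test__run_test() passes down to the sub-test functions:
+ * each candidate sub-test bumps the skips counter, and a sub-test only
+ * executes once the counter exceeds max_skips, at which point the counter
+ * is reset.  A sketch of that logic (not compiled, illustrative name only):
+ */
+#if 0
+static hbool_t
+should_run_subtest(int *skips_ptr, int max_skips)
+{
+    if ((*skips_ptr)++ < max_skips)
+        return FALSE; /* count this sub-test as skipped */
+
+    *skips_ptr = 0; /* reset the skips counter */
+    return TRUE;
+}
+#endif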
+
+/*-------------------------------------------------------------------------
+ * Function: contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
+ *
+ * Purpose: Test I/O to/from hyperslab selections of different rank in
+ * the parallel case.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 9/18/09
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CONTIG_HS_DR_PIO_TEST__DEBUG 0
+
+static void
+contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
+{
+ int express_test;
+ int local_express_test;
+ int mpi_rank = -1;
+ int mpi_size;
+ int test_num = 0;
+ int edge_size;
+ int chunk_edge_size = 0;
+ int small_rank;
+ int large_rank;
+ int mpi_result;
+ int skips = 0;
+ int max_skips = 0;
+    /* The following table lists the number of sub-tests skipped between
+ * each test that is actually executed as a function of the express
+ * test level. Note that any value in excess of 4880 will cause all
+     * sub-tests to be skipped.
+ */
+ int max_skips_tbl[4] = {0, 4, 64, 1024};
+ hid_t dset_type = H5T_NATIVE_UINT;
+ int64_t total_tests = 0;
+ int64_t tests_run = 0;
+ int64_t tests_skipped = 0;
+
+ HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned));
+
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ edge_size = (mpi_size > 6 ? mpi_size : 6);
+
+ local_express_test = EXPRESS_MODE; /* GetTestExpress(); */
+
+ mpi_result = MPI_Allreduce((void *)&local_express_test, (void *)&express_test, 1, MPI_INT, MPI_MAX,
+ MPI_COMM_WORLD);
+
+ VRFY((mpi_result == MPI_SUCCESS), "MPI_Allreduce(0) succeeded");
+
+ if (local_express_test < 0) {
+ max_skips = max_skips_tbl[0];
+ }
+ else if (local_express_test > 3) {
+ max_skips = max_skips_tbl[3];
+ }
+ else {
+ max_skips = max_skips_tbl[local_express_test];
+ }
+
+ for (large_rank = 3; large_rank <= PAR_SS_DR_MAX_RANK; large_rank++) {
+
+ for (small_rank = 2; small_rank < large_rank; small_rank++) {
+
+ switch (sstest_type) {
+ case IND_CONTIG:
+ /* contiguous data set, independent I/O */
+ chunk_edge_size = 0;
+
+ contig_hs_dr_pio_test__run_test(
+ test_num, edge_size, chunk_edge_size, small_rank, large_rank, FALSE, dset_type,
+ express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank);
+ test_num++;
+ break;
+ /* end of case IND_CONTIG */
+
+ case COL_CONTIG:
+ /* contiguous data set, collective I/O */
+ chunk_edge_size = 0;
+
+ contig_hs_dr_pio_test__run_test(
+ test_num, edge_size, chunk_edge_size, small_rank, large_rank, TRUE, dset_type,
+ express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank);
+ test_num++;
+ break;
+ /* end of case COL_CONTIG */
+
+ case IND_CHUNKED:
+ /* chunked data set, independent I/O */
+ chunk_edge_size = 5;
+
+ contig_hs_dr_pio_test__run_test(
+ test_num, edge_size, chunk_edge_size, small_rank, large_rank, FALSE, dset_type,
+ express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank);
+ test_num++;
+ break;
+ /* end of case IND_CHUNKED */
+
+ case COL_CHUNKED:
+ /* chunked data set, collective I/O */
+ chunk_edge_size = 5;
+
+ contig_hs_dr_pio_test__run_test(
+ test_num, edge_size, chunk_edge_size, small_rank, large_rank, TRUE, dset_type,
+ express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank);
+ test_num++;
+ break;
+ /* end of case COL_CHUNKED */
+
+ default:
+ VRFY((FALSE), "unknown test type");
+ break;
+
+ } /* end of switch(sstest_type) */
+#if CONTIG_HS_DR_PIO_TEST__DEBUG
+ if ((MAINPROCESS) && (tests_skipped > 0)) {
+ HDfprintf(stdout, " run/skipped/total = %lld/%lld/%lld.\n", tests_run, tests_skipped,
+ total_tests);
+ }
+#endif /* CONTIG_HS_DR_PIO_TEST__DEBUG */
+ }
+ }
+
+ if (MAINPROCESS) {
+ if (tests_skipped > 0) {
+ HDfprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n",
+ tests_skipped, total_tests);
+ }
+ else
+ HDprintf("\n");
+ }
+
+ return;
+
+} /* contig_hs_dr_pio_test() */
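+
+/* The max_skips value used above is looked up from the skip table by the
+ * express test level, with out-of-range levels clamped to the ends of the
+ * table.  Pulled out as a stand-alone sketch (not compiled, illustrative
+ * name only):
+ */
+#if 0
+static int
+express_level_to_max_skips(int express_level)
+{
+    /* number of sub-tests skipped between executed sub-tests, per level */
+    static const int max_skips_tbl[4] = {0, 4, 64, 1024};
+
+    if (express_level < 0)
+        return max_skips_tbl[0];
+    if (express_level > 3)
+        return max_skips_tbl[3];
+
+    return max_skips_tbl[express_level];
+}
+#endif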
+
+/****************************************************************
+**
+** ckrbrd_hs_dr_pio_test__slct_ckrbrd():
+** Given a dataspace of tgt_rank, and dimensions:
+**
+** (mpi_size + 1), edge_size, ... , edge_size
+**
+** and a checker_edge_size, select a checker
+** board selection of a sel_rank (sel_rank < tgt_rank)
+** dimensional slice through the dataspace parallel to the
+** sel_rank fastest changing indices, with origin (in the
+** higher indices) as indicated by the start array.
+**
+** Note that this function, like all its relatives, is
+** hard coded to presume a maximum dataspace rank of 5.
+** While this maximum is declared as a constant, increasing
+** it will require extensive coding in addition to changing
+** the value of the constant.
+**
+** JRM -- 10/8/09
+**
+****************************************************************/
+
+#define CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG 0
+
+static void
+ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank, const hid_t tgt_sid, const int tgt_rank,
+ const int edge_size, const int checker_edge_size, const int sel_rank,
+ hsize_t sel_start[])
+{
+#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+ const char *fcnName = "ckrbrd_hs_dr_pio_test__slct_ckrbrd():";
+#endif
+ hbool_t first_selection = TRUE;
+ int i, j, k, l, m;
+ int n_cube_offset;
+ int sel_offset;
+    const int test_max_rank = PAR_SS_DR_MAX_RANK; /* must update code if this changes */
+ hsize_t base_count;
+ hsize_t offset_count;
+ hsize_t start[PAR_SS_DR_MAX_RANK];
+ hsize_t stride[PAR_SS_DR_MAX_RANK];
+ hsize_t count[PAR_SS_DR_MAX_RANK];
+ hsize_t block[PAR_SS_DR_MAX_RANK];
+ herr_t ret; /* Generic return value */
+
+ HDassert(edge_size >= 6);
+ HDassert(0 < checker_edge_size);
+ HDassert(checker_edge_size <= edge_size);
+ HDassert(0 < sel_rank);
+ HDassert(sel_rank <= tgt_rank);
+ HDassert(tgt_rank <= test_max_rank);
+ HDassert(test_max_rank <= PAR_SS_DR_MAX_RANK);
+
+ sel_offset = test_max_rank - sel_rank;
+ HDassert(sel_offset >= 0);
+
+ n_cube_offset = test_max_rank - tgt_rank;
+ HDassert(n_cube_offset >= 0);
+ HDassert(n_cube_offset <= sel_offset);
+
+#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+ HDfprintf(stdout, "%s:%d: edge_size/checker_edge_size = %d/%d\n", fcnName, mpi_rank, edge_size,
+ checker_edge_size);
+ HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n", fcnName, mpi_rank, sel_rank, sel_offset);
+ HDfprintf(stdout, "%s:%d: tgt_rank/n_cube_offset = %d/%d.\n", fcnName, mpi_rank, tgt_rank, n_cube_offset);
+#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ /* First, compute the base count (which assumes start == 0
+ * for the associated offset) and offset_count (which
+ * assumes start == checker_edge_size for the associated
+ * offset).
+ *
+ * Note that the following computation depends on the C99
+ * requirement that integer division discard any fraction
+ * (truncation towards zero) to function correctly. As we
+ * now require C99, this shouldn't be a problem, but noting
+ * it may save us some pain if we are ever obliged to support
+ * pre-C99 compilers again.
+ */
+
+ base_count = (hsize_t)(edge_size / (checker_edge_size * 2));
+
+ if ((edge_size % (checker_edge_size * 2)) > 0) {
+
+ base_count++;
+ }
+
+ offset_count = (hsize_t)((edge_size - checker_edge_size) / (checker_edge_size * 2));
+
+ if (((edge_size - checker_edge_size) % (checker_edge_size * 2)) > 0) {
+
+ offset_count++;
+ }
+
+ /* Now set up the stride and block arrays, and portions of the start
+ * and count arrays that will not be altered during the selection of
+ * the checker board.
+ */
+ i = 0;
+ while (i < n_cube_offset) {
+
+ /* these values should never be used */
+ start[i] = 0;
+ stride[i] = 0;
+ count[i] = 0;
+ block[i] = 0;
+
+ i++;
+ }
+
+ while (i < sel_offset) {
+
+ start[i] = sel_start[i];
+ stride[i] = (hsize_t)(2 * edge_size);
+ count[i] = 1;
+ block[i] = 1;
+
+ i++;
+ }
+
+ while (i < test_max_rank) {
+
+ stride[i] = (hsize_t)(2 * checker_edge_size);
+ block[i] = (hsize_t)checker_edge_size;
+
+ i++;
+ }
+
+ i = 0;
+ do {
+ if (0 >= sel_offset) {
+
+ if (i == 0) {
+
+ start[0] = 0;
+ count[0] = base_count;
+ }
+ else {
+
+ start[0] = (hsize_t)checker_edge_size;
+ count[0] = offset_count;
+ }
+ }
+
+ j = 0;
+ do {
+ if (1 >= sel_offset) {
+
+ if (j == 0) {
+
+ start[1] = 0;
+ count[1] = base_count;
+ }
+ else {
+
+ start[1] = (hsize_t)checker_edge_size;
+ count[1] = offset_count;
+ }
+ }
+
+ k = 0;
+ do {
+ if (2 >= sel_offset) {
+
+ if (k == 0) {
+
+ start[2] = 0;
+ count[2] = base_count;
+ }
+ else {
+
+ start[2] = (hsize_t)checker_edge_size;
+ count[2] = offset_count;
+ }
+ }
+
+ l = 0;
+ do {
+ if (3 >= sel_offset) {
+
+ if (l == 0) {
+
+ start[3] = 0;
+ count[3] = base_count;
+ }
+ else {
+
+ start[3] = (hsize_t)checker_edge_size;
+ count[3] = offset_count;
+ }
+ }
+
+ m = 0;
+ do {
+ if (4 >= sel_offset) {
+
+ if (m == 0) {
+
+ start[4] = 0;
+ count[4] = base_count;
+ }
+ else {
+
+ start[4] = (hsize_t)checker_edge_size;
+ count[4] = offset_count;
+ }
+ }
+
+ if (((i + j + k + l + m) % 2) == 0) {
+
+#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+ HDfprintf(stdout, "%s%d: *** first_selection = %d ***\n", fcnName, mpi_rank,
+ (int)first_selection);
+ HDfprintf(stdout, "%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n", fcnName, mpi_rank, i, j,
+ k, l, m);
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)start[0], (int)start[1], (int)start[2], (int)start[3],
+ (int)start[4]);
+ HDfprintf(stdout, "%s:%d: stride = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)stride[0], (int)stride[1], (int)stride[2], (int)stride[3],
+ (int)stride[4]);
+ HDfprintf(stdout, "%s:%d: count = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)count[0], (int)count[1], (int)count[2], (int)count[3],
+ (int)count[4]);
+ HDfprintf(stdout, "%s:%d: block = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)block[0], (int)block[1], (int)block[2], (int)block[3],
+ (int)block[4]);
+ HDfprintf(stdout, "%s:%d: n-cube extent dims = %d.\n", fcnName, mpi_rank,
+ H5Sget_simple_extent_ndims(tgt_sid));
+ HDfprintf(stdout, "%s:%d: selection rank = %d.\n", fcnName, mpi_rank, sel_rank);
+#endif
+
+ if (first_selection) {
+
+ first_selection = FALSE;
+
+ ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_SET, &(start[n_cube_offset]),
+ &(stride[n_cube_offset]), &(count[n_cube_offset]),
+ &(block[n_cube_offset]));
+
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(SET) succeeded");
+ }
+ else {
+
+ ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_OR, &(start[n_cube_offset]),
+ &(stride[n_cube_offset]), &(count[n_cube_offset]),
+ &(block[n_cube_offset]));
+
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(OR) succeeded");
+ }
+ }
+
+ m++;
+
+ } while ((m <= 1) && (4 >= sel_offset));
+
+ l++;
+
+ } while ((l <= 1) && (3 >= sel_offset));
+
+ k++;
+
+ } while ((k <= 1) && (2 >= sel_offset));
+
+ j++;
+
+ } while ((j <= 1) && (1 >= sel_offset));
+
+ i++;
+
+ } while ((i <= 1) && (0 >= sel_offset));
+
+#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(tgt_sid));
+#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ /* Clip the selection back to the dataspace proper. */
+
+ for (i = 0; i < test_max_rank; i++) {
+
+ start[i] = 0;
+ stride[i] = (hsize_t)edge_size;
+ count[i] = 1;
+ block[i] = (hsize_t)edge_size;
+ }
+
+ ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_AND, start, stride, count, block);
+
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(AND) succeeded");
+
+#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(tgt_sid));
+ HDfprintf(stdout, "%s%d: done.\n", fcnName, mpi_rank);
+#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ return;
+
+} /* ckrbrd_hs_dr_pio_test__slct_ckrbrd() */
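+
+/* In one dimension the checker board selection built above reduces to a
+ * single strided hyperslab of "even" checkers followed by a clip back to
+ * the dataspace extent, since the last checker may overshoot the edge.  A
+ * minimal 1-D sketch (not compiled, function name illustrative only):
+ */
+#if 0
+static herr_t
+select_1d_checker_board(hid_t sid, int edge_size, int checker_edge_size)
+{
+    hsize_t start, stride, count, block;
+    herr_t  ret;
+
+    /* one block per pair of checkers, rounded up -- the "base_count" above */
+    start  = 0;
+    stride = (hsize_t)(2 * checker_edge_size);
+    count  = (hsize_t)((edge_size + (2 * checker_edge_size) - 1) / (2 * checker_edge_size));
+    block  = (hsize_t)checker_edge_size;
+
+    ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block);
+    if (ret < 0)
+        return ret;
+
+    /* clip the selection back to the dataspace proper */
+    start  = 0;
+    stride = (hsize_t)edge_size;
+    count  = 1;
+    block  = (hsize_t)edge_size;
+
+    return H5Sselect_hyperslab(sid, H5S_SELECT_AND, &start, &stride, &count, &block);
+}
+#endif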
+
+/****************************************************************
+**
+** ckrbrd_hs_dr_pio_test__verify_data():
+**
+** Examine the supplied buffer to see if it contains the
+** expected data. Return TRUE if it does, and FALSE
+** otherwise.
+**
+** The supplied buffer is presumed to be this process's slice
+** of the target data set. Each such slice will be an
+** n-cube of rank (rank - 1) and the supplied edge_size with
+** origin (mpi_rank, 0, ... , 0) in the target data set.
+**
+** Further, the buffer is presumed to be the result of reading
+** or writing a checker board selection of an m (1 <= m <
+** rank) dimensional slice through this process's slice
+** of the target data set. Also, this slice must be parallel
+** to the fastest changing indices.
+**
+** It is further presumed that the buffer was zeroed before
+** the read/write, and that the full target data set (i.e.
+** the buffer/data set for all processes) was initialized
+** with the natural numbers listed in order from the origin
+** along the fastest changing axis.
+**
+** Thus for a 20x10x10 dataset, the value stored in location
+** (x, y, z) (assuming that z is the fastest changing index
+** and x the slowest) is assumed to be:
+**
+** (10 * 10 * x) + (10 * y) + z
+**
+** Further, supposing that this is process 10, this process's
+** slice of the dataset would be a 10 x 10 2-cube with origin
+** (10, 0, 0) in the data set, and would be initialized (prior
+** to the checkerboard selection) as follows:
+**
+** 1000, 1001, 1002, ... 1008, 1009
+** 1010, 1011, 1012, ... 1018, 1019
+** . . . . .
+** . . . . .
+** . . . . .
+** 1090, 1091, 1092, ... 1098, 1099
+**
+** In the case of a read from the process's slice of another
+** data set of different rank, the values expected will have
+** to be adjusted accordingly. This is done via the
+** first_expected_val parameter.
+**
+** Finally, the function presumes that the first element
+** of the buffer resides at the origin of either
+** a selected or an unselected checker. (Translation:
+** if partial checkers appear in the buffer, they will
+** intersect the edges of the n-cube opposite the origin.)
+**
+****************************************************************/
+
+#define CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG 0
+
+static hbool_t
+ckrbrd_hs_dr_pio_test__verify_data(uint32_t *buf_ptr, const int rank, const int edge_size,
+ const int checker_edge_size, uint32_t first_expected_val,
+ hbool_t buf_starts_in_checker)
+{
+#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
+ const char *fcnName = "ckrbrd_hs_dr_pio_test__verify_data():";
+#endif
+ hbool_t good_data = TRUE;
+ hbool_t in_checker;
+ hbool_t start_in_checker[5];
+ uint32_t expected_value;
+ uint32_t *val_ptr;
+ int i, j, k, l, m; /* to track position in n-cube */
+ int v, w, x, y, z; /* to track position in checker */
+ const int test_max_rank = 5; /* code changes needed if this is increased */
+
+ HDassert(buf_ptr != NULL);
+ HDassert(0 < rank);
+ HDassert(rank <= test_max_rank);
+ HDassert(edge_size >= 6);
+ HDassert(0 < checker_edge_size);
+ HDassert(checker_edge_size <= edge_size);
+ HDassert(test_max_rank <= PAR_SS_DR_MAX_RANK);
+
+#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
+    {
+        int mpi_rank;
+
+        MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+        HDfprintf(stdout, "%s mpi_rank = %d.\n", fcnName, mpi_rank);
+        HDfprintf(stdout, "%s rank = %d.\n", fcnName, rank);
+        HDfprintf(stdout, "%s edge_size = %d.\n", fcnName, edge_size);
+        HDfprintf(stdout, "%s checker_edge_size = %d.\n", fcnName, checker_edge_size);
+        HDfprintf(stdout, "%s first_expected_val = %d.\n", fcnName, (int)first_expected_val);
+        HDfprintf(stdout, "%s starts_in_checker = %d.\n", fcnName, (int)buf_starts_in_checker);
+    }
+#endif
+
+    val_ptr = buf_ptr;
+    expected_value = first_expected_val;
+
+    i = 0;
+    v = 0;
+    start_in_checker[0] = buf_starts_in_checker;
+    do {
+        if (v >= checker_edge_size) {
+
+            start_in_checker[0] = !start_in_checker[0];
+            v = 0;
+        }
+
+        j = 0;
+        w = 0;
+        start_in_checker[1] = start_in_checker[0];
+        do {
+            if (w >= checker_edge_size) {
+
+                start_in_checker[1] = !start_in_checker[1];
+                w = 0;
+            }
+
+            k = 0;
+            x = 0;
+            start_in_checker[2] = start_in_checker[1];
+            do {
+                if (x >= checker_edge_size) {
+
+                    start_in_checker[2] = !start_in_checker[2];
+                    x = 0;
+                }
+
+                l = 0;
+                y = 0;
+                start_in_checker[3] = start_in_checker[2];
+                do {
+                    if (y >= checker_edge_size) {
+
+                        start_in_checker[3] = !start_in_checker[3];
+                        y = 0;
+                    }
+
+                    m = 0;
+                    z = 0;
+#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
+                    HDfprintf(stdout, "%d, %d, %d, %d, %d:", i, j, k, l, m);
+#endif
+                    in_checker = start_in_checker[3];
+                    do {
+#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
+                        HDfprintf(stdout, " %d", (int)(*val_ptr));
+#endif
+                        if (z >= checker_edge_size) {
+
+                            in_checker = !in_checker;
+                            z = 0;
+                        }
+
+                        if (in_checker) {
+
+                            if (*val_ptr != expected_value) {
+
+                                good_data = FALSE;
+                            }
+
+                            /* zero out buffer for re-use */
+                            *val_ptr = 0;
+                        }
+                        else if (*val_ptr != 0) {
+
+                            good_data = FALSE;
+
+                            /* zero out buffer for re-use */
+                            *val_ptr = 0;
+                        }
+
+                        val_ptr++;
+                        expected_value++;
+                        m++;
+                        z++;
+
+                    } while ((rank >= (test_max_rank - 4)) && (m < edge_size));
+#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
+                    HDfprintf(stdout, "\n");
+#endif
+                    l++;
+                    y++;
+                } while ((rank >= (test_max_rank - 3)) && (l < edge_size));
+                k++;
+                x++;
+            } while ((rank >= (test_max_rank - 2)) && (k < edge_size));
+            j++;
+            w++;
+        } while ((rank >= (test_max_rank - 1)) && (j < edge_size));
+        i++;
+        v++;
+    } while ((rank >= test_max_rank) && (i < edge_size));
+
+    return (good_data);
+
+} /* ckrbrd_hs_dr_pio_test__verify_data() */
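+
+/* The header comment for ckrbrd_hs_dr_pio_test__verify_data() works
+ * through a 20 x 10 x 10 example in which the value stored at (x, y, z)
+ * (z fastest) is (10 * 10 * x) + (10 * y) + z, so process 10's slice
+ * starts at 1000.  That initialization rule, as a sketch (not compiled,
+ * function name illustrative only):
+ */
+#if 0
+static uint32_t
+example_cell_value(int x, int y, int z)
+{
+    /* natural numbers in row-major order for a 20 x 10 x 10 data set */
+    return (uint32_t)((10 * 10 * x) + (10 * y) + z);
+}
+#endif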
+
+/*-------------------------------------------------------------------------
+ * Function: ckrbrd_hs_dr_pio_test__d2m_l2s()
+ *
+ * Purpose: Part one of a series of tests of I/O to/from hyperslab
+ *              selections of different rank in the parallel case.
+ *
+ * Verify that we can read from disk correctly using checker
+ * board selections of different rank that
+ * H5Sselect_shape_same() views as being of the same shape.
+ *
+ * In this function, we test this by reading small_rank - 1
+ * checker board slices from the on disk large cube, and
+ * verifying that the data read is correct. Verify that
+ * H5Sselect_shape_same() returns true on the memory and
+ * file selections.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 9/15/11
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG 0
+
+static void
+ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
+{
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ const char *fcnName = "ckrbrd_hs_dr_pio_test__d2m_l2s()";
+ uint32_t *ptr_0;
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
+ hbool_t data_ok = FALSE;
+ int i, j, k, l;
+ uint32_t expected_value;
+ int mpi_rank; /* needed by VRFY */
+ hsize_t sel_start[PAR_SS_DR_MAX_RANK];
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ /* initialize the local copy of mpi_rank */
+ mpi_rank = tv_ptr->mpi_rank;
+
+ /* first, verify that we can read from disk correctly using selections
+ * of different rank that H5Sselect_shape_same() views as being of the
+ * same shape.
+ *
+ * Start by reading a (small_rank - 1)-D checker board slice from this
+     * process's slice of the on disk large data set, and verifying that the
+ * data read is correct. Verify that H5Sselect_shape_same() returns
+ * true on the memory and file selections.
+ *
+ * The first step is to set up the needed checker board selection in the
+     * in memory small cube.
+ */
+
+ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
+ sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank);
+
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->small_ds_slice_sid, tv_ptr->small_rank - 1,
+ tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1,
+ sel_start);
+
+ /* zero out the buffer we will be reading into */
+ HDmemset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
+
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ HDfprintf(stdout, "%s:%d: initial small_ds_slice_buf = ", fcnName, tv_ptr->mpi_rank);
+ ptr_0 = tv_ptr->small_ds_slice_buf;
+ for (i = 0; i < (int)(tv_ptr->small_ds_slice_size); i++) {
+ HDfprintf(stdout, "%d ", (int)(*ptr_0));
+ ptr_0++;
+ }
+ HDfprintf(stdout, "\n");
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to read slices of the large cube.
+ */
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
+
+ tv_ptr->block[i] = 1;
+ }
+ else {
+
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+ }
+
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ HDfprintf(stdout, "%s:%d: reading slice from big ds on disk into small ds slice.\n", fcnName,
+ tv_ptr->mpi_rank);
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
+ /* in serial versions of this test, we loop through all the dimensions
+ * of the large data set. However, in the parallel version, each
+ * process only works with that slice of the large cube indicated
+ * by its rank -- hence we set the most slowly changing index to
+ * mpi_rank, and don't iterate over it.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
+
+ i = tv_ptr->mpi_rank;
+ }
+ else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
+
+ j = tv_ptr->mpi_rank;
+ }
+ else {
+
+ j = 0;
+ }
+
+ do {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
+
+ k = tv_ptr->mpi_rank;
+ }
+ else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+             * (barring major re-organization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
+
+ (tv_ptr->tests_skipped)++;
+ }
+ else { /* run the test */
+
+ tv_ptr->skips = 0; /* reset the skips counter */
+
+ /* we know that small_rank - 1 >= 1 and that
+ * large_rank > small_rank by the assertions at the head
+ * of this function. Thus no need for another inner loop.
+ */
+ tv_ptr->start[0] = (hsize_t)i;
+ tv_ptr->start[1] = (hsize_t)j;
+ tv_ptr->start[2] = (hsize_t)k;
+ tv_ptr->start[3] = (hsize_t)l;
+ tv_ptr->start[4] = 0;
+
+ HDassert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1));
+
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(
+ tv_ptr->mpi_rank, tv_ptr->file_large_ds_sid_0, tv_ptr->large_rank, tv_ptr->edge_size,
+ tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start);
+
+ /* verify that H5Sselect_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5Sselect_shape_same(tv_ptr->small_ds_slice_sid, tv_ptr->file_large_ds_sid_0);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
+
+ /* Read selection from disk */
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank,
+ tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3],
+ tv_ptr->start[4]);
+ HDfprintf(stdout, "%s slice/file extent dims = %d/%d.\n", fcnName,
+ H5Sget_simple_extent_ndims(tv_ptr->small_ds_slice_sid),
+ H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0));
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
+
+ ret =
+ H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->small_ds_slice_sid,
+ tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_slice_buf);
+ VRFY((ret >= 0), "H5Dread() slice from large ds succeeded.");
+
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ HDfprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, tv_ptr->mpi_rank);
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
+
+ /* verify that expected data is retrieved */
+
+ expected_value =
+ (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
+ tv_ptr->edge_size) +
+ (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
+
+ data_ok = ckrbrd_hs_dr_pio_test__verify_data(
+ tv_ptr->small_ds_slice_buf, tv_ptr->small_rank - 1, tv_ptr->edge_size,
+ tv_ptr->checker_edge_size, expected_value, (hbool_t)TRUE);
+
+ VRFY((data_ok == TRUE), "small slice read from large ds data good.");
+
+ (tv_ptr->tests_run)++;
+ }
+
+ l++;
+
+ (tv_ptr->total_tests)++;
+
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
+ k++;
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
+ j++;
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
+
+ return;
+
+} /* ckrbrd_hs_dr_pio_test__d2m_l2s() */
+
+/*-------------------------------------------------------------------------
+ * Function: ckrbrd_hs_dr_pio_test__d2m_s2l()
+ *
+ * Purpose: Part two of a series of tests of I/O to/from hyperslab
+ *              selections of different rank in the parallel case.
+ *
+ * Verify that we can read from disk correctly using
+ * selections of different rank that H5Sselect_shape_same()
+ * views as being of the same shape.
+ *
+ * In this function, we test this by reading checker board
+ * slices of the on disk small data set into slices through
+ * the in memory large data set, and verify that the correct
+ * data (and only the correct data) is read.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 8/15/11
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG 0
+
+static void
+ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
+{
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+ const char *fcnName = "ckrbrd_hs_dr_pio_test__d2m_s2l()";
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
+ hbool_t data_ok = FALSE;
+ int i, j, k, l;
+ size_t u;
+ size_t start_index;
+ size_t stop_index;
+ uint32_t expected_value;
+ uint32_t *ptr_1;
+ int mpi_rank; /* needed by VRFY */
+ hsize_t sel_start[PAR_SS_DR_MAX_RANK];
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ /* initialize the local copy of mpi_rank */
+ mpi_rank = tv_ptr->mpi_rank;
+
+ /* similarly, read slices of the on disk small data set into slices
+ * through the in memory large data set, and verify that the correct
+ * data (and only the correct data) is read.
+ */
+
+ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
+ sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank);
+
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->file_small_ds_sid_0, tv_ptr->small_rank,
+ tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1,
+ sel_start);
+
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+ HDfprintf(stdout, "%s reading slices of on disk small data set into slices of big data set.\n", fcnName);
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
+
+ /* zero out the buffer we will be reading into */
+ HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to read the slice of the small data set
+ * into different slices of the process slice of the large data
+ * set.
+ */
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
+
+ tv_ptr->block[i] = 1;
+ }
+ else {
+
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+ }
+
+ /* in serial versions of this test, we loop through all the dimensions
+ * of the large data set that don't appear in the small data set.
+ *
+ * However, in the parallel version, each process only works with that
+ * slice of the large (and small) data set indicated by its rank -- hence
+ * we set the most slowly changing index to mpi_rank, and don't iterate
+ * over it.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
+
+ i = tv_ptr->mpi_rank;
+ }
+ else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
+
+ j = tv_ptr->mpi_rank;
+ }
+ else {
+
+ j = 0;
+ }
+
+ do {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
+
+ k = tv_ptr->mpi_rank;
+ }
+ else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+             * (barring major re-organization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
+
+ (tv_ptr->tests_skipped)++;
+ }
+ else { /* run the test */
+
+ tv_ptr->skips = 0; /* reset the skips counter */
+
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+ tv_ptr->start[0] = (hsize_t)i;
+ tv_ptr->start[1] = (hsize_t)j;
+ tv_ptr->start[2] = (hsize_t)k;
+ tv_ptr->start[3] = (hsize_t)l;
+ tv_ptr->start[4] = 0;
+
+ HDassert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1));
+
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(
+ tv_ptr->mpi_rank, tv_ptr->mem_large_ds_sid, tv_ptr->large_rank, tv_ptr->edge_size,
+ tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start);
+
+ /* verify that H5Sselect_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
+
+ /* Read selection from disk */
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank,
+ tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3],
+ tv_ptr->start[4]);
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
+ H5Sget_simple_extent_ndims(tv_ptr->large_ds_slice_sid),
+ H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_0));
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
+ ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
+
+ /* verify that the expected data and only the
+ * expected data was read.
+ */
+ data_ok = TRUE;
+ ptr_1 = tv_ptr->large_ds_buf_1;
+ expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
+ start_index =
+ (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
+ tv_ptr->edge_size) +
+ (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
+ stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
+
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+ {
+ int m, n;
+
+ HDfprintf(stdout, "%s:%d: expected_value = %d.\n", fcnName, tv_ptr->mpi_rank,
+ expected_value);
+ HDfprintf(stdout, "%s:%d: start/stop index = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
+ start_index, stop_index);
+ n = 0;
+ for (m = 0; (unsigned)m < tv_ptr->large_ds_size; m++) {
+ HDfprintf(stdout, "%d ", (int)(*ptr_1));
+ ptr_1++;
+ n++;
+ if (n >= tv_ptr->edge_size) {
+ HDfprintf(stdout, "\n");
+ n = 0;
+ }
+ }
+ HDfprintf(stdout, "\n");
+ ptr_1 = tv_ptr->large_ds_buf_1;
+ }
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
+
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= tv_ptr->large_ds_size);
+
+ for (u = 0; u < start_index; u++) {
+
+ if (*ptr_1 != 0) {
+
+ data_ok = FALSE;
+ }
+
+ /* zero out the value for the next pass */
+ *ptr_1 = 0;
+
+ ptr_1++;
+ }
+
+ VRFY((data_ok == TRUE), "slice read from small to large ds data good(1).");
+
+ data_ok = ckrbrd_hs_dr_pio_test__verify_data(ptr_1, tv_ptr->small_rank - 1,
+ tv_ptr->edge_size, tv_ptr->checker_edge_size,
+ expected_value, (hbool_t)TRUE);
+
+ VRFY((data_ok == TRUE), "slice read from small to large ds data good(2).");
+
+ ptr_1 = tv_ptr->large_ds_buf_1 + stop_index + 1;
+
+ for (u = stop_index + 1; u < tv_ptr->large_ds_size; u++) {
+
+ if (*ptr_1 != 0) {
+
+ data_ok = FALSE;
+ }
+
+ /* zero out the value for the next pass */
+ *ptr_1 = 0;
+
+ ptr_1++;
+ }
+
+ VRFY((data_ok == TRUE), "slice read from small to large ds data good(3).");
+
+ (tv_ptr->tests_run)++;
+ }
+
+ l++;
+
+ (tv_ptr->total_tests)++;
+
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
+ k++;
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
+ j++;
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
+
+ return;
+
+} /* ckrbrd_hs_dr_pio_test__d2m_s2l() */
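+
+/* The following is a minimal, hypothetical sketch -- disabled with #if 0 like
+ * other commented-out code in this file -- of the H5Sselect_shape_same()
+ * property that these checker board tests rely on: selections of different
+ * dataspace rank compare as having the same shape when they describe the same
+ * n-dimensional block.  All names and sizes here are illustrative only.
+ */
+#if 0
+static void
+shape_same_rank_sketch(void)
+{
+    hsize_t dims2[2]  = {10, 10};
+    hsize_t dims3[3]  = {4, 10, 10};
+    hsize_t start2[2] = {0, 0};
+    hsize_t count2[2] = {10, 10};
+    hsize_t start3[3] = {2, 0, 0};
+    hsize_t count3[3] = {1, 10, 10};
+    hid_t   sid2      = H5Screate_simple(2, dims2, NULL);
+    hid_t   sid3      = H5Screate_simple(3, dims3, NULL);
+
+    /* a 10 x 10 block in the 2-D dataspace ... */
+    H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start2, NULL, count2, NULL);
+
+    /* ... and a 1 x 10 x 10 block in the 3-D dataspace */
+    H5Sselect_hyperslab(sid3, H5S_SELECT_SET, start3, NULL, count3, NULL);
+
+    /* expected to return TRUE: the selections differ in rank, not in shape */
+    (void)H5Sselect_shape_same(sid2, sid3);
+
+    H5Sclose(sid2);
+    H5Sclose(sid3);
+}
+#endif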
+
+/*-------------------------------------------------------------------------
+ * Function: ckrbrd_hs_dr_pio_test__m2d_l2s()
+ *
+ * Purpose: Part three of a series of tests of I/O to/from checker
+ * board hyperslab selections of different rank in the
+ *           parallel case.
+ *
+ * Verify that we can write from memory to file using checker
+ * board selections of different rank that
+ * H5Sselect_shape_same() views as being of the same shape.
+ *
+ * Do this by writing small_rank - 1 dimensional checker
+ * board slices from the in memory large data set to the on
+ * disk small cube dataset. After each write, read the
+ * slice of the small dataset back from disk, and verify
+ * that it contains the expected data. Verify that
+ * H5Sselect_shape_same() returns true on the memory and
+ * file selections.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 8/15/11
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG 0
+
+static void
+ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
+{
+#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+ const char *fcnName = "ckrbrd_hs_dr_pio_test__m2d_l2s()";
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
+ hbool_t data_ok = FALSE;
+ int i, j, k, l;
+ size_t u;
+ size_t start_index;
+ size_t stop_index;
+ uint32_t expected_value;
+ uint32_t *ptr_1;
+ int mpi_rank; /* needed by VRFY */
+ hsize_t sel_start[PAR_SS_DR_MAX_RANK];
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ /* initialize the local copy of mpi_rank */
+ mpi_rank = tv_ptr->mpi_rank;
+
+ /* now we go in the opposite direction, verifying that we can write
+ * from memory to file using selections of different rank that
+ * H5Sselect_shape_same() views as being of the same shape.
+ *
+ * Start by writing small_rank - 1 D slices from the in memory large data
+ * set to the on disk small dataset. After each write, read the slice of
+ * the small dataset back from disk, and verify that it contains the
+ * expected data. Verify that H5Sselect_shape_same() returns true on
+ * the memory and file selections.
+ */
+
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
+ tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
+ tv_ptr->count[0] = 1;
+ tv_ptr->block[0] = 1;
+
+ for (i = 1; i < tv_ptr->large_rank; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+
+ ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded");
+
+ ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
+
+ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
+ sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank);
+
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->file_small_ds_sid_1, tv_ptr->small_rank,
+ tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1,
+ sel_start);
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to read slices of the large cube.
+ */
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
+
+ tv_ptr->block[i] = 1;
+ }
+ else {
+
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+ }
+
+ /* zero out the in memory small ds */
+ HDmemset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size);
+
+#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+ HDfprintf(stdout,
+ "%s writing checker boards selections of slices from big ds to slices of small ds on disk.\n",
+ fcnName);
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
+
+ /* in serial versions of this test, we loop through all the dimensions
+ * of the large data set that don't appear in the small data set.
+ *
+ * However, in the parallel version, each process only works with that
+ * slice of the large (and small) data set indicated by its rank -- hence
+ * we set the most slowly changing index to mpi_rank, and don't iterate
+ * over it.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
+
+ i = tv_ptr->mpi_rank;
+ }
+ else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
+
+ j = tv_ptr->mpi_rank;
+ }
+ else {
+
+ j = 0;
+ }
+
+ j = 0;
+ do {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
+
+ k = tv_ptr->mpi_rank;
+ }
+ else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+             * (barring major re-organization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
+
+ (tv_ptr->tests_skipped)++;
+ }
+ else { /* run the test */
+
+ tv_ptr->skips = 0; /* reset the skips counter */
+
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+ /* zero out this rank's slice of the on disk small data set */
+ ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_2);
+ VRFY((ret >= 0), "H5Dwrite() zero slice to small ds succeeded.");
+
+ /* select the portion of the in memory large cube from which we
+ * are going to write data.
+ */
+ tv_ptr->start[0] = (hsize_t)i;
+ tv_ptr->start[1] = (hsize_t)j;
+ tv_ptr->start[2] = (hsize_t)k;
+ tv_ptr->start[3] = (hsize_t)l;
+ tv_ptr->start[4] = 0;
+
+ HDassert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1));
+
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(
+ tv_ptr->mpi_rank, tv_ptr->mem_large_ds_sid, tv_ptr->large_rank, tv_ptr->edge_size,
+ tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start);
+
+ /* verify that H5Sselect_shape_same() reports the in
+ * memory checkerboard selection of the slice through the
+ * large dataset and the checkerboard selection of the process
+ * slice of the small data set as having the same shape.
+ */
+ check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_1, tv_ptr->mem_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed.");
+
+ /* write the checker board selection of the slice from the in
+ * memory large data set to the slice of the on disk small
+ * dataset.
+ */
+#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank,
+ tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3],
+ tv_ptr->start[4]);
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
+ H5Sget_simple_extent_ndims(tv_ptr->mem_large_ds_sid),
+ H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_1));
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
+ ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
+ tv_ptr->file_small_ds_sid_1, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0);
+                    VRFY((ret >= 0), "H5Dwrite() slice to small ds succeeded.");
+
+ /* read the on disk process slice of the small dataset into memory */
+ ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
+
+ /* verify that expected data is retrieved */
+
+ expected_value =
+ (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
+ tv_ptr->edge_size) +
+ (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
+
+ start_index = (size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size;
+ stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
+
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= tv_ptr->small_ds_size);
+
+ data_ok = TRUE;
+
+ ptr_1 = tv_ptr->small_ds_buf_1;
+ for (u = 0; u < start_index; u++, ptr_1++) {
+
+ if (*ptr_1 != 0) {
+
+ data_ok = FALSE;
+ *ptr_1 = 0;
+ }
+ }
+
+ data_ok &= ckrbrd_hs_dr_pio_test__verify_data(
+ tv_ptr->small_ds_buf_1 + start_index, tv_ptr->small_rank - 1, tv_ptr->edge_size,
+ tv_ptr->checker_edge_size, expected_value, (hbool_t)TRUE);
+
+ ptr_1 = tv_ptr->small_ds_buf_1;
+ for (u = stop_index; u < tv_ptr->small_ds_size; u++, ptr_1++) {
+
+ if (*ptr_1 != 0) {
+
+ data_ok = FALSE;
+ *ptr_1 = 0;
+ }
+ }
+
+ VRFY((data_ok == TRUE), "large slice write slice to small slice data good.");
+
+ (tv_ptr->tests_run)++;
+ }
+
+ l++;
+
+ (tv_ptr->total_tests)++;
+
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
+ k++;
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
+ j++;
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
+
+ return;
+
+} /* ckrbrd_hs_dr_pio_test__m2d_l2s() */
+
+/*-------------------------------------------------------------------------
+ * Function: ckrbrd_hs_dr_pio_test__m2d_s2l()
+ *
+ * Purpose: Part four of a series of tests of I/O to/from checker
+ *          board hyperslab selections of different rank in the parallel case.
+ *
+ * Verify that we can write from memory to file using
+ * selections of different rank that H5Sselect_shape_same()
+ * views as being of the same shape.
+ *
+ * Do this by writing checker board selections of the contents
+ * of the process's slice of the in memory small data set to
+ * slices of the on disk large data set. After each write,
+ * read the process's slice of the large data set back into
+ * memory, and verify that it contains the expected data.
+ *
+ * Verify that H5Sselect_shape_same() returns true on the
+ * memory and file selections.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 8/15/11
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG 0
+
+static void
+ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
+{
+#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+ const char *fcnName = "ckrbrd_hs_dr_pio_test__m2d_s2l()";
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
+ hbool_t data_ok = FALSE;
+ int i, j, k, l;
+ size_t u;
+ size_t start_index;
+ size_t stop_index;
+ uint32_t expected_value;
+ uint32_t *ptr_1;
+ int mpi_rank; /* needed by VRFY */
+ hsize_t sel_start[PAR_SS_DR_MAX_RANK];
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ /* initialize the local copy of mpi_rank */
+ mpi_rank = tv_ptr->mpi_rank;
+
+ /* Now write the contents of the process's slice of the in memory
+ * small data set to slices of the on disk large data set. After
+ * each write, read the process's slice of the large data set back
+ * into memory, and verify that it contains the expected data.
+ * Verify that H5Sselect_shape_same() returns true on the memory
+ * and file selections.
+ */
+
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
+ tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
+ tv_ptr->count[0] = 1;
+ tv_ptr->block[0] = 1;
+
+ for (i = 1; i < tv_ptr->large_rank; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+
+ ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) succeeded");
+
+ ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, set) succeeded");
+
+ /* setup a checkerboard selection of the slice of the in memory small
+ * data set associated with the process's mpi rank.
+ */
+
+ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
+ sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank);
+
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->mem_small_ds_sid, tv_ptr->small_rank,
+ tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1,
+ sel_start);
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to write checkerboard selections of slices
+ * of the small data set to slices of the large data set.
+ */
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
+
+ tv_ptr->block[i] = 1;
+ }
+ else {
+
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+ }
+
+ /* zero out the in memory large ds */
+ HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
+
+#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+ HDfprintf(stdout,
+ "%s writing process checkerboard selections of slices of small ds to process slices of large "
+ "ds on disk.\n",
+ fcnName);
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
+
+ i = tv_ptr->mpi_rank;
+ }
+ else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
+
+ j = tv_ptr->mpi_rank;
+ }
+ else {
+
+ j = 0;
+ }
+
+ do {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
+
+ k = tv_ptr->mpi_rank;
+ }
+ else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+             * (barring major re-organization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
+
+ (tv_ptr->tests_skipped)++;
+ }
+ else { /* run the test */
+
+ tv_ptr->skips = 0; /* reset the skips counter */
+
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+                    /* Zero out this process's slice of the on disk large data set.
+ * Note that this will leave one slice with its original data
+ * as there is one more slice than processes.
+ */
+ ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
+ tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_2);
+ VRFY((ret != FAIL), "H5Dwrite() to zero large ds succeeded");
+
+ /* select the portion of the in memory large cube to which we
+ * are going to write data.
+ */
+ tv_ptr->start[0] = (hsize_t)i;
+ tv_ptr->start[1] = (hsize_t)j;
+ tv_ptr->start[2] = (hsize_t)k;
+ tv_ptr->start[3] = (hsize_t)l;
+ tv_ptr->start[4] = 0;
+
+ HDassert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1));
+
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(
+ tv_ptr->mpi_rank, tv_ptr->file_large_ds_sid_1, tv_ptr->large_rank, tv_ptr->edge_size,
+ tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start);
+
+ /* verify that H5Sselect_shape_same() reports the in
+ * memory small data set slice selection and the
+ * on disk slice through the large data set selection
+ * as having the same shape.
+ */
+ check = H5Sselect_shape_same(tv_ptr->mem_small_ds_sid, tv_ptr->file_large_ds_sid_1);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
+
+ /* write the small data set slice from memory to the
+ * target slice of the disk data set
+ */
+#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank,
+ tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3],
+ tv_ptr->start[4]);
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
+ H5Sget_simple_extent_ndims(tv_ptr->mem_small_ds_sid),
+ H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_1));
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
+ ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
+ tv_ptr->file_large_ds_sid_1, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_0);
+ VRFY((ret != FAIL), "H5Dwrite of small ds slice to large ds succeeded");
+
+                    /* read this process's slice of the on disk large
+ * data set into memory.
+ */
+
+ ret = H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
+ tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1);
+ VRFY((ret != FAIL), "H5Dread() of process slice of large ds succeeded");
+
+ /* verify that the expected data and only the
+ * expected data was read.
+ */
+ expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
+
+ start_index =
+ (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
+ tv_ptr->edge_size) +
+ (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
+ stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
+
+ HDassert(start_index < stop_index);
+ HDassert(stop_index < tv_ptr->large_ds_size);
+
+ data_ok = TRUE;
+
+ ptr_1 = tv_ptr->large_ds_buf_1;
+ for (u = 0; u < start_index; u++, ptr_1++) {
+
+ if (*ptr_1 != 0) {
+
+ data_ok = FALSE;
+ *ptr_1 = 0;
+ }
+ }
+
+ data_ok &= ckrbrd_hs_dr_pio_test__verify_data(
+ tv_ptr->large_ds_buf_1 + start_index, tv_ptr->small_rank - 1, tv_ptr->edge_size,
+ tv_ptr->checker_edge_size, expected_value, (hbool_t)TRUE);
+
+ ptr_1 = tv_ptr->large_ds_buf_1;
+ for (u = stop_index; u < tv_ptr->small_ds_size; u++, ptr_1++) {
+
+ if (*ptr_1 != 0) {
+
+ data_ok = FALSE;
+ *ptr_1 = 0;
+ }
+ }
+
+ VRFY((data_ok == TRUE), "small ds cb slice write to large ds slice data good.");
+
+ (tv_ptr->tests_run)++;
+ }
+
+ l++;
+
+ (tv_ptr->total_tests)++;
+
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
+ k++;
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
+ j++;
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
+
+ return;
+
+} /* ckrbrd_hs_dr_pio_test__m2d_s2l() */
+
+/*-------------------------------------------------------------------------
+ * Function: ckrbrd_hs_dr_pio_test__run_test()
+ *
+ * Purpose: Test I/O to/from checkerboard selections of hyperslabs of
+ *          different rank in the parallel case.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 10/10/09
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG 0
+
+static void
+ckrbrd_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const int checker_edge_size,
+ const int chunk_edge_size, const int small_rank, const int large_rank,
+ const hbool_t use_collective_io, const hid_t dset_type,
+ const int express_test, int *skips_ptr, int max_skips,
+ int64_t *total_tests_ptr, int64_t *tests_run_ptr, int64_t *tests_skipped_ptr,
+ int mpi_rank)
+
+{
+#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ const char *fcnName = "ckrbrd_hs_dr_pio_test__run_test()";
+#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+ struct hs_dr_pio_test_vars_t test_vars = {
+ /* int mpi_size = */ -1,
+ /* int mpi_rank = */ -1,
+ /* MPI_Comm mpi_comm = */ MPI_COMM_NULL,
+        /* MPI_Info mpi_info = */ MPI_INFO_NULL,
+ /* int test_num = */ -1,
+ /* int edge_size = */ -1,
+ /* int checker_edge_size = */ -1,
+ /* int chunk_edge_size = */ -1,
+ /* int small_rank = */ -1,
+ /* int large_rank = */ -1,
+ /* hid_t dset_type = */ -1,
+ /* uint32_t * small_ds_buf_0 = */ NULL,
+ /* uint32_t * small_ds_buf_1 = */ NULL,
+ /* uint32_t * small_ds_buf_2 = */ NULL,
+ /* uint32_t * small_ds_slice_buf = */ NULL,
+ /* uint32_t * large_ds_buf_0 = */ NULL,
+ /* uint32_t * large_ds_buf_1 = */ NULL,
+ /* uint32_t * large_ds_buf_2 = */ NULL,
+ /* uint32_t * large_ds_slice_buf = */ NULL,
+ /* int small_ds_offset = */ -1,
+ /* int large_ds_offset = */ -1,
+ /* hid_t fid = */ -1, /* HDF5 file ID */
+ /* hid_t xfer_plist = */ H5P_DEFAULT,
+ /* hid_t full_mem_small_ds_sid = */ -1,
+ /* hid_t full_file_small_ds_sid = */ -1,
+ /* hid_t mem_small_ds_sid = */ -1,
+ /* hid_t file_small_ds_sid_0 = */ -1,
+ /* hid_t file_small_ds_sid_1 = */ -1,
+ /* hid_t small_ds_slice_sid = */ -1,
+ /* hid_t full_mem_large_ds_sid = */ -1,
+ /* hid_t full_file_large_ds_sid = */ -1,
+ /* hid_t mem_large_ds_sid = */ -1,
+ /* hid_t file_large_ds_sid_0 = */ -1,
+ /* hid_t file_large_ds_sid_1 = */ -1,
+ /* hid_t file_large_ds_process_slice_sid = */ -1,
+ /* hid_t mem_large_ds_process_slice_sid = */ -1,
+ /* hid_t large_ds_slice_sid = */ -1,
+ /* hid_t small_dataset = */ -1, /* Dataset ID */
+ /* hid_t large_dataset = */ -1, /* Dataset ID */
+ /* size_t small_ds_size = */ 1,
+ /* size_t small_ds_slice_size = */ 1,
+ /* size_t large_ds_size = */ 1,
+ /* size_t large_ds_slice_size = */ 1,
+ /* hsize_t dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t chunk_dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t start[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t stride[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t count[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t block[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t * start_ptr = */ NULL,
+ /* hsize_t * stride_ptr = */ NULL,
+ /* hsize_t * count_ptr = */ NULL,
+ /* hsize_t * block_ptr = */ NULL,
+ /* int skips = */ 0,
+ /* int max_skips = */ 0,
+ /* int64_t total_tests = */ 0,
+ /* int64_t tests_run = */ 0,
+ /* int64_t tests_skipped = */ 0};
+ struct hs_dr_pio_test_vars_t *tv_ptr = &test_vars;
+
+ if (MAINPROCESS)
+ printf("\r - running test #%lld: small rank = %d, large rank = %d", (long long)(test_num + 1),
+ small_rank, large_rank);
+
+ hs_dr_pio_test__setup(test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank,
+ use_collective_io, dset_type, express_test, tv_ptr);
+
+ /* initialize skips & max_skips */
+ tv_ptr->skips = *skips_ptr;
+ tv_ptr->max_skips = max_skips;
+
+#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: small rank = %d, large rank = %d.\n", test_num, small_rank, large_rank);
+ HDfprintf(stdout, "test %d: Initialization complete.\n", test_num);
+ }
+#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+
+ /* first, verify that we can read from disk correctly using selections
+ * of different rank that H5Sselect_shape_same() views as being of the
+ * same shape.
+ *
+     * Start by reading a (small_rank - 1)-D slice from this process's slice
+ * of the on disk large data set, and verifying that the data read is
+ * correct. Verify that H5Sselect_shape_same() returns true on the
+ * memory and file selections.
+ *
+ * The first step is to set up the needed checker board selection in the
+     * in memory small cube.
+ */
+
+ ckrbrd_hs_dr_pio_test__d2m_l2s(tv_ptr);
+
+ /* similarly, read slices of the on disk small data set into slices
+ * through the in memory large data set, and verify that the correct
+ * data (and only the correct data) is read.
+ */
+
+ ckrbrd_hs_dr_pio_test__d2m_s2l(tv_ptr);
+
+ /* now we go in the opposite direction, verifying that we can write
+ * from memory to file using selections of different rank that
+ * H5Sselect_shape_same() views as being of the same shape.
+ *
+ * Start by writing small_rank - 1 D slices from the in memory large data
+ * set to the on disk small dataset. After each write, read the slice of
+ * the small dataset back from disk, and verify that it contains the
+ * expected data. Verify that H5Sselect_shape_same() returns true on
+ * the memory and file selections.
+ */
+
+ ckrbrd_hs_dr_pio_test__m2d_l2s(tv_ptr);
+
+ /* Now write the contents of the process's slice of the in memory
+ * small data set to slices of the on disk large data set. After
+ * each write, read the process's slice of the large data set back
+ * into memory, and verify that it contains the expected data.
+ * Verify that H5Sselect_shape_same() returns true on the memory
+ * and file selections.
+ */
+
+ ckrbrd_hs_dr_pio_test__m2d_s2l(tv_ptr);
+
+#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n",
+ test_num, (long long)(tv_ptr->tests_run), (long long)(tv_ptr->tests_skipped),
+ (long long)(tv_ptr->total_tests));
+ }
+#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+
+ hs_dr_pio_test__takedown(tv_ptr);
+
+#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: Takedown complete.\n", test_num);
+ }
+#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+
+ *skips_ptr = tv_ptr->skips;
+ *total_tests_ptr += tv_ptr->total_tests;
+ *tests_run_ptr += tv_ptr->tests_run;
+ *tests_skipped_ptr += tv_ptr->tests_skipped;
+
+ return;
+
+} /* ckrbrd_hs_dr_pio_test__run_test() */
+
+/*-------------------------------------------------------------------------
+ * Function: ckrbrd_hs_dr_pio_test()
+ *
+ * Purpose: Test I/O to/from checker board hyperslab selections of
+ *          different rank in the parallel case.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 9/18/09
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
+{
+ int express_test;
+ int local_express_test;
+ int mpi_size = -1;
+ int mpi_rank = -1;
+ int test_num = 0;
+ int edge_size;
+ int checker_edge_size = 3;
+ int chunk_edge_size = 0;
+ int small_rank = 3;
+ int large_rank = 4;
+ int mpi_result;
+ hid_t dset_type = H5T_NATIVE_UINT;
+ int skips = 0;
+ int max_skips = 0;
+    /* The following table lists the number of sub-tests skipped between
+ * each test that is actually executed as a function of the express
+ * test level. Note that any value in excess of 4880 will cause all
+ * sub tests to be skipped.
+ */
+ int max_skips_tbl[4] = {0, 4, 64, 1024};
+ int64_t total_tests = 0;
+ int64_t tests_run = 0;
+ int64_t tests_skipped = 0;
+
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ edge_size = (mpi_size > 6 ? mpi_size : 6);
+
+ local_express_test = EXPRESS_MODE; /* GetTestExpress(); */
+
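+    /* the test buffers are uint32_t while the dataset type is H5T_NATIVE_UINT,
+     * so the two types must have the same size
+     */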
+ HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned));
+
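+    /* take the maximum express test level reported by any rank */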
+ mpi_result = MPI_Allreduce((void *)&local_express_test, (void *)&express_test, 1, MPI_INT, MPI_MAX,
+ MPI_COMM_WORLD);
+
+ VRFY((mpi_result == MPI_SUCCESS), "MPI_Allreduce(0) succeeded");
+
+ if (local_express_test < 0) {
+ max_skips = max_skips_tbl[0];
+ }
+ else if (local_express_test > 3) {
+ max_skips = max_skips_tbl[3];
+ }
+ else {
+ max_skips = max_skips_tbl[local_express_test];
+ }
+
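+    /* If enabled, this block spins so a debugger can attach; clear DebugWait
+     * from the debugger to continue.
+     */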
+#if 0
+ {
+ int DebugWait = 1;
+
+ while (DebugWait) ;
+ }
+#endif
+
+ for (large_rank = 3; large_rank <= PAR_SS_DR_MAX_RANK; large_rank++) {
+
+ for (small_rank = 2; small_rank < large_rank; small_rank++) {
+ switch (sstest_type) {
+ case IND_CONTIG:
+ /* contiguous data set, independent I/O */
+ chunk_edge_size = 0;
+ ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
+ small_rank, large_rank, FALSE, dset_type, express_test,
+ &skips, max_skips, &total_tests, &tests_run,
+ &tests_skipped, mpi_rank);
+ test_num++;
+ break;
+ /* end of case IND_CONTIG */
+
+ case COL_CONTIG:
+ /* contiguous data set, collective I/O */
+ chunk_edge_size = 0;
+ ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
+ small_rank, large_rank, TRUE, dset_type, express_test,
+ &skips, max_skips, &total_tests, &tests_run,
+ &tests_skipped, mpi_rank);
+ test_num++;
+ break;
+ /* end of case COL_CONTIG */
+
+ case IND_CHUNKED:
+ /* chunked data set, independent I/O */
+ chunk_edge_size = 5;
+ ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
+ small_rank, large_rank, FALSE, dset_type, express_test,
+ &skips, max_skips, &total_tests, &tests_run,
+ &tests_skipped, mpi_rank);
+ test_num++;
+ break;
+ /* end of case IND_CHUNKED */
+
+ case COL_CHUNKED:
+ /* chunked data set, collective I/O */
+ chunk_edge_size = 5;
+ ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
+ small_rank, large_rank, TRUE, dset_type, express_test,
+ &skips, max_skips, &total_tests, &tests_run,
+ &tests_skipped, mpi_rank);
+ test_num++;
+ break;
+ /* end of case COL_CHUNKED */
+
+ default:
+ VRFY((FALSE), "unknown test type");
+ break;
+
+ } /* end of switch(sstest_type) */
+#if CONTIG_HS_DR_PIO_TEST__DEBUG
+ if ((MAINPROCESS) && (tests_skipped > 0)) {
+ HDfprintf(stdout, " run/skipped/total = %" PRId64 "/%" PRId64 "/%" PRId64 ".\n",
+ tests_run, tests_skipped, total_tests);
+ }
+#endif /* CONTIG_HS_DR_PIO_TEST__DEBUG */
+ }
+ }
+
+ if (MAINPROCESS) {
+ if (tests_skipped > 0) {
+ HDfprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n",
+ tests_skipped, total_tests);
+ }
+ else
+ HDprintf("\n");
+ }
+
+ return;
+
+} /* ckrbrd_hs_dr_pio_test() */
+
+/* Main body. Here for now; it may have to move to a separate file later. */
+
+/*
+ * Main driver of the Parallel HDF5 tests
+ */
+
+#include "testphdf5.h"
+
+#ifndef PATH_MAX
+#define PATH_MAX 512
+#endif /* !PATH_MAX */
+
+/* global variables */
+int dim0;
+int dim1;
+int chunkdim0;
+int chunkdim1;
+int nerrors = 0; /* errors count */
+int ndatasets = 300; /* number of datasets to create*/
+int ngroups = 512; /* number of groups to create in root
+ * group. */
+int facc_type = FACC_MPIO; /*Test file access type */
+int dxfer_coll_type = DXFER_COLLECTIVE_IO;
+
+H5E_auto2_t old_func; /* previous error handler */
+void *old_client_data; /* previous error handler arg.*/
+
+/* other option flags */
+
+#ifdef USE_PAUSE
+/* Pause the process for a moment to allow a debugger to attach if desired. */
+/* Will pause longer if the greenlight file is not present, but will */
+/* eventually continue. */
+#include <sys/types.h>
+#include <sys/stat.h>
+
+void
+pause_proc(void)
+{
+
+ int pid;
+ h5_stat_t statbuf;
+ char greenlight[] = "go";
+ int maxloop = 10;
+ int loops = 0;
+ int time_int = 10;
+
+ /* mpi variables */
+ int mpi_size, mpi_rank;
+ int mpi_namelen;
+ char mpi_name[MPI_MAX_PROCESSOR_NAME];
+
+ pid = getpid();
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Get_processor_name(mpi_name, &mpi_namelen);
+
+ if (MAINPROCESS)
+ while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop) {
+ if (!loops++) {
+ HDprintf("Proc %d (%*s, %d): to debug, attach %d\n", mpi_rank, mpi_namelen, mpi_name, pid,
+ pid);
+ }
+ HDprintf("waiting(%ds) for file %s ...\n", time_int, greenlight);
+ fflush(stdout);
+ HDsleep(time_int);
+ }
+ MPI_Barrier(MPI_COMM_WORLD);
+}
+
+/* Use the Profile feature of MPI to call the pause_proc() */
+int
+MPI_Init(int *argc, char ***argv)
+{
+ int ret_code;
+ ret_code = PMPI_Init(argc, argv);
+ pause_proc();
+ return (ret_code);
+}
+#endif /* USE_PAUSE */
+
+/*
+ * Show command usage
+ */
+static void
+usage(void)
+{
+ HDprintf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] "
+ "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n");
+ HDprintf("\t-m<n_datasets>"
+ "\tset number of datasets for the multiple dataset test\n");
+ HDprintf("\t-n<n_groups>"
+ "\tset number of groups for the multiple group test\n");
+#if 0
+ HDprintf("\t-f <prefix>\tfilename prefix\n");
+#endif
+ HDprintf("\t-2\t\tuse Split-file together with MPIO\n");
+    HDprintf("\t-d <factor0> <factor1>\tdataset dimension factors. Defaults (%d,%d)\n", ROW_FACTOR,
+ COL_FACTOR);
+ HDprintf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n");
+ HDprintf("\n");
+}
+
+/*
+ * parse the command line options
+ */
+static int
+parse_options(int argc, char **argv)
+{
+ int mpi_size, mpi_rank; /* mpi variables */
+
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* setup default chunk-size. Make sure sizes are > 0 */
+
+ chunkdim0 = (dim0 + 9) / 10;
+ chunkdim1 = (dim1 + 9) / 10;
+
+ while (--argc) {
+ if (**(++argv) != '-') {
+ break;
+ }
+ else {
+ switch (*(*argv + 1)) {
+ case 'm':
+ ndatasets = atoi((*argv + 1) + 1);
+ if (ndatasets < 0) {
+ nerrors++;
+ return (1);
+ }
+ break;
+ case 'n':
+ ngroups = atoi((*argv + 1) + 1);
+ if (ngroups < 0) {
+ nerrors++;
+ return (1);
+ }
+ break;
+#if 0
+ case 'f': if (--argc < 1) {
+ nerrors++;
+ return(1);
+ }
+ if (**(++argv) == '-') {
+ nerrors++;
+ return(1);
+ }
+ paraprefix = *argv;
+ break;
+#endif
+ case 'i': /* Collective MPI-IO access with independent IO */
+ dxfer_coll_type = DXFER_INDEPENDENT_IO;
+ break;
+ case '2': /* Use the split-file driver with MPIO access */
+ /* Can use $HDF5_METAPREFIX to define the */
+ /* meta-file-prefix. */
+ facc_type = FACC_MPIO | FACC_SPLIT;
+ break;
+                case 'd': /* dimension sizes */
+ if (--argc < 2) {
+ nerrors++;
+ return (1);
+ }
+ dim0 = atoi(*(++argv)) * mpi_size;
+ argc--;
+ dim1 = atoi(*(++argv)) * mpi_size;
+ /* set default chunkdim sizes too */
+ chunkdim0 = (dim0 + 9) / 10;
+ chunkdim1 = (dim1 + 9) / 10;
+ break;
+ case 'c': /* chunk dimensions */
+ if (--argc < 2) {
+ nerrors++;
+ return (1);
+ }
+ chunkdim0 = atoi(*(++argv));
+ argc--;
+ chunkdim1 = atoi(*(++argv));
+ break;
+ case 'h': /* print help message--return with nerrors set */
+ return (1);
+ default:
+ HDprintf("Illegal option(%s)\n", *argv);
+ nerrors++;
+ return (1);
+ }
+ }
+ } /*while*/
+
+ /* check validity of dimension and chunk sizes */
+ if (dim0 <= 0 || dim1 <= 0) {
+ HDprintf("Illegal dim sizes (%d, %d)\n", dim0, dim1);
+ nerrors++;
+ return (1);
+ }
+ if (chunkdim0 <= 0 || chunkdim1 <= 0) {
+ HDprintf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1);
+ nerrors++;
+ return (1);
+ }
+
+ /* Make sure datasets can be divided into equal portions by the processes */
+ if ((dim0 % mpi_size) || (dim1 % mpi_size)) {
+ if (MAINPROCESS)
+ HDprintf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", dim0, dim1, mpi_size);
+ nerrors++;
+ return (1);
+ }
+
+ /* compose the test filenames */
+ {
+ int i, n;
+
+ n = sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; /* exclude the NULL */
+
+ for (i = 0; i < n; i++)
+ strncpy(filenames[i], FILENAME[i], PATH_MAX);
+#if 0 /* no support for VFDs right now */
+ if (h5_fixname(FILENAME[i], fapl, filenames[i], PATH_MAX) == NULL) {
+ HDprintf("h5_fixname failed\n");
+ nerrors++;
+ return (1);
+ }
+#endif
+ if (MAINPROCESS) {
+ HDprintf("Test filenames are:\n");
+ for (i = 0; i < n; i++)
+ HDprintf(" %s\n", filenames[i]);
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * Create the appropriate File access property list
+ */
+hid_t
+create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
+{
+ hid_t ret_pl = -1;
+ herr_t ret; /* generic return value */
+ int mpi_rank; /* mpi variables */
+
+ /* need the rank for error checking macros */
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ ret_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((ret_pl >= 0), "H5P_FILE_ACCESS");
+
+ if (l_facc_type == FACC_DEFAULT)
+ return (ret_pl);
+
+ if (l_facc_type == FACC_MPIO) {
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(ret_pl, comm, info);
+ VRFY((ret >= 0), "");
+ ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
+ VRFY((ret >= 0), "");
+ ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
+ VRFY((ret >= 0), "");
+ return (ret_pl);
+ }
+
+ if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) {
+ hid_t mpio_pl;
+
+ mpio_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((mpio_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
+ VRFY((ret >= 0), "");
+
+ /* setup file access template */
+ ret_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((ret_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
+ VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
+ H5Pclose(mpio_pl);
+ return (ret_pl);
+ }
+
+ /* unknown file access types */
+ return (ret_pl);
+}
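+
+/* Typical (hypothetical) usage of create_faccess_plist() above -- every rank
+ * passes the same communicator/info and then creates or opens the file
+ * collectively.  Shown for illustration only and disabled with #if 0; the
+ * file name is made up.
+ */
+#if 0
+static void
+faccess_plist_usage_sketch(void)
+{
+    hid_t fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, FACC_MPIO);
+    hid_t file_id = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+
+    /* ... parallel dataset I/O ... */
+
+    H5Fclose(file_id);
+    H5Pclose(fapl_id);
+}
+#endif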
+
+/* Shape Same test using contiguous hyperslabs with independent IO on contiguous datasets */
+static void
+sscontig1(void)
+{
+ contig_hs_dr_pio_test(IND_CONTIG);
+}
+
+/* Shape Same test using contiguous hyperslabs with collective IO on contiguous datasets */
+static void
+sscontig2(void)
+{
+ contig_hs_dr_pio_test(COL_CONTIG);
+}
+
+/* Shape Same test using contiguous hyperslabs with independent IO on chunked datasets */
+static void
+sscontig3(void)
+{
+ contig_hs_dr_pio_test(IND_CHUNKED);
+}
+
+/* Shape Same test using contiguous hyperslabs with collective IO on chunked datasets */
+static void
+sscontig4(void)
+{
+ contig_hs_dr_pio_test(COL_CHUNKED);
+}
+
+/* Shape Same test using checker board hyperslabs with independent IO on contiguous datasets */
+static void
+sschecker1(void)
+{
+ ckrbrd_hs_dr_pio_test(IND_CONTIG);
+}
+
+/* Shape Same test using checker board hyperslabs with collective IO on contiguous datasets */
+static void
+sschecker2(void)
+{
+ ckrbrd_hs_dr_pio_test(COL_CONTIG);
+}
+
+/* Shape Same test using checker board hyperslabs with independent IO on chunked datasets */
+static void
+sschecker3(void)
+{
+ ckrbrd_hs_dr_pio_test(IND_CHUNKED);
+}
+
+/* Shape Same test using checker board hyperslabs with collective IO on chunked datasets */
+static void
+sschecker4(void)
+{
+ ckrbrd_hs_dr_pio_test(COL_CHUNKED);
+}
+
+int
+main(int argc, char **argv)
+{
+ int mpi_size, mpi_rank; /* mpi variables */
+
+#ifndef H5_HAVE_WIN32_API
+ /* Un-buffer the stdout and stderr */
+ HDsetbuf(stderr, NULL);
+ HDsetbuf(stdout, NULL);
+#endif
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ dim0 = ROW_FACTOR * mpi_size;
+ dim1 = COL_FACTOR * mpi_size;
+
+ if (MAINPROCESS) {
+ HDprintf("===================================\n");
+ HDprintf("Shape Same Tests Start\n");
+ HDprintf(" express_test = %d.\n", EXPRESS_MODE /* GetTestExpress() */);
+ HDprintf("===================================\n");
+ }
+
+ /* Attempt to turn off atexit post processing so that in case errors
+     * happen during the test and the process is aborted, it will not
+     * hang in the atexit post processing, in which it may try to make MPI
+     * calls. By then, MPI calls may not work.
+ */
+ if (H5dont_atexit() < 0) {
+ if (MAINPROCESS)
+ HDprintf("%d: Failed to turn off atexit processing. Continue.\n", mpi_rank);
+    }
+ H5open();
+ /* h5_show_hostname(); */
+
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+
+ /* Get the capability flag of the VOL connector being used */
+ if (H5Pget_vol_cap_flags(fapl, &vol_cap_flags_g) < 0) {
+ if (MAINPROCESS)
+ HDprintf("Failed to get the capability flag of the VOL connector being used\n");
+
+ MPI_Finalize();
+ return 0;
+ }
+
+ /* Make sure the connector supports the API functions being tested. This test only
+     * uses a few API functions, such as H5Fcreate/close/delete and H5Dcreate/write/read/close.
+ */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS)
+ HDprintf("API functions for basic file and dataset aren't supported with this connector\n");
+
+ MPI_Finalize();
+ return 0;
+ }
+
+#if 0
+ HDmemset(filenames, 0, sizeof(filenames));
+ for (int i = 0; i < NFILENAME; i++) {
+ if (NULL == (filenames[i] = HDmalloc(PATH_MAX))) {
+ HDprintf("couldn't allocate filename array\n");
+ MPI_Abort(MPI_COMM_WORLD, -1);
+ }
+ }
+#endif
+
+ /* Initialize testing framework */
+ /* TestInit(argv[0], usage, parse_options); */
+
+ if (parse_options(argc, argv)) {
+ usage();
+ return 1;
+ }
+
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS) {
+ HDprintf("===================================\n"
+ " Using Independent I/O with file set view to replace collective I/O \n"
+ "===================================\n");
+ }
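+
+    /* A minimal, hypothetical sketch (disabled, for illustration only) of how a
+     * transfer property list is typically configured for the two I/O modes
+     * referred to above: collective MPI-IO, or collective mode broken down to
+     * independent I/O at the MPI-IO layer.
+     */
+#if 0
+    {
+        hid_t dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+
+        /* request collective MPI-IO for raw data transfers */
+        H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
+
+        if (dxfer_coll_type == DXFER_INDEPENDENT_IO)
+            /* keep the collective file set view, but perform the actual I/O
+             * independently at the MPI-IO layer
+             */
+            H5Pset_dxpl_mpio_collective_opt(dxpl_id, H5FD_MPIO_INDIVIDUAL_IO);
+
+        /* ... pass dxpl_id to H5Dread()/H5Dwrite() ... */
+        H5Pclose(dxpl_id);
+    }
+#endif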
+
+ /* Shape Same tests using contiguous hyperslab */
+#if 0
+ AddTest("sscontig1", sscontig1, NULL,
+ "Cntg hslab, ind IO, cntg dsets", filenames[0]);
+ AddTest("sscontig2", sscontig2, NULL,
+ "Cntg hslab, col IO, cntg dsets", filenames[0]);
+ AddTest("sscontig3", sscontig3, NULL,
+ "Cntg hslab, ind IO, chnk dsets", filenames[0]);
+ AddTest("sscontig4", sscontig4, NULL,
+ "Cntg hslab, col IO, chnk dsets", filenames[0]);
+#endif
+ if (MAINPROCESS) {
+ printf("Cntg hslab, ind IO, cntg dsets\n");
+ fflush(stdout);
+ }
+ sscontig1();
+ if (MAINPROCESS) {
+ printf("Cntg hslab, col IO, cntg dsets\n");
+ fflush(stdout);
+ }
+ sscontig2();
+ if (MAINPROCESS) {
+ printf("Cntg hslab, ind IO, chnk dsets\n");
+ fflush(stdout);
+ }
+ sscontig3();
+ if (MAINPROCESS) {
+ printf("Cntg hslab, col IO, chnk dsets\n");
+ fflush(stdout);
+ }
+ sscontig4();
+
+ /* Shape Same tests using checker board hyperslab */
+#if 0
+ AddTest("sschecker1", sschecker1, NULL,
+ "Check hslab, ind IO, cntg dsets", filenames[0]);
+ AddTest("sschecker2", sschecker2, NULL,
+ "Check hslab, col IO, cntg dsets", filenames[0]);
+ AddTest("sschecker3", sschecker3, NULL,
+ "Check hslab, ind IO, chnk dsets", filenames[0]);
+ AddTest("sschecker4", sschecker4, NULL,
+ "Check hslab, col IO, chnk dsets", filenames[0]);
+#endif
+ if (MAINPROCESS) {
+ printf("Check hslab, ind IO, cntg dsets\n");
+ fflush(stdout);
+ }
+ sschecker1();
+ if (MAINPROCESS) {
+ printf("Check hslab, col IO, cntg dsets\n");
+ fflush(stdout);
+ }
+ sschecker2();
+ if (MAINPROCESS) {
+ printf("Check hslab, ind IO, chnk dsets\n");
+ fflush(stdout);
+ }
+ sschecker3();
+ if (MAINPROCESS) {
+ printf("Check hslab, col IO, chnk dsets\n");
+ fflush(stdout);
+ }
+ sschecker4();
+
+ /* Display testing information */
+ /* TestInfo(argv[0]); */
+
+ /* setup file access property list */
+ H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
+
+ /* Parse command line arguments */
+ /* TestParseCmdLine(argc, argv); */
+
+ /* Perform requested testing */
+ /* PerformTests(); */
+
+ /* make sure all processes are finished before final report, cleanup
+ * and exit.
+ */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /* Display test summary, if requested */
+ /* if (MAINPROCESS && GetTestSummary())
+ TestSummary(); */
+
+ /* Clean up test files */
+ /* h5_clean_files(FILENAME, fapl); */
+ H5Fdelete(FILENAME[0], fapl);
+ H5Pclose(fapl);
+
+ /* nerrors += GetTestNumErrs(); */
+
+ /* Gather errors from all processes */
+ {
+ int temp;
+ MPI_Allreduce(&nerrors, &temp, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
+ nerrors = temp;
+ }
+
+ if (MAINPROCESS) { /* only process 0 reports */
+ HDprintf("===================================\n");
+ if (nerrors)
+ HDprintf("***Shape Same tests detected %d errors***\n", nerrors);
+ else
+ HDprintf("Shape Same tests finished successfully\n");
+ HDprintf("===================================\n");
+ }
+
+#if 0
+ for (int i = 0; i < NFILENAME; i++) {
+ HDfree(filenames[i]);
+ filenames[i] = NULL;
+ }
+#endif
+
+ /* close HDF5 library */
+ H5close();
+
+ /* Release test infrastructure */
+ /* TestShutdown(); */
+
+ MPI_Finalize();
+
+    /* cannot just return (nerrors) because exit code is limited to 1 byte */
+ return (nerrors != 0);
+}
diff --git a/testpar/API/t_span_tree.c b/testpar/API/t_span_tree.c
new file mode 100644
index 0000000..5aafb0b
--- /dev/null
+++ b/testpar/API/t_span_tree.c
@@ -0,0 +1,2622 @@
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+   This program tests irregular hyperslab selections with collective write and read.
+   Collective output is verified by comparing it against independent I/O output.
+
+   1) Write two datasets with the same hyperslab selection settings:
+      one in independent mode,
+      one in collective mode.
+   2) Read the two datasets back with the same hyperslab selection settings:
+      1. Use an independent read on the independent output and an independent
+         read on the collective output, then compare the results.  If they
+         match, the collective write succeeded.
+      2. Use a collective read on the independent output and an independent
+         read on the independent output, then compare the results.  If they
+         match, the collective read succeeded.
+
+ */
+
+#include "hdf5.h"
+#if 0
+#include "H5private.h"
+#endif
+#include "testphdf5.h"
+
+#define LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG 0
+
+static void coll_write_test(int chunk_factor);
+static void coll_read_test(void);
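+
+/* A minimal, hypothetical sketch (disabled, illustration only) of the
+ * verification strategy described at the top of this file: write the same
+ * selection once with an independent and once with a collective transfer
+ * property list, read both datasets back, and compare the buffers.  The
+ * dataset names, the 16-element buffers, and the assumption that the passed
+ * selections contain exactly 16 elements are all illustrative.
+ */
+#if 0
+static int
+coll_vs_indep_sketch(hid_t file, hid_t fspace, hid_t mspace)
+{
+    int   wbuf[16], rbuf_indep[16], rbuf_coll[16];
+    hid_t dxpl_indep = H5Pcreate(H5P_DATASET_XFER); /* default == independent */
+    hid_t dxpl_coll  = H5Pcreate(H5P_DATASET_XFER);
+    hid_t dset_indep, dset_coll;
+    int   i, same;
+
+    for (i = 0; i < 16; i++)
+        wbuf[i] = i;
+
+    H5Pset_dxpl_mpio(dxpl_coll, H5FD_MPIO_COLLECTIVE);
+
+    dset_indep = H5Dcreate2(file, "indep_ds", H5T_NATIVE_INT, fspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    dset_coll  = H5Dcreate2(file, "coll_ds", H5T_NATIVE_INT, fspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+
+    /* same selections, independent vs. collective write */
+    H5Dwrite(dset_indep, H5T_NATIVE_INT, mspace, fspace, dxpl_indep, wbuf);
+    H5Dwrite(dset_coll, H5T_NATIVE_INT, mspace, fspace, dxpl_coll, wbuf);
+
+    /* read both back independently and compare */
+    H5Dread(dset_indep, H5T_NATIVE_INT, mspace, fspace, dxpl_indep, rbuf_indep);
+    H5Dread(dset_coll, H5T_NATIVE_INT, mspace, fspace, dxpl_indep, rbuf_coll);
+
+    same = (HDmemcmp(rbuf_indep, rbuf_coll, sizeof(rbuf_indep)) == 0);
+
+    H5Dclose(dset_indep);
+    H5Dclose(dset_coll);
+    H5Pclose(dxpl_indep);
+    H5Pclose(dxpl_coll);
+
+    return same;
+}
+#endif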
+
+/*-------------------------------------------------------------------------
+ * Function: coll_irregular_cont_write
+ *
+ * Purpose: Wrapper to test the collectively irregular hyperslab write in
+ * contiguous storage
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * Dec 2nd, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+coll_irregular_cont_write(void)
+{
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+            printf("    API functions for basic file, dataset, or dataset more aren't supported with this "
+                   "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_write_test(0);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_irregular_cont_read
+ *
+ * Purpose: Wrapper to test the collectively irregular hyperslab read in
+ * contiguous storage
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * Dec 2nd, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+coll_irregular_cont_read(void)
+{
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+            printf("    API functions for basic file, dataset, or dataset more aren't supported with this "
+                   "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_read_test();
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_irregular_simple_chunk_write
+ *
+ * Purpose: Wrapper to test the collectively irregular hyperslab write in
+ *          chunk storage (1 chunk)
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * Dec 2nd, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+coll_irregular_simple_chunk_write(void)
+{
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+            printf("    API functions for basic file, dataset, or dataset more aren't supported with this "
+                   "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_write_test(1);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_irregular_simple_chunk_read
+ *
+ * Purpose: Wrapper to test the collectively irregular hyperslab read in chunk
+ *          storage (1 chunk)
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * Dec 2nd, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+coll_irregular_simple_chunk_read(void)
+{
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+            printf("    API functions for basic file, dataset, or dataset more aren't supported with this "
+                   "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_read_test();
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_irregular_complex_chunk_write
+ *
+ * Purpose: Wrapper to test the collectively irregular hyperslab write in chunk
+ *          storage (4 chunks)
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * Dec 2nd, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+coll_irregular_complex_chunk_write(void)
+{
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+            printf("    API functions for basic file, dataset, or dataset more aren't supported with this "
+                   "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_write_test(4);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_irregular_complex_chunk_read
+ *
+ * Purpose: Wrapper to test the collectively irregular hyperslab read in chunk
+ *          storage (4 chunks)
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * Dec 2nd, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+coll_irregular_complex_chunk_read(void)
+{
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file dataset, or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_read_test();
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_write_test
+ *
+ * Purpose: To test the collective irregular hyperslab write in chunk
+ * storage
+ * Input: number of chunks in each dimension;
+ * if the number is 0, contiguous storage is used
+ * Return: void
+ *
+ * Programmer: Unknown
+ * Dec 2nd, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+coll_write_test(int chunk_factor)
+{
+
+ const char *filename;
+ hid_t facc_plist, dxfer_plist, dcrt_plist;
+ hid_t file, datasetc, dataseti; /* File and dataset identifiers */
+ hid_t mspaceid1, mspaceid, fspaceid, fspaceid1; /* Dataspace identifiers */
+
+ hsize_t mdim1[1]; /* Dimension size of the first dataset (in memory) */
+ hsize_t fsdim[2]; /* Dimension sizes of the dataset (on disk) */
+ hsize_t mdim[2]; /* Dimension sizes of the dataset in memory when we
+ * read selection from the dataset on the disk
+ */
+
+ hsize_t start[2]; /* Start of hyperslab */
+ hsize_t stride[2]; /* Stride of hyperslab */
+ hsize_t count[2]; /* Block count */
+ hsize_t block[2]; /* Block sizes */
+ hsize_t chunk_dims[2];
+
+ herr_t ret;
+ int i;
+ int fillvalue = 0; /* Fill value for the dataset */
+
+ int *matrix_out = NULL;
+ int *matrix_out1 = NULL; /* Buffer to read from the dataset */
+ int *vector = NULL;
+
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ /*set up MPI parameters */
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /* Obtain file name */
+ filename = PARATESTFILE /* GetTestParameters() */;
+
+ /*
+ * Buffers' initialization.
+ */
+
+ mdim1[0] = (hsize_t)(MSPACE1_DIM * mpi_size);
+ mdim[0] = MSPACE_DIM1;
+ mdim[1] = (hsize_t)(MSPACE_DIM2 * mpi_size);
+ fsdim[0] = FSPACE_DIM1;
+ fsdim[1] = (hsize_t)(FSPACE_DIM2 * mpi_size);
+
+ vector = (int *)HDmalloc(sizeof(int) * (size_t)mdim1[0] * (size_t)mpi_size);
+ matrix_out = (int *)HDmalloc(sizeof(int) * (size_t)mdim[0] * (size_t)mdim[1] * (size_t)mpi_size);
+ matrix_out1 = (int *)HDmalloc(sizeof(int) * (size_t)mdim[0] * (size_t)mdim[1] * (size_t)mpi_size);
+
+ HDmemset(vector, 0, sizeof(int) * (size_t)mdim1[0] * (size_t)mpi_size);
+ vector[0] = vector[MSPACE1_DIM * mpi_size - 1] = -1;
+ for (i = 1; i < MSPACE1_DIM * mpi_size - 1; i++)
+ vector[i] = (int)i;
+
+ /* Grab file access property list */
+ facc_plist = create_faccess_plist(comm, info, facc_type);
+ VRFY((facc_plist >= 0), "");
+
+ /*
+ * Create a file.
+ */
+ file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, facc_plist);
+ VRFY((file >= 0), "H5Fcreate succeeded");
+
+ /*
+ * Create property list for a dataset and set up fill values.
+ */
+ dcrt_plist = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcrt_plist >= 0), "");
+
+ ret = H5Pset_fill_value(dcrt_plist, H5T_NATIVE_INT, &fillvalue);
+ VRFY((ret >= 0), "Fill value creation property list succeeded");
+
+ if (chunk_factor != 0) {
+ chunk_dims[0] = fsdim[0] / (hsize_t)chunk_factor;
+ chunk_dims[1] = fsdim[1] / (hsize_t)chunk_factor;
+ ret = H5Pset_chunk(dcrt_plist, 2, chunk_dims);
+ VRFY((ret >= 0), "chunk creation property list succeeded");
+ }
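+
+ /* For illustration (a sketch based on the wrappers above): with
+ * chunk_factor == 1 a single chunk covers the full dataset extent, while
+ * chunk_factor == 4 divides each dimension into chunks of
+ * fsdim[i] / chunk_factor elements. A chunk_factor of 0 skips the
+ * H5Pset_chunk() call entirely, leaving the default contiguous layout.
+ */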
+
+ /*
+ *
+ * Create dataspace for the first dataset on disk.
+ * dim1 = 9
+ * dim2 = 3600
+ *
+ *
+ */
+ fspaceid = H5Screate_simple(FSPACE_RANK, fsdim, NULL);
+ VRFY((fspaceid >= 0), "file dataspace created succeeded");
+
+ /*
+ * Create dataset in the file. Notice that creation
+ * property list dcrt_plist is used.
+ */
+ datasetc =
+ H5Dcreate2(file, "collect_write", H5T_NATIVE_INT, fspaceid, H5P_DEFAULT, dcrt_plist, H5P_DEFAULT);
+ VRFY((datasetc >= 0), "dataset created succeeded");
+
+ dataseti =
+ H5Dcreate2(file, "independ_write", H5T_NATIVE_INT, fspaceid, H5P_DEFAULT, dcrt_plist, H5P_DEFAULT);
+ VRFY((dataseti >= 0), "dataset created succeeded");
+
+ /* The First selection for FILE
+ *
+ * block (3,2)
+ * stride(4,3)
+ * count (1,768/mpi_size)
+ * start (0,1+768*3*mpi_rank/mpi_size)
+ *
+ */
+
+ start[0] = FHSTART0;
+ start[1] = (hsize_t)(FHSTART1 + mpi_rank * FHSTRIDE1 * FHCOUNT1);
+ stride[0] = FHSTRIDE0;
+ stride[1] = FHSTRIDE1;
+ count[0] = FHCOUNT0;
+ count[1] = FHCOUNT1;
+ block[0] = FHBLOCK0;
+ block[1] = FHBLOCK1;
+
+ ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /* The Second selection for FILE
+ *
+ * block (3,768)
+ * stride (1,1)
+ * count (1,1)
+ * start (4,768*mpi_rank/mpi_size)
+ *
+ */
+
+ start[0] = SHSTART0;
+ start[1] = (hsize_t)(SHSTART1 + SHCOUNT1 * SHBLOCK1 * mpi_rank);
+ stride[0] = SHSTRIDE0;
+ stride[1] = SHSTRIDE1;
+ count[0] = SHCOUNT0;
+ count[1] = SHCOUNT1;
+ block[0] = SHBLOCK0;
+ block[1] = SHBLOCK1;
+
+ ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /*
+ * Create dataspace for the first dataset in the memory
+ * dim1 = 27000
+ *
+ */
+ mspaceid1 = H5Screate_simple(MSPACE1_RANK, mdim1, NULL);
+ VRFY((mspaceid1 >= 0), "memory dataspace created succeeded");
+
+ /*
+ * Memory space is 1-D; this is a good test to check
+ * whether a span-tree derived datatype needs to be built.
+ * block 1
+ * stride 1
+ * count 6912/mpi_size
+ * start 1
+ *
+ */
+ start[0] = MHSTART0;
+ stride[0] = MHSTRIDE0;
+ count[0] = MHCOUNT0;
+ block[0] = MHBLOCK0;
+
+ ret = H5Sselect_hyperslab(mspaceid1, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /* independent write */
+ ret = H5Dwrite(dataseti, H5T_NATIVE_INT, mspaceid1, fspaceid, H5P_DEFAULT, vector);
+ VRFY((ret >= 0), "dataset independent write succeed");
+
+ dxfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxfer_plist >= 0), "");
+
+ ret = H5Pset_dxpl_mpio(dxfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "MPIO data transfer property list succeed");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(dxfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* collective write */
+ ret = H5Dwrite(datasetc, H5T_NATIVE_INT, mspaceid1, fspaceid, dxfer_plist, vector);
+ VRFY((ret >= 0), "dataset collective write succeed");
+
+ ret = H5Sclose(mspaceid1);
+ VRFY((ret >= 0), "");
+
+ ret = H5Sclose(fspaceid);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close dataset.
+ */
+ ret = H5Dclose(datasetc);
+ VRFY((ret >= 0), "");
+
+ ret = H5Dclose(dataseti);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close the file.
+ */
+ ret = H5Fclose(file);
+ VRFY((ret >= 0), "");
+ /*
+ * Close property list
+ */
+
+ ret = H5Pclose(facc_plist);
+ VRFY((ret >= 0), "");
+ ret = H5Pclose(dxfer_plist);
+ VRFY((ret >= 0), "");
+ ret = H5Pclose(dcrt_plist);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Open the file.
+ */
+
+ /***
+
+ For testing collective hyperslab selection write.
+ In this test, we use an independent read to check
+ the correctness of the collective write against the
+ independent write.
+
+ In order to test this feature thoroughly, we choose
+ a different selection set for reading the data out.
+
+
+ ***/
+
+ /* Obtain file access property list with MPI-IO driver */
+ facc_plist = create_faccess_plist(comm, info, facc_type);
+ VRFY((facc_plist >= 0), "");
+
+ file = H5Fopen(filename, H5F_ACC_RDONLY, facc_plist);
+ VRFY((file >= 0), "H5Fopen succeeded");
+
+ /*
+ * Open the dataset.
+ */
+ datasetc = H5Dopen2(file, "collect_write", H5P_DEFAULT);
+ VRFY((datasetc >= 0), "H5Dopen2 succeeded");
+
+ dataseti = H5Dopen2(file, "independ_write", H5P_DEFAULT);
+ VRFY((dataseti >= 0), "H5Dopen2 succeeded");
+
+ /*
+ * Get dataspace of the open dataset.
+ */
+ fspaceid = H5Dget_space(datasetc);
+ VRFY((fspaceid >= 0), "file dataspace obtained succeeded");
+
+ fspaceid1 = H5Dget_space(dataseti);
+ VRFY((fspaceid1 >= 0), "file dataspace obtained succeeded");
+
+ /* The First selection for FILE to read
+ *
+ * block (1,1)
+ * stride (1,1)
+ * count (3,768/mpi_size)
+ * start (1,2+768*mpi_rank/mpi_size)
+ *
+ */
+ start[0] = RFFHSTART0;
+ start[1] = (hsize_t)(RFFHSTART1 + mpi_rank * RFFHCOUNT1);
+ block[0] = RFFHBLOCK0;
+ block[1] = RFFHBLOCK1;
+ stride[0] = RFFHSTRIDE0;
+ stride[1] = RFFHSTRIDE1;
+ count[0] = RFFHCOUNT0;
+ count[1] = RFFHCOUNT1;
+
+ /* The first selection of the dataset generated by collective write */
+ ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /* The first selection of the dataset generated by independent write */
+ ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /* The Second selection for FILE to read
+ *
+ * block (1,1)
+ * stride (1,1)
+ * count (3,1536/mpi_size)
+ * start (2,4+1536*mpi_rank/mpi_size)
+ *
+ */
+
+ start[0] = RFSHSTART0;
+ start[1] = (hsize_t)(RFSHSTART1 + RFSHCOUNT1 * mpi_rank);
+ block[0] = RFSHBLOCK0;
+ block[1] = RFSHBLOCK1;
+ stride[0] = RFSHSTRIDE0;
+ stride[1] = RFSHSTRIDE1;
+ count[0] = RFSHCOUNT0;
+ count[1] = RFSHCOUNT1;
+
+ /* The second selection of the dataset generated by collective write */
+ ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /* The second selection of the dataset generated by independent write */
+ ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /*
+ * Create memory dataspace.
+ * rank = 2
+ * mdim1 = 9
+ * mdim2 = 3600
+ *
+ */
+ mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL);
+
+ /*
+ * Select two hyperslabs in memory. The hyperslabs have the same
+ * size and shape as the selected hyperslabs for the file dataspace;
+ * only the starting point is different.
+ * The first selection
+ * block (1,1)
+ * stride (1,1)
+ * count (3,768/mpi_size)
+ * start (0,768*mpi_rank/mpi_size)
+ *
+ */
+
+ start[0] = RMFHSTART0;
+ start[1] = (hsize_t)(RMFHSTART1 + mpi_rank * RMFHCOUNT1);
+ block[0] = RMFHBLOCK0;
+ block[1] = RMFHBLOCK1;
+ stride[0] = RMFHSTRIDE0;
+ stride[1] = RMFHSTRIDE1;
+ count[0] = RMFHCOUNT0;
+ count[1] = RMFHCOUNT1;
+
+ ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /*
+ * Select two hyperslabs in memory. The hyperslabs have the same
+ * size and shape as the selected hyperslabs for the file dataspace;
+ * only the starting point is different.
+ * The second selection
+ * block (1,1)
+ * stride(1,1)
+ * count (3,1536/mpi_size)
+ * start (1,2+1536*mpi_rank/mpi_size)
+ *
+ */
+ start[0] = RMSHSTART0;
+ start[1] = (hsize_t)(RMSHSTART1 + mpi_rank * RMSHCOUNT1);
+ block[0] = RMSHBLOCK0;
+ block[1] = RMSHBLOCK1;
+ stride[0] = RMSHSTRIDE0;
+ stride[1] = RMSHSTRIDE1;
+ count[0] = RMSHCOUNT0;
+ count[1] = RMSHCOUNT1;
+
+ ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /*
+ * Initialize data buffer.
+ */
+
+ HDmemset(matrix_out, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
+ HDmemset(matrix_out1, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
+ /*
+ * Read data back to the buffer matrix_out.
+ */
+
+ ret = H5Dread(datasetc, H5T_NATIVE_INT, mspaceid, fspaceid, H5P_DEFAULT, matrix_out);
+ VRFY((ret >= 0), "H5D independent read succeed");
+
+ ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid, H5P_DEFAULT, matrix_out1);
+ VRFY((ret >= 0), "H5D independent read succeed");
+
+ ret = 0;
+
+ for (i = 0; i < MSPACE_DIM1 * MSPACE_DIM2 * mpi_size; i++) {
+ if (matrix_out[i] != matrix_out1[i])
+ ret = -1;
+ if (ret < 0)
+ break;
+ }
+
+ VRFY((ret >= 0), "H5D irregular collective write succeed");
+
+ /*
+ * Close memory file and memory dataspaces.
+ */
+ ret = H5Sclose(mspaceid);
+ VRFY((ret >= 0), "");
+ ret = H5Sclose(fspaceid);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close dataset.
+ */
+ ret = H5Dclose(dataseti);
+ VRFY((ret >= 0), "");
+
+ ret = H5Dclose(datasetc);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close property list
+ */
+
+ ret = H5Pclose(facc_plist);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close the file.
+ */
+ ret = H5Fclose(file);
+ VRFY((ret >= 0), "");
+
+ if (vector)
+ HDfree(vector);
+ if (matrix_out)
+ HDfree(matrix_out);
+ if (matrix_out1)
+ HDfree(matrix_out1);
+
+ return;
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_read_test
+ *
+ * Purpose: To test the collective irregular hyperslab read in chunk
+ * storage, using the dataset written by coll_write_test()
+ *
+ * Return: void
+ *
+ * Programmer: Unknown
+ * Dec 2nd, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+coll_read_test(void)
+{
+
+ const char *filename;
+ hid_t facc_plist, dxfer_plist;
+ hid_t file, dataseti; /* File and dataset identifiers */
+ hid_t mspaceid, fspaceid1; /* Dataspace identifiers */
+
+ hsize_t mdim[2]; /* Dimension sizes of the dataset in memory when we
+ * read selection from the dataset on the disk
+ */
+
+ hsize_t start[2]; /* Start of hyperslab */
+ hsize_t stride[2]; /* Stride of hyperslab */
+ hsize_t count[2]; /* Block count */
+ hsize_t block[2]; /* Block sizes */
+ herr_t ret;
+
+ int i;
+
+ int *matrix_out;
+ int *matrix_out1; /* Buffer to read from the dataset */
+
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ /*set up MPI parameters */
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /* Obtain file name */
+ filename = PARATESTFILE /* GetTestParameters() */;
+
+ /* Initialize the buffer */
+
+ mdim[0] = MSPACE_DIM1;
+ mdim[1] = (hsize_t)(MSPACE_DIM2 * mpi_size);
+ matrix_out = (int *)HDmalloc(sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
+ matrix_out1 = (int *)HDmalloc(sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
+
+ /*** For testing collective hyperslab selection read ***/
+
+ /* Obtain file access property list */
+ facc_plist = create_faccess_plist(comm, info, facc_type);
+ VRFY((facc_plist >= 0), "");
+
+ /*
+ * Open the file.
+ */
+ file = H5Fopen(filename, H5F_ACC_RDONLY, facc_plist);
+ VRFY((file >= 0), "H5Fopen succeeded");
+
+ /*
+ * Open the dataset.
+ */
+ dataseti = H5Dopen2(file, "independ_write", H5P_DEFAULT);
+ VRFY((dataseti >= 0), "H5Dopen2 succeeded");
+
+ /*
+ * Get dataspace of the open dataset.
+ */
+ fspaceid1 = H5Dget_space(dataseti);
+ VRFY((fspaceid1 >= 0), "file dataspace obtained succeeded");
+
+ /* The First selection for FILE to read
+ *
+ * block (1,1)
+ * stride (1,1)
+ * count (3,768/mpi_size)
+ * start (1,2+768*mpi_rank/mpi_size)
+ *
+ */
+ start[0] = RFFHSTART0;
+ start[1] = (hsize_t)(RFFHSTART1 + mpi_rank * RFFHCOUNT1);
+ block[0] = RFFHBLOCK0;
+ block[1] = RFFHBLOCK1;
+ stride[0] = RFFHSTRIDE0;
+ stride[1] = RFFHSTRIDE1;
+ count[0] = RFFHCOUNT0;
+ count[1] = RFFHCOUNT1;
+
+ ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /* The Second selection for FILE to read
+ *
+ * block (1,1)
+ * stride (1,1)
+ * count (3,1536/mpi_size)
+ * start (2,4+1536*mpi_rank/mpi_size)
+ *
+ */
+ start[0] = RFSHSTART0;
+ start[1] = (hsize_t)(RFSHSTART1 + RFSHCOUNT1 * mpi_rank);
+ block[0] = RFSHBLOCK0;
+ block[1] = RFSHBLOCK1;
+ stride[0] = RFSHSTRIDE0;
+ stride[1] = RFSHSTRIDE1;
+ count[0] = RFSHCOUNT0;
+ count[1] = RFSHCOUNT1;
+
+ ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /*
+ * Create memory dataspace.
+ */
+ mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL);
+
+ /*
+ * Select two hyperslabs in memory. The hyperslabs have the same
+ * size and shape as the selected hyperslabs for the file dataspace.
+ * Only the starting point is different.
+ * The first selection
+ * block (1,1)
+ * stride (1,1)
+ * count (3,768/mpi_size)
+ * start (0,768*mpi_rank/mpi_size)
+ *
+ */
+
+ start[0] = RMFHSTART0;
+ start[1] = (hsize_t)(RMFHSTART1 + mpi_rank * RMFHCOUNT1);
+ block[0] = RMFHBLOCK0;
+ block[1] = RMFHBLOCK1;
+ stride[0] = RMFHSTRIDE0;
+ stride[1] = RMFHSTRIDE1;
+ count[0] = RMFHCOUNT0;
+ count[1] = RMFHCOUNT1;
+ ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /*
+ * Select two hyperslabs in memory. The hyperslabs have the same
+ * size and shape as the selected hyperslabs for the file dataspace;
+ * only the starting point is different.
+ * The second selection
+ * block (1,1)
+ * stride(1,1)
+ * count (3,1536/mpi_size)
+ * start (1,2+1536*mpi_rank/mpi_size)
+ *
+ */
+ start[0] = RMSHSTART0;
+ start[1] = (hsize_t)(RMSHSTART1 + mpi_rank * RMSHCOUNT1);
+ block[0] = RMSHBLOCK0;
+ block[1] = RMSHBLOCK1;
+ stride[0] = RMSHSTRIDE0;
+ stride[1] = RMSHSTRIDE1;
+ count[0] = RMSHCOUNT0;
+ count[1] = RMSHCOUNT1;
+ ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /*
+ * Initialize data buffer.
+ */
+
+ HDmemset(matrix_out, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
+ HDmemset(matrix_out1, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
+
+ /*
+ * Read data back to the buffer matrix_out.
+ */
+
+ dxfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxfer_plist >= 0), "");
+
+ ret = H5Pset_dxpl_mpio(dxfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "MPIO data transfer property list succeed");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(dxfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* Collective read */
+ ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1, dxfer_plist, matrix_out);
+ VRFY((ret >= 0), "H5D collecive read succeed");
+
+ ret = H5Pclose(dxfer_plist);
+ VRFY((ret >= 0), "");
+
+ /* Independent read */
+ ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1, H5P_DEFAULT, matrix_out1);
+ VRFY((ret >= 0), "H5D independent read succeed");
+
+ ret = 0;
+ for (i = 0; i < MSPACE_DIM1 * MSPACE_DIM2 * mpi_size; i++) {
+ if (matrix_out[i] != matrix_out1[i])
+ ret = -1;
+ if (ret < 0)
+ break;
+ }
+ VRFY((ret >= 0), "H5D contiguous irregular collective read succeed");
+
+ /*
+ * Free read buffers.
+ */
+ HDfree(matrix_out);
+ HDfree(matrix_out1);
+
+ /*
+ * Close memory file and memory dataspaces.
+ */
+ ret = H5Sclose(mspaceid);
+ VRFY((ret >= 0), "");
+ ret = H5Sclose(fspaceid1);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close dataset.
+ */
+ ret = H5Dclose(dataseti);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close property list
+ */
+ ret = H5Pclose(facc_plist);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close the file.
+ */
+ ret = H5Fclose(file);
+ VRFY((ret >= 0), "");
+
+ return;
+}
+
+/****************************************************************
+**
+** lower_dim_size_comp_test__select_checker_board():
+**
+** Given a dataspace of tgt_rank, and dimensions:
+**
+** (mpi_size + 1), edge_size, ... , edge_size
+**
+** and a checker_edge_size, select a checker
+** board selection of a sel_rank (sel_rank < tgt_rank)
+** dimensional slice through the dataspace parallel to the
+** sel_rank fastest changing indices, with origin (in the
+** higher indices) as indicated by the start array.
+**
+** Note that this function is hard-coded to presume a
+** maximum dataspace rank of 5.
+**
+** While this maximum is declared as a constant, increasing
+** it will require extensive coding in addition to changing
+** the value of the constant.
+**
+** JRM -- 11/11/09
+**
+****************************************************************/
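+
+/****************************************************************
+**
+** Illustrative sketch (assuming a selected extent of 10 and a
+** checker_edge_size of 3, as used by the caller below): each selected
+** dimension is covered by two hyperslab families, both with stride
+** 2 * checker_edge_size = 6 and block 3:
+**
+** "base" family, start 0: indices 0-2 and 6-8
+** "offset" family, start 3: indices 3-5 and 9-11 (clipped to 9)
+**
+** The (i + j + k + l + m) % 2 test in the nested loops below alternates
+** the two families across dimensions to form the checker board, and the
+** final H5S_SELECT_AND selection clips any blocks that extend past the
+** dataspace extent.
+**
+****************************************************************/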
+
+#define LDSCT_DS_RANK 5
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#define LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK 0
+#endif
+
+#define LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG 0
+
+static void
+lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t tgt_sid, const int tgt_rank,
+ const hsize_t dims[LDSCT_DS_RANK], const int checker_edge_size,
+ const int sel_rank, hsize_t sel_start[])
+{
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ const char *fcnName = "lower_dim_size_comp_test__select_checker_board():";
+#endif
+ hbool_t first_selection = TRUE;
+ int i, j, k, l, m;
+ int ds_offset;
+ int sel_offset;
+ const int test_max_rank = LDSCT_DS_RANK; /* must update code if */
+ /* this changes */
+ hsize_t base_count;
+ hsize_t offset_count;
+ hsize_t start[LDSCT_DS_RANK];
+ hsize_t stride[LDSCT_DS_RANK];
+ hsize_t count[LDSCT_DS_RANK];
+ hsize_t block[LDSCT_DS_RANK];
+ herr_t ret; /* Generic return value */
+
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: dims/checker_edge_size = %d %d %d %d %d / %d\n", fcnName, mpi_rank,
+ (int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)dims[4], checker_edge_size);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ HDassert(0 < checker_edge_size);
+ HDassert(0 < sel_rank);
+ HDassert(sel_rank <= tgt_rank);
+ HDassert(tgt_rank <= test_max_rank);
+ HDassert(test_max_rank <= LDSCT_DS_RANK);
+
+ sel_offset = test_max_rank - sel_rank;
+ HDassert(sel_offset >= 0);
+
+ ds_offset = test_max_rank - tgt_rank;
+ HDassert(ds_offset >= 0);
+ HDassert(ds_offset <= sel_offset);
+
+ HDassert((hsize_t)checker_edge_size <= dims[sel_offset]);
+ HDassert(dims[sel_offset] == 10);
+
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n", fcnName, mpi_rank, sel_rank, sel_offset);
+ HDfprintf(stdout, "%s:%d: tgt_rank/ds_offset = %d/%d.\n", fcnName, mpi_rank, tgt_rank, ds_offset);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ /* First, compute the base count (which assumes start == 0
+ * for the associated offset) and offset_count (which
+ * assumes start == checker_edge_size for the associated
+ * offset).
+ *
+ * Note that the following computation depends on the C99
+ * requirement that integer division discard any fraction
+ * (truncation towards zero) to function correctly. As we
+ * now require C99, this shouldn't be a problem, but noting
+ * it may save us some pain if we are ever obliged to support
+ * pre-C99 compilers again.
+ */
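+
+ /* Worked example (a sketch, assuming the asserted extent of 10 and a
+ * checker_edge_size of 3, as passed by the caller below):
+ *
+ * base_count = 10 / 6 = 1; since 10 % 6 = 4 > 0, base_count becomes 2
+ * offset_count = (10 - 3) / 6 = 1; since 7 % 6 = 1 > 0, offset_count becomes 2
+ *
+ * i.e. two "base" blocks and two "offset" blocks along each selected
+ * dimension, with any block that runs past the extent clipped off by the
+ * final H5S_SELECT_AND selection.
+ */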
+
+ base_count = dims[sel_offset] / (hsize_t)(checker_edge_size * 2);
+
+ if ((dims[sel_rank] % (hsize_t)(checker_edge_size * 2)) > 0) {
+
+ base_count++;
+ }
+
+ offset_count =
+ (hsize_t)((dims[sel_offset] - (hsize_t)checker_edge_size) / ((hsize_t)(checker_edge_size * 2)));
+
+ if (((dims[sel_rank] - (hsize_t)checker_edge_size) % ((hsize_t)(checker_edge_size * 2))) > 0) {
+
+ offset_count++;
+ }
+
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: base_count/offset_count = %d/%d.\n", fcnName, mpi_rank, base_count,
+ offset_count);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ /* Now set up the stride and block arrays, and portions of the start
+ * and count arrays that will not be altered during the selection of
+ * the checker board.
+ */
+ i = 0;
+ while (i < ds_offset) {
+
+ /* these values should never be used */
+ start[i] = 0;
+ stride[i] = 0;
+ count[i] = 0;
+ block[i] = 0;
+
+ i++;
+ }
+
+ while (i < sel_offset) {
+
+ start[i] = sel_start[i];
+ stride[i] = 2 * dims[i];
+ count[i] = 1;
+ block[i] = 1;
+
+ i++;
+ }
+
+ while (i < test_max_rank) {
+
+ stride[i] = (hsize_t)(2 * checker_edge_size);
+ block[i] = (hsize_t)checker_edge_size;
+
+ i++;
+ }
+
+ i = 0;
+ do {
+ if (0 >= sel_offset) {
+
+ if (i == 0) {
+
+ start[0] = 0;
+ count[0] = base_count;
+ }
+ else {
+
+ start[0] = (hsize_t)checker_edge_size;
+ count[0] = offset_count;
+ }
+ }
+
+ j = 0;
+ do {
+ if (1 >= sel_offset) {
+
+ if (j == 0) {
+
+ start[1] = 0;
+ count[1] = base_count;
+ }
+ else {
+
+ start[1] = (hsize_t)checker_edge_size;
+ count[1] = offset_count;
+ }
+ }
+
+ k = 0;
+ do {
+ if (2 >= sel_offset) {
+
+ if (k == 0) {
+
+ start[2] = 0;
+ count[2] = base_count;
+ }
+ else {
+
+ start[2] = (hsize_t)checker_edge_size;
+ count[2] = offset_count;
+ }
+ }
+
+ l = 0;
+ do {
+ if (3 >= sel_offset) {
+
+ if (l == 0) {
+
+ start[3] = 0;
+ count[3] = base_count;
+ }
+ else {
+
+ start[3] = (hsize_t)checker_edge_size;
+ count[3] = offset_count;
+ }
+ }
+
+ m = 0;
+ do {
+ if (4 >= sel_offset) {
+
+ if (m == 0) {
+
+ start[4] = 0;
+ count[4] = base_count;
+ }
+ else {
+
+ start[4] = (hsize_t)checker_edge_size;
+ count[4] = offset_count;
+ }
+ }
+
+ if (((i + j + k + l + m) % 2) == 0) {
+
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+
+ HDfprintf(stdout, "%s%d: *** first_selection = %d ***\n", fcnName, mpi_rank,
+ (int)first_selection);
+ HDfprintf(stdout, "%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n", fcnName, mpi_rank, i,
+ j, k, l, m);
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)start[0], (int)start[1], (int)start[2], (int)start[3],
+ (int)start[4]);
+ HDfprintf(stdout, "%s:%d: stride = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)stride[0], (int)stride[1], (int)stride[2], (int)stride[3],
+ (int)stride[4]);
+ HDfprintf(stdout, "%s:%d: count = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)count[0], (int)count[1], (int)count[2], (int)count[3],
+ (int)count[4]);
+ HDfprintf(stdout, "%s:%d: block = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)block[0], (int)block[1], (int)block[2], (int)block[3],
+ (int)block[4]);
+ HDfprintf(stdout, "%s:%d: n-cube extent dims = %d.\n", fcnName, mpi_rank,
+ H5Sget_simple_extent_ndims(tgt_sid));
+ HDfprintf(stdout, "%s:%d: selection rank = %d.\n", fcnName, mpi_rank,
+ sel_rank);
+ }
+#endif
+
+ if (first_selection) {
+
+ first_selection = FALSE;
+
+ ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_SET, &(start[ds_offset]),
+ &(stride[ds_offset]), &(count[ds_offset]),
+ &(block[ds_offset]));
+
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(SET) succeeded");
+ }
+ else {
+
+ ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_OR, &(start[ds_offset]),
+ &(stride[ds_offset]), &(count[ds_offset]),
+ &(block[ds_offset]));
+
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(OR) succeeded");
+ }
+ }
+
+ m++;
+
+ } while ((m <= 1) && (4 >= sel_offset));
+
+ l++;
+
+ } while ((l <= 1) && (3 >= sel_offset));
+
+ k++;
+
+ } while ((k <= 1) && (2 >= sel_offset));
+
+ j++;
+
+ } while ((j <= 1) && (1 >= sel_offset));
+
+ i++;
+
+ } while ((i <= 1) && (0 >= sel_offset));
+
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(tgt_sid));
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ /* Clip the selection back to the dataspace proper. */
+
+ for (i = 0; i < test_max_rank; i++) {
+
+ start[i] = 0;
+ stride[i] = dims[i];
+ count[i] = 1;
+ block[i] = dims[i];
+ }
+
+ ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_AND, start, stride, count, block);
+
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(AND) succeeded");
+
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(tgt_sid));
+ HDfprintf(stdout, "%s%d: done.\n", fcnName, mpi_rank);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ return;
+
+} /* lower_dim_size_comp_test__select_checker_board() */
+
+/****************************************************************
+**
+** lower_dim_size_comp_test__verify_data():
+**
+** Examine the supplied buffer to see if it contains the
+** expected data. Return TRUE if it does, and FALSE
+** otherwise.
+**
+** The supplied buffer is presumed to be this process's slice
+** of the target data set. Each such slice will be an
+** n-cube of rank (rank -1) and the supplied edge_size with
+** origin (mpi_rank, 0, ... , 0) in the target data set.
+**
+** Further, the buffer is presumed to be the result of reading
+** or writing a checker board selection of an m (1 <= m <
+** rank) dimensional slice through this process's slice
+** of the target data set. Also, this slice must be parallel
+** to the fastest changing indices.
+**
+** It is further presumed that the buffer was zeroed before
+** the read/write, and that the full target data set (i.e.
+** the buffer/data set for all processes) was initialized
+** with the natural numbers listed in order from the origin
+** along the fastest changing axis.
+**
+** Thus for a 20x10x10 dataset, the value stored in location
+** (x, y, z) (assuming that z is the fastest changing index
+** and x the slowest) is assumed to be:
+**
+** (10 * 10 * x) + (10 * y) + z
+**
+** Further, supposing that this is process 10, this process's
+** slice of the dataset would be a 10 x 10 2-cube with origin
+** (10, 0, 0) in the data set, and would be initialized (prior
+** to the checkerboard selection) as follows:
+**
+** 1000, 1001, 1002, ... 1008, 1009
+** 1010, 1011, 1012, ... 1018, 1019
+** . . . . .
+** . . . . .
+** . . . . .
+** 1090, 1091, 1092, ... 1098, 1099
+**
+** In the case of a read from the process's slice of another
+** data set of different rank, the values expected will have
+** to be adjusted accordingly. This is done via the
+** first_expected_val parameter.
+**
+** Finally, the function presumes that the first element
+** of the buffer resides either at the origin of either
+** a selected or an unselected checker. (Translation:
+** if partial checkers appear in the buffer, they will
+** intersect the edges of the n-cube opposite the origin.)
+**
+****************************************************************/
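+
+/****************************************************************
+**
+** A small worked instance of the mapping above (illustrative only):
+** for the 20x10x10 example, the value expected at (x, y, z) is
+**
+** (10 * 10 * x) + (10 * y) + z
+**
+** so element (10, 3, 7) is expected to hold 1000 + 30 + 7 = 1037, and
+** the loops below simply advance expected_value by one per buffer
+** element, zeroing the buffer for re-use as they go.
+**
+****************************************************************/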
+
+#define LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG 0
+
+static hbool_t
+lower_dim_size_comp_test__verify_data(uint32_t *buf_ptr,
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ const int mpi_rank,
+#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
+ const int rank, const int edge_size, const int checker_edge_size,
+ uint32_t first_expected_val, hbool_t buf_starts_in_checker)
+{
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ const char *fcnName = "lower_dim_size_comp_test__verify_data():";
+#endif
+ hbool_t good_data = TRUE;
+ hbool_t in_checker;
+ hbool_t start_in_checker[5];
+ uint32_t expected_value;
+ uint32_t *val_ptr;
+ int i, j, k, l, m; /* to track position in n-cube */
+ int v, w, x, y, z; /* to track position in checker */
+ const int test_max_rank = 5; /* code changes needed if this is increased */
+
+ HDassert(buf_ptr != NULL);
+ HDassert(0 < rank);
+ HDassert(rank <= test_max_rank);
+ HDassert(edge_size >= 6);
+ HDassert(0 < checker_edge_size);
+ HDassert(checker_edge_size <= edge_size);
+ HDassert(test_max_rank <= LDSCT_DS_RANK);
+
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s mpi_rank = %d.\n", fcnName, mpi_rank);
+ HDfprintf(stdout, "%s rank = %d.\n", fcnName, rank);
+ HDfprintf(stdout, "%s edge_size = %d.\n", fcnName, edge_size);
+ HDfprintf(stdout, "%s checker_edge_size = %d.\n", fcnName, checker_edge_size);
+ HDfprintf(stdout, "%s first_expected_val = %d.\n", fcnName, (int)first_expected_val);
+ HDfprintf(stdout, "%s starts_in_checker = %d.\n", fcnName, (int)buf_starts_in_checker);
+ }
+#endif
+
+ val_ptr = buf_ptr;
+ expected_value = first_expected_val;
+
+ i = 0;
+ v = 0;
+ start_in_checker[0] = buf_starts_in_checker;
+ do {
+ if (v >= checker_edge_size) {
+
+ start_in_checker[0] = !start_in_checker[0];
+ v = 0;
+ }
+
+ j = 0;
+ w = 0;
+ start_in_checker[1] = start_in_checker[0];
+ do {
+ if (w >= checker_edge_size) {
+
+ start_in_checker[1] = !start_in_checker[1];
+ w = 0;
+ }
+
+ k = 0;
+ x = 0;
+ start_in_checker[2] = start_in_checker[1];
+ do {
+ if (x >= checker_edge_size) {
+
+ start_in_checker[2] = !start_in_checker[2];
+ x = 0;
+ }
+
+ l = 0;
+ y = 0;
+ start_in_checker[3] = start_in_checker[2];
+ do {
+ if (y >= checker_edge_size) {
+
+ start_in_checker[3] = !start_in_checker[3];
+ y = 0;
+ }
+
+ m = 0;
+ z = 0;
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%d, %d, %d, %d, %d:", i, j, k, l, m);
+ }
+#endif
+ in_checker = start_in_checker[3];
+ do {
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, " %d", (int)(*val_ptr));
+ }
+#endif
+ if (z >= checker_edge_size) {
+
+ in_checker = !in_checker;
+ z = 0;
+ }
+
+ if (in_checker) {
+
+ if (*val_ptr != expected_value) {
+
+ good_data = FALSE;
+ }
+
+ /* zero out buffer for re-use */
+ *val_ptr = 0;
+ }
+ else if (*val_ptr != 0) {
+
+ good_data = FALSE;
+
+ /* zero out buffer for re-use */
+ *val_ptr = 0;
+ }
+
+ val_ptr++;
+ expected_value++;
+ m++;
+ z++;
+
+ } while ((rank >= (test_max_rank - 4)) && (m < edge_size));
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "\n");
+ }
+#endif
+ l++;
+ y++;
+ } while ((rank >= (test_max_rank - 3)) && (l < edge_size));
+ k++;
+ x++;
+ } while ((rank >= (test_max_rank - 2)) && (k < edge_size));
+ j++;
+ w++;
+ } while ((rank >= (test_max_rank - 1)) && (j < edge_size));
+ i++;
+ v++;
+ } while ((rank >= test_max_rank) && (i < edge_size));
+
+ return (good_data);
+
+} /* lower_dim_size_comp_test__verify_data() */
+
+/*-------------------------------------------------------------------------
+ * Function: lower_dim_size_comp_test__run_test()
+ *
+ * Purpose: Verify that a bug in the computation of the size of the
+ * lower dimensions of a dataspace in H5S_obtain_datatype()
+ * has been corrected.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 11/11/09
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define LDSCT_DS_RANK 5
+
+static void
+lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_collective_io,
+ const hid_t dset_type)
+{
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ const char *fcnName = "lower_dim_size_comp_test__run_test()";
+ int rank;
+ hsize_t dims[32];
+ hsize_t max_dims[32];
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+ const char *filename;
+ hbool_t data_ok = FALSE;
+ hbool_t mis_match = FALSE;
+ int i;
+ int start_index;
+ int stop_index;
+ int mrc;
+ int mpi_rank;
+ int mpi_size;
+ MPI_Comm mpi_comm = MPI_COMM_NULL;
+ MPI_Info mpi_info = MPI_INFO_NULL;
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist = H5P_DEFAULT;
+ size_t small_ds_size;
+ size_t small_ds_slice_size;
+ size_t large_ds_size;
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ size_t large_ds_slice_size;
+#endif
+ uint32_t expected_value;
+ uint32_t *small_ds_buf_0 = NULL;
+ uint32_t *small_ds_buf_1 = NULL;
+ uint32_t *large_ds_buf_0 = NULL;
+ uint32_t *large_ds_buf_1 = NULL;
+ uint32_t *ptr_0;
+ uint32_t *ptr_1;
+ hsize_t small_chunk_dims[LDSCT_DS_RANK];
+ hsize_t large_chunk_dims[LDSCT_DS_RANK];
+ hsize_t small_dims[LDSCT_DS_RANK];
+ hsize_t large_dims[LDSCT_DS_RANK];
+ hsize_t start[LDSCT_DS_RANK];
+ hsize_t stride[LDSCT_DS_RANK];
+ hsize_t count[LDSCT_DS_RANK];
+ hsize_t block[LDSCT_DS_RANK];
+ hsize_t small_sel_start[LDSCT_DS_RANK];
+ hsize_t large_sel_start[LDSCT_DS_RANK];
+ hid_t full_mem_small_ds_sid;
+ hid_t full_file_small_ds_sid;
+ hid_t mem_small_ds_sid;
+ hid_t file_small_ds_sid;
+ hid_t full_mem_large_ds_sid;
+ hid_t full_file_large_ds_sid;
+ hid_t mem_large_ds_sid;
+ hid_t file_large_ds_sid;
+ hid_t small_ds_dcpl_id = H5P_DEFAULT;
+ hid_t large_ds_dcpl_id = H5P_DEFAULT;
+ hid_t small_dataset; /* Dataset ID */
+ hid_t large_dataset; /* Dataset ID */
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ HDassert(mpi_size >= 1);
+
+ mpi_comm = MPI_COMM_WORLD;
+ mpi_info = MPI_INFO_NULL;
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: chunk_edge_size = %d.\n", fcnName, mpi_rank, (int)chunk_edge_size);
+ HDfprintf(stdout, "%s:%d: use_collective_io = %d.\n", fcnName, mpi_rank, (int)use_collective_io);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ small_ds_size = (size_t)((mpi_size + 1) * 1 * 1 * 10 * 10);
+ small_ds_slice_size = (size_t)(1 * 1 * 10 * 10);
+ large_ds_size = (size_t)((mpi_size + 1) * 10 * 10 * 10 * 10);
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ large_ds_slice_size = (size_t)(10 * 10 * 10 * 10);
+
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: small ds size / slice size = %d / %d.\n", fcnName, mpi_rank,
+ (int)small_ds_size, (int)small_ds_slice_size);
+ HDfprintf(stdout, "%s:%d: large ds size / slice size = %d / %d.\n", fcnName, mpi_rank,
+ (int)large_ds_size, (int)large_ds_slice_size);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ /* Allocate buffers */
+ small_ds_buf_0 = (uint32_t *)HDmalloc(sizeof(uint32_t) * small_ds_size);
+ VRFY((small_ds_buf_0 != NULL), "malloc of small_ds_buf_0 succeeded");
+
+ small_ds_buf_1 = (uint32_t *)HDmalloc(sizeof(uint32_t) * small_ds_size);
+ VRFY((small_ds_buf_1 != NULL), "malloc of small_ds_buf_1 succeeded");
+
+ large_ds_buf_0 = (uint32_t *)HDmalloc(sizeof(uint32_t) * large_ds_size);
+ VRFY((large_ds_buf_0 != NULL), "malloc of large_ds_buf_0 succeeded");
+
+ large_ds_buf_1 = (uint32_t *)HDmalloc(sizeof(uint32_t) * large_ds_size);
+ VRFY((large_ds_buf_1 != NULL), "malloc of large_ds_buf_1 succeeded");
+
+ /* initialize the buffers */
+
+ ptr_0 = small_ds_buf_0;
+ ptr_1 = small_ds_buf_1;
+
+ for (i = 0; i < (int)small_ds_size; i++) {
+
+ *ptr_0 = (uint32_t)i;
+ *ptr_1 = 0;
+
+ ptr_0++;
+ ptr_1++;
+ }
+
+ ptr_0 = large_ds_buf_0;
+ ptr_1 = large_ds_buf_1;
+
+ for (i = 0; i < (int)large_ds_size; i++) {
+
+ *ptr_0 = (uint32_t)i;
+ *ptr_1 = 0;
+
+ ptr_0++;
+ ptr_1++;
+ }
+
+ /* get the file name */
+
+ filename = (const char *)PARATESTFILE /* GetTestParameters() */;
+ HDassert(filename != NULL);
+
+ /* ----------------------------------------
+ * CREATE AN HDF5 FILE WITH PARALLEL ACCESS
+ * ---------------------------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(mpi_comm, mpi_info, facc_type);
+ VRFY((acc_tpl >= 0), "create_faccess_plist() succeeded");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ MESG("File opened.");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded");
+
+ /* setup dims: */
+ small_dims[0] = (hsize_t)(mpi_size + 1);
+ small_dims[1] = 1;
+ small_dims[2] = 1;
+ small_dims[3] = 10;
+ small_dims[4] = 10;
+
+ large_dims[0] = (hsize_t)(mpi_size + 1);
+ large_dims[1] = 10;
+ large_dims[2] = 10;
+ large_dims[3] = 10;
+ large_dims[4] = 10;
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: small_dims[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)small_dims[0],
+ (int)small_dims[1], (int)small_dims[2], (int)small_dims[3], (int)small_dims[4]);
+ HDfprintf(stdout, "%s:%d: large_dims[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)large_dims[0],
+ (int)large_dims[1], (int)large_dims[2], (int)large_dims[3], (int)large_dims[4]);
+ }
+#endif
+
+ /* create dataspaces */
+
+ full_mem_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
+ VRFY((full_mem_small_ds_sid >= 0), "H5Screate_simple() full_mem_small_ds_sid succeeded");
+
+ full_file_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
+ VRFY((full_file_small_ds_sid >= 0), "H5Screate_simple() full_file_small_ds_sid succeeded");
+
+ mem_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
+ VRFY((mem_small_ds_sid >= 0), "H5Screate_simple() mem_small_ds_sid succeeded");
+
+ file_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
+ VRFY((file_small_ds_sid >= 0), "H5Screate_simple() file_small_ds_sid succeeded");
+
+ full_mem_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
+ VRFY((full_mem_large_ds_sid >= 0), "H5Screate_simple() full_mem_large_ds_sid succeeded");
+
+ full_file_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
+ VRFY((full_file_large_ds_sid >= 0), "H5Screate_simple() full_file_large_ds_sid succeeded");
+
+ mem_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
+ VRFY((mem_large_ds_sid >= 0), "H5Screate_simple() mem_large_ds_sid succeeded");
+
+ file_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
+ VRFY((file_large_ds_sid >= 0), "H5Screate_simple() file_large_ds_sid succeeded");
+
+ /* Select the entire extent of the full small ds dataspaces */
+ ret = H5Sselect_all(full_mem_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(full_mem_small_ds_sid) succeeded");
+
+ ret = H5Sselect_all(full_file_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(full_file_small_ds_sid) succeeded");
+
+ /* Select the entire extent of the full large ds dataspaces */
+ ret = H5Sselect_all(full_mem_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(full_mem_large_ds_sid) succeeded");
+
+ ret = H5Sselect_all(full_file_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(full_file_large_ds_sid) succeeded");
+
+ /* if chunk edge size is greater than zero, set up the small and
+ * large data set creation property lists to specify chunked
+ * datasets.
+ */
+ if (chunk_edge_size > 0) {
+
+ small_chunk_dims[0] = (hsize_t)(1);
+ small_chunk_dims[1] = small_chunk_dims[2] = (hsize_t)1;
+ small_chunk_dims[3] = small_chunk_dims[4] = (hsize_t)chunk_edge_size;
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: small chunk dims[] = %d %d %d %d %d\n", fcnName, mpi_rank,
+ (int)small_chunk_dims[0], (int)small_chunk_dims[1], (int)small_chunk_dims[2],
+ (int)small_chunk_dims[3], (int)small_chunk_dims[4]);
+ }
+#endif
+
+ small_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((ret != FAIL), "H5Pcreate() small_ds_dcpl_id succeeded");
+
+ ret = H5Pset_layout(small_ds_dcpl_id, H5D_CHUNKED);
+ VRFY((ret != FAIL), "H5Pset_layout() small_ds_dcpl_id succeeded");
+
+ ret = H5Pset_chunk(small_ds_dcpl_id, 5, small_chunk_dims);
+ VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded");
+
+ large_chunk_dims[0] = (hsize_t)(1);
+ large_chunk_dims[1] = large_chunk_dims[2] = large_chunk_dims[3] = large_chunk_dims[4] =
+ (hsize_t)chunk_edge_size;
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: large chunk dims[] = %d %d %d %d %d\n", fcnName, mpi_rank,
+ (int)large_chunk_dims[0], (int)large_chunk_dims[1], (int)large_chunk_dims[2],
+ (int)large_chunk_dims[3], (int)large_chunk_dims[4]);
+ }
+#endif
+
+ large_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((ret != FAIL), "H5Pcreate() large_ds_dcpl_id succeeded");
+
+ ret = H5Pset_layout(large_ds_dcpl_id, H5D_CHUNKED);
+ VRFY((ret != FAIL), "H5Pset_layout() large_ds_dcpl_id succeeded");
+
+ ret = H5Pset_chunk(large_ds_dcpl_id, 5, large_chunk_dims);
+ VRFY((ret != FAIL), "H5Pset_chunk() large_ds_dcpl_id succeeded");
+ }
+
+ /* create the small dataset */
+ small_dataset = H5Dcreate2(fid, "small_dataset", dset_type, file_small_ds_sid, H5P_DEFAULT,
+ small_ds_dcpl_id, H5P_DEFAULT);
+ VRFY((ret >= 0), "H5Dcreate2() small_dataset succeeded");
+
+ /* create the large dataset */
+ large_dataset = H5Dcreate2(fid, "large_dataset", dset_type, file_large_ds_sid, H5P_DEFAULT,
+ large_ds_dcpl_id, H5P_DEFAULT);
+ VRFY((ret >= 0), "H5Dcreate2() large_dataset succeeded");
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: small/large ds id = %d / %d.\n", fcnName, mpi_rank, (int)small_dataset,
+ (int)large_dataset);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ /* setup xfer property list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
+
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ if (!use_collective_io) {
+
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio_collective_opt() succeeded");
+ }
+
+ /* setup selection to write initial data to the small data sets */
+ start[0] = (hsize_t)(mpi_rank + 1);
+ start[1] = start[2] = start[3] = start[4] = 0;
+
+ stride[0] = (hsize_t)(2 * (mpi_size + 1));
+ stride[1] = stride[2] = 2;
+ stride[3] = stride[4] = 2 * 10;
+
+ count[0] = count[1] = count[2] = count[3] = count[4] = 1;
+
+ block[0] = block[1] = block[2] = 1;
+ block[3] = block[4] = 10;
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: settings for small data set initialization.\n", fcnName, mpi_rank);
+ HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0],
+ (int)start[1], (int)start[2], (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0],
+ (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]);
+ HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0],
+ (int)count[1], (int)count[2], (int)count[3], (int)count[4]);
+ HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0],
+ (int)block[1], (int)block[2], (int)block[3], (int)block[4]);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ /* setup selections for writing initial data to the small data set */
+ ret = H5Sselect_hyperslab(mem_small_ds_sid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
+
+ ret = H5Sselect_hyperslab(file_small_ds_sid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, set) succeeded");
+
+ if (MAINPROCESS) { /* add an additional slice to the selections */
+
+ start[0] = 0;
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: added settings for main process.\n", fcnName, mpi_rank);
+ HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0],
+ (int)start[1], (int)start[2], (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0],
+ (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]);
+ HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0],
+ (int)count[1], (int)count[2], (int)count[3], (int)count[4]);
+ HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0],
+ (int)block[1], (int)block[2], (int)block[3], (int)block[4]);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ ret = H5Sselect_hyperslab(mem_small_ds_sid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) succeeded");
+
+ ret = H5Sselect_hyperslab(file_small_ds_sid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, or) succeeded");
+ }
+
+ check = H5Sselect_valid(mem_small_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_valid(mem_small_ds_sid) returns TRUE");
+
+ check = H5Sselect_valid(file_small_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_valid(file_small_ds_sid) returns TRUE");
+
+ /* write the initial value of the small data set to file */
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: writing init value of small ds to file.\n", fcnName, mpi_rank);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+ ret = H5Dwrite(small_dataset, dset_type, mem_small_ds_sid, file_small_ds_sid, xfer_plist, small_ds_buf_0);
+ VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded");
+
+ /* sync with the other processes before reading data */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes");
+
+ /* read the small data set back to verify that it contains the
+ * expected data. Note that each process reads in the entire
+ * data set and verifies it.
+ */
+ ret = H5Dread(small_dataset, H5T_NATIVE_UINT32, full_mem_small_ds_sid, full_file_small_ds_sid, xfer_plist,
+ small_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() small_dataset initial read succeeded");
+
+ /* sync with the other processes before checking data */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes");
+
+ /* verify that the correct data was written to the small data set,
+ * and reset the buffer to zero in passing.
+ */
+ expected_value = 0;
+ mis_match = FALSE;
+ ptr_1 = small_ds_buf_1;
+
+ i = 0;
+ for (i = 0; i < (int)small_ds_size; i++) {
+
+ if (*ptr_1 != expected_value) {
+
+ mis_match = TRUE;
+ }
+
+ *ptr_1 = (uint32_t)0;
+
+ ptr_1++;
+ expected_value++;
+ }
+ VRFY((mis_match == FALSE), "small ds init data good.");
+
+ /* setup selections for writing initial data to the large data set */
+ start[0] = (hsize_t)(mpi_rank + 1);
+ start[1] = start[2] = start[3] = start[4] = (hsize_t)0;
+
+ stride[0] = (hsize_t)(2 * (mpi_size + 1));
+ stride[1] = stride[2] = stride[3] = stride[4] = (hsize_t)(2 * 10);
+
+ count[0] = count[1] = count[2] = count[3] = count[4] = (hsize_t)1;
+
+ block[0] = (hsize_t)1;
+ block[1] = block[2] = block[3] = block[4] = (hsize_t)10;
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: settings for large data set initialization.\n", fcnName, mpi_rank);
+ HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0],
+ (int)start[1], (int)start[2], (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0],
+ (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]);
+ HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0],
+ (int)count[1], (int)count[2], (int)count[3], (int)count[4]);
+ HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0],
+ (int)block[1], (int)block[2], (int)block[3], (int)block[4]);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) succeeded");
+
+ ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, set) succeeded");
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(mem_large_ds_sid));
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(file_large_ds_sid));
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ if (MAINPROCESS) { /* add an additional slice to the selections */
+
+ start[0] = (hsize_t)0;
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: added settings for main process.\n", fcnName, mpi_rank);
+ HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0],
+ (int)start[1], (int)start[2], (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0],
+ (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]);
+ HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0],
+ (int)count[1], (int)count[2], (int)count[3], (int)count[4]);
+ HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0],
+ (int)block[1], (int)block[2], (int)block[3], (int)block[4]);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) succeeded");
+
+ ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, or) succeeded");
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(mem_large_ds_sid));
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(file_large_ds_sid));
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+ }
+
+ /* try clipping the selection back to the large dataspace proper */
+ start[0] = start[1] = start[2] = start[3] = start[4] = (hsize_t)0;
+
+ stride[0] = (hsize_t)(2 * (mpi_size + 1));
+ stride[1] = stride[2] = stride[3] = stride[4] = (hsize_t)(2 * 10);
+
+ count[0] = count[1] = count[2] = count[3] = count[4] = (hsize_t)1;
+
+ block[0] = (hsize_t)(mpi_size + 1);
+ block[1] = block[2] = block[3] = block[4] = (hsize_t)10;
+
+ ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_AND, start, stride, count, block);
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(mem_large_ds_sid, and) succeeded");
+
+ ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_AND, start, stride, count, block);
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(file_large_ds_sid, and) succeeded");
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+
+ rank = H5Sget_simple_extent_dims(mem_large_ds_sid, dims, max_dims);
+ HDfprintf(stdout, "%s:%d: mem_large_ds_sid dims[%d] = %d %d %d %d %d\n", fcnName, mpi_rank, rank,
+ (int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)dims[4]);
+
+ rank = H5Sget_simple_extent_dims(file_large_ds_sid, dims, max_dims);
+ HDfprintf(stdout, "%s:%d: file_large_ds_sid dims[%d] = %d %d %d %d %d\n", fcnName, mpi_rank, rank,
+ (int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)dims[4]);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ check = H5Sselect_valid(mem_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_valid(mem_large_ds_sid) returns TRUE");
+
+ check = H5Sselect_valid(file_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_valid(file_large_ds_sid) returns TRUE");
+
+ /* write the initial value of the large data set to file */
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: writing init value of large ds to file.\n", fcnName, mpi_rank);
+ HDfprintf(stdout, "%s:%d: large_dataset = %d.\n", fcnName, mpi_rank, (int)large_dataset);
+ HDfprintf(stdout, "%s:%d: mem_large_ds_sid = %d, file_large_ds_sid = %d.\n", fcnName, mpi_rank,
+ (int)mem_large_ds_sid, (int)file_large_ds_sid);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ ret = H5Dwrite(large_dataset, dset_type, mem_large_ds_sid, file_large_ds_sid, xfer_plist, large_ds_buf_0);
+
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stderr);
+ VRFY((ret >= 0), "H5Dwrite() large_dataset initial write succeeded");
+
+ /* sync with the other processes before checking data */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync after large dataset writes");
+
+ /* read the large data set back to verify that it contains the
+ * expected data. Note that each process reads in the entire
+ * data set.
+ */
+ ret = H5Dread(large_dataset, H5T_NATIVE_UINT32, full_mem_large_ds_sid, full_file_large_ds_sid, xfer_plist,
+ large_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() large_dataset initial read succeeded");
+
+ /* verify that the correct data was written to the large data set.
+ * in passing, reset the buffer to zeros
+ */
+ expected_value = 0;
+ mis_match = FALSE;
+ ptr_1 = large_ds_buf_1;
+
+    for (i = 0; i < (int)large_ds_size; i++) {
+
+ if (*ptr_1 != expected_value) {
+
+ mis_match = TRUE;
+ }
+
+ *ptr_1 = (uint32_t)0;
+
+ ptr_1++;
+ expected_value++;
+ }
+ VRFY((mis_match == FALSE), "large ds init data good.");
+
+ /***********************************/
+ /***** INITIALIZATION COMPLETE *****/
+ /***********************************/
+
+ /* read a checkerboard selection of the process slice of the
+ * small on disk data set into the process slice of the large
+ * in memory data set, and verify the data read.
+ */
+
+ small_sel_start[0] = (hsize_t)(mpi_rank + 1);
+ small_sel_start[1] = small_sel_start[2] = small_sel_start[3] = small_sel_start[4] = 0;
+
+ lower_dim_size_comp_test__select_checker_board(mpi_rank, file_small_ds_sid,
+ /* tgt_rank = */ 5, small_dims,
+ /* checker_edge_size = */ 3,
+ /* sel_rank */ 2, small_sel_start);
+
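+    /* expected_value is the row-major (C order) linear offset of
+     * small_sel_start within small_dims */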
+ expected_value =
+ (uint32_t)((small_sel_start[0] * small_dims[1] * small_dims[2] * small_dims[3] * small_dims[4]) +
+ (small_sel_start[1] * small_dims[2] * small_dims[3] * small_dims[4]) +
+ (small_sel_start[2] * small_dims[3] * small_dims[4]) +
+ (small_sel_start[3] * small_dims[4]) + (small_sel_start[4]));
+
+ large_sel_start[0] = (hsize_t)(mpi_rank + 1);
+ large_sel_start[1] = 5;
+ large_sel_start[2] = large_sel_start[3] = large_sel_start[4] = 0;
+
+ lower_dim_size_comp_test__select_checker_board(mpi_rank, mem_large_ds_sid,
+ /* tgt_rank = */ 5, large_dims,
+ /* checker_edge_size = */ 3,
+ /* sel_rank = */ 2, large_sel_start);
+
+ /* verify that H5Sselect_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5Sselect_shape_same(mem_large_ds_sid, file_small_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed (1)");
+
+ ret = H5Dread(small_dataset, H5T_NATIVE_UINT32, mem_large_ds_sid, file_small_ds_sid, xfer_plist,
+ large_ds_buf_1);
+
+    VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, mpi_rank);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ /* verify that expected data is retrieved */
+
+ data_ok = TRUE;
+
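+    /* start_index/stop_index bracket the region of large_ds_buf_1 that the
+     * checkerboard read lands in: the row-major linear offset of
+     * large_sel_start within large_dims, plus one small slice worth of
+     * elements (small_ds_slice_size) */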
+ start_index = (int)((large_sel_start[0] * large_dims[1] * large_dims[2] * large_dims[3] * large_dims[4]) +
+ (large_sel_start[1] * large_dims[2] * large_dims[3] * large_dims[4]) +
+ (large_sel_start[2] * large_dims[3] * large_dims[4]) +
+ (large_sel_start[3] * large_dims[4]) + (large_sel_start[4]));
+
+ stop_index = start_index + (int)small_ds_slice_size;
+
+ HDassert(0 <= start_index);
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= (int)large_ds_size);
+
+ ptr_1 = large_ds_buf_1;
+
+ for (i = 0; i < start_index; i++) {
+
+ if (*ptr_1 != (uint32_t)0) {
+
+ data_ok = FALSE;
+ *ptr_1 = (uint32_t)0;
+ }
+
+ ptr_1++;
+ }
+
+ VRFY((data_ok == TRUE), "slice read from small ds data good(1).");
+
+ data_ok = lower_dim_size_comp_test__verify_data(ptr_1,
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ mpi_rank,
+#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
+ /* rank */ 2,
+ /* edge_size */ 10,
+ /* checker_edge_size */ 3, expected_value,
+ /* buf_starts_in_checker */ TRUE);
+
+ VRFY((data_ok == TRUE), "slice read from small ds data good(2).");
+
+ data_ok = TRUE;
+
+ ptr_1 += small_ds_slice_size;
+
+ for (i = stop_index; i < (int)large_ds_size; i++) {
+
+ if (*ptr_1 != (uint32_t)0) {
+
+ data_ok = FALSE;
+ *ptr_1 = (uint32_t)0;
+ }
+
+ ptr_1++;
+ }
+
+ VRFY((data_ok == TRUE), "slice read from small ds data good(3).");
+
+ /* read a checkerboard selection of a slice of the process slice of
+ * the large on disk data set into the process slice of the small
+ * in memory data set, and verify the data read.
+ */
+
+ small_sel_start[0] = (hsize_t)(mpi_rank + 1);
+ small_sel_start[1] = small_sel_start[2] = small_sel_start[3] = small_sel_start[4] = 0;
+
+ lower_dim_size_comp_test__select_checker_board(mpi_rank, mem_small_ds_sid,
+ /* tgt_rank = */ 5, small_dims,
+ /* checker_edge_size = */ 3,
+ /* sel_rank */ 2, small_sel_start);
+
+ large_sel_start[0] = (hsize_t)(mpi_rank + 1);
+ large_sel_start[1] = 5;
+ large_sel_start[2] = large_sel_start[3] = large_sel_start[4] = 0;
+
+ lower_dim_size_comp_test__select_checker_board(mpi_rank, file_large_ds_sid,
+ /* tgt_rank = */ 5, large_dims,
+ /* checker_edge_size = */ 3,
+ /* sel_rank = */ 2, large_sel_start);
+
+ /* verify that H5Sselect_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5Sselect_shape_same(mem_small_ds_sid, file_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed (2)");
+
+ ret = H5Dread(large_dataset, H5T_NATIVE_UINT32, mem_small_ds_sid, file_large_ds_sid, xfer_plist,
+ small_ds_buf_1);
+
+    VRFY((ret >= 0), "H5Dread() slice from large ds succeeded.");
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, mpi_rank);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ /* verify that expected data is retrieved */
+
+ data_ok = TRUE;
+
+ expected_value =
+ (uint32_t)((large_sel_start[0] * large_dims[1] * large_dims[2] * large_dims[3] * large_dims[4]) +
+ (large_sel_start[1] * large_dims[2] * large_dims[3] * large_dims[4]) +
+ (large_sel_start[2] * large_dims[3] * large_dims[4]) +
+ (large_sel_start[3] * large_dims[4]) + (large_sel_start[4]));
+
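+    /* in the small in-memory buffer, the data read lands in the slice that
+     * starts at element (mpi_rank + 1) * small_ds_slice_size, matching
+     * small_sel_start[0] above */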
+ start_index = (int)(mpi_rank + 1) * (int)small_ds_slice_size;
+
+ stop_index = start_index + (int)small_ds_slice_size;
+
+ HDassert(0 <= start_index);
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= (int)small_ds_size);
+
+ ptr_1 = small_ds_buf_1;
+
+ for (i = 0; i < start_index; i++) {
+
+ if (*ptr_1 != (uint32_t)0) {
+
+ data_ok = FALSE;
+ *ptr_1 = (uint32_t)0;
+ }
+
+ ptr_1++;
+ }
+
+ VRFY((data_ok == TRUE), "slice read from large ds data good(1).");
+
+ data_ok = lower_dim_size_comp_test__verify_data(ptr_1,
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ mpi_rank,
+#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
+ /* rank */ 2,
+ /* edge_size */ 10,
+ /* checker_edge_size */ 3, expected_value,
+ /* buf_starts_in_checker */ TRUE);
+
+ VRFY((data_ok == TRUE), "slice read from large ds data good(2).");
+
+ data_ok = TRUE;
+
+ ptr_1 += small_ds_slice_size;
+
+ for (i = stop_index; i < (int)small_ds_size; i++) {
+
+ if (*ptr_1 != (uint32_t)0) {
+
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: unexpected value at index %d: %d.\n", fcnName, mpi_rank, (int)i,
+ (int)(*ptr_1));
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
+
+ data_ok = FALSE;
+ *ptr_1 = (uint32_t)0;
+ }
+
+ ptr_1++;
+ }
+
+ VRFY((data_ok == TRUE), "slice read from large ds data good(3).");
+
+ /* Close dataspaces */
+ ret = H5Sclose(full_mem_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_mem_small_ds_sid) succeeded");
+
+ ret = H5Sclose(full_file_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_file_small_ds_sid) succeeded");
+
+ ret = H5Sclose(mem_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(mem_small_ds_sid) succeeded");
+
+ ret = H5Sclose(file_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(file_small_ds_sid) succeeded");
+
+ ret = H5Sclose(full_mem_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_mem_large_ds_sid) succeeded");
+
+ ret = H5Sclose(full_file_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_file_large_ds_sid) succeeded");
+
+ ret = H5Sclose(mem_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(mem_large_ds_sid) succeeded");
+
+ ret = H5Sclose(file_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(file_large_ds_sid) succeeded");
+
+ /* Close Datasets */
+ ret = H5Dclose(small_dataset);
+ VRFY((ret != FAIL), "H5Dclose(small_dataset) succeeded");
+
+ ret = H5Dclose(large_dataset);
+ VRFY((ret != FAIL), "H5Dclose(large_dataset) succeeded");
+
+ /* close the file collectively */
+ MESG("about to close file.");
+ ret = H5Fclose(fid);
+ VRFY((ret != FAIL), "file close succeeded");
+
+ /* Free memory buffers */
+ if (small_ds_buf_0 != NULL)
+ HDfree(small_ds_buf_0);
+ if (small_ds_buf_1 != NULL)
+ HDfree(small_ds_buf_1);
+
+ if (large_ds_buf_0 != NULL)
+ HDfree(large_ds_buf_0);
+ if (large_ds_buf_1 != NULL)
+ HDfree(large_ds_buf_1);
+
+ return;
+
+} /* lower_dim_size_comp_test__run_test() */
+
+/*-------------------------------------------------------------------------
+ * Function: lower_dim_size_comp_test()
+ *
+ * Purpose: Test to see if an error in the computation of the size
+ * of the lower dimensions in H5S_obtain_datatype() has
+ * been corrected.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 11/11/09
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+lower_dim_size_comp_test(void)
+{
+ /* const char *fcnName = "lower_dim_size_comp_test()"; */
+ int chunk_edge_size = 0;
+ int use_collective_io;
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned));
+ for (use_collective_io = 0; use_collective_io <= 1; use_collective_io++) {
+ chunk_edge_size = 0;
+ lower_dim_size_comp_test__run_test(chunk_edge_size, (hbool_t)use_collective_io, H5T_NATIVE_UINT);
+
+ chunk_edge_size = 5;
+ lower_dim_size_comp_test__run_test(chunk_edge_size, (hbool_t)use_collective_io, H5T_NATIVE_UINT);
+ } /* end for */
+
+ return;
+} /* lower_dim_size_comp_test() */
+
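+/* Illustrative sketch (hypothetical helper, not used by these tests): assuming
+ * that the "lower dimension size" for dimension d of an n-dimensional
+ * dataspace is the product of the extents of all faster-varying dimensions
+ * (the element count spanned by one step in dimension d), it could be
+ * computed as below.  A mis-computation of this product in
+ * H5S_obtain_datatype() is what lower_dim_size_comp_test() exercises.
+ */
+#if 0
+static hsize_t
+lower_dim_size_sketch(const hsize_t *dims, unsigned rank, unsigned d)
+{
+    hsize_t  size = 1;
+    unsigned u;
+
+    for (u = d + 1; u < rank; u++)
+        size *= dims[u];
+
+    return size;
+}
+#endif
+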
+/*-------------------------------------------------------------------------
+ * Function: link_chunk_collective_io_test()
+ *
+ * Purpose: Test to verify that an error in MPI type management in
+ * H5D_link_chunk_collective_io() has been corrected.
+ * In this bug, we used to free MPI types regardless of
+ * whether they were basic or derived.
+ *
+ * This test is based on a bug report kindly provided by
+ * Rob Latham of the MPICH team and ANL.
+ *
+ * The basic thrust of the test is to cause a process
+ * to participate in a collective I/O in which it:
+ *
+ * 1) Reads or writes exactly one chunk,
+ *
+ * 2) Has no in memory buffer for any other chunk.
+ *
+ *        The test differs from Rob Latham's bug report in
+ *        that it runs with an arbitrary number of processes
+ *        and uses a 1-dimensional dataset.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 12/16/09
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE 16
+
+void
+link_chunk_collective_io_test(void)
+{
+ /* const char *fcnName = "link_chunk_collective_io_test()"; */
+ const char *filename;
+ hbool_t mis_match = FALSE;
+ int i;
+ int mrc;
+ int mpi_rank;
+ int mpi_size;
+ MPI_Comm mpi_comm = MPI_COMM_WORLD;
+ MPI_Info mpi_info = MPI_INFO_NULL;
+ hsize_t count[1] = {1};
+ hsize_t stride[1] = {2 * LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE};
+ hsize_t block[1] = {LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE};
+ hsize_t start[1];
+ hsize_t dims[1];
+ hsize_t chunk_dims[1] = {LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE};
+ herr_t ret; /* Generic return value */
+ hid_t file_id;
+ hid_t acc_tpl;
+ hid_t dset_id;
+ hid_t file_ds_sid;
+ hid_t write_mem_ds_sid;
+ hid_t read_mem_ds_sid;
+ hid_t ds_dcpl_id;
+ hid_t xfer_plist;
+ double diff;
+ double expected_value;
+ double local_data_written[LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE];
+ double local_data_read[LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE];
+
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ HDassert(mpi_size > 0);
+
+ /* get the file name */
+ filename = (const char *)PARATESTFILE /* GetTestParameters() */;
+ HDassert(filename != NULL);
+
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(mpi_comm, mpi_info, facc_type);
+ VRFY((acc_tpl >= 0), "create_faccess_plist() succeeded");
+
+ /* create the file collectively */
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+ MESG("File opened.");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded");
+
+ /* setup dims */
+ dims[0] = ((hsize_t)mpi_size) * ((hsize_t)(LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE));
+
+ /* setup mem and file dataspaces */
+ write_mem_ds_sid = H5Screate_simple(1, chunk_dims, NULL);
+ VRFY((write_mem_ds_sid != 0), "H5Screate_simple() write_mem_ds_sid succeeded");
+
+ read_mem_ds_sid = H5Screate_simple(1, chunk_dims, NULL);
+ VRFY((read_mem_ds_sid != 0), "H5Screate_simple() read_mem_ds_sid succeeded");
+
+ file_ds_sid = H5Screate_simple(1, dims, NULL);
+ VRFY((file_ds_sid != 0), "H5Screate_simple() file_ds_sid succeeded");
+
+ /* setup data set creation property list */
+ ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((ds_dcpl_id != FAIL), "H5Pcreate() ds_dcpl_id succeeded");
+
+ ret = H5Pset_layout(ds_dcpl_id, H5D_CHUNKED);
+ VRFY((ret != FAIL), "H5Pset_layout() ds_dcpl_id succeeded");
+
+ ret = H5Pset_chunk(ds_dcpl_id, 1, chunk_dims);
+    VRFY((ret != FAIL), "H5Pset_chunk() ds_dcpl_id succeeded");
+
+ /* create the data set */
+ dset_id =
+ H5Dcreate2(file_id, "dataset", H5T_NATIVE_DOUBLE, file_ds_sid, H5P_DEFAULT, ds_dcpl_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate2() dataset succeeded");
+
+ /* close the dataset creation property list */
+ ret = H5Pclose(ds_dcpl_id);
+ VRFY((ret >= 0), "H5Pclose(ds_dcpl_id) succeeded");
+
+ /* setup local data */
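+    /* rank r writes the consecutive values r * CHUNK_SIZE .. (r + 1) * CHUNK_SIZE - 1,
+     * where CHUNK_SIZE is LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE */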
+ expected_value = (double)(LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE) * (double)(mpi_rank);
+ for (i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++) {
+
+ local_data_written[i] = expected_value;
+ local_data_read[i] = 0.0;
+ expected_value += 1.0;
+ }
+
+ /* select the file and mem spaces */
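+    /* (each rank selects exactly one chunk, its own, in the file dataspace) */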
+ start[0] = (hsize_t)(mpi_rank * LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE);
+ ret = H5Sselect_hyperslab(file_ds_sid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_ds_sid, set) succeeded");
+
+ ret = H5Sselect_all(write_mem_ds_sid);
+    VRFY((ret != FAIL), "H5Sselect_all(write_mem_ds_sid) succeeded");
+
+    /* Note that we make NO SELECTION call on the read memory dataspace; a
+     * freshly created simple dataspace has all elements selected by default */
+
+ /* setup xfer property list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
+
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* write the data set */
+ ret = H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, write_mem_ds_sid, file_ds_sid, xfer_plist, local_data_written);
+
+ VRFY((ret >= 0), "H5Dwrite() dataset initial write succeeded");
+
+ /* sync with the other processes before checking data */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync after dataset write");
+
+    /* read this process's slice of the dataset back in */
+ ret = H5Dread(dset_id, H5T_NATIVE_DOUBLE, read_mem_ds_sid, file_ds_sid, xfer_plist, local_data_read);
+ VRFY((ret >= 0), "H5Dread() dataset read succeeded");
+
+ /* close the xfer property list */
+ ret = H5Pclose(xfer_plist);
+ VRFY((ret >= 0), "H5Pclose(xfer_plist) succeeded");
+
+ /* verify the data */
+ mis_match = FALSE;
+ for (i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++) {
+
+ diff = local_data_written[i] - local_data_read[i];
+ diff = fabs(diff);
+
+ if (diff >= 0.001) {
+
+ mis_match = TRUE;
+ }
+ }
+ VRFY((mis_match == FALSE), "dataset data good.");
+
+ /* Close dataspaces */
+ ret = H5Sclose(write_mem_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(write_mem_ds_sid) succeeded");
+
+ ret = H5Sclose(read_mem_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(read_mem_ds_sid) succeeded");
+
+ ret = H5Sclose(file_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(file_ds_sid) succeeded");
+
+ /* Close Dataset */
+ ret = H5Dclose(dset_id);
+ VRFY((ret != FAIL), "H5Dclose(dset_id) succeeded");
+
+ /* close the file collectively */
+ ret = H5Fclose(file_id);
+ VRFY((ret != FAIL), "file close succeeded");
+
+ return;
+
+} /* link_chunk_collective_io_test() */
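+
+/* Illustrative sketch (hypothetical helper, not used by these tests): the MPI
+ * type management bug exercised by link_chunk_collective_io_test() came from
+ * freeing MPI types regardless of whether they were basic or derived.  Only
+ * derived datatypes may be passed to MPI_Type_free(); a guard of roughly the
+ * following form avoids the problem.
+ */
+#if 0
+static void
+free_mpi_type_if_derived(MPI_Datatype *type, hbool_t is_derived)
+{
+    if (is_derived && (*type != MPI_DATATYPE_NULL))
+        MPI_Type_free(type);
+}
+#endif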
diff --git a/testpar/API/testphdf5.c b/testpar/API/testphdf5.c
new file mode 100644
index 0000000..ec5dae2
--- /dev/null
+++ b/testpar/API/testphdf5.c
@@ -0,0 +1,1007 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Main driver of the Parallel HDF5 tests
+ */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+#ifndef PATH_MAX
+#define PATH_MAX 512
+#endif /* !PATH_MAX */
+
+/* global variables */
+int dim0;
+int dim1;
+int chunkdim0;
+int chunkdim1;
+int nerrors = 0; /* errors count */
+int ndatasets = 300; /* number of datasets to create*/
+int ngroups = 512; /* number of groups to create in root
+ * group. */
+int facc_type = FACC_MPIO; /*Test file access type */
+int dxfer_coll_type = DXFER_COLLECTIVE_IO;
+
+H5E_auto2_t old_func; /* previous error handler */
+void *old_client_data; /* previous error handler arg.*/
+
+/* other option flags */
+
+/* FILENAME and filenames must have the same number of names.
+ * Use PARATESTFILE in general and use a separated filename only if the file
+ * created in one test is accessed by a different test.
+ * filenames[0] is reserved as the file name for PARATESTFILE.
+ */
+#define NFILENAME 2
+/* #define PARATESTFILE filenames[0] */
+const char *FILENAME[NFILENAME] = {"ParaTest.h5", NULL};
+char filenames[NFILENAME][PATH_MAX];
+hid_t fapl; /* file access property list */
+
+#ifdef USE_PAUSE
+/* pause the process for a moment to allow debugger to attach if desired. */
+/* Will pause more if the greenlight file is not present but will eventually */
+/* continue. */
+#include <sys/types.h>
+#include <sys/stat.h>
+
+void
+pause_proc(void)
+{
+
+ int pid;
+ h5_stat_t statbuf;
+ char greenlight[] = "go";
+ int maxloop = 10;
+ int loops = 0;
+ int time_int = 10;
+
+ /* mpi variables */
+ int mpi_size, mpi_rank;
+ int mpi_namelen;
+ char mpi_name[MPI_MAX_PROCESSOR_NAME];
+
+ pid = getpid();
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Get_processor_name(mpi_name, &mpi_namelen);
+
+ if (MAINPROCESS)
+ while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop) {
+ if (!loops++) {
+ HDprintf("Proc %d (%*s, %d): to debug, attach %d\n", mpi_rank, mpi_namelen, mpi_name, pid,
+ pid);
+ }
+ HDprintf("waiting(%ds) for file %s ...\n", time_int, greenlight);
+ HDfflush(stdout);
+ HDsleep(time_int);
+ }
+ MPI_Barrier(MPI_COMM_WORLD);
+}
+
+/* Use the Profile feature of MPI to call the pause_proc() */
+int
+MPI_Init(int *argc, char ***argv)
+{
+ int ret_code;
+ ret_code = PMPI_Init(argc, argv);
+ pause_proc();
+ return (ret_code);
+}
+#endif /* USE_PAUSE */
+
+/*
+ * Show command usage
+ */
+static void
+usage(void)
+{
+ HDprintf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] "
+ "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n");
+ HDprintf("\t-m<n_datasets>"
+ "\tset number of datasets for the multiple dataset test\n");
+ HDprintf("\t-n<n_groups>"
+ "\tset number of groups for the multiple group test\n");
+#if 0
+ HDprintf("\t-f <prefix>\tfilename prefix\n");
+#endif
+ HDprintf("\t-2\t\tuse Split-file together with MPIO\n");
+ HDprintf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n", ROW_FACTOR,
+ COL_FACTOR);
+ HDprintf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n");
+ HDprintf("\n");
+}
+
+/*
+ * parse the command line options
+ */
+static int
+parse_options(int argc, char **argv)
+{
+ int mpi_size, mpi_rank; /* mpi variables */
+
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* setup default chunk-size. Make sure sizes are > 0 */
+
+ chunkdim0 = (dim0 + 9) / 10;
+ chunkdim1 = (dim1 + 9) / 10;
+
+ while (--argc) {
+ if (**(++argv) != '-') {
+ break;
+ }
+ else {
+ switch (*(*argv + 1)) {
+ case 'm':
+ ndatasets = atoi((*argv + 1) + 1);
+ if (ndatasets < 0) {
+ nerrors++;
+ return (1);
+ }
+ break;
+ case 'n':
+ ngroups = atoi((*argv + 1) + 1);
+ if (ngroups < 0) {
+ nerrors++;
+ return (1);
+ }
+ break;
+#if 0
+ case 'f': if (--argc < 1) {
+ nerrors++;
+ return(1);
+ }
+ if (**(++argv) == '-') {
+ nerrors++;
+ return(1);
+ }
+ paraprefix = *argv;
+ break;
+#endif
+ case 'i': /* Collective MPI-IO access with independent IO */
+ dxfer_coll_type = DXFER_INDEPENDENT_IO;
+ break;
+ case '2': /* Use the split-file driver with MPIO access */
+ /* Can use $HDF5_METAPREFIX to define the */
+ /* meta-file-prefix. */
+ facc_type = FACC_MPIO | FACC_SPLIT;
+ break;
+                case 'd': /* dimension sizes */
+ if (--argc < 2) {
+ nerrors++;
+ return (1);
+ }
+ dim0 = atoi(*(++argv)) * mpi_size;
+ argc--;
+ dim1 = atoi(*(++argv)) * mpi_size;
+ /* set default chunkdim sizes too */
+ chunkdim0 = (dim0 + 9) / 10;
+ chunkdim1 = (dim1 + 9) / 10;
+ break;
+ case 'c': /* chunk dimensions */
+ if (--argc < 2) {
+ nerrors++;
+ return (1);
+ }
+ chunkdim0 = atoi(*(++argv));
+ argc--;
+ chunkdim1 = atoi(*(++argv));
+ break;
+ case 'h': /* print help message--return with nerrors set */
+ return (1);
+ default:
+ HDprintf("Illegal option(%s)\n", *argv);
+ nerrors++;
+ return (1);
+ }
+ }
+ } /*while*/
+
+ /* check validity of dimension and chunk sizes */
+ if (dim0 <= 0 || dim1 <= 0) {
+ HDprintf("Illegal dim sizes (%d, %d)\n", dim0, dim1);
+ nerrors++;
+ return (1);
+ }
+ if (chunkdim0 <= 0 || chunkdim1 <= 0) {
+ HDprintf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1);
+ nerrors++;
+ return (1);
+ }
+
+ /* Make sure datasets can be divided into equal portions by the processes */
+ if ((dim0 % mpi_size) || (dim1 % mpi_size)) {
+ if (MAINPROCESS)
+ HDprintf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", dim0, dim1, mpi_size);
+ nerrors++;
+ return (1);
+ }
+
+ /* compose the test filenames */
+ {
+ int i, n;
+
+ n = sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; /* exclude the NULL */
+
+ for (i = 0; i < n; i++)
+ strncpy(filenames[i], FILENAME[i], PATH_MAX);
+#if 0 /* no support for VFDs right now */
+ if (h5_fixname(FILENAME[i], fapl, filenames[i], PATH_MAX) == NULL) {
+ HDprintf("h5_fixname failed\n");
+ nerrors++;
+ return (1);
+ }
+#endif
+ if (MAINPROCESS) {
+ HDprintf("Test filenames are:\n");
+ for (i = 0; i < n; i++)
+ HDprintf(" %s\n", filenames[i]);
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * Create the appropriate File access property list
+ */
+hid_t
+create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
+{
+ hid_t ret_pl = -1;
+ herr_t ret; /* generic return value */
+ int mpi_rank; /* mpi variables */
+
+ /* need the rank for error checking macros */
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ ret_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((ret_pl >= 0), "H5P_FILE_ACCESS");
+
+ if (l_facc_type == FACC_DEFAULT)
+ return (ret_pl);
+
+ if (l_facc_type == FACC_MPIO) {
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(ret_pl, comm, info);
+ VRFY((ret >= 0), "");
+ ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
+ VRFY((ret >= 0), "");
+ ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
+ VRFY((ret >= 0), "");
+ return (ret_pl);
+ }
+
+ if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) {
+ hid_t mpio_pl;
+
+ mpio_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((mpio_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
+ VRFY((ret >= 0), "");
+
+ /* setup file access template */
+ ret_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((ret_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
+ VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
+ H5Pclose(mpio_pl);
+ return (ret_pl);
+ }
+
+ /* unknown file access types */
+ return (ret_pl);
+}
+
+int
+main(int argc, char **argv)
+{
+ int mpi_size, mpi_rank; /* mpi variables */
+ herr_t ret;
+
+#if 0
+ H5Ptest_param_t ndsets_params, ngroups_params;
+ H5Ptest_param_t collngroups_params;
+ H5Ptest_param_t io_mode_confusion_params;
+ H5Ptest_param_t rr_obj_flush_confusion_params;
+#endif
+
+#ifndef H5_HAVE_WIN32_API
+ /* Un-buffer the stdout and stderr */
+ HDsetbuf(stderr, NULL);
+ HDsetbuf(stdout, NULL);
+#endif
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ dim0 = ROW_FACTOR * mpi_size;
+ dim1 = COL_FACTOR * mpi_size;
+
+ if (MAINPROCESS) {
+ HDprintf("===================================\n");
+ HDprintf("PHDF5 TESTS START\n");
+ HDprintf("===================================\n");
+ }
+
+    /* Attempt to turn off atexit post processing so that in case errors
+     * happen during the test and the process is aborted, it will not hang
+     * in the atexit post processing, in which it may try to make MPI
+     * calls that may no longer work by then.
+     */
+ if (H5dont_atexit() < 0) {
+ HDprintf("Failed to turn off atexit processing. Continue.\n");
+ };
+ H5open();
+ /* h5_show_hostname(); */
+
+#if 0
+ HDmemset(filenames, 0, sizeof(filenames));
+ for (int i = 0; i < NFILENAME; i++) {
+ if (NULL == (filenames[i] = HDmalloc(PATH_MAX))) {
+ HDprintf("couldn't allocate filename array\n");
+ MPI_Abort(MPI_COMM_WORLD, -1);
+ }
+ }
+#endif
+
+ /* Set up file access property list with parallel I/O access */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl >= 0), "H5Pcreate succeeded");
+
+ vol_cap_flags_g = H5VL_CAP_FLAG_NONE;
+
+ /* Get the capability flag of the VOL connector being used */
+ ret = H5Pget_vol_cap_flags(fapl, &vol_cap_flags_g);
+ VRFY((ret >= 0), "H5Pget_vol_cap_flags succeeded");
+
+ /* Initialize testing framework */
+ /* TestInit(argv[0], usage, parse_options); */
+
+ if (parse_options(argc, argv)) {
+ usage();
+ return 1;
+ }
+
+ /* Tests are generally arranged from least to most complexity... */
+#if 0
+ AddTest("mpiodup", test_fapl_mpio_dup, NULL,
+ "fapl_mpio duplicate", NULL);
+#endif
+
+ if (MAINPROCESS) {
+ printf("fapl_mpio duplicate\n");
+ fflush(stdout);
+ }
+ test_fapl_mpio_dup();
+
+#if 0
+ AddTest("split", test_split_comm_access, NULL,
+ "dataset using split communicators", PARATESTFILE);
+ AddTest("props", test_file_properties, NULL,
+ "Coll Metadata file property settings", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("dataset using split communicators\n");
+ fflush(stdout);
+ }
+ test_split_comm_access();
+
+ if (MAINPROCESS) {
+ printf("Coll Metadata file property settings\n");
+ fflush(stdout);
+ }
+ test_file_properties();
+
+#if 0
+ AddTest("idsetw", dataset_writeInd, NULL,
+ "dataset independent write", PARATESTFILE);
+ AddTest("idsetr", dataset_readInd, NULL,
+ "dataset independent read", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("dataset independent write\n");
+ fflush(stdout);
+ }
+ dataset_writeInd();
+ if (MAINPROCESS) {
+ printf("dataset independent read\n");
+ fflush(stdout);
+ }
+ dataset_readInd();
+
+#if 0
+ AddTest("cdsetw", dataset_writeAll, NULL,
+ "dataset collective write", PARATESTFILE);
+ AddTest("cdsetr", dataset_readAll, NULL,
+ "dataset collective read", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("dataset collective write\n");
+ fflush(stdout);
+ }
+ dataset_writeAll();
+ if (MAINPROCESS) {
+ printf("dataset collective read\n");
+ fflush(stdout);
+ }
+ dataset_readAll();
+
+#if 0
+ AddTest("eidsetw", extend_writeInd, NULL,
+ "extendible dataset independent write", PARATESTFILE);
+ AddTest("eidsetr", extend_readInd, NULL,
+ "extendible dataset independent read", PARATESTFILE);
+ AddTest("ecdsetw", extend_writeAll, NULL,
+ "extendible dataset collective write", PARATESTFILE);
+ AddTest("ecdsetr", extend_readAll, NULL,
+ "extendible dataset collective read", PARATESTFILE);
+ AddTest("eidsetw2", extend_writeInd2, NULL,
+ "extendible dataset independent write #2", PARATESTFILE);
+ AddTest("selnone", none_selection_chunk, NULL,
+ "chunked dataset with none-selection", PARATESTFILE);
+ AddTest("calloc", test_chunk_alloc, NULL,
+ "parallel extend Chunked allocation on serial file", PARATESTFILE);
+ AddTest("fltread", test_filter_read, NULL,
+ "parallel read of dataset written serially with filters", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("extendible dataset independent write\n");
+ fflush(stdout);
+ }
+ extend_writeInd();
+ if (MAINPROCESS) {
+ printf("extendible dataset independent read\n");
+ fflush(stdout);
+ }
+ extend_readInd();
+ if (MAINPROCESS) {
+ printf("extendible dataset collective write\n");
+ fflush(stdout);
+ }
+ extend_writeAll();
+ if (MAINPROCESS) {
+ printf("extendible dataset collective read\n");
+ fflush(stdout);
+ }
+ extend_readAll();
+ if (MAINPROCESS) {
+ printf("extendible dataset independent write #2\n");
+ fflush(stdout);
+ }
+ extend_writeInd2();
+ if (MAINPROCESS) {
+ printf("chunked dataset with none-selection\n");
+ fflush(stdout);
+ }
+ none_selection_chunk();
+ if (MAINPROCESS) {
+ printf("parallel extend Chunked allocation on serial file\n");
+ fflush(stdout);
+ }
+ test_chunk_alloc();
+ if (MAINPROCESS) {
+ printf("parallel read of dataset written serially with filters\n");
+ fflush(stdout);
+ }
+ test_filter_read();
+
+#ifdef H5_HAVE_FILTER_DEFLATE
+#if 0
+ AddTest("cmpdsetr", compress_readAll, NULL,
+ "compressed dataset collective read", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("compressed dataset collective read\n");
+ fflush(stdout);
+ }
+ compress_readAll();
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+#if 0
+ AddTest("zerodsetr", zero_dim_dset, NULL,
+ "zero dim dset", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("zero dim dset\n");
+ fflush(stdout);
+ }
+ zero_dim_dset();
+
+#if 0
+ ndsets_params.name = PARATESTFILE;
+ ndsets_params.count = ndatasets;
+ AddTest("ndsetw", multiple_dset_write, NULL,
+ "multiple datasets write", &ndsets_params);
+#endif
+
+ if (MAINPROCESS) {
+ printf("multiple datasets write\n");
+ fflush(stdout);
+ }
+ multiple_dset_write();
+
+#if 0
+ ngroups_params.name = PARATESTFILE;
+ ngroups_params.count = ngroups;
+ AddTest("ngrpw", multiple_group_write, NULL,
+ "multiple groups write", &ngroups_params);
+ AddTest("ngrpr", multiple_group_read, NULL,
+ "multiple groups read", &ngroups_params);
+#endif
+
+ if (MAINPROCESS) {
+ printf("multiple groups write\n");
+ fflush(stdout);
+ }
+ multiple_group_write();
+ if (MAINPROCESS) {
+ printf("multiple groups read\n");
+ fflush(stdout);
+ }
+ multiple_group_read();
+
+#if 0
+ AddTest("compact", compact_dataset, NULL,
+ "compact dataset test", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("compact dataset test\n");
+ fflush(stdout);
+ }
+ compact_dataset();
+
+#if 0
+ collngroups_params.name = PARATESTFILE;
+ collngroups_params.count = ngroups;
+ /* combined cngrpw and ingrpr tests because ingrpr reads file created by cngrpw. */
+ AddTest("cngrpw-ingrpr", collective_group_write_independent_group_read, NULL,
+ "collective grp/dset write - independent grp/dset read",
+ &collngroups_params);
+#ifndef H5_HAVE_WIN32_API
+ AddTest("bigdset", big_dataset, NULL,
+ "big dataset test", PARATESTFILE);
+#else
+ HDprintf("big dataset test will be skipped on Windows (JIRA HDDFV-8064)\n");
+#endif
+#endif
+
+ if (MAINPROCESS) {
+ printf("collective grp/dset write - independent grp/dset read\n");
+ fflush(stdout);
+ }
+ collective_group_write_independent_group_read();
+ if (MAINPROCESS) {
+ printf("big dataset test\n");
+ fflush(stdout);
+ }
+ big_dataset();
+
+#if 0
+ AddTest("fill", dataset_fillvalue, NULL,
+ "dataset fill value", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("dataset fill value\n");
+ fflush(stdout);
+ }
+ dataset_fillvalue();
+
+#if 0
+ AddTest("cchunk1",
+ coll_chunk1,NULL, "simple collective chunk io",PARATESTFILE);
+ AddTest("cchunk2",
+ coll_chunk2,NULL, "noncontiguous collective chunk io",PARATESTFILE);
+ AddTest("cchunk3",
+ coll_chunk3,NULL, "multi-chunk collective chunk io",PARATESTFILE);
+ AddTest("cchunk4",
+ coll_chunk4,NULL, "collective chunk io with partial non-selection ",PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("simple collective chunk io\n");
+ fflush(stdout);
+ }
+ coll_chunk1();
+ if (MAINPROCESS) {
+ printf("noncontiguous collective chunk io\n");
+ fflush(stdout);
+ }
+ coll_chunk2();
+ if (MAINPROCESS) {
+ printf("multi-chunk collective chunk io\n");
+ fflush(stdout);
+ }
+ coll_chunk3();
+ if (MAINPROCESS) {
+ printf("collective chunk io with partial non-selection\n");
+ fflush(stdout);
+ }
+ coll_chunk4();
+
+ if ((mpi_size < 3) && MAINPROCESS) {
+        HDprintf("Collective chunk IO optimization APIs ");
+        HDprintf("need at least 3 processes to participate\n");
+ HDprintf("Collective chunk IO API tests will be skipped \n");
+ }
+
+#if 0
+ AddTest((mpi_size <3)? "-cchunk5":"cchunk5" ,
+ coll_chunk5,NULL,
+ "linked chunk collective IO without optimization",PARATESTFILE);
+ AddTest((mpi_size < 3)? "-cchunk6" : "cchunk6",
+ coll_chunk6,NULL,
+ "multi-chunk collective IO with direct request",PARATESTFILE);
+ AddTest((mpi_size < 3)? "-cchunk7" : "cchunk7",
+ coll_chunk7,NULL,
+ "linked chunk collective IO with optimization",PARATESTFILE);
+ AddTest((mpi_size < 3)? "-cchunk8" : "cchunk8",
+ coll_chunk8,NULL,
+ "linked chunk collective IO transferring to multi-chunk",PARATESTFILE);
+ AddTest((mpi_size < 3)? "-cchunk9" : "cchunk9",
+ coll_chunk9,NULL,
+ "multiple chunk collective IO with optimization",PARATESTFILE);
+ AddTest((mpi_size < 3)? "-cchunk10" : "cchunk10",
+ coll_chunk10,NULL,
+ "multiple chunk collective IO transferring to independent IO",PARATESTFILE);
+#endif
+
+ if (mpi_size >= 3) {
+ if (MAINPROCESS) {
+ printf("linked chunk collective IO without optimization\n");
+ fflush(stdout);
+ }
+ coll_chunk5();
+ if (MAINPROCESS) {
+ printf("multi-chunk collective IO with direct request\n");
+ fflush(stdout);
+ }
+ coll_chunk6();
+ if (MAINPROCESS) {
+ printf("linked chunk collective IO with optimization\n");
+ fflush(stdout);
+ }
+ coll_chunk7();
+ if (MAINPROCESS) {
+ printf("linked chunk collective IO transferring to multi-chunk\n");
+ fflush(stdout);
+ }
+ coll_chunk8();
+ if (MAINPROCESS) {
+ printf("multiple chunk collective IO with optimization\n");
+ fflush(stdout);
+ }
+ coll_chunk9();
+ if (MAINPROCESS) {
+ printf("multiple chunk collective IO transferring to independent IO\n");
+ fflush(stdout);
+ }
+ coll_chunk10();
+ }
+
+#if 0
+ /* irregular collective IO tests*/
+ AddTest("ccontw",
+ coll_irregular_cont_write,NULL,
+ "collective irregular contiguous write",PARATESTFILE);
+ AddTest("ccontr",
+ coll_irregular_cont_read,NULL,
+ "collective irregular contiguous read",PARATESTFILE);
+ AddTest("cschunkw",
+ coll_irregular_simple_chunk_write,NULL,
+ "collective irregular simple chunk write",PARATESTFILE);
+ AddTest("cschunkr",
+ coll_irregular_simple_chunk_read,NULL,
+ "collective irregular simple chunk read",PARATESTFILE);
+ AddTest("ccchunkw",
+ coll_irregular_complex_chunk_write,NULL,
+ "collective irregular complex chunk write",PARATESTFILE);
+ AddTest("ccchunkr",
+ coll_irregular_complex_chunk_read,NULL,
+ "collective irregular complex chunk read",PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("collective irregular contiguous write\n");
+ fflush(stdout);
+ }
+ coll_irregular_cont_write();
+ if (MAINPROCESS) {
+ printf("collective irregular contiguous read\n");
+ fflush(stdout);
+ }
+ coll_irregular_cont_read();
+ if (MAINPROCESS) {
+ printf("collective irregular simple chunk write\n");
+ fflush(stdout);
+ }
+ coll_irregular_simple_chunk_write();
+ if (MAINPROCESS) {
+ printf("collective irregular simple chunk read\n");
+ fflush(stdout);
+ }
+ coll_irregular_simple_chunk_read();
+ if (MAINPROCESS) {
+ printf("collective irregular complex chunk write\n");
+ fflush(stdout);
+ }
+ coll_irregular_complex_chunk_write();
+ if (MAINPROCESS) {
+ printf("collective irregular complex chunk read\n");
+ fflush(stdout);
+ }
+ coll_irregular_complex_chunk_read();
+
+#if 0
+ AddTest("null", null_dataset, NULL,
+ "null dataset test", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("null dataset test\n");
+ fflush(stdout);
+ }
+ null_dataset();
+
+#if 0
+ io_mode_confusion_params.name = PARATESTFILE;
+ io_mode_confusion_params.count = 0; /* value not used */
+
+ AddTest("I/Omodeconf", io_mode_confusion, NULL,
+ "I/O mode confusion test",
+ &io_mode_confusion_params);
+#endif
+
+ if (MAINPROCESS) {
+ printf("I/O mode confusion test\n");
+ fflush(stdout);
+ }
+ io_mode_confusion();
+
+ if ((mpi_size < 3) && MAINPROCESS) {
+ HDprintf("rr_obj_hdr_flush_confusion test needs at least 3 processes.\n");
+ HDprintf("rr_obj_hdr_flush_confusion test will be skipped \n");
+ }
+
+ if (mpi_size > 2) {
+#if 0
+ rr_obj_flush_confusion_params.name = PARATESTFILE;
+ rr_obj_flush_confusion_params.count = 0; /* value not used */
+ AddTest("rrobjflushconf", rr_obj_hdr_flush_confusion, NULL,
+ "round robin object header flush confusion test",
+ &rr_obj_flush_confusion_params);
+#endif
+
+ if (MAINPROCESS) {
+ printf("round robin object header flush confusion test\n");
+ fflush(stdout);
+ }
+ rr_obj_hdr_flush_confusion();
+ }
+
+#if 0
+ AddTest("alnbg1",
+ chunk_align_bug_1, NULL,
+ "Chunk allocation with alignment bug.",
+ PARATESTFILE);
+
+ AddTest("tldsc",
+ lower_dim_size_comp_test, NULL,
+ "test lower dim size comp in span tree to mpi derived type",
+ PARATESTFILE);
+
+ AddTest("lccio",
+ link_chunk_collective_io_test, NULL,
+ "test mpi derived type management",
+ PARATESTFILE);
+
+ AddTest("actualio", actual_io_mode_tests, NULL,
+            "test actual io mode property",
+ PARATESTFILE);
+
+ AddTest("nocolcause", no_collective_cause_tests, NULL,
+ "test cause for broken collective io",
+ PARATESTFILE);
+
+ AddTest("edpl", test_plist_ed, NULL,
+ "encode/decode Property Lists", NULL);
+#endif
+
+ if (MAINPROCESS) {
+ printf("Chunk allocation with alignment bug\n");
+ fflush(stdout);
+ }
+ chunk_align_bug_1();
+ if (MAINPROCESS) {
+ printf("test lower dim size comp in span tree to mpi derived type\n");
+ fflush(stdout);
+ }
+ lower_dim_size_comp_test();
+ if (MAINPROCESS) {
+ printf("test mpi derived type management\n");
+ fflush(stdout);
+ }
+ link_chunk_collective_io_test();
+ if (MAINPROCESS) {
+ printf("test actual io mode property - SKIPPED currently due to native-specific testing\n");
+ fflush(stdout);
+ }
+ /* actual_io_mode_tests(); */
+ if (MAINPROCESS) {
+ printf("test cause for broken collective io - SKIPPED currently due to native-specific testing\n");
+ fflush(stdout);
+ }
+ /* no_collective_cause_tests(); */
+ if (MAINPROCESS) {
+ printf("encode/decode Property Lists\n");
+ fflush(stdout);
+ }
+ test_plist_ed();
+
+ if ((mpi_size < 2) && MAINPROCESS) {
+ HDprintf("File Image Ops daisy chain test needs at least 2 processes.\n");
+ HDprintf("File Image Ops daisy chain test will be skipped \n");
+ }
+
+#if 0
+ AddTest((mpi_size < 2)? "-fiodc" : "fiodc", file_image_daisy_chain_test, NULL,
+ "file image ops daisy chain", NULL);
+#endif
+
+ if (mpi_size >= 2) {
+ if (MAINPROCESS) {
+ printf("file image ops daisy chain - SKIPPED currently due to native-specific testing\n");
+ fflush(stdout);
+ }
+ /* file_image_daisy_chain_test(); */
+ }
+
+ if ((mpi_size < 2) && MAINPROCESS) {
+ HDprintf("Atomicity tests need at least 2 processes to participate\n");
+        HDprintf("8 or more are recommended. Atomicity tests will be skipped\n");
+ }
+ else if (facc_type != FACC_MPIO && MAINPROCESS) {
+ HDprintf("Atomicity tests will not work with a non MPIO VFD\n");
+ }
+ else if (mpi_size >= 2 && facc_type == FACC_MPIO) {
+#if 0
+ AddTest("atomicity", dataset_atomicity, NULL,
+ "dataset atomic updates", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("dataset atomic updates - SKIPPED currently due to native-specific testing\n");
+ fflush(stdout);
+ }
+ /* dataset_atomicity(); */
+ }
+
+#if 0
+ AddTest("denseattr", test_dense_attr, NULL,
+ "Store Dense Attributes", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("Store Dense Attributes\n");
+ fflush(stdout);
+ }
+ test_dense_attr();
+
+#if 0
+ AddTest("noselcollmdread", test_partial_no_selection_coll_md_read, NULL,
+ "Collective Metadata read with some ranks having no selection", PARATESTFILE);
+ AddTest("MC_coll_MD_read", test_multi_chunk_io_addrmap_issue, NULL,
+ "Collective MD read with multi chunk I/O (H5D__chunk_addrmap)", PARATESTFILE);
+ AddTest("LC_coll_MD_read", test_link_chunk_io_sort_chunk_issue, NULL,
+ "Collective MD read with link chunk I/O (H5D__sort_chunk)", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("Collective Metadata read with some ranks having no selection\n");
+ fflush(stdout);
+ }
+ test_partial_no_selection_coll_md_read();
+ if (MAINPROCESS) {
+ printf("Collective MD read with multi chunk I/O\n");
+ fflush(stdout);
+ }
+ test_multi_chunk_io_addrmap_issue();
+ if (MAINPROCESS) {
+ printf("Collective MD read with link chunk I/O\n");
+ fflush(stdout);
+ }
+ test_link_chunk_io_sort_chunk_issue();
+
+ /* Display testing information */
+ /* TestInfo(argv[0]); */
+
+ /* setup file access property list */
+ H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
+
+ /* Parse command line arguments */
+ /* TestParseCmdLine(argc, argv); */
+
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS) {
+ HDprintf("===================================\n"
+ " Using Independent I/O with file set view to replace collective I/O \n"
+ "===================================\n");
+ }
+
+ /* Perform requested testing */
+ /* PerformTests(); */
+
+ /* make sure all processes are finished before final report, cleanup
+ * and exit.
+ */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /* Display test summary, if requested */
+ /* if (MAINPROCESS && GetTestSummary())
+ TestSummary(); */
+
+ /* Clean up test files */
+ /* h5_clean_files(FILENAME, fapl); */
+ H5Fdelete(FILENAME[0], fapl);
+ H5Pclose(fapl);
+
+ /* nerrors += GetTestNumErrs(); */
+
+ /* Gather errors from all processes */
+ {
+ int temp;
+ MPI_Allreduce(&nerrors, &temp, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
+ nerrors = temp;
+ }
+
+ if (MAINPROCESS) { /* only process 0 reports */
+ HDprintf("===================================\n");
+ if (nerrors)
+ HDprintf("***PHDF5 tests detected %d errors***\n", nerrors);
+ else
+ HDprintf("PHDF5 tests finished successfully\n");
+ HDprintf("===================================\n");
+ }
+
+#if 0
+ for (int i = 0; i < NFILENAME; i++) {
+ HDfree(filenames[i]);
+ filenames[i] = NULL;
+ }
+#endif
+
+ /* close HDF5 library */
+ H5close();
+
+ /* Release test infrastructure */
+ /* TestShutdown(); */
+
+ /* MPI_Finalize must be called AFTER H5close which may use MPI calls */
+ MPI_Finalize();
+
+    /* cannot just return (nerrors) because the exit code is limited to 1 byte */
+ return (nerrors != 0);
+}
diff --git a/testpar/API/testphdf5.h b/testpar/API/testphdf5.h
new file mode 100644
index 0000000..27d53e2
--- /dev/null
+++ b/testpar/API/testphdf5.h
@@ -0,0 +1,343 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* common definitions used by all parallel hdf5 test programs. */
+
+#ifndef PHDF5TEST_H
+#define PHDF5TEST_H
+
+#include "H5private.h"
+#include "testpar.h"
+#include "H5_api_tests_disabled.h"
+
+/*
+ * Define parameters for various tests here, since the testphdf5 test
+ * framework does not provide a way to pass parameters to individual tests.
+ */
+#define PARATESTFILE "ParaTest.h5"
+#define NDATASETS 300
+#define NGROUPS 256
+
+/* Disable express testing by default */
+#define EXPRESS_MODE 0
+
+enum H5TEST_COLL_CHUNK_API {
+ API_NONE = 0,
+ API_LINK_HARD,
+ API_MULTI_HARD,
+ API_LINK_TRUE,
+ API_LINK_FALSE,
+ API_MULTI_COLL,
+ API_MULTI_IND
+};
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+/* Constants definitions */
+#define DIM0 600 /* Default dataset sizes. */
+#define DIM1 1200 /* Values are based on typical monitor pixel sizes */
+#define ROW_FACTOR 8 /* Nominal row factor for dataset size */
+#define COL_FACTOR 16 /* Nominal column factor for dataset size */
+#define RANK 2
+#define DATASETNAME1 "Data1"
+#define DATASETNAME2 "Data2"
+#define DATASETNAME3 "Data3"
+#define DATASETNAME4 "Data4"
+#define DATASETNAME5 "Data5"
+#define DATASETNAME6 "Data6"
+#define DATASETNAME7 "Data7"
+#define DATASETNAME8 "Data8"
+#define DATASETNAME9 "Data9"
+
+/* point selection order */
+#define IN_ORDER 1
+#define OUT_OF_ORDER 2
+
+/* Hyperslab layout styles */
+#define BYROW 1 /* divide into slabs of rows */
+#define BYCOL 2 /* divide into blocks of columns */
+#define ZROW 3 /* same as BYCOL except process 0 gets 0 rows */
+#define ZCOL 4 /* same as BYCOL except process 0 gets 0 columns */
+
+/* File_Access_type bits */
+#define FACC_DEFAULT 0x0 /* default */
+#define FACC_MPIO 0x1 /* MPIO */
+#define FACC_SPLIT 0x2 /* Split File */
+
+#define DXFER_COLLECTIVE_IO 0x1 /* Collective IO*/
+#define DXFER_INDEPENDENT_IO 0x2 /* Independent IO collectively */
+/*Constants for collective chunk definitions */
+#define SPACE_DIM1 24
+#define SPACE_DIM2 4
+#define BYROW_CONT 1
+#define BYROW_DISCONT 2
+#define BYROW_SELECTNONE 3
+#define BYROW_SELECTUNBALANCE 4
+#define BYROW_SELECTINCHUNK 5
+
+#define DIMO_NUM_CHUNK 4
+#define DIM1_NUM_CHUNK 2
+#define LINK_TRUE_NUM_CHUNK 2
+#define LINK_FALSE_NUM_CHUNK 6
+#define MULTI_TRUE_PERCENT 50
+#define LINK_TRUE_CHUNK_NAME "h5_link_chunk_true"
+#define LINK_FALSE_CHUNK_NAME "h5_link_chunk_false"
+#define LINK_HARD_CHUNK_NAME "h5_link_chunk_hard"
+#define MULTI_HARD_CHUNK_NAME "h5_multi_chunk_hard"
+#define MULTI_COLL_CHUNK_NAME "h5_multi_chunk_coll"
+#define MULTI_INDP_CHUNK_NAME "h5_multi_chunk_indp"
+
+#define DSET_COLLECTIVE_CHUNK_NAME "coll_chunk_name"
+
+/*Constants for MPI derived data type generated from span tree */
+
+#define MSPACE1_RANK 1 /* Rank of the first dataset in memory */
+#define MSPACE1_DIM 27000 /* Dataset size in memory */
+#define FSPACE_RANK 2 /* Dataset rank as it is stored in the file */
+#define FSPACE_DIM1 9 /* Dimension sizes of the dataset as it is stored in the file */
+#define FSPACE_DIM2 3600
+/* We will read dataset back from the file to the dataset in memory with these dataspace parameters. */
+#define MSPACE_RANK 2
+#define MSPACE_DIM1 9
+#define MSPACE_DIM2 3600
+#define FHCOUNT0 1 /* Count of the first dimension of the first hyperslab selection*/
+#define FHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
+#define FHSTRIDE0 4 /* Stride of the first dimension of the first hyperslab selection*/
+#define FHSTRIDE1 3 /* Stride of the second dimension of the first hyperslab selection*/
+#define FHBLOCK0 3 /* Block of the first dimension of the first hyperslab selection*/
+#define FHBLOCK1 2 /* Block of the second dimension of the first hyperslab selection*/
+#define FHSTART0 0 /* start of the first dimension of the first hyperslab selection*/
+#define FHSTART1 1 /* start of the second dimension of the first hyperslab selection*/
+
+#define SHCOUNT0 1 /* Count of the first dimension of the first hyperslab selection*/
+#define SHCOUNT1 1 /* Count of the second dimension of the first hyperslab selection*/
+#define SHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define SHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
+#define SHBLOCK0 3 /* Block of the first dimension of the first hyperslab selection*/
+#define SHBLOCK1 768 /* Block of the second dimension of the first hyperslab selection*/
+#define SHSTART0 4 /* start of the first dimension of the first hyperslab selection*/
+#define SHSTART1 0 /* start of the second dimension of the first hyperslab selection*/
+
+#define MHCOUNT0 6912 /* Count of the first dimension of the first hyperslab selection*/
+#define MHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define MHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
+#define MHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
+
+#define RFFHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
+#define RFFHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
+#define RFFHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define RFFHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
+#define RFFHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
+#define RFFHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
+#define RFFHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
+#define RFFHSTART1 2 /* start of the second dimension of the first hyperslab selection*/
+
+#define RFSHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
+#define RFSHCOUNT1 1536 /* Count of the second dimension of the first hyperslab selection*/
+#define RFSHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define RFSHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
+#define RFSHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
+#define RFSHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
+#define RFSHSTART0 2 /* start of the first dimension of the first hyperslab selection*/
+#define RFSHSTART1 4 /* start of the second dimension of the first hyperslab selection*/
+
+#define RMFHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
+#define RMFHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
+#define RMFHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define RMFHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
+#define RMFHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
+#define RMFHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
+#define RMFHSTART0 0 /* start of the first dimension of the first hyperslab selection*/
+#define RMFHSTART1 0 /* start of the second dimension of the first hyperslab selection*/
+
+#define RMSHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
+#define RMSHCOUNT1 1536 /* Count of the second dimension of the first hyperslab selection*/
+#define RMSHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define RMSHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
+#define RMSHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
+#define RMSHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
+#define RMSHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
+#define RMSHSTART1 2 /* start of the second dimension of the first hyperslab selection*/
+
+#define NPOINTS \
+ 4 /* Number of points that will be selected \
+ and overwritten */
+
+/* Definitions of the selection mode for the test_actual_io_function. */
+#define TEST_ACTUAL_IO_NO_COLLECTIVE 0
+#define TEST_ACTUAL_IO_RESET 1
+#define TEST_ACTUAL_IO_MULTI_CHUNK_IND 2
+#define TEST_ACTUAL_IO_MULTI_CHUNK_COL 3
+#define TEST_ACTUAL_IO_MULTI_CHUNK_MIX 4
+#define TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE 5
+#define TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND 6
+#define TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL 7
+#define TEST_ACTUAL_IO_LINK_CHUNK 8
+#define TEST_ACTUAL_IO_CONTIGUOUS 9
+
+/* Definitions of the selection mode for the no_collective_cause_tests function. */
+#define TEST_COLLECTIVE 0x001
+#define TEST_SET_INDEPENDENT 0x002
+#define TEST_DATATYPE_CONVERSION 0x004
+#define TEST_DATA_TRANSFORMS 0x008
+#define TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES 0x010
+#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT 0x020
+#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL 0x040
+
+/* Don't erase these lines; they are put here for debugging purposes */
+/*
+#define MSPACE1_RANK 1
+#define MSPACE1_DIM 50
+#define MSPACE2_RANK 1
+#define MSPACE2_DIM 4
+#define FSPACE_RANK 2
+#define FSPACE_DIM1 8
+#define FSPACE_DIM2 12
+#define MSPACE_RANK 2
+#define MSPACE_DIM1 8
+#define MSPACE_DIM2 9
+#define NPOINTS 4
+*/ /* end of debugging macro */
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+/* Collective chunk instrumentation properties */
+#define H5D_XFER_COLL_CHUNK_LINK_HARD_NAME "coll_chunk_link_hard"
+#define H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME "coll_chunk_multi_hard"
+#define H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME "coll_chunk_link_true"
+#define H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME "coll_chunk_link_false"
+#define H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME "coll_chunk_multi_coll"
+#define H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME "coll_chunk_multi_ind"
+
+/* Definitions for all collective chunk instrumentation properties */
+#define H5D_XFER_COLL_CHUNK_SIZE sizeof(unsigned)
+#define H5D_XFER_COLL_CHUNK_DEF 1
+
+/* General collective I/O instrumentation properties */
+#define H5D_XFER_COLL_RANK0_BCAST_NAME "coll_rank0_bcast"
+
+/* Definitions for general collective I/O instrumentation properties */
+#define H5D_XFER_COLL_RANK0_BCAST_SIZE sizeof(hbool_t)
+#define H5D_XFER_COLL_RANK0_BCAST_DEF FALSE
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+/* type definitions */
+typedef struct H5Ptest_param_t /* holds extra test parameters */
+{
+ char *name;
+ int count;
+} H5Ptest_param_t;
+
+/* Dataset data type. Ints can be easily dumped in octal. */
+typedef int DATATYPE;
+
+/* Shape Same Tests Definitions */
+typedef enum {
+ IND_CONTIG, /* Independent IO on contiguous datasets */
+ COL_CONTIG, /* Collective IO on contiguous datasets */
+ IND_CHUNKED, /* Independent IO on chunked datasets */
+ COL_CHUNKED /* Collective IO on chunked datasets */
+} ShapeSameTestMethods;
+
+/* Shared global variables */
+extern int dim0, dim1; /*Dataset dimensions */
+extern int chunkdim0, chunkdim1; /*Chunk dimensions */
+extern int nerrors; /*errors count */
+extern H5E_auto2_t old_func; /* previous error handler */
+extern void *old_client_data; /*previous error handler arg.*/
+extern int facc_type; /*Test file access type */
+extern int dxfer_coll_type;
+
+/* Test program prototypes */
+void test_plist_ed(void);
+#if 0
+void external_links(void);
+#endif
+void zero_dim_dset(void);
+void test_file_properties(void);
+void test_delete(void);
+void multiple_dset_write(void);
+void multiple_group_write(void);
+void multiple_group_read(void);
+void collective_group_write_independent_group_read(void);
+void collective_group_write(void);
+void independent_group_read(void);
+void test_fapl_mpio_dup(void);
+void test_split_comm_access(void);
+void test_page_buffer_access(void);
+void dataset_atomicity(void);
+void dataset_writeInd(void);
+void dataset_writeAll(void);
+void extend_writeInd(void);
+void extend_writeInd2(void);
+void extend_writeAll(void);
+void dataset_readInd(void);
+void dataset_readAll(void);
+void extend_readInd(void);
+void extend_readAll(void);
+void none_selection_chunk(void);
+void actual_io_mode_tests(void);
+void no_collective_cause_tests(void);
+void test_chunk_alloc(void);
+void test_filter_read(void);
+void compact_dataset(void);
+void null_dataset(void);
+void big_dataset(void);
+void dataset_fillvalue(void);
+void coll_chunk1(void);
+void coll_chunk2(void);
+void coll_chunk3(void);
+void coll_chunk4(void);
+void coll_chunk5(void);
+void coll_chunk6(void);
+void coll_chunk7(void);
+void coll_chunk8(void);
+void coll_chunk9(void);
+void coll_chunk10(void);
+void coll_irregular_cont_read(void);
+void coll_irregular_cont_write(void);
+void coll_irregular_simple_chunk_read(void);
+void coll_irregular_simple_chunk_write(void);
+void coll_irregular_complex_chunk_read(void);
+void coll_irregular_complex_chunk_write(void);
+void io_mode_confusion(void);
+void rr_obj_hdr_flush_confusion(void);
+void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm);
+void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm);
+void chunk_align_bug_1(void);
+void lower_dim_size_comp_test(void);
+void link_chunk_collective_io_test(void);
+void contig_hyperslab_dr_pio_test(ShapeSameTestMethods sstest_type);
+void checker_board_hyperslab_dr_pio_test(ShapeSameTestMethods sstest_type);
+void file_image_daisy_chain_test(void);
+#ifdef H5_HAVE_FILTER_DEFLATE
+void compress_readAll(void);
+#endif /* H5_HAVE_FILTER_DEFLATE */
+void test_dense_attr(void);
+void test_partial_no_selection_coll_md_read(void);
+void test_multi_chunk_io_addrmap_issue(void);
+void test_link_chunk_io_sort_chunk_issue(void);
+void test_collective_global_heap_write(void);
+
+/* commonly used prototypes */
+hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type);
+MPI_Offset h5_mpi_get_file_size(const char *filename, MPI_Comm comm, MPI_Info info);
+int dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset,
+ DATATYPE *original);
+void point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points,
+ hsize_t coords[], int order);
+#endif /* PHDF5TEST_H */
diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt
index 3a44fca..d34b800 100644
--- a/testpar/CMakeLists.txt
+++ b/testpar/CMakeLists.txt
@@ -104,10 +104,30 @@ set (H5P_TESTS
t_vfd
)
+set (HDF5_API_TESTS
+ attribute
+ dataset
+ datatype
+ file
+ group
+ link
+ misc
+ object
+)
+
+if (HDF5_TEST_API_ENABLE_ASYNC)
+ set (HDF5_API_TESTS
+ ${HDF5_API_TESTS}
+ async
+ )
+endif ()
+
foreach (h5_testp ${H5P_TESTS})
ADD_H5P_EXE(${h5_testp})
endforeach ()
+add_subdirectory (API)
+
if (HDF5_TEST_PARALLEL)
include (CMakeTests.cmake)
endif ()