Diffstat (limited to 'testpar')
-rw-r--r--  testpar/CMakeLists.txt        5
-rw-r--r--  testpar/CMakeVFDTests.cmake  51
-rw-r--r--  testpar/Makefile.am           8
-rw-r--r--  testpar/t_subfiling_vfd.c   260
-rw-r--r--  testpar/t_vfd.c             933
5 files changed, 956 insertions, 301 deletions
diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt
index 15723c9..907fd0a 100644
--- a/testpar/CMakeLists.txt
+++ b/testpar/CMakeLists.txt
@@ -25,7 +25,7 @@ set (testphdf5_SOURCES
add_executable (testphdf5 ${testphdf5_SOURCES})
target_compile_options(testphdf5 PRIVATE "${HDF5_CMAKE_C_FLAGS}")
target_include_directories (testphdf5
- PRIVATE "${HDF5_SRC_DIR};${HDF5_SRC_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>"
+ PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>"
)
if (NOT BUILD_SHARED_LIBS)
TARGET_C_PROPERTIES (testphdf5 STATIC)
@@ -51,7 +51,7 @@ macro (ADD_H5P_EXE file)
add_executable (${file} ${HDF5_TEST_PAR_SOURCE_DIR}/${file}.c)
target_compile_options(${file} PRIVATE "${HDF5_CMAKE_C_FLAGS}")
target_include_directories (${file}
- PRIVATE "${HDF5_SRC_DIR};${HDF5_SRC_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>"
+ PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>"
)
if (NOT BUILD_SHARED_LIBS)
TARGET_C_PROPERTIES (${file} STATIC)
@@ -89,6 +89,7 @@ set (H5P_TESTS
t_init_term
t_shapesame
t_filters_parallel
+ t_subfiling_vfd
t_2Gio
t_vfd
)
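Note: adding t_subfiling_vfd to H5P_TESTS is the only change needed to build the new test program. A minimal sketch, assuming the list is consumed by the usual loop further down in testpar/CMakeLists.txt (that loop is not shown in this diff):

    # Sketch (assumption): each H5P_TESTS entry is built from testpar/<name>.c
    foreach (h5_test ${H5P_TESTS})
      ADD_H5P_EXE (${h5_test})
    endforeach ()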
diff --git a/testpar/CMakeVFDTests.cmake b/testpar/CMakeVFDTests.cmake
index d6a4025..d630015 100644
--- a/testpar/CMakeVFDTests.cmake
+++ b/testpar/CMakeVFDTests.cmake
@@ -22,29 +22,42 @@ set (H5P_VFD_TESTS
t_pflush2
)
+set (H5P_VFD_subfiling_TESTS_SKIP
+ t_pflush1
+ t_pflush2
+)
+
macro (ADD_VFD_TEST vfdname resultcode)
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
foreach (h5_test ${H5P_VFD_TESTS})
- add_test (
- NAME MPI_TEST_VFD-${vfdname}-${h5_test}
- COMMAND "${CMAKE_COMMAND}"
- -D "TEST_EMULATOR=${CMAKE_CROSSCOMPILING_EMULATOR}"
- -D "TEST_PROGRAM=$<TARGET_FILE:${h5_test}>"
- -D "TEST_ARGS:STRING="
- -D "TEST_VFD:STRING=${vfdname}"
- -D "TEST_EXPECT=${resultcode}"
- -D "TEST_OUTPUT=${vfdname}-${h5_test}.out"
- -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/${vfdname}"
- -P "${HDF_RESOURCES_DIR}/vfdTest.cmake"
- )
- set_tests_properties (MPI_TEST_VFD-${vfdname}-${h5_test} PROPERTIES
- ENVIRONMENT "srcdir=${HDF5_TEST_PAR_BINARY_DIR}/${vfdname}"
- WORKING_DIRECTORY ${HDF5_TEST_PAR_BINARY_DIR}/${vfdname}
- )
+ if (NOT "${h5_test}" IN_LIST H5P_VFD_${vfdname}_TESTS_SKIP)
+ add_test (
+ NAME MPI_TEST_VFD-${vfdname}-${h5_test}
+ COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_EMULATOR=${CMAKE_CROSSCOMPILING_EMULATOR}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:${h5_test}>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_VFD:STRING=${vfdname}"
+ -D "TEST_EXPECT=${resultcode}"
+ -D "TEST_OUTPUT=${vfdname}-${h5_test}.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/${vfdname}"
+ -P "${HDF_RESOURCES_DIR}/vfdTest.cmake"
+ )
+ set_tests_properties (MPI_TEST_VFD-${vfdname}-${h5_test} PROPERTIES
+ ENVIRONMENT "srcdir=${HDF5_TEST_PAR_BINARY_DIR}/${vfdname}"
+ WORKING_DIRECTORY ${HDF5_TEST_PAR_BINARY_DIR}/${vfdname}
+ )
+ endif ()
endforeach ()
- set_tests_properties (MPI_TEST_VFD-${vfdname}-t_pflush1 PROPERTIES WILL_FAIL "true")
- #set_property (TEST MPI_TEST_t_pflush1 PROPERTY PASS_REGULAR_EXPRESSION "PASSED")
- set_tests_properties (MPI_TEST_VFD-${vfdname}-t_pflush2 PROPERTIES DEPENDS MPI_TEST_VFD-${vfdname}-t_pflush1)
+ if (NOT "t_pflush1" IN_LIST H5P_VFD_${vfdname}_TESTS_SKIP)
+ set_tests_properties (MPI_TEST_VFD-${vfdname}-t_pflush1 PROPERTIES WILL_FAIL "true")
+ #set_property (TEST MPI_TEST_t_pflush1 PROPERTY PASS_REGULAR_EXPRESSION "PASSED")
+ endif ()
+ if (NOT "t_pflush2" IN_LIST H5P_VFD_${vfdname}_TESTS_SKIP)
+ if (NOT "t_pflush1" IN_LIST H5P_VFD_${vfdname}_TESTS_SKIP)
+ set_tests_properties (MPI_TEST_VFD-${vfdname}-t_pflush2 PROPERTIES DEPENDS MPI_TEST_VFD-${vfdname}-t_pflush1)
+ endif ()
+ endif ()
endif ()
endmacro ()
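Note: the skip mechanism keys on a per-VFD variable named H5P_VFD_<vfdname>_TESTS_SKIP, so only the subfiling VFD is affected here. A minimal sketch of the expected invocations (hypothetical result codes, following the macro signature above):

    # With no mpio skip list defined, t_pflush1/t_pflush2 are added as before:
    ADD_VFD_TEST (mpio 0)
    # H5P_VFD_subfiling_TESTS_SKIP suppresses both flush tests for subfiling:
    ADD_VFD_TEST (subfiling 0)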
diff --git a/testpar/Makefile.am b/testpar/Makefile.am
index ff4a3dd..b53553a 100644
--- a/testpar/Makefile.am
+++ b/testpar/Makefile.am
@@ -21,6 +21,10 @@ include $(top_srcdir)/config/commence.am
AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/test
+if SUBFILING_VFD_CONDITIONAL
+ AM_CPPFLAGS += -I$(top_srcdir)/src/H5FDsubfiling
+endif
+
# Test scripts--
# testpflush.sh:
TEST_SCRIPT_PARA = testpflush.sh
@@ -32,6 +36,10 @@ check_SCRIPTS = $(TEST_SCRIPT_PARA)
#
TEST_PROG_PARA=t_mpi t_bigio testphdf5 t_cache t_cache_image t_pread t_pshutdown t_prestart t_init_term t_shapesame t_filters_parallel t_2Gio t_vfd
+if SUBFILING_VFD_CONDITIONAL
+ TEST_PROG_PARA += t_subfiling_vfd
+endif
+
# t_pflush1 and t_pflush2 are used by testpflush.sh
check_PROGRAMS = $(TEST_PROG_PARA) t_pflush1 t_pflush2
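Note: SUBFILING_VFD_CONDITIONAL is an Automake conditional, so it must be declared at configure time. A sketch of the corresponding configure.ac line (an assumption -- the actual shell test is not part of this diff):

    AM_CONDITIONAL([SUBFILING_VFD_CONDITIONAL], [test "X$SUBFILING_VFD" = "Xyes"])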
diff --git a/testpar/t_subfiling_vfd.c b/testpar/t_subfiling_vfd.c
new file mode 100644
index 0000000..7c21e7e
--- /dev/null
+++ b/testpar/t_subfiling_vfd.c
@@ -0,0 +1,260 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * HDF5 Subfiling VFD tests
+ */
+
+#include <mpi.h>
+#include <libgen.h>
+
+#include "testpar.h"
+#include "H5srcdir.h"
+
+#ifdef H5_HAVE_SUBFILING_VFD
+
+#include "H5FDsubfiling.h"
+#include "H5FDioc.h"
+
+#define SUBFILING_TEST_DIR H5FD_SUBFILING_NAME
+
+#ifndef PATH_MAX
+#define PATH_MAX 4096
+#endif
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+static MPI_Comm comm = MPI_COMM_WORLD;
+static MPI_Info info = MPI_INFO_NULL;
+static int mpi_rank;
+static int mpi_size;
+
+int nerrors = 0;
+
+/* Function pointer typedef for test functions */
+typedef void (*test_func)(void);
+
+/* Utility functions */
+static hid_t create_subfiling_ioc_fapl(void);
+
+/* Test functions */
+static void test_create_and_close(void);
+
+static test_func tests[] = {
+ test_create_and_close,
+};
+
+/* ---------------------------------------------------------------------------
+ * Function: create_subfiling_ioc_fapl
+ *
+ * Purpose: Create and populate a subfiling FAPL ID that uses either the
+ * IOC VFD or sec2 VFD.
+ *
+ * Return: Success: HID of the top-level (subfiling) FAPL, a non-negative
+ * value.
+ * Failure: H5I_INVALID_HID, a negative value.
+ * ---------------------------------------------------------------------------
+ */
+static hid_t
+create_subfiling_ioc_fapl(void)
+{
+ H5FD_subfiling_config_t *subfiling_conf = NULL;
+ H5FD_ioc_config_t * ioc_conf = NULL;
+ hid_t ioc_fapl = H5I_INVALID_HID;
+ hid_t ret_value = H5I_INVALID_HID;
+
+ if (NULL == (subfiling_conf = HDcalloc(1, sizeof(*subfiling_conf))))
+ TEST_ERROR;
+
+ if ((ioc_fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ TEST_ERROR;
+
+ if ((ret_value = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_mpi_params(ret_value, comm, info) < 0)
+ TEST_ERROR;
+
+ /* Get defaults for Subfiling configuration */
+ if (H5Pget_fapl_subfiling(ret_value, subfiling_conf) < 0)
+ TEST_ERROR;
+
+ if (subfiling_conf->require_ioc) {
+ if (NULL == (ioc_conf = HDcalloc(1, sizeof(*ioc_conf))))
+ TEST_ERROR;
+
+ /* Get IOC VFD defaults */
+ if (H5Pget_fapl_ioc(ioc_fapl, ioc_conf) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_mpi_params(ioc_fapl, comm, info) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_fapl_ioc(ioc_fapl, ioc_conf) < 0)
+ TEST_ERROR;
+ }
+ else {
+ if (H5Pset_fapl_sec2(ioc_fapl) < 0)
+ TEST_ERROR;
+ }
+
+ subfiling_conf->ioc_fapl_id = ioc_fapl;
+
+ if (H5Pset_fapl_subfiling(ret_value, subfiling_conf) < 0)
+ TEST_ERROR;
+
+ HDfree(ioc_conf);
+ HDfree(subfiling_conf);
+
+ return ret_value;
+
+error:
+ HDfree(ioc_conf);
+ HDfree(subfiling_conf);
+
+ if ((H5I_INVALID_HID != ioc_fapl) && (H5Pclose(ioc_fapl) < 0)) {
+ H5_FAILED();
+ AT();
+ }
+ if ((H5I_INVALID_HID != ret_value) && (H5Pclose(ret_value) < 0)) {
+ H5_FAILED();
+ AT();
+ }
+
+ return H5I_INVALID_HID;
+}
+
+/*
+ * A simple test that creates and closes a file with the
+ * subfiling VFD
+ */
+static void
+test_create_and_close(void)
+{
+ H5FD_subfiling_config_t subfiling_config;
+ const char * test_filenames[2];
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+
+ if (MAINPROCESS)
+ TESTING("File creation and immediate close");
+
+ fapl_id = create_subfiling_ioc_fapl();
+ VRFY((fapl_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pget_fapl_subfiling(fapl_id, &subfiling_config) >= 0), "H5Pget_fapl_subfiling succeeded");
+
+ file_id = H5Fcreate("basic_create.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ test_filenames[0] = "basic_create.h5";
+ test_filenames[1] = NULL;
+ h5_clean_files(test_filenames, fapl_id);
+
+ if (H5P_DEFAULT != subfiling_config.ioc_fapl_id)
+ VRFY((H5Pclose(subfiling_config.ioc_fapl_id) >= 0), "FAPL close succeeded");
+
+ return;
+}
+
+int
+main(int argc, char **argv)
+{
+ int required = MPI_THREAD_MULTIPLE;
+ int provided = 0;
+ int mpi_code;
+
+ /* Initialize MPI */
+ if (MPI_SUCCESS != MPI_Init_thread(&argc, &argv, required, &provided)) {
+ HDprintf("MPI_Init_thread failed\n");
+ nerrors++;
+ goto exit;
+ }
+
+ if (provided != required) {
+ HDprintf("MPI doesn't support MPI_Init_thread with MPI_THREAD_MULTIPLE\n");
+ nerrors++;
+ goto exit;
+ }
+
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ H5open();
+
+ if (H5dont_atexit() < 0) {
+ if (MAINPROCESS)
+ HDprintf("Failed to turn off atexit processing. Continue.\n");
+ }
+
+ /* Enable selection I/O using internal temporary workaround */
+ H5_use_selection_io_g = TRUE;
+
+ if (MAINPROCESS) {
+ HDprintf("Testing Subfiling VFD functionality\n");
+ }
+
+ TestAlarmOn();
+
+ /* Create directories for test-generated .h5 files */
+ if ((HDmkdir(SUBFILING_TEST_DIR, (mode_t)0755) < 0) && (errno != EEXIST)) {
+ HDprintf("couldn't create subfiling testing directory\n");
+ nerrors++;
+ goto exit;
+ }
+
+ for (size_t i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (MPI_SUCCESS == (mpi_code = MPI_Barrier(comm))) {
+ (*tests[i])();
+ }
+ else {
+ if (MAINPROCESS)
+ MESG("MPI_Barrier failed");
+ nerrors++;
+ }
+ }
+
+ if (nerrors)
+ goto exit;
+
+ if (MAINPROCESS)
+ HDputs("All Subfiling VFD tests passed\n");
+
+exit:
+ if (nerrors) {
+ if (MAINPROCESS)
+ HDprintf("*** %d TEST ERROR%s OCCURRED ***\n", nerrors, nerrors > 1 ? "S" : "");
+ }
+
+ TestAlarmOff();
+
+ H5close();
+
+ MPI_Finalize();
+
+ HDexit(nerrors ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#else /* H5_HAVE_SUBFILING_VFD */
+
+int
+main(void)
+{
+ h5_reset();
+ HDprintf("Testing Subfiling VFD functionality\n");
+ HDprintf("SKIPPED - Subfiling VFD not built\n");
+ HDexit(EXIT_SUCCESS);
+}
+
+#endif /* H5_HAVE_SUBFILING_VFD */
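Note: additional tests hook into this harness by following the test_create_and_close() pattern: write a void(void) function that uses create_subfiling_ioc_fapl() plus the TESTING/VRFY macros, then append it to the tests[] table that main() iterates. A minimal sketch with a hypothetical test name:

    /* Hypothetical second test for t_subfiling_vfd.c -- relies on the
     * harness above (create_subfiling_ioc_fapl, TESTING, VRFY, MAINPROCESS). */
    static void
    test_create_close_reopen(void)
    {
        const char *test_filenames[2];
        hid_t       fapl_id = H5I_INVALID_HID;
        hid_t       file_id = H5I_INVALID_HID;

        if (MAINPROCESS)
            TESTING("File creation, close, and re-open");

        fapl_id = create_subfiling_ioc_fapl();
        VRFY((fapl_id >= 0), "FAPL creation succeeded");

        file_id = H5Fcreate("basic_reopen.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
        VRFY((file_id >= 0), "H5Fcreate succeeded");
        VRFY((H5Fclose(file_id) >= 0), "File close succeeded");

        /* Re-open read-only through the same subfiling FAPL */
        file_id = H5Fopen("basic_reopen.h5", H5F_ACC_RDONLY, fapl_id);
        VRFY((file_id >= 0), "H5Fopen succeeded");
        VRFY((H5Fclose(file_id) >= 0), "File close succeeded");

        /* clean up the test file and FAPL, as in test_create_and_close() */
        test_filenames[0] = "basic_reopen.h5";
        test_filenames[1] = NULL;
        h5_clean_files(test_filenames, fapl_id);
    }

    /* ...and register it:
     * static test_func tests[] = { test_create_and_close, test_create_close_reopen }; */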
diff --git a/testpar/t_vfd.c b/testpar/t_vfd.c
index ad296ad..49c2a42 100644
--- a/testpar/t_vfd.c
+++ b/testpar/t_vfd.c
@@ -15,22 +15,41 @@
* This file is a catchall for parallel VFD tests.
*/
+#include <libgen.h>
+
#include "testphdf5.h"
+#ifdef H5_HAVE_SUBFILING_VFD
+#include "H5FDsubfiling.h"
+#include "H5FDioc.h"
+#endif
+
/* Must be a power of 2. Reducing it below 1024 may cause problems */
#define INTS_PER_RANK 1024
/* global variable declarations: */
-hbool_t pass = TRUE; /* set to FALSE on error */
-const char *failure_mssg = NULL;
-
-const char *FILENAMES[] = {"mpio_vfd_test_file_0", /*0*/
- "mpio_vfd_test_file_1", /*1*/
- "mpio_vfd_test_file_2", /*2*/
- "mpio_vfd_test_file_3", /*3*/
- "mpio_vfd_test_file_4", /*4*/
- "mpio_vfd_test_file_5", /*5*/
+static MPI_Comm comm = MPI_COMM_WORLD;
+static MPI_Info info = MPI_INFO_NULL;
+
+hbool_t pass = TRUE; /* set to FALSE on error */
+hbool_t disp_failure_mssgs = TRUE; /* global force display of failure messages */
+const char *failure_mssg = NULL;
+
+const char *FILENAMES[] = {"mpio_vfd_test_file_0", /*0*/
+ "mpio_vfd_test_file_1", /*1*/
+ "mpio_vfd_test_file_2", /*2*/
+ "mpio_vfd_test_file_3", /*3*/
+ "mpio_vfd_test_file_4", /*4*/
+ "mpio_vfd_test_file_5", /*5*/
+ "mpio_vfd_test_file_6", /*6*/
+ "subfiling_vfd_test_file_0", /*7*/
+ "subfiling_vfd_test_file_1", /*8*/
+ "subfiling_vfd_test_file_2", /*9*/
+ "subfiling_vfd_test_file_3", /*10*/
+ "subfiling_vfd_test_file_4", /*11*/
+ "subfiling_vfd_test_file_5", /*12*/
+ "subfiling_vfd_test_file_6", /*13*/
NULL};
/* File Test Images
@@ -82,6 +101,8 @@ static unsigned vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size
H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
static unsigned vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+static unsigned vector_write_test_7(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
/****************************************************************************/
/***************************** Utility Functions ****************************/
@@ -244,7 +265,7 @@ free_file_images(void)
*
* Modifications:
*
- * None.
+ * Updated for subfiling VFD 9/29/30
*
*-------------------------------------------------------------------------
*/
@@ -271,6 +292,20 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+ /* setup the file name -- do this now, since setting up the ioc fapl requires it. This will probably
+ * change */
+ if (pass) {
+
+ if (h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT, filename, sizeof(filename)) == NULL) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* setup fapl for target VFD */
if (pass) {
@@ -283,16 +318,101 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
if (pass) {
- if (strcmp(vfd_name, "mpio") == 0) {
+ if (HDstrcmp(vfd_name, "mpio") == 0) {
- if (H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL) < 0) {
+ if (H5Pset_fapl_mpio(fapl_id, comm, info) < 0) {
pass = FALSE;
failure_mssg = "Can't set mpio fapl.";
}
}
- else {
+#ifdef H5_HAVE_SUBFILING_VFD
+ else if (HDstrcmp(vfd_name, H5FD_SUBFILING_NAME) == 0) {
+
+ hid_t ioc_fapl;
+ H5FD_ioc_config_t ioc_config = {/* magic = */ H5FD_IOC_FAPL_MAGIC,
+ /* version = */ H5FD_CURR_IOC_FAPL_VERSION,
+ /* stripe_count = */ 0, /* will overwrite */
+ /* stripe_depth = */ (INTS_PER_RANK / 2),
+ /* ioc_selection = */ SELECT_IOC_ONE_PER_NODE,
+ /* ioc_fapl_id = */ H5P_DEFAULT, /* will overwrite? */
+ /* thread_pool_count = */ H5FD_IOC_THREAD_POOL_SIZE};
+ H5FD_subfiling_config_t subfiling_conf = {
+ /* magic = */ H5FD_IOC_FAPL_MAGIC,
+ /* version = */ H5FD_CURR_IOC_FAPL_VERSION,
+ /* stripe_count = */ 0, /* will overwrite */
+ /* stripe_depth = */ (INTS_PER_RANK / 2),
+ /* ioc_selection = */ SELECT_IOC_ONE_PER_NODE,
+ /* ioc_fapl_id = */ H5P_DEFAULT, /* will overwrite? */
+ /* require_ioc = */ TRUE};
+
+ if ((pass) && ((ioc_fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)) {
+
+ pass = FALSE;
+ failure_mssg = "Can't create ioc fapl.";
+ }
+
+ /* set the MPI communicator and info in the FAPL */
+ if (H5Pset_mpi_params(ioc_fapl, comm, info) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "Can't set MPI communicator and info in IOC fapl.";
+ }
+
+ /* set the MPI communicator and info in the FAPL */
+ if (H5Pset_mpi_params(fapl_id, comm, info) < 0) {
+ pass = FALSE;
+ failure_mssg = "Can't set MPI communicator and info in subfiling fapl.";
+ }
+
+ HDmemset(&ioc_config, 0, sizeof(ioc_config));
+ HDmemset(&subfiling_conf, 0, sizeof(subfiling_conf));
+
+ /* Get subfiling VFD defaults */
+ if ((pass) && (H5Pget_fapl_subfiling(fapl_id, &subfiling_conf) == FAIL)) {
+
+ pass = FALSE;
+ failure_mssg = "Can't get sub-filing VFD defaults.";
+ }
+
+ if ((pass) && (subfiling_conf.require_ioc)) {
+
+ /* Get IOC VFD defaults */
+ if ((pass) && ((H5Pget_fapl_ioc(ioc_fapl, &ioc_config) == FAIL))) {
+
+ pass = FALSE;
+ failure_mssg = "Can't get IOC VFD defaults.";
+ }
+
+ /* Now we can set the IOC fapl. */
+ if ((pass) && ((H5Pset_fapl_ioc(ioc_fapl, &ioc_config) == FAIL))) {
+
+ pass = FALSE;
+ failure_mssg = "Can't set IOC fapl.";
+ }
+ }
+ else {
+
+ if ((pass) && ((H5Pset_fapl_sec2(ioc_fapl) == FAIL))) {
+
+ pass = FALSE;
+ failure_mssg = "Can't set sec2 fapl.";
+ }
+ }
+
+ /* Assign the IOC fapl as the underlying VFD */
+ subfiling_conf.ioc_fapl_id = ioc_fapl;
+
+ /* Now we can set the SUBFILING fapl before returning. */
+ if ((pass) && (H5Pset_fapl_subfiling(fapl_id, &subfiling_conf) == FAIL)) {
+
+ pass = FALSE;
+ failure_mssg = "Can't set subfiling fapl.";
+ }
+ }
+#endif
+ else {
pass = FALSE;
failure_mssg = "un-supported VFD";
}
@@ -383,7 +503,7 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
if (pass) { /* setup pointers with return values */
- strncpy(file_name, filename, 512);
+ HDstrncpy(file_name, filename, 512);
*lf_ptr = lf;
*fapl_id_ptr = fapl_id;
*dxpl_id_ptr = dxpl_id;
@@ -455,21 +575,21 @@ takedown_vfd_test_file(int mpi_rank, char *filename, H5FD_t **lf_ptr, hid_t *fap
/* 6) On rank 0, delete the test file.
*/
- if (pass) {
+ /* wait for everyone to close the file */
+ MPI_Barrier(comm);
- /* wait for everyone to close the file */
- MPI_Barrier(MPI_COMM_WORLD);
+ if (pass) {
if ((mpi_rank == 0) && (HDremove(filename) < 0)) {
pass = FALSE;
failure_mssg = "HDremove() failed.\n";
}
-
- /* wait for the file delete to complete */
- MPI_Barrier(MPI_COMM_WORLD);
}
+ /* wait for the file delete to complete */
+ MPI_Barrier(comm);
+
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -571,20 +691,20 @@ vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- snprintf(test_title, sizeof(test_title), "parallel vector read test 1 -- %s / independent",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 1 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- snprintf(test_title, sizeof(test_title), "parallel vector read test 1 -- %s / col op / ind I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 1 -- %s / col op / ind I/O",
+ vfd_name);
}
else {
HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- snprintf(test_title, sizeof(test_title), "parallel vector read test 1 -- %s / col op / col I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 1 -- %s / col op / col I/O",
+ vfd_name);
}
TESTING(test_title);
@@ -626,11 +746,7 @@ vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
}
/* 3) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -694,11 +810,7 @@ vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 5) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -706,11 +818,7 @@ vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
/* 6) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -726,7 +834,7 @@ vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -820,20 +928,20 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- snprintf(test_title, sizeof(test_title), "parallel vector read test 2 -- %s / independent",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 2 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- snprintf(test_title, sizeof(test_title), "parallel vector read test 2 -- %s / col op / ind I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 2 -- %s / col op / ind I/O",
+ vfd_name);
}
else {
HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- snprintf(test_title, sizeof(test_title), "parallel vector read test 2 -- %s / col op / col I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 2 -- %s / col op / col I/O",
+ vfd_name);
}
TESTING(test_title);
@@ -875,11 +983,7 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
}
/* 3) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -930,11 +1034,7 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 6) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1008,11 +1108,7 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 9) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1020,12 +1116,7 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
/* 10) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
-
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1040,7 +1131,7 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -1145,20 +1236,20 @@ vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- snprintf(test_title, sizeof(test_title), "parallel vector read test 3 -- %s / independent",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 3 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- snprintf(test_title, sizeof(test_title), "parallel vector read test 3 -- %s / col op / ind I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 3 -- %s / col op / ind I/O",
+ vfd_name);
}
else {
HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- snprintf(test_title, sizeof(test_title), "parallel vector read test 3 -- %s / col op / col I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 3 -- %s / col op / col I/O",
+ vfd_name);
}
TESTING(test_title);
@@ -1200,11 +1291,7 @@ vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
}
/* 3) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1356,11 +1443,7 @@ vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 7) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1368,11 +1451,7 @@ vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
/* 8) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1388,7 +1467,7 @@ vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -1517,20 +1596,20 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- snprintf(test_title, sizeof(test_title), "parallel vector read test 4 -- %s / independent",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 4 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- snprintf(test_title, sizeof(test_title), "parallel vector read test 4 -- %s / col op / ind I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 4 -- %s / col op / ind I/O",
+ vfd_name);
}
else {
HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- snprintf(test_title, sizeof(test_title), "parallel vector read test 4 -- %s / col op / col I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 4 -- %s / col op / col I/O",
+ vfd_name);
}
TESTING(test_title);
@@ -1572,11 +1651,7 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
}
/* 3) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1834,11 +1909,7 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 7) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1846,11 +1917,7 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
/* 8) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1866,7 +1933,7 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -1963,20 +2030,20 @@ vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- snprintf(test_title, sizeof(test_title), "parallel vector read test 5 -- %s / independent",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 5 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- snprintf(test_title, sizeof(test_title), "parallel vector read test 5 -- %s / col op / ind I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 5 -- %s / col op / ind I/O",
+ vfd_name);
}
else {
HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- snprintf(test_title, sizeof(test_title), "parallel vector read test 5 -- %s / col op / col I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 5 -- %s / col op / col I/O",
+ vfd_name);
}
TESTING(test_title);
@@ -2018,11 +2085,7 @@ vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
}
/* 3) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2117,11 +2180,7 @@ vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 7) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2129,11 +2188,7 @@ vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
/* 8) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2148,8 +2203,7 @@ vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
else {
H5_FAILED();
-
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -2219,20 +2273,20 @@ vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 1 -- %s / independent",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 1 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 1 -- %s / col op / ind I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 1 -- %s / col op / ind I/O", vfd_name);
}
else {
HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- snprintf(test_title, sizeof(test_title), "parallel vector write test 1 -- %s / col op / col I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 1 -- %s / col op / col I/O", vfd_name);
}
TESTING(test_title);
@@ -2280,11 +2334,7 @@ vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 3) Barrier
*/
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2321,11 +2371,7 @@ vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 5) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2341,7 +2387,7 @@ vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -2421,20 +2467,20 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 2 -- %s / independent",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 2 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 2 -- %s / col op / ind I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 2 -- %s / col op / ind I/O", vfd_name);
}
else {
HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- snprintf(test_title, sizeof(test_title), "parallel vector write test 2 -- %s / col op / col I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 2 -- %s / col op / col I/O", vfd_name);
}
TESTING(test_title);
@@ -2529,11 +2575,7 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 4) Barrier
*/
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2588,11 +2630,7 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 6) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2608,7 +2646,7 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -2689,20 +2727,20 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 3 -- %s / independent",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 3 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 3 -- %s / col op / ind I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 3 -- %s / col op / ind I/O", vfd_name);
}
else {
HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- snprintf(test_title, sizeof(test_title), "parallel vector write test 3 -- %s / col op / col I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 3 -- %s / col op / col I/O", vfd_name);
}
TESTING(test_title);
@@ -2761,16 +2799,6 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
sizes[3] = bytes_per_write;
bufs[3] = (const void *)(&(zero_fi_buf[(mpi_rank * INTS_PER_RANK) + (3 * (INTS_PER_RANK / 4))]));
-#if 0 /* JRM */
- HDfprintf(stdout, "addrs = { %lld, %lld, %lld, %lld}\n",
- (long long)addrs[0], (long long)addrs[1], (long long)addrs[2], (long long)addrs[3]);
- HDfprintf(stdout, "sizes = { %lld, %lld, %lld, %lld}\n",
- (long long)sizes[0], (long long)sizes[1], (long long)sizes[2], (long long)sizes[3]);
- HDfprintf(stdout, "bufs = { 0x%llx, 0x%llx, 0x%llx, 0x%llx}\n",
- (unsigned long long)bufs[0], (unsigned long long)bufs[1],
- (unsigned long long)bufs[2], (unsigned long long)bufs[3]);
-#endif /* JRM */
-
if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
pass = FALSE;
@@ -2783,11 +2811,7 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 3) Barrier
*/
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2867,11 +2891,7 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 5) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2887,7 +2907,7 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -2974,20 +2994,20 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 4 -- %s / independent",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 4 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 4 -- %s / col op / ind I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 4 -- %s / col op / ind I/O", vfd_name);
}
else {
HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- snprintf(test_title, sizeof(test_title), "parallel vector write test 4 -- %s / col op / col I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 4 -- %s / col op / col I/O", vfd_name);
}
TESTING(test_title);
@@ -3047,16 +3067,6 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
bufs[3] =
(const void *)(&(increasing_fi_buf[(mpi_rank * INTS_PER_RANK) + (3 * (INTS_PER_RANK / 4))]));
-#if 0 /* JRM */
- HDfprintf(stdout, "addrs = { %lld, %lld, %lld, %lld}\n",
- (long long)addrs[0], (long long)addrs[1], (long long)addrs[2], (long long)addrs[3]);
- HDfprintf(stdout, "sizes = { %lld, %lld, %lld, %lld}\n",
- (long long)sizes[0], (long long)sizes[1], (long long)sizes[2], (long long)sizes[3]);
- HDfprintf(stdout, "bufs = { 0x%llx, 0x%llx, 0x%llx, 0x%llx}\n",
- (unsigned long long)bufs[0], (unsigned long long)bufs[1],
- (unsigned long long)bufs[2], (unsigned long long)bufs[3]);
-#endif /* JRM */
-
if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
pass = FALSE;
@@ -3069,11 +3079,7 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 3) Barrier
*/
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3153,11 +3159,7 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 5) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3173,7 +3175,7 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -3296,20 +3298,20 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 5 -- %s / independent",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 5 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 5 -- %s / col op / ind I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 5 -- %s / col op / ind I/O", vfd_name);
}
else {
HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- snprintf(test_title, sizeof(test_title), "parallel vector write test 5 -- %s / col op / col I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 5 -- %s / col op / col I/O", vfd_name);
}
TESTING(test_title);
@@ -3356,11 +3358,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 3) Barrier
*/
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3476,11 +3474,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 5) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3519,6 +3513,9 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
pass = FALSE;
failure_mssg = "unexpected data read from file (1.1)";
+
+ HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
+ negative_fi_buf[j]);
}
}
else if (((INTS_PER_RANK / 4) <= k) && (k < (3 * (INTS_PER_RANK / 8)))) {
@@ -3527,6 +3524,9 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
pass = FALSE;
failure_mssg = "unexpected data read from file (1.2)";
+
+ HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
+ decreasing_fi_buf[j]);
}
}
else if (((INTS_PER_RANK / 16) <= k) && (k < (INTS_PER_RANK / 8))) {
@@ -3535,6 +3535,9 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
pass = FALSE;
failure_mssg = "unexpected data read from file (1.3)";
+
+ HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
+ increasing_fi_buf[j]);
}
}
else {
@@ -3554,6 +3557,9 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
pass = FALSE;
failure_mssg = "unexpected data read from file (2.1)";
+
+ HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
+ increasing_fi_buf[j]);
}
}
else if ((((INTS_PER_RANK / 2) + 1) <= k) && (k <= (INTS_PER_RANK - 2))) {
@@ -3562,6 +3568,9 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
pass = FALSE;
failure_mssg = "unexpected data read from file (2.2)";
+
+ HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
+ decreasing_fi_buf[j]);
}
}
else {
@@ -3581,6 +3590,9 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
pass = FALSE;
failure_mssg = "unexpected data read from file (3.1)";
+
+ HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
+ negative_fi_buf[j]);
}
}
else {
@@ -3615,11 +3627,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 7) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3635,7 +3643,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -3731,20 +3739,20 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 6 -- %s / independent",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 6 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 6 -- %s / col op / ind I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 6 -- %s / col op / ind I/O", vfd_name);
}
else {
HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- snprintf(test_title, sizeof(test_title), "parallel vector write test 6 -- %s / col op / col I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 6 -- %s / col op / col I/O", vfd_name);
}
TESTING(test_title);
@@ -3785,11 +3793,7 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
}
/* 3) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3839,11 +3843,7 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 5) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3885,27 +3885,278 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 7) Barrier */
+ MPI_Barrier(comm);
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 8) Close the test file and delete it (on rank 0 only).
+ * Close FAPL and DXPL.
+ */
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* report results */
+ if (mpi_rank == 0) {
+
+ if (pass) {
+
+ PASSED();
+ }
+ else {
+
+ H5_FAILED();
+
+ if ((disp_failure_mssgs) || (show_progress)) {
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+ }
+ }
+ }
+ return (!pass);
+
+} /* vector_write_test_6() */
+
+/*-------------------------------------------------------------------------
+ * Function: vector_write_test_7()
+ *
+ * Purpose: Test vector I/O with larger vectors -- 8 elements in each
+ * vector for now.
+ *
+ * 1) Open the test file with the specified VFD, and set
+ * the eoa.
+ *
+ * 2) Set the test file in a known state by writing zeros
+ * to all bytes in the test file. Since we have already
+ * tested this, do this via a vector write of zero_fi_buf.
+ *
+ * 3) Barrier
+ *
+ * 4) For each rank, define base_index equal to:
+ *
+ * mpi_rank * INTS_PER_RANK
+ *
+ * and define base_addr equal to
+ *
+ * base_index * sizeof(int32_t).
+ *
+ * Setup a vector of length 8, with each element of
+ * length INTS_PER_RANK / 16, and base address
+ * base_addr + i * (INTS_PER_RANK / 8), where i is
+ * the index of the entry (starting at zero). Draw
+ * written data from the equivalent locations in
+ * increasing_fi_buf.
+ *
+ * Write the vector.
+ *
+ * 5) Barrier
+ *
+ * 6) On each rank, read the entire file into the read_fi_buf,
+ * and compare against zero_fi_buf, and increasing_fi_buf as
+ * appropriate. Report failure if any differences are
+ * detected.
+ *
+ * 7) Close the test file. On rank 0, delete the test file.
+ *
+ * Return: FALSE on success, TRUE if any errors are detected.
+ *
+ * Programmer: John Mainzer
+ * 10/10/21
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+vector_write_test_7(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name)
+{
+ const char *fcn_name = "vector_write_test_7()";
+ char test_title[120];
+ char filename[512];
+ haddr_t base_addr;
+ haddr_t addr_increment;
+ int base_index;
+ haddr_t eoa;
+ hbool_t show_progress = FALSE;
+ hid_t fapl_id = -1; /* file access property list ID */
+ hid_t dxpl_id = -1; /* data access property list ID */
+ H5FD_t * lf = NULL; /* VFD struct ptr */
+ int cp = 0;
+ int i;
+ int j;
+ int k;
+ uint32_t count;
+ H5FD_mem_t types[8];
+ haddr_t addrs[8];
+ size_t sizes[8];
+ const void *bufs[8];
+
+ pass = TRUE;
+
+ if (mpi_rank == 0) {
+
+ if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
+
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 7 -- %s / independent", vfd_name);
+ }
+ else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
+
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 7 -- %s / col op / ind I/O", vfd_name);
+ }
+ else {
+
+ HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
+
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 7 -- %s / col op / col I/O", vfd_name);
+ }
+
+ TESTING(test_title);
+ }
+
+ show_progress = ((show_progress) && (mpi_rank == 0));
+
+ if (show_progress)
+ HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */
if (pass) {
- MPI_Barrier(MPI_COMM_WORLD);
+ eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+
+ setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf,
+ &fapl_id, &dxpl_id);
}
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* 8) Close the test file and delete it (on rank 0 only).
- * Close FAPL and DXPL.
+ /* 2) Set the test file in a known state by writing zeros
+ * to all bytes in the test file. Since we have already
+ * tested this, do this via a vector write of zero_fi_buf.
+ */
+ if (pass) {
+
+ count = 1;
+ types[0] = H5FD_MEM_DRAW;
+ addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+ sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t);
+ bufs[0] = (void *)(&(zero_fi_buf[mpi_rank * INTS_PER_RANK]));
+
+ if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDwrite_vector() failed.\n";
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 3) Barrier
*/
+ MPI_Barrier(comm);
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
if (pass) {
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+ base_index = mpi_rank * INTS_PER_RANK;
+ base_addr = (haddr_t)((size_t)base_index * sizeof(int32_t));
+ addr_increment = (haddr_t)((INTS_PER_RANK / 8) * sizeof(int32_t));
+
+ count = 8;
+
+ for (i = 0; i < (int)count; i++) {
+
+ types[i] = H5FD_MEM_DRAW;
+ addrs[i] = base_addr + ((haddr_t)(i)*addr_increment);
+ sizes[i] = (size_t)(INTS_PER_RANK / 16) * sizeof(int32_t);
+ bufs[i] = (void *)(&(increasing_fi_buf[base_index + (i * (INTS_PER_RANK / 8))]));
+ }
+
+ if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDwrite_vector() failed (1).\n";
+ }
}
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+ /* 5) Barrier */
+ MPI_Barrier(comm);
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 6) On each rank, read the entire file into the read_fi_buf,
+ * and compare against increasing_fi_buf, and zero_fi_buf as
+ * appropriate. Report failure if any differences are
+ * detected.
+ */
+
+ if (pass) {
+
+ size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t);
+
+ if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDread() failed.\n";
+ }
+
+ for (i = 0; ((pass) && (i < mpi_size)); i++) {
+
+ base_index = i * INTS_PER_RANK;
+
+ for (j = base_index; j < base_index + INTS_PER_RANK; j++) {
+
+ k = j - base_index;
+
+ if ((k % (INTS_PER_RANK / 8)) < (INTS_PER_RANK / 16)) {
+
+ if (read_fi_buf[j] != increasing_fi_buf[j]) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (1)";
+
+ HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
+ increasing_fi_buf[j]);
+ }
+ }
+ else {
+
+ if (read_fi_buf[j] != 0) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (2)";
+
+ HDprintf("\nread_fi_buf[%d] = %d, 0 expected.\n", j, read_fi_buf[j]);
+ }
+ }
+ }
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 7) Close the test file and delete it (on rank 0 only).
+ * Close FAPL and DXPL.
+ */
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
/* report results */
if (mpi_rank == 0) {
@@ -3917,7 +4168,7 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -3925,7 +4176,7 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
return (!pass);
-} /* vector_write_test_6() */
+} /* vector_write_test_7() */
/*-------------------------------------------------------------------------
* Function: main
@@ -3948,12 +4199,32 @@ int
main(int argc, char **argv)
{
unsigned nerrs = 0;
- int mpi_size;
- int mpi_rank;
+#ifdef H5_HAVE_SUBFILING_VFD
+ int required = MPI_THREAD_MULTIPLE;
+ int provided = 0;
+#endif
+ int mpi_size;
+ int mpi_rank;
- MPI_Init(&argc, &argv);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+#ifdef H5_HAVE_SUBFILING_VFD
+ if (MPI_SUCCESS != MPI_Init_thread(&argc, &argv, required, &provided)) {
+ HDprintf(" MPI doesn't support MPI_Init_thread with MPI_THREAD_MULTIPLE. Exiting\n");
+ goto finish;
+ }
+
+ if (provided != required) {
+ HDprintf(" MPI doesn't support MPI_Init_thread with MPI_THREAD_MULTIPLE. Exiting\n");
+ goto finish;
+ }
+#else
+ if (MPI_SUCCESS != MPI_Init(&argc, &argv)) {
+ HDprintf(" MPI_Init failed. Exiting\n");
+ goto finish;
+ }
+#endif
+
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
/* Attempt to turn off atexit post processing so that in case errors
* occur during the test and the process is aborted, it will not hang
@@ -3985,9 +4256,12 @@ main(int argc, char **argv)
HDprintf("\nAllocation and initialize of file image buffers failed. Test aborted.\n");
}
- MPI_Barrier(MPI_COMM_WORLD);
+ MPI_Barrier(comm);
+
+ if (mpi_rank == 0) {
- // sleep(60);
+ HDprintf("\n\n --- TESTING MPIO VFD --- \n\n");
+ }
nerrs +=
vector_read_test_1(0, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
@@ -4056,19 +4330,118 @@ main(int argc, char **argv)
nerrs +=
vector_write_test_6(5, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio");
-finish:
+ nerrs +=
+ vector_write_test_7(6, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+ nerrs +=
+ vector_write_test_7(6, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+ nerrs +=
+ vector_write_test_7(6, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio");
+
+ MPI_Barrier(comm);
+#ifdef H5_HAVE_SUBFILING_VFD
+ if (mpi_rank == 0) {
+
+ HDprintf("\n\n --- TESTING SUBFILING VFD --- \n\n");
+ }
+
+ nerrs += vector_read_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_read_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_read_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+
+ nerrs += vector_read_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_read_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_read_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+
+ nerrs += vector_read_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_read_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_read_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+
+ nerrs += vector_read_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_read_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_read_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+
+ nerrs += vector_read_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_read_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_read_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+
+ nerrs += vector_write_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+
+ nerrs += vector_write_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+
+ nerrs += vector_write_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+
+ nerrs += vector_write_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+
+ nerrs += vector_write_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+
+ nerrs += vector_write_test_6(12, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_6(12, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_6(12, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+
+ nerrs += vector_write_test_7(13, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_7(13, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_7(13, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+#endif
+
+finish:
/* make sure all processes are finished before final report, cleanup
* and exit.
*/
- MPI_Barrier(MPI_COMM_WORLD);
+ MPI_Barrier(comm);
if (mpi_rank == 0) { /* only process 0 reports */
HDprintf("===================================\n");
if (nerrs > 0)
- HDprintf("***parallel vfd tests detected %d failures***\n", nerrs);
+ HDprintf("***vfd tests detected %d failures***\n", nerrs);
else
- HDprintf("parallel vfd tests finished with no failures\n");
+ HDprintf("vfd tests finished with no failures\n");
HDprintf("===================================\n");
}
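Note: the layout logic in vector_write_test_7() is plain address arithmetic. The standalone sketch below (ordinary C, no HDF5 required; the rank is chosen arbitrarily for illustration) prints the 8 vector entries a rank would submit -- 64-int (256-byte) writes placed every 128 ints within that rank's INTS_PER_RANK region:

    #include <stdint.h>
    #include <stdio.h>

    #define INTS_PER_RANK 1024 /* matches t_vfd.c */

    int
    main(void)
    {
        int      mpi_rank       = 1; /* example rank */
        int      base_index     = mpi_rank * INTS_PER_RANK;
        uint64_t base_addr      = (uint64_t)base_index * sizeof(int32_t);
        uint64_t addr_increment = (uint64_t)(INTS_PER_RANK / 8) * sizeof(int32_t);

        for (int i = 0; i < 8; i++) {
            uint64_t addr = base_addr + ((uint64_t)i * addr_increment);
            size_t   size = (size_t)(INTS_PER_RANK / 16) * sizeof(int32_t);

            printf("entry %d: addr = %llu, size = %zu bytes\n", i,
                   (unsigned long long)addr, size);
        }
        return 0;
    }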