Diffstat (limited to 'testpar/t_vfd.c')
-rw-r--r--  testpar/t_vfd.c  933
1 file changed, 653 insertions(+), 280 deletions(-)
diff --git a/testpar/t_vfd.c b/testpar/t_vfd.c
index ad296ad..49c2a42 100644
--- a/testpar/t_vfd.c
+++ b/testpar/t_vfd.c
@@ -15,22 +15,41 @@
* This file is a catchall for parallel VFD tests.
*/
+#include <libgen.h>
+
#include "testphdf5.h"
+#ifdef H5_HAVE_SUBFILING_VFD
+#include "H5FDsubfiling.h"
+#include "H5FDioc.h"
+#endif
+
/* Must be a power of 2. Reducing it below 1024 may cause problems */
#define INTS_PER_RANK 1024
/* global variable declarations: */
-hbool_t pass = TRUE; /* set to FALSE on error */
-const char *failure_mssg = NULL;
-
-const char *FILENAMES[] = {"mpio_vfd_test_file_0", /*0*/
- "mpio_vfd_test_file_1", /*1*/
- "mpio_vfd_test_file_2", /*2*/
- "mpio_vfd_test_file_3", /*3*/
- "mpio_vfd_test_file_4", /*4*/
- "mpio_vfd_test_file_5", /*5*/
+static MPI_Comm comm = MPI_COMM_WORLD;
+static MPI_Info info = MPI_INFO_NULL;
+
+hbool_t pass = TRUE; /* set to FALSE on error */
+hbool_t disp_failure_mssgs = TRUE; /* global force display of failure messages */
+const char *failure_mssg = NULL;
+
+const char *FILENAMES[] = {"mpio_vfd_test_file_0", /*0*/
+ "mpio_vfd_test_file_1", /*1*/
+ "mpio_vfd_test_file_2", /*2*/
+ "mpio_vfd_test_file_3", /*3*/
+ "mpio_vfd_test_file_4", /*4*/
+ "mpio_vfd_test_file_5", /*5*/
+ "mpio_vfd_test_file_6", /*6*/
+ "subfiling_vfd_test_file_0", /*7*/
+ "subfiling_vfd_test_file_1", /*8*/
+ "subfiling_vfd_test_file_2", /*9*/
+ "subfiling_vfd_test_file_3", /*10*/
+ "subfiling_vfd_test_file_4", /*11*/
+ "subfiling_vfd_test_file_5", /*12*/
+ "subfiling_vfd_test_file_6", /*13*/
NULL};
/* File Test Images
@@ -82,6 +101,8 @@ static unsigned vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size
H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
static unsigned vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+static unsigned vector_write_test_7(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
/****************************************************************************/
/***************************** Utility Functions ****************************/
@@ -244,7 +265,7 @@ free_file_images(void)
*
* Modifications:
*
- * None.
+ * Updated for subfiling VFD 9/29/21
*
*-------------------------------------------------------------------------
*/
@@ -271,6 +292,20 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+ /* set up the file name -- do this now, since setting up the ioc fapl requires it. This will
+ * probably change */
+ if (pass) {
+
+ if (h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT, filename, sizeof(filename)) == NULL) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
/* set up fapl for target VFD */
if (pass) {
@@ -283,16 +318,101 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
if (pass) {
- if (strcmp(vfd_name, "mpio") == 0) {
+ if (HDstrcmp(vfd_name, "mpio") == 0) {
- if (H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL) < 0) {
+ if (H5Pset_fapl_mpio(fapl_id, comm, info) < 0) {
pass = FALSE;
failure_mssg = "Can't set mpio fapl.";
}
}
- else {
+#ifdef H5_HAVE_SUBFILING_VFD
+ else if (HDstrcmp(vfd_name, H5FD_SUBFILING_NAME) == 0) {
+
+ hid_t ioc_fapl;
+ H5FD_ioc_config_t ioc_config = {/* magic = */ H5FD_IOC_FAPL_MAGIC,
+ /* version = */ H5FD_CURR_IOC_FAPL_VERSION,
+ /* stripe_count = */ 0, /* will be overwritten */
+ /* stripe_depth = */ (INTS_PER_RANK / 2),
+ /* ioc_selection = */ SELECT_IOC_ONE_PER_NODE,
+ /* ioc_fapl_id = */ H5P_DEFAULT, /* will be overwritten? */
+ /* thread_pool_count = */ H5FD_IOC_THREAD_POOL_SIZE};
+ H5FD_subfiling_config_t subfiling_conf = {
+ /* magic = */ H5FD_IOC_FAPL_MAGIC,
+ /* version = */ H5FD_CURR_IOC_FAPL_VERSION,
+ /* stripe_count = */ 0, /* will be overwritten */
+ /* stripe_depth = */ (INTS_PER_RANK / 2),
+ /* ioc_selection = */ SELECT_IOC_ONE_PER_NODE,
+ /* ioc_fapl_id = */ H5P_DEFAULT, /* will be overwritten? */
+ /* require_ioc = */ TRUE};
+
+ if ((pass) && ((ioc_fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)) {
+
+ pass = FALSE;
+ failure_mssg = "Can't create ioc fapl.";
+ }
+
+ /* set the MPI communicator and info in the IOC FAPL */
+ if (H5Pset_mpi_params(ioc_fapl, comm, info) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "Can't set MPI communicator and info in IOC fapl.";
+ }
+
+ /* set the MPI communicator and info in the subfiling FAPL */
+ if (H5Pset_mpi_params(fapl_id, comm, info) < 0) {
+ pass = FALSE;
+ failure_mssg = "Can't set MPI communicator and info in subfiling fapl.";
+ }
+
+ HDmemset(&ioc_config, 0, sizeof(ioc_config));
+ HDmemset(&subfiling_conf, 0, sizeof(subfiling_conf));
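+
+ /* Note: the two HDmemset() calls above discard the designated initializers
+ * on ioc_config and subfiling_conf -- both structs are repopulated from
+ * the library defaults by the H5Pget_fapl_*() calls below.
+ */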
+
+ /* Get subfiling VFD defaults */
+ if ((pass) && (H5Pget_fapl_subfiling(fapl_id, &subfiling_conf) == FAIL)) {
+
+ pass = FALSE;
+ failure_mssg = "Can't get subfiling VFD defaults.";
+ }
+
+ if ((pass) && (subfiling_conf.require_ioc)) {
+
+ /* Get IOC VFD defaults */
+ if ((pass) && ((H5Pget_fapl_ioc(ioc_fapl, &ioc_config) == FAIL))) {
+
+ pass = FALSE;
+ failure_mssg = "Can't get IOC VFD defaults.";
+ }
+
+ /* Now we can set the IOC fapl. */
+ if ((pass) && ((H5Pset_fapl_ioc(ioc_fapl, &ioc_config) == FAIL))) {
+
+ pass = FALSE;
+ failure_mssg = "Can't set IOC fapl.";
+ }
+ }
+ else {
+
+ if ((pass) && ((H5Pset_fapl_sec2(ioc_fapl) == FAIL))) {
+
+ pass = FALSE;
+ failure_mssg = "Can't set sec2 fapl.";
+ }
+ }
+
+ /* Assign the IOC fapl as the underlying VFD */
+ subfiling_conf.ioc_fapl_id = ioc_fapl;
+
+ /* Now we can set the SUBFILING fapl before returning. */
+ if ((pass) && (H5Pset_fapl_subfiling(fapl_id, &subfiling_conf) == FAIL)) {
+
+ pass = FALSE;
+ failure_mssg = "Can't set subfiling fapl.";
+ }
+ }
+#endif
+ else {
pass = FALSE;
failure_mssg = "unsupported VFD";
}
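
For readers skimming the hunk above, the FAPL layering it builds can be condensed
into a standalone helper. The following is a minimal sketch, not code from the
patch: it assumes H5_HAVE_SUBFILING_VFD is defined, MPI is initialized, and the
library defaults returned by the H5Pget_fapl_*() calls are acceptable; error
handling is reduced to early returns that leak the property lists.

    #include <mpi.h>
    #include "hdf5.h"
    #include "H5FDsubfiling.h"
    #include "H5FDioc.h"

    static hid_t
    make_subfiling_fapl(MPI_Comm comm, MPI_Info info)
    {
        H5FD_subfiling_config_t subfiling_conf;
        H5FD_ioc_config_t       ioc_config;
        hid_t                   fapl_id;
        hid_t                   ioc_fapl;

        if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
            return H5I_INVALID_HID;
        if ((ioc_fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
            return H5I_INVALID_HID;

        /* both the subfiling FAPL and the IOC FAPL carry the MPI parameters */
        if (H5Pset_mpi_params(fapl_id, comm, info) < 0 ||
            H5Pset_mpi_params(ioc_fapl, comm, info) < 0)
            return H5I_INVALID_HID;

        /* start from the library defaults for the subfiling driver */
        if (H5Pget_fapl_subfiling(fapl_id, &subfiling_conf) < 0)
            return H5I_INVALID_HID;

        /* the underlying VFD is either the IOC driver or plain sec2 */
        if (subfiling_conf.require_ioc) {
            if (H5Pget_fapl_ioc(ioc_fapl, &ioc_config) < 0 ||
                H5Pset_fapl_ioc(ioc_fapl, &ioc_config) < 0)
                return H5I_INVALID_HID;
        }
        else if (H5Pset_fapl_sec2(ioc_fapl) < 0)
            return H5I_INVALID_HID;

        /* wire the IOC (or sec2) FAPL in as the subfiling driver's base VFD */
        subfiling_conf.ioc_fapl_id = ioc_fapl;
        if (H5Pset_fapl_subfiling(fapl_id, &subfiling_conf) < 0)
            return H5I_INVALID_HID;

        return fapl_id;
    }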
@@ -383,7 +503,7 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
if (pass) { /* setup pointers with return values */
- strncpy(file_name, filename, 512);
+ HDstrncpy(file_name, filename, 512);
*lf_ptr = lf;
*fapl_id_ptr = fapl_id;
*dxpl_id_ptr = dxpl_id;
@@ -455,21 +575,21 @@ takedown_vfd_test_file(int mpi_rank, char *filename, H5FD_t **lf_ptr, hid_t *fap
/* 6) On rank 0, delete the test file.
*/
- if (pass) {
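+ /* Note: the barriers here run unconditionally -- if a rank that had
+ * already failed skipped them, the remaining ranks would hang in
+ * MPI_Barrier().
+ */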
+ /* wait for everyone to close the file */
+ MPI_Barrier(comm);
- /* wait for everyone to close the file */
- MPI_Barrier(MPI_COMM_WORLD);
+ if (pass) {
if ((mpi_rank == 0) && (HDremove(filename) < 0)) {
pass = FALSE;
failure_mssg = "HDremove() failed.\n";
}
-
- /* wait for the file delete to complete */
- MPI_Barrier(MPI_COMM_WORLD);
}
+ /* wait for the file delete to complete */
+ MPI_Barrier(comm);
+
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -571,20 +691,20 @@ vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- snprintf(test_title, sizeof(test_title), "parallel vector read test 1 -- %s / independent",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 1 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- snprintf(test_title, sizeof(test_title), "parallel vector read test 1 -- %s / col op / ind I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 1 -- %s / col op / ind I/O",
+ vfd_name);
}
else {
HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- snprintf(test_title, sizeof(test_title), "parallel vector read test 1 -- %s / col op / col I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 1 -- %s / col op / col I/O",
+ vfd_name);
}
TESTING(test_title);
@@ -626,11 +746,7 @@ vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
}
/* 3) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -694,11 +810,7 @@ vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 5) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -706,11 +818,7 @@ vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
/* 6) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -726,7 +834,7 @@ vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -820,20 +928,20 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- snprintf(test_title, sizeof(test_title), "parallel vector read test 2 -- %s / independent",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 2 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- snprintf(test_title, sizeof(test_title), "parallel vector read test 2 -- %s / col op / ind I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 2 -- %s / col op / ind I/O",
+ vfd_name);
}
else {
HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- snprintf(test_title, sizeof(test_title), "parallel vector read test 2 -- %s / col op / col I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 2 -- %s / col op / col I/O",
+ vfd_name);
}
TESTING(test_title);
@@ -875,11 +983,7 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
}
/* 3) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -930,11 +1034,7 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 6) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1008,11 +1108,7 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 9) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1020,12 +1116,7 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
/* 10) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
-
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1040,7 +1131,7 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -1145,20 +1236,20 @@ vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- snprintf(test_title, sizeof(test_title), "parallel vector read test 3 -- %s / independent",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 3 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- snprintf(test_title, sizeof(test_title), "parallel vector read test 3 -- %s / col op / ind I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 3 -- %s / col op / ind I/O",
+ vfd_name);
}
else {
HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- snprintf(test_title, sizeof(test_title), "parallel vector read test 3 -- %s / col op / col I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 3 -- %s / col op / col I/O",
+ vfd_name);
}
TESTING(test_title);
@@ -1200,11 +1291,7 @@ vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
}
/* 3) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1356,11 +1443,7 @@ vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 7) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1368,11 +1451,7 @@ vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
/* 8) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1388,7 +1467,7 @@ vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -1517,20 +1596,20 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- snprintf(test_title, sizeof(test_title), "parallel vector read test 4 -- %s / independent",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 4 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- snprintf(test_title, sizeof(test_title), "parallel vector read test 4 -- %s / col op / ind I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 4 -- %s / col op / ind I/O",
+ vfd_name);
}
else {
HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- snprintf(test_title, sizeof(test_title), "parallel vector read test 4 -- %s / col op / col I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 4 -- %s / col op / col I/O",
+ vfd_name);
}
TESTING(test_title);
@@ -1572,11 +1651,7 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
}
/* 3) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1834,11 +1909,7 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 7) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1846,11 +1917,7 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
/* 8) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1866,7 +1933,7 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -1963,20 +2030,20 @@ vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- snprintf(test_title, sizeof(test_title), "parallel vector read test 5 -- %s / independent",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 5 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- snprintf(test_title, sizeof(test_title), "parallel vector read test 5 -- %s / col op / ind I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 5 -- %s / col op / ind I/O",
+ vfd_name);
}
else {
HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- snprintf(test_title, sizeof(test_title), "parallel vector read test 5 -- %s / col op / col I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 5 -- %s / col op / col I/O",
+ vfd_name);
}
TESTING(test_title);
@@ -2018,11 +2085,7 @@ vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
}
/* 3) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2117,11 +2180,7 @@ vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 7) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2129,11 +2188,7 @@ vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
/* 8) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2148,8 +2203,7 @@ vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
else {
H5_FAILED();
-
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -2219,20 +2273,20 @@ vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 1 -- %s / independent",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 1 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 1 -- %s / col op / ind I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 1 -- %s / col op / ind I/O", vfd_name);
}
else {
HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- snprintf(test_title, sizeof(test_title), "parallel vector write test 1 -- %s / col op / col I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 1 -- %s / col op / col I/O", vfd_name);
}
TESTING(test_title);
@@ -2280,11 +2334,7 @@ vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 3) Barrier
*/
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2321,11 +2371,7 @@ vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 5) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2341,7 +2387,7 @@ vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -2421,20 +2467,20 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 2 -- %s / independent",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 2 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 2 -- %s / col op / ind I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 2 -- %s / col op / ind I/O", vfd_name);
}
else {
HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- snprintf(test_title, sizeof(test_title), "parallel vector write test 2 -- %s / col op / col I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 2 -- %s / col op / col I/O", vfd_name);
}
TESTING(test_title);
@@ -2529,11 +2575,7 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 4) Barrier
*/
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2588,11 +2630,7 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 6) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2608,7 +2646,7 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -2689,20 +2727,20 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 3 -- %s / independent",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 3 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 3 -- %s / col op / ind I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 3 -- %s / col op / ind I/O", vfd_name);
}
else {
HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- snprintf(test_title, sizeof(test_title), "parallel vector write test 3 -- %s / col op / col I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 3 -- %s / col op / col I/O", vfd_name);
}
TESTING(test_title);
@@ -2761,16 +2799,6 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
sizes[3] = bytes_per_write;
bufs[3] = (const void *)(&(zero_fi_buf[(mpi_rank * INTS_PER_RANK) + (3 * (INTS_PER_RANK / 4))]));
-#if 0 /* JRM */
- HDfprintf(stdout, "addrs = { %lld, %lld, %lld, %lld}\n",
- (long long)addrs[0], (long long)addrs[1], (long long)addrs[2], (long long)addrs[3]);
- HDfprintf(stdout, "sizes = { %lld, %lld, %lld, %lld}\n",
- (long long)sizes[0], (long long)sizes[1], (long long)sizes[2], (long long)sizes[3]);
- HDfprintf(stdout, "bufs = { 0x%llx, 0x%llx, 0x%llx, 0x%llx}\n",
- (unsigned long long)bufs[0], (unsigned long long)bufs[1],
- (unsigned long long)bufs[2], (unsigned long long)bufs[3]);
-#endif /* JRM */
-
if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
pass = FALSE;
@@ -2783,11 +2811,7 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 3) Barrier
*/
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2867,11 +2891,7 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 5) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2887,7 +2907,7 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -2974,20 +2994,20 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 4 -- %s / independent",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 4 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 4 -- %s / col op / ind I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 4 -- %s / col op / ind I/O", vfd_name);
}
else {
HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- snprintf(test_title, sizeof(test_title), "parallel vector write test 4 -- %s / col op / col I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 4 -- %s / col op / col I/O", vfd_name);
}
TESTING(test_title);
@@ -3047,16 +3067,6 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
bufs[3] =
(const void *)(&(increasing_fi_buf[(mpi_rank * INTS_PER_RANK) + (3 * (INTS_PER_RANK / 4))]));
-#if 0 /* JRM */
- HDfprintf(stdout, "addrs = { %lld, %lld, %lld, %lld}\n",
- (long long)addrs[0], (long long)addrs[1], (long long)addrs[2], (long long)addrs[3]);
- HDfprintf(stdout, "sizes = { %lld, %lld, %lld, %lld}\n",
- (long long)sizes[0], (long long)sizes[1], (long long)sizes[2], (long long)sizes[3]);
- HDfprintf(stdout, "bufs = { 0x%llx, 0x%llx, 0x%llx, 0x%llx}\n",
- (unsigned long long)bufs[0], (unsigned long long)bufs[1],
- (unsigned long long)bufs[2], (unsigned long long)bufs[3]);
-#endif /* JRM */
-
if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
pass = FALSE;
@@ -3069,11 +3079,7 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 3) Barrier
*/
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3153,11 +3159,7 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 5) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3173,7 +3175,7 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -3296,20 +3298,20 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 5 -- %s / independent",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 5 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 5 -- %s / col op / ind I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 5 -- %s / col op / ind I/O", vfd_name);
}
else {
HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- snprintf(test_title, sizeof(test_title), "parallel vector write test 5 -- %s / col op / col I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 5 -- %s / col op / col I/O", vfd_name);
}
TESTING(test_title);
@@ -3356,11 +3358,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 3) Barrier
*/
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3476,11 +3474,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 5) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3519,6 +3513,9 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
pass = FALSE;
failure_mssg = "unexpected data read from file (1.1)";
+
+ HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
+ negative_fi_buf[j]);
}
}
else if (((INTS_PER_RANK / 4) <= k) && (k < (3 * (INTS_PER_RANK / 8)))) {
@@ -3527,6 +3524,9 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
pass = FALSE;
failure_mssg = "unexpected data read from file (1.2)";
+
+ HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
+ decreasing_fi_buf[j]);
}
}
else if (((INTS_PER_RANK / 16) <= k) && (k < (INTS_PER_RANK / 8))) {
@@ -3535,6 +3535,9 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
pass = FALSE;
failure_mssg = "unexpected data read from file (1.3)";
+
+ HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
+ increasing_fi_buf[j]);
}
}
else {
@@ -3554,6 +3557,9 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
pass = FALSE;
failure_mssg = "unexpected data read from file (2.1)";
+
+ HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
+ increasing_fi_buf[j]);
}
}
else if ((((INTS_PER_RANK / 2) + 1) <= k) && (k <= (INTS_PER_RANK - 2))) {
@@ -3562,6 +3568,9 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
pass = FALSE;
failure_mssg = "unexpected data read from file (2.2)";
+
+ HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
+ decreasing_fi_buf[j]);
}
}
else {
@@ -3581,6 +3590,9 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
pass = FALSE;
failure_mssg = "unexpected data read from file (3.1)";
+
+ HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
+ negative_fi_buf[j]);
}
}
else {
@@ -3615,11 +3627,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 7) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3635,7 +3643,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -3731,20 +3739,20 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 6 -- %s / independent",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 6 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- snprintf(test_title, sizeof(test_title), "parallel vector write test 6 -- %s / col op / ind I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 6 -- %s / col op / ind I/O", vfd_name);
}
else {
HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- snprintf(test_title, sizeof(test_title), "parallel vector write test 6 -- %s / col op / col I/O",
- vfd_name);
+ HDsnprintf(test_title, sizeof(test_title),
+ "parallel vector write test 6 -- %s / col op / col I/O", vfd_name);
}
TESTING(test_title);
@@ -3785,11 +3793,7 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
}
/* 3) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3839,11 +3843,7 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 5) Barrier */
-
- if (pass) {
-
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ MPI_Barrier(comm);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3885,27 +3885,278 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 7) Barrier */
+ MPI_Barrier(comm);
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 8) Close the test file and delete it (on rank 0 only).
+ * Close FAPL and DXPL.
+ */
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* report results */
+ if (mpi_rank == 0) {
+
+ if (pass) {
+
+ PASSED();
+ }
+ else {
+
+ H5_FAILED();
+
+ if ((disp_failure_mssgs) || (show_progress)) {
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+ }
+ }
+ }
+ return (!pass);
+
+} /* vector_write_test_6() */
+
+/*-------------------------------------------------------------------------
+ * Function: vector_write_test_7()
+ *
+ * Purpose: Test vector I/O with larger vectors -- 8 elements in each
+ * vector for now.
+ *
+ * 1) Open the test file with the specified VFD, and set
+ * the eoa.
+ *
+ * 2) Set the test file in a known state by writing zeros
+ * to all bytes in the test file. Since we have already
+ * tested this, do this via a vector write of zero_fi_buf.
+ *
+ * 3) Barrier
+ *
+ * 4) For each rank, define base_index equal to:
+ *
+ * mpi_rank * INTS_PER_RANK
+ *
+ * and define base_addr equal to
+ *
+ * base_index * sizeof(int32_t).
+ *
+ * Setup a vector of length 8, with each element of
+ * length INTS_PER_RANK / 16, and base address
+ * base_addr + i * (INTS_PER_RANK / 8), where i is
+ * the index of the entry (starting at zero). Draw
+ * written data from the equivalent locations in
+ * increasing_fi_buf.
+ *
+ * Write the vector.
+ *
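+ * For example, with INTS_PER_RANK == 1024, each rank writes
+ * 8 chunks of 64 ints (256 bytes) at strides of 128 ints
+ * (512 bytes), leaving a gap of 64 untouched ints after
+ * each chunk.
+ *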
+ * 5) Barrier
+ *
+ * 6) On each rank, read the entire file into the read_fi_buf,
+ * and compare against zero_fi_buf and increasing_fi_buf as
+ * appropriate. Report failure if any differences are
+ * detected.
+ *
+ * 7) Close the test file. On rank 0, delete the test file.
+ *
+ * Return: FALSE on success, TRUE if any errors are detected.
+ *
+ * Programmer: John Mainzer
+ * 10/10/21
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+vector_write_test_7(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name)
+{
+ const char *fcn_name = "vector_write_test_7()";
+ char test_title[120];
+ char filename[512];
+ haddr_t base_addr;
+ haddr_t addr_increment;
+ int base_index;
+ haddr_t eoa;
+ hbool_t show_progress = FALSE;
+ hid_t fapl_id = -1; /* file access property list ID */
+ hid_t dxpl_id = -1; /* data access property list ID */
+ H5FD_t * lf = NULL; /* VFD struct ptr */
+ int cp = 0;
+ int i;
+ int j;
+ int k;
+ uint32_t count;
+ H5FD_mem_t types[8];
+ haddr_t addrs[8];
+ size_t sizes[8];
+ const void *bufs[8];
+
+ pass = TRUE;
+
+ if (mpi_rank == 0) {
+
+ if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
+
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 7 -- %s / independent", vfd_name);
+ }
+ else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
+
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 7 -- %s / col op / ind I/O", vfd_name);
+ }
+ else {
+
+ HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
+
+ HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 7 -- %s / col op / col I/O", vfd_name);
+ }
+
+ TESTING(test_title);
+ }
+
+ show_progress = ((show_progress) && (mpi_rank == 0));
+
+ if (show_progress)
+ HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 1) Open the test file with the specified VFD, set the eoa, and set up the dxpl */
if (pass) {
- MPI_Barrier(MPI_COMM_WORLD);
+ eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+
+ setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf,
+ &fapl_id, &dxpl_id);
}
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* 8) Close the test file and delete it (on rank 0 only).
- * Close FAPL and DXPL.
+ /* 2) Set the test file in a known state by writing zeros
+ * to all bytes in the test file. Since we have already
+ * tested this, do this via a vector write of zero_fi_buf.
+ */
+ if (pass) {
+
+ count = 1;
+ types[0] = H5FD_MEM_DRAW;
+ addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+ sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t);
+ bufs[0] = (void *)(&(zero_fi_buf[mpi_rank * INTS_PER_RANK]));
+
+ if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDwrite_vector() failed.\n";
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 3) Barrier
*/
+ MPI_Barrier(comm);
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
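+
+ /* 4) Set up and write a vector of 8 entries per rank, each of length
+ * INTS_PER_RANK / 16 and spaced INTS_PER_RANK / 8 ints apart, with
+ * data drawn from increasing_fi_buf.
+ */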
if (pass) {
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+ base_index = mpi_rank * INTS_PER_RANK;
+ base_addr = (haddr_t)((size_t)base_index * sizeof(int32_t));
+ addr_increment = (haddr_t)((INTS_PER_RANK / 8) * sizeof(int32_t));
+
+ count = 8;
+
+ for (i = 0; i < (int)count; i++) {
+
+ types[i] = H5FD_MEM_DRAW;
+ addrs[i] = base_addr + ((haddr_t)(i)*addr_increment);
+ sizes[i] = (size_t)(INTS_PER_RANK / 16) * sizeof(int32_t);
+ bufs[i] = (void *)(&(increasing_fi_buf[base_index + (i * (INTS_PER_RANK / 8))]));
+ }
+
+ if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDwrite_vector() failed (1).\n";
+ }
}
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+ /* 5) Barrier */
+ MPI_Barrier(comm);
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 6) On each rank, read the entire file into the read_fi_buf,
+ * and compare against increasing_fi_buf and zero_fi_buf as
+ * appropriate. Report failure if any differences are
+ * detected.
+ */
+
+ if (pass) {
+
+ size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t);
+
+ if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDread() failed.\n";
+ }
+
+ for (i = 0; ((pass) && (i < mpi_size)); i++) {
+
+ base_index = i * INTS_PER_RANK;
+
+ for (j = base_index; j < base_index + INTS_PER_RANK; j++) {
+
+ k = j - base_index;
+
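+ /* each stripe of INTS_PER_RANK / 8 ints starts with
+ * INTS_PER_RANK / 16 ints copied from increasing_fi_buf;
+ * the rest of the stripe must still be zero.
+ */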
+ if ((k % (INTS_PER_RANK / 8)) < (INTS_PER_RANK / 16)) {
+
+ if (read_fi_buf[j] != increasing_fi_buf[j]) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (1)";
+
+ HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
+ increasing_fi_buf[j]);
+ }
+ }
+ else {
+
+ if (read_fi_buf[j] != 0) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (2)";
+
+ HDprintf("\nread_fi_buf[%d] = %d, 0 expected.\n", j, read_fi_buf[j]);
+ }
+ }
+ }
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 7) Close the test file and delete it (on rank 0 only).
+ * Close FAPL and DXPL.
+ */
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
/* report results */
if (mpi_rank == 0) {
@@ -3917,7 +4168,7 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -3925,7 +4176,7 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
return (!pass);
-} /* vector_write_test_6() */
+} /* vector_write_test_7() */
/*-------------------------------------------------------------------------
* Function: main
@@ -3948,12 +4199,32 @@ int
main(int argc, char **argv)
{
unsigned nerrs = 0;
- int mpi_size;
- int mpi_rank;
+#ifdef H5_HAVE_SUBFILING_VFD
+ int required = MPI_THREAD_MULTIPLE;
+ int provided = 0;
+#endif
+ int mpi_size;
+ int mpi_rank;
- MPI_Init(&argc, &argv);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+#ifdef H5_HAVE_SUBFILING_VFD
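+ /* The subfiling VFD's I/O concentrators run worker threads that make MPI
+ * calls of their own, hence the requirement for MPI_THREAD_MULTIPLE.
+ */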
+ if (MPI_SUCCESS != MPI_Init_thread(&argc, &argv, required, &provided)) {
+ HDprintf(" MPI_Init_thread() failed. Exiting\n");
+ goto finish;
+ }
+
+ if (provided != required) {
+ HDprintf(" MPI doesn't support MPI_Init_thread with MPI_THREAD_MULTIPLE. Exiting\n");
+ goto finish;
+ }
+#else
+ if (MPI_SUCCESS != MPI_Init(&argc, &argv)) {
+ HDprintf(" MPI_Init failed. Exiting\n");
+ goto finish;
+ }
+#endif
+
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
/* Attempt to turn off atexit post processing so that in case errors
* occur during the test and the process is aborted, it will not hang
@@ -3985,9 +4256,12 @@ main(int argc, char **argv)
HDprintf("\nAllocation and initialize of file image buffers failed. Test aborted.\n");
}
- MPI_Barrier(MPI_COMM_WORLD);
+ MPI_Barrier(comm);
+
+ if (mpi_rank == 0) {
- // sleep(60);
+ HDprintf("\n\n --- TESTING MPIO VFD --- \n\n");
+ }
nerrs +=
vector_read_test_1(0, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
@@ -4056,19 +4330,118 @@ main(int argc, char **argv)
nerrs +=
vector_write_test_6(5, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio");
-finish:
+ nerrs +=
+ vector_write_test_7(6, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+ nerrs +=
+ vector_write_test_7(6, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+ nerrs +=
+ vector_write_test_7(6, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio");
+
+ MPI_Barrier(comm);
+#ifdef H5_HAVE_SUBFILING_VFD
+ if (mpi_rank == 0) {
+
+ HDprintf("\n\n --- TESTING SUBFILING VFD --- \n\n");
+ }
+
+ nerrs += vector_read_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_read_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_read_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+
+ nerrs += vector_read_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_read_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_read_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+
+ nerrs += vector_read_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_read_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_read_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+
+ nerrs += vector_read_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_read_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_read_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+
+ nerrs += vector_read_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_read_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_read_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+
+ nerrs += vector_write_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+
+ nerrs += vector_write_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+
+ nerrs += vector_write_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+
+ nerrs += vector_write_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+
+ nerrs += vector_write_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+
+ nerrs += vector_write_test_6(12, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_6(12, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_6(12, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+
+ nerrs += vector_write_test_7(13, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_7(13, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ H5FD_SUBFILING_NAME);
+ nerrs += vector_write_test_7(13, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ H5FD_SUBFILING_NAME);
+#endif
+
+finish:
/* make sure all processes are finished before final report, cleanup
* and exit.
*/
- MPI_Barrier(MPI_COMM_WORLD);
+ MPI_Barrier(comm);
if (mpi_rank == 0) { /* only process 0 reports */
HDprintf("===================================\n");
if (nerrs > 0)
- HDprintf("***parallel vfd tests detected %d failures***\n", nerrs);
+ HDprintf("***vfd tests detected %d failures***\n", nerrs);
else
- HDprintf("parallel vfd tests finished with no failures\n");
+ HDprintf("vfd tests finished with no failures\n");
HDprintf("===================================\n");
}