path: root/testpar/t_vfd.c
author    mainzer <mainzer#hdfgroup.org>    2021-11-17 15:56:47 (GMT)
committer mainzer <mainzer#hdfgroup.org>    2021-11-17 15:56:47 (GMT)
commit    ffc254983fde7b409a633d11114bc5cdbd5d1999 (patch)
tree      fe34e6b60a779c69f3b64b6fea387674e26d9059 /testpar/t_vfd.c
parent    9975eaeb21744ea03c8d3a45a496c9c0594428d5 (diff)
download  hdf5-ffc254983fde7b409a633d11114bc5cdbd5d1999.zip
          hdf5-ffc254983fde7b409a633d11114bc5cdbd5d1999.tar.gz
          hdf5-ffc254983fde7b409a633d11114bc5cdbd5d1999.tar.bz2
Interim checkin of selection_io_with_subfiling_vfd branch
Modified testpar/t_vfd.c to test the subfiling VFD with its default configuration. This code must be updated to run with a variety of configurations -- most particularly multiple I/O concentrators, and a stripe depth small enough to exercise the other I/O concentrators.

testpar/t_vfd.c exposed a large number of race conditions -- symptoms included:

1) Crashes (usually seg faults)
2) Heap corruption
3) Stack corruption
4) Double frees of heap space
5) Hangs
6) Out of order execution of I/O requests / violations of POSIX semantics
7) Swapped write requests

Items 1 - 4 turned out to be caused primarily by file close issues -- specifically, the main I/O concentrator thread and its pool of worker threads were not being shut down properly on file close. Addressing this issue, in combination with some other minor fixes, appears to have resolved these items.

Items 5 & 6 appear to have been caused by I/O requests being issued to the thread pool in an order that did not maintain POSIX semantics. A rewrite of the I/O request dispatch code appears to have solved these issues.

Item 7 seems to have been caused by multiple write requests from a given rank being read by the wrong worker thread. Code to issue "unique" tags for each write request via the ACK message appears to have cleaned this up.

Note that the code is still in poor condition. A partial list of known defects includes:

a) Race condition on file close that allows superblock writes to arrive at the I/O concentrator after it has been shut down. This defect is most evident when testpar/t_subfiling_vfd is run with 8 ranks.
b) No error reporting from I/O concentrators -- must design and implement this. For now, errors are mostly caught by asserts, which means the code should be run in debug mode.
c) Much commented out and/or unused code.
d) Poor code organization.
e) The build system with bits of Mercury is awkward -- consider shifting to pthreads with our own thread pool code.
f) Need to add native support for vector and selection I/O to the subfiling VFD.
g) Need to review, and possibly rework, the configuration code.
h) Need to store subfile configuration data in a superblock extension message, and add code to use this data on file open.
i) Test code is inadequate -- expect more issues as it is extended. In particular, there is no unit test code for the I/O request dispatch code. While I think it is correct at present, we need test code to verify this. Similarly, we need to test with multiple I/O concentrators and a much smaller stripe depth.

My actual code changes were limited to:

src/H5FDioc.c
src/H5FDioc_threads.c
src/H5FDsubfile_int.c
src/H5FDsubfile_mpi.c
src/H5FDsubfiling.c
src/H5FDsubfiling.h
src/H5FDsubfiling_priv.h
testpar/t_subfiling_vfd.c
testpar/t_vfd.c

I'm not sure what is going on with the deletions in src/mercury/src/util.

Tested parallel/debug on Charis and Jelly
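As an illustration of the fix for items 1 - 4, the sketch below shows the shutdown ordering involved. It is not taken from this patch, and all names (ioc_pool_t, ioc_worker, ioc_pool_shutdown) are hypothetical: on file close, the main I/O concentrator thread must flag shutdown, wake its workers, and join every one of them before freeing shared state. Freeing first is exactly the kind of ordering bug that produces the crashes, heap/stack corruption, and double frees listed above.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical I/O concentrator thread pool state -- illustration only. */
typedef struct ioc_pool {
    pthread_mutex_t lock;
    pthread_cond_t  wake;
    bool            shutting_down; /* set under lock on file close */
    int             n_workers;
    pthread_t      *workers;
} ioc_pool_t;

/* Worker loop: exit only once shutdown has been flagged (a real pool
 * would also drain its request queue first, so no queued I/O is lost).
 */
static void *
ioc_worker(void *arg)
{
    ioc_pool_t *pool = (ioc_pool_t *)arg;

    pthread_mutex_lock(&pool->lock);
    while (!pool->shutting_down) {

        /* ... dequeue and service one I/O request, or wait for work ... */
        pthread_cond_wait(&pool->wake, &pool->lock);
    }
    pthread_mutex_unlock(&pool->lock);

    return NULL;
}

/* File close path: flag shutdown, wake all workers, and join each of
 * them BEFORE any shared state is freed.
 */
static void
ioc_pool_shutdown(ioc_pool_t *pool)
{
    int i;

    pthread_mutex_lock(&pool->lock);
    pool->shutting_down = true;
    pthread_cond_broadcast(&pool->wake);
    pthread_mutex_unlock(&pool->lock);

    for (i = 0; i < pool->n_workers; i++)
        pthread_join(pool->workers[i], NULL);

    free(pool->workers); /* safe: all workers have exited */
}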
Diffstat (limited to 'testpar/t_vfd.c')
-rw-r--r--  testpar/t_vfd.c  723
1 file changed, 639 insertions, 84 deletions
diff --git a/testpar/t_vfd.c b/testpar/t_vfd.c
index 880cc96..2d16606 100644
--- a/testpar/t_vfd.c
+++ b/testpar/t_vfd.c
@@ -18,21 +18,32 @@
*/
#include "testphdf5.h"
+#include "H5FDsubfiling.h"
+#include "H5FDioc.h"
/* Must be a power of 2. Reducing it below 1024 may cause problems */
#define INTS_PER_RANK 1024
/* global variable declarations: */
-hbool_t pass = TRUE; /* set to FALSE on error */
-const char *failure_mssg = NULL;
-
-const char *FILENAMES[] = {"mpio_vfd_test_file_0", /*0*/
- "mpio_vfd_test_file_1", /*1*/
- "mpio_vfd_test_file_2", /*2*/
- "mpio_vfd_test_file_3", /*3*/
- "mpio_vfd_test_file_4", /*4*/
- "mpio_vfd_test_file_5", /*5*/
+hbool_t pass = TRUE; /* set to FALSE on error */
+hbool_t disp_failure_mssgs = TRUE; /* global force display of failure messages */
+const char *failure_mssg = NULL;
+
+const char *FILENAMES[] = {"mpio_vfd_test_file_0", /*0*/
+ "mpio_vfd_test_file_1", /*1*/
+ "mpio_vfd_test_file_2", /*2*/
+ "mpio_vfd_test_file_3", /*3*/
+ "mpio_vfd_test_file_4", /*4*/
+ "mpio_vfd_test_file_5", /*5*/
+ "mpio_vfd_test_file_6", /*6*/
+ "subfiling_vfd_test_file_0", /*7*/
+ "subfiling_vfd_test_file_1", /*8*/
+ "subfiling_vfd_test_file_2", /*9*/
+ "subfiling_vfd_test_file_3", /*10*/
+ "subfiling_vfd_test_file_4", /*11*/
+ "subfiling_vfd_test_file_5", /*12*/
+ "subfiling_vfd_test_file_6", /*13*/
NULL};
/* File Test Images
@@ -84,6 +95,8 @@ static unsigned vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size
H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
static unsigned vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+static unsigned vector_write_test_7(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
/****************************************************************************/
/***************************** Utility Functions ****************************/
@@ -246,7 +259,7 @@ free_file_images(void)
*
* Modifications:
*
- * None.
+ * Updated for subfiling VFD 9/29/30
*
*-------------------------------------------------------------------------
*/
@@ -273,6 +286,20 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+ /* setup the file name -- do this now, since setting up the ioc fapl requires it. This will probably
+ * change */
+ if (pass) {
+
+ if (h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT, filename, sizeof(filename)) == NULL) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
/* setup fapl for target VFD */
if (pass) {
@@ -293,8 +320,103 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
failure_mssg = "Can't set mpio fapl.";
}
}
- else {
+ else if (strcmp(vfd_name, "subfiling") == 0) {
+
+ hid_t ioc_fapl;
+ H5FD_ioc_config_t ioc_config = {{
+ /* common */
+ /* magic = */ H5FD_IOC_FAPL_T_MAGIC,
+ /* version = */ H5FD_CURR_IOC_FAPL_T_VERSION,
+ /* stripe_count = */ 0, /* will overwrite */
+ /* stripe_depth = */ (INTS_PER_RANK / 2),
+ /* ioc_selection = */ SELECT_IOC_ONE_PER_NODE,
+ /* ioc_fapl_id = */ H5P_DEFAULT, /* will overwrite? */
+ /* context_id = */ 0, /* will overwrite */
+ /* file_dir = */ "", /* will overwrite */
+ /* file_path = */ "" /* will overwrite */
+ },
+ /* thread_pool_count = */ H5FD_IOC_THREAD_POOL_SIZE};
+ H5FD_subfiling_config_t subfiling_conf = {
+ {
+ /* common */
+ /* magic = */ H5FD_IOC_FAPL_T_MAGIC,
+ /* version = */ H5FD_CURR_IOC_FAPL_T_VERSION,
+ /* stripe_count = */ 0, /* will overwrite */
+ /* stripe_depth = */ (INTS_PER_RANK / 2),
+ /* ioc_selection = */ SELECT_IOC_ONE_PER_NODE,
+ /* ioc_fapl_id = */ H5P_DEFAULT, /* will overwrite? */
+ /* context_id = */ 0, /* will overwrite */
+ /* file_dir = */ "", /* will overwrite */
+ /* file_path = */ "", /* will overwrite */
+ },
+ /* require_ioc = */ TRUE};
+
+ if ((pass) && ((ioc_fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)) {
+
+ pass = FALSE;
+ failure_mssg = "Can't create ioc fapl.";
+ }
+
+#if 1 /* JRM */ /* this is temporary -- rework for programmatic control later */
+ memset(&ioc_config, 0, sizeof(ioc_config));
+ memset(&subfiling_conf, 0, sizeof(subfiling_conf));
+
+ /* Get subfiling VFD defaults */
+ if ((pass) && (H5Pget_fapl_subfiling(fapl_id, &subfiling_conf) == FAIL)) {
+
+ pass = FALSE;
+ failure_mssg = "Can't get sub-filing VFD defaults.";
+ }
+
+ if ((pass) && (subfiling_conf.require_ioc)) {
+
+ /* Get IOC VFD defaults */
+ if ((pass) && ((H5Pget_fapl_ioc(ioc_fapl, &ioc_config) == FAIL))) {
+
+ pass = FALSE;
+ failure_mssg = "Can't get IOC VFD defaults.";
+ }
+
+ /* Now we can set the IOC fapl. */
+ if ((pass) && ((H5Pset_fapl_ioc(ioc_fapl, &ioc_config) == FAIL))) {
+
+ pass = FALSE;
+ failure_mssg = "Can't set IOC fapl.";
+ }
+ }
+ else {
+
+ if ((pass) && ((H5Pset_fapl_sec2(ioc_fapl) == FAIL))) {
+
+ pass = FALSE;
+ failure_mssg = "Can't set sec2 fapl.";
+ }
+ }
+
+ /* Assign the IOC fapl as the underlying VFD */
+ subfiling_conf.common.ioc_fapl_id = ioc_fapl;
+
+ if (pass) { /* setup the paths in the subfiling fapl. */
+
+ HDassert(strlen(filename) < sizeof(subfiling_conf.common.file_dir));
+ strcpy(subfiling_conf.common.file_dir, dirname(filename));
+ strcpy(subfiling_conf.common.file_path, basename(filename));
+#if 0 /* JRM */
+ HDfprintf(stdout, "\nfilename = \"%s\"\nfile_dir = \"%s\"\nfile_path = \"%s\"\n",
+ filename, subfiling_conf.common.file_dir, subfiling_conf.common.file_path);
+#endif /* JRM */
+ }
+ /* Now we can set the SUBFILING fapl before returning. */
+ if ((pass) && (H5Pset_fapl_subfiling(fapl_id, &subfiling_conf) == FAIL)) {
+
+ pass = FALSE;
+ failure_mssg = "Can't set subfiling fapl.";
+ }
+
+#endif /* JRM */
+ }
+ else {
pass = FALSE;
failure_mssg = "un-supported VFD";
}
@@ -705,11 +827,7 @@ vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
/* 6) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -725,7 +843,7 @@ vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -1016,11 +1134,7 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
/* 10) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1036,7 +1150,7 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -1361,11 +1475,7 @@ vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
/* 8) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1381,7 +1491,7 @@ vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -1836,11 +1946,7 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
/* 8) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1856,7 +1962,7 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -2116,11 +2222,7 @@ vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
/* 8) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2136,7 +2238,7 @@ vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -2198,7 +2300,7 @@ vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5FD_mem_t types[1];
haddr_t addrs[1];
size_t sizes[1];
- void * bufs[1];
+ const void *bufs[1];
pass = TRUE;
@@ -2268,6 +2370,9 @@ vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (pass) {
MPI_Barrier(MPI_COMM_WORLD);
+#if 0 /* JRM */ /* test code -- remove before commit */
+ sleep(1);
+#endif /* JRM */
}
if (show_progress)
@@ -2305,11 +2410,7 @@ vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 5) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2325,7 +2426,7 @@ vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -2397,7 +2498,7 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5FD_mem_t types[1];
haddr_t addrs[1];
size_t sizes[1];
- void * bufs[1];
+ const void *bufs[1];
pass = TRUE;
@@ -2514,6 +2615,9 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (pass) {
MPI_Barrier(MPI_COMM_WORLD);
+#if 0 /* JRM */ /* test code -- remove before commit */
+ sleep(1);
+#endif /* JRM */
}
if (show_progress)
@@ -2569,11 +2673,7 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 6) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2589,7 +2689,7 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -2662,7 +2762,7 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5FD_mem_t types[4];
haddr_t addrs[4];
size_t sizes[4];
- void * bufs[4];
+ const void *bufs[4];
pass = TRUE;
@@ -2765,6 +2865,9 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (pass) {
MPI_Barrier(MPI_COMM_WORLD);
+#if 0 /* JRM */ /* test code -- remove before commit */
+ sleep(1);
+#endif /* JRM */
}
if (show_progress)
@@ -2845,11 +2948,7 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 5) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2865,7 +2964,7 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -2944,7 +3043,7 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5FD_mem_t types[4];
haddr_t addrs[4];
size_t sizes[4];
- void * bufs[4];
+ const void *bufs[4];
pass = TRUE;
@@ -3047,6 +3146,9 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (pass) {
MPI_Barrier(MPI_COMM_WORLD);
+#if 0 /* JRM */ /* test code -- remove before commit */
+ sleep(1);
+#endif /* JRM */
}
if (show_progress)
@@ -3127,11 +3229,7 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 5) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3147,7 +3245,7 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -3262,7 +3360,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5FD_mem_t types[4];
haddr_t addrs[4];
size_t sizes[4];
- void * bufs[4];
+ const void *bufs[4];
pass = TRUE;
@@ -3451,6 +3549,9 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (pass) {
MPI_Barrier(MPI_COMM_WORLD);
+#if 0 /* JRM */ /* test code -- remove before commit */
+ sleep(1);
+#endif /* JRM */
}
if (show_progress)
@@ -3490,6 +3591,10 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
pass = FALSE;
failure_mssg = "unexpected data read from file (1.1)";
+#if 1 /* JRM */
+ HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
+ negative_fi_buf[j]);
+#endif /* JRM */
}
}
else if (((INTS_PER_RANK / 4) <= k) && (k < (3 * (INTS_PER_RANK / 8)))) {
@@ -3498,6 +3603,10 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
pass = FALSE;
failure_mssg = "unexpected data read from file (1.2)";
+#if 1 /* JRM */
+ HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
+ decreasing_fi_buf[j]);
+#endif /* JRM */
}
}
else if (((INTS_PER_RANK / 16) <= k) && (k < (INTS_PER_RANK / 8))) {
@@ -3506,6 +3615,10 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
pass = FALSE;
failure_mssg = "unexpected data read from file (1.3)";
+#if 1 /* JRM */
+ HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
+ increasing_fi_buf[j]);
+#endif /* JRM */
}
}
else {
@@ -3525,6 +3638,10 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
pass = FALSE;
failure_mssg = "unexpected data read from file (2.1)";
+#if 1 /* JRM */
+ HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
+ increasing_fi_buf[j]);
+#endif /* JRM */
}
}
else if ((((INTS_PER_RANK / 2) + 1) <= k) && (k <= (INTS_PER_RANK - 2))) {
@@ -3533,6 +3650,10 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
pass = FALSE;
failure_mssg = "unexpected data read from file (2.2)";
+#if 1 /* JRM */
+ HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
+ decreasing_fi_buf[j]);
+#endif /* JRM */
}
}
else {
@@ -3552,6 +3673,10 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
pass = FALSE;
failure_mssg = "unexpected data read from file (3.1)";
+#if 1 /* JRM */
+ HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
+ negative_fi_buf[j]);
+#endif /* JRM */
}
}
else {
@@ -3586,11 +3711,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 7) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
-
- if (pass) {
-
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
- }
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3606,7 +3727,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -3694,7 +3815,7 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5FD_mem_t types[(INTS_PER_RANK / 16) + 1];
haddr_t addrs[(INTS_PER_RANK / 16) + 1];
size_t sizes[2];
- void * bufs[(INTS_PER_RANK / 16) + 1];
+ const void *bufs[(INTS_PER_RANK / 16) + 1];
pass = TRUE;
@@ -3811,6 +3932,9 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (pass) {
MPI_Barrier(MPI_COMM_WORLD);
+#if 0 /* JRM */ /* test code -- remove before commit */
+ sleep(1);
+#endif /* JRM */
}
if (show_progress)
@@ -3865,15 +3989,290 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
/* 8) Close the test file and delete it (on rank 0 only).
* Close FAPL and DXPL.
*/
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* report results */
+ if (mpi_rank == 0) {
+
+ if (pass) {
+
+ PASSED();
+ }
+ else {
+
+ H5_FAILED();
+
+ if ((disp_failure_mssgs) || (show_progress)) {
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+ }
+ }
+ }
+
+ return (!pass);
+
+} /* vector_write_test_6() */
+
+/*-------------------------------------------------------------------------
+ * Function: vector_write_test_7()
+ *
+ * Purpose: Test vector I/O with larger vectors -- 8 elements in each
+ * vector for now.
+ *
+ * 1) Open the test file with the specified VFD, and set
+ * the eoa.
+ *
+ * 2) Set the test file in a known state by writing zeros
+ * to all bytes in the test file. Since we have already
+ * tested this, do this via a vector write of zero_fi_buf.
+ *
+ * 3) Barrier
+ *
+ * 4) For each rank, define base_index equal to:
+ *
+ * mpi_rank * INTS_PER_RANK
+ *
+ * and define base_addr equal to
+ *
+ * base_index * sizeof(int32_t).
+ *
+ * Setup a vector of length 8, with each element of
+ * length INTS_PER_RANK / 16, and base address
+ * base_addr + i * (INTS_PER_RANK / 8) * sizeof(int32_t), where i is
+ * the index of the entry (starting at zero). Draw
+ * written data from the equivalent locations in
+ * increasing_fi_buf.
+ *
+ * Write the vector.
+ *
+ * 5) Barrier
+ *
+ * 6) On each rank, read the entire file into the read_fi_buf,
+ * and compare against zero_fi_buf and increasing_fi_buf as
+ * appropriate. Report failure if any differences are
+ * detected.
+ *
+ * 7) Close the test file. On rank 0, delete the test file.
+ *
+ * Return: FALSE on success, TRUE if any errors are detected.
+ *
+ * Programmer: John Mainzer
+ * 10/10/21
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+vector_write_test_7(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name)
+{
+ const char *fcn_name = "vector_write_test_7()";
+ char test_title[120];
+ char filename[512];
+ haddr_t base_addr;
+ haddr_t addr_increment;
+ int base_index;
+ haddr_t eoa;
+ hbool_t show_progress = FALSE;
+ hid_t fapl_id = -1; /* file access property list ID */
+ hid_t dxpl_id = -1; /* data access property list ID */
+ H5FD_t * lf = NULL; /* VFD struct ptr */
+ int cp = 0;
+ int i;
+ int j;
+ int k;
+ uint32_t count;
+ H5FD_mem_t types[8];
+ haddr_t addrs[8];
+ size_t sizes[8];
+ const void *bufs[8];
+
+ pass = TRUE;
+
+ if (mpi_rank == 0) {
+
+ if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
+
+ sprintf(test_title, "parallel vector write test 7 -- %s / independent", vfd_name);
+ }
+ else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
+
+ sprintf(test_title, "parallel vector write test 7 -- %s / col op / ind I/O", vfd_name);
+ }
+ else {
+
+ HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
+
+ sprintf(test_title, "parallel vector write test 7 -- %s / col op / col I/O", vfd_name);
+ }
+
+ TESTING(test_title);
+ }
+
+ show_progress = ((show_progress) && (mpi_rank == 0));
+
+ if (show_progress)
+ HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */
+ if (pass) {
+
+ eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+
+ setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf,
+ &fapl_id, &dxpl_id);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 2) Set the test file in a known state by writing zeros
+ * to all bytes in the test file. Since we have already
+ * tested this, do this via a vector write of zero_fi_buf.
+ */
+ if (pass) {
+
+ count = 1;
+ types[0] = H5FD_MEM_DRAW;
+ addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+ sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t);
+ bufs[0] = (void *)(&(zero_fi_buf[mpi_rank * INTS_PER_RANK]));
+
+ if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDwrite_vector() failed.\n";
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 3) Barrier
+ */
+
+ if (pass) {
+
+ MPI_Barrier(MPI_COMM_WORLD);
+#if 0 /* JRM */ /* test code -- remove before commit */
+ sleep(1);
+#endif /* JRM */
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if (pass) {
+
+ base_index = mpi_rank * INTS_PER_RANK;
+ base_addr = (haddr_t)((size_t)base_index * sizeof(int32_t));
+ addr_increment = (haddr_t)((INTS_PER_RANK / 8) * sizeof(int32_t));
+
+ count = 8;
+
+ for (i = 0; i < (int)count; i++) {
+
+ types[i] = H5FD_MEM_DRAW;
+ addrs[i] = base_addr + ((haddr_t)(i)*addr_increment);
+ sizes[i] = (size_t)(INTS_PER_RANK / 16) * sizeof(int32_t);
+ bufs[i] = (void *)(&(increasing_fi_buf[base_index + (i * (INTS_PER_RANK / 8))]));
+
+#if 0 /* JRM */ /* delete eventually */
+ HDfprintf(stderr, "\naddrs[%d] = %lld\n", i, (long long)(addrs[i]));
+#endif /* JRM */
+ }
+
+ if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDwrite_vector() failed (1).\n";
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 5) Barrier */
+
+ if (pass) {
+
+ MPI_Barrier(MPI_COMM_WORLD);
+#if 0 /* JRM */ /* test code -- remove before commit */
+ sleep(1);
+#endif /* JRM */
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 6) On each rank, read the entire file into the read_fi_buf,
+ * and compare against increasing_fi_buf and zero_fi_buf as
+ * appropriate. Report failure if any differences are
+ * detected.
+ */
if (pass) {
- takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+ size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t);
+
+ if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDread() failed.\n";
+ }
+
+ for (i = 0; ((pass) && (i < mpi_size)); i++) {
+
+ base_index = i * INTS_PER_RANK;
+
+ for (j = base_index; j < base_index + INTS_PER_RANK; j++) {
+
+ k = j - base_index;
+
+ if ((k % (INTS_PER_RANK / 8)) < (INTS_PER_RANK / 16)) {
+
+ if (read_fi_buf[j] != increasing_fi_buf[j]) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (1)";
+#if 1 /* JRM */
+ HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
+ increasing_fi_buf[j]);
+#endif /* JRM */
+ }
+ }
+ else {
+
+ if (read_fi_buf[j] != 0) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (2)";
+#if 1 /* JRM */
+ HDprintf("\nread_fi_buf[%d] = %d, 0 expected.\n", j, read_fi_buf[j]);
+#endif /* JRM */
+ }
+ }
+ }
+ }
}
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+ /* 7) Close the test file and delete it (on rank 0 only).
+ * Close FAPL and DXPL.
+ */
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
/* report results */
if (mpi_rank == 0) {
@@ -3885,7 +4284,7 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
H5_FAILED();
- if (show_progress) {
+ if ((disp_failure_mssgs) || (show_progress)) {
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -3893,7 +4292,7 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
return (!pass);
-} /* vector_write_test_6() */
+} /* vector_write_test_7() */
/*-------------------------------------------------------------------------
* Function: main
@@ -3915,13 +4314,25 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
int
main(int argc, char **argv)
{
- unsigned nerrs = 0;
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
+ unsigned nerrs = 0;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ int required = MPI_THREAD_MULTIPLE;
+ int provided = 0;
int mpi_size;
int mpi_rank;
+#if 0 /* JRM */
MPI_Init(&argc, &argv);
+#else /* JRM */
+ MPI_Init_thread(&argc, &argv, required, &provided);
+
+ if (provided != required) {
+
+ HDprintf(" MPI doesn't support MPI_Init_thread with MPI_THREAD_MULTIPLE. Exiting\n");
+ goto finish;
+ }
+#endif /* JRM */
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -3955,9 +4366,18 @@ main(int argc, char **argv)
HDprintf("\nAllocation and initialze of file image buffers failed. Test aborted.\n");
}
+#if 1 /* JRM */
+ /* sleep for a bit to allow GDB to attach to the process */
+ // sleep(60);
+#endif /* JRM */
+
MPI_Barrier(MPI_COMM_WORLD);
- // sleep(60);
+#if 1 /* JRM */ /* skip MPIO VFD tests if desired. */
+ if (mpi_rank == 0) {
+
+ HDprintf("\n\n --- TESTING MPIO VFD --- \n\n");
+ }
nerrs +=
vector_read_test_1(0, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
@@ -4026,6 +4446,141 @@ main(int argc, char **argv)
nerrs +=
vector_write_test_6(5, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio");
+ nerrs +=
+ vector_write_test_7(6, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+ nerrs +=
+ vector_write_test_7(6, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+ nerrs +=
+ vector_write_test_7(6, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio");
+#endif /* JRM */
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if (mpi_rank == 0) {
+
+ HDprintf("\n\n --- TESTING SUBFILING VFD --- \n\n");
+ }
+
+ nerrs += vector_read_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ "subfiling");
+ // sleep(1);
+ nerrs +=
+ vector_read_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "subfiling");
+ // sleep(1);
+ nerrs +=
+ vector_read_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "subfiling");
+ // sleep(1);
+
+ nerrs += vector_read_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ "subfiling");
+ // sleep(1);
+ nerrs +=
+ vector_read_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "subfiling");
+ // sleep(1);
+ nerrs +=
+ vector_read_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "subfiling");
+ // sleep(1);
+
+ nerrs += vector_read_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ "subfiling");
+ // sleep(1);
+ nerrs +=
+ vector_read_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "subfiling");
+ // sleep(1);
+ nerrs +=
+ vector_read_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "subfiling");
+ // sleep(1);
+
+ nerrs += vector_read_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ "subfiling");
+ // sleep(1);
+ nerrs += vector_read_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ "subfiling");
+ // sleep(1);
+ nerrs += vector_read_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ "subfiling");
+ // sleep(1);
+
+ nerrs += vector_read_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ "subfiling");
+ // sleep(1);
+ nerrs += vector_read_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ "subfiling");
+ // sleep(1);
+ nerrs += vector_read_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ "subfiling");
+ // sleep(1);
+
+ nerrs += vector_write_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ "subfiling");
+ // sleep(1);
+ nerrs += vector_write_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ "subfiling");
+ // sleep(1);
+ nerrs += vector_write_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ "subfiling");
+ // sleep(1);
+
+ nerrs += vector_write_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ "subfiling");
+ // sleep(1);
+ nerrs += vector_write_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ "subfiling");
+ // sleep(1);
+ nerrs += vector_write_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ "subfiling");
+ // sleep(1);
+
+ nerrs += vector_write_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ "subfiling");
+ // sleep(1);
+ nerrs += vector_write_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ "subfiling");
+ // sleep(1);
+ nerrs += vector_write_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ "subfiling");
+ // sleep(1);
+
+ nerrs += vector_write_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ "subfiling");
+ // sleep(1);
+ nerrs += vector_write_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ "subfiling");
+ // sleep(1);
+ nerrs += vector_write_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ "subfiling");
+ // sleep(1);
+
+ nerrs += vector_write_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ "subfiling");
+ // sleep(1);
+ nerrs += vector_write_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ "subfiling");
+ // sleep(1);
+ nerrs += vector_write_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ "subfiling");
+ // sleep(1);
+
+ nerrs += vector_write_test_6(12, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ "subfiling");
+ // sleep(1);
+ nerrs += vector_write_test_6(12, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ "subfiling");
+ // sleep(1);
+ nerrs += vector_write_test_6(12, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ "subfiling");
+ // sleep(1);
+
+ nerrs += vector_write_test_7(13, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO,
+ "subfiling");
+ // sleep(1);
+ nerrs += vector_write_test_7(13, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO,
+ "subfiling");
+ // sleep(1);
+ nerrs += vector_write_test_7(13, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO,
+ "subfiling");
+ // sleep(1);
+
finish:
/* make sure all processes are finished before final report, cleanup