author    Richard Warren <Richard.Warren@hdfgroup.org>    2017-10-11 20:22:50 (GMT)
committer Richard Warren <Richard.Warren@hdfgroup.org>    2017-10-11 20:22:50 (GMT)
commit    3dde6d0e32461f46630f814a2fdfbd4c813703bf (patch)
tree      94b0665603ce38ed64a995e1cdb18d0e0dc53b65
parent    157398107e334e3dafbdcd25f34da391510e45f2 (diff)
Updated the code and RELEASE.txt note per comments from John Mainzer
-rw-r--r--  release_docs/RELEASE.txt  10
-rw-r--r--  testpar/t_pread.c         84
2 files changed, 48 insertions, 46 deletions
diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt
index ed1b6cc..e561983 100644
--- a/release_docs/RELEASE.txt
+++ b/release_docs/RELEASE.txt
@@ -64,11 +64,11 @@ New Features
-----------------
- Optimize parallel open/location of the HDF5 super-block
- Previous releases of PHDF5 allow all parallel ranks to
- read the starting elements in a file to validate and process
- the HDF5 super-block. As this is accomplished more or less as
- a synchronous operation, a large number of processes will
- likely experience a slowdown due to filesystem contention.
+ Previous releases of PHDF5 required all parallel ranks to
+ search for the HDF5 superblock signature when opening the
+ file. As this is accomplished more or less as a synchronous
+ operation, a large number of processes can experience a
+ slowdown in the file open due to filesystem contention.
As a first step in improving the startup/file-open performance,
we allow MPI rank 0 of the associated MPI communicator to locate
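
The note above describes the change conceptually. Below is a minimal sketch of the
rank-0 read-and-broadcast pattern it refers to, written against plain MPI and stdio
rather than the HDF5 internals; the 64-byte prefetch size, the function name, and the
use of fread are illustrative assumptions, not the library's actual implementation
(which searches for the superblock signature at increasing offsets).

    /* Sketch: rank 0 reads the first bytes of the file and broadcasts them,
     * so only one process touches the filesystem during the open.
     * HEADER_BYTES and the error handling are assumptions for illustration. */
    #include <mpi.h>
    #include <stdio.h>

    #define HEADER_BYTES 64   /* assumed prefetch size, not the library's value */

    int read_header_collectively(MPI_Comm comm, const char *name,
                                 unsigned char *buf)
    {
        int rank, nread = 0;

        MPI_Comm_rank(comm, &rank);

        if (rank == 0) {
            FILE *fp = fopen(name, "rb");
            if (fp != NULL) {
                nread = (int)fread(buf, 1, HEADER_BYTES, fp);
                fclose(fp);
            }
        }

        /* Everyone learns how much was read, then receives the bytes. */
        MPI_Bcast(&nread, 1, MPI_INT, 0, comm);
        if (nread > 0)
            MPI_Bcast(buf, nread, MPI_UNSIGNED_CHAR, 0, comm);

        return nread;   /* 0 signals that rank 0 could not read the file */
    }

With this shape, the filesystem sees a single small read instead of one per rank, which
is the contention the release note is addressing.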
diff --git a/testpar/t_pread.c b/testpar/t_pread.c
index f0cad3d..7f23b9b 100644
--- a/testpar/t_pread.c
+++ b/testpar/t_pread.c
@@ -16,7 +16,6 @@
*
*/
-
#include "h5test.h"
#include "testpar.h"
@@ -87,7 +86,11 @@ static int test_parallel_read(MPI_Comm comm, int mpi_rank, int group);
*
* Failure: 1
*
+ * Programmer: Richard Warren
+ * 10/1/17
*
+ * Modifications:
+ *
*-------------------------------------------------------------------------
*/
static int
@@ -758,17 +761,17 @@ main( int argc, char **argv)
if ( (MPI_Init(&argc, &argv)) != MPI_SUCCESS) {
HDfprintf(stderr, "FATAL: Unable to initialize MPI\n");
- HDexit(FAIL);
+ HDexit(EXIT_FAILURE);
}
if ( (MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank)) != MPI_SUCCESS) {
HDfprintf(stderr, "FATAL: MPI_Comm_rank returned an error\n");
- HDexit(FAIL);
+ HDexit(EXIT_FAILURE);
}
if ( (MPI_Comm_size(MPI_COMM_WORLD, &mpi_size)) != MPI_SUCCESS) {
HDfprintf(stderr, "FATAL: MPI_Comm_size returned an error\n");
- HDexit(FAIL);
+ HDexit(EXIT_FAILURE);
}
H5open();
@@ -789,26 +792,53 @@ main( int argc, char **argv)
goto finish;
}
- /* ------ Test 1 of 2 ------
- * In this test we utilize all processes which makeup MPI_COMM_WORLD.
- * We generate the test file which we'll shortly try to read.
+ /* ------ Create two (2) MPI groups ------
+ *
+ * We split MPI_COMM_WORLD into 2 more or less equal sized
+ * groups. The resulting communicators will be used to generate
+ * two HDF files which in turn will be opened in parallel and the
+ * contents verified in the second read test below.
*/
+ split_size = mpi_size / 2;
+ which_group = (mpi_rank < split_size ? 0 : 1);
+
+ if ( (MPI_Comm_split(MPI_COMM_WORLD,
+ which_group,
+ 0,
+ &group_comm)) != MPI_SUCCESS) {
+
+ HDfprintf(stderr, "FATAL: MPI_Comm_split returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ /* ------ Generate all files ------ */
+
+ /* We generate the file used for test 1 */
+ nerrs += generate_test_file( MPI_COMM_WORLD, mpi_rank, which_group );
+
+ if ( nerrs > 0 ) {
+ if ( mpi_rank == 0 ) {
+ HDprintf(" Test(1) file construction failed -- skipping tests.\n");
+ }
+ goto finish;
+ }
+
+ /* We generate the file used for test 2 */
nerrs += generate_test_file( group_comm, mpi_rank, which_group );
- /* abort tests if there were any errors in test file construction */
if ( nerrs > 0 ) {
if ( mpi_rank == 0 ) {
- HDprintf(" Test file construction failed -- skipping tests.\n");
+ HDprintf(" Test(2) file construction failed -- skipping tests.\n");
}
goto finish;
}
/* Now read the generated test file (still using MPI_COMM_WORLD) */
- nerrs += test_parallel_read( group_comm, mpi_rank, which_group);
+ nerrs += test_parallel_read( MPI_COMM_WORLD, mpi_rank, which_group);
if ( nerrs > 0 ) {
if ( mpi_rank == 0 ) {
- HDprintf(" Parallel read test failed -- skipping tests.\n");
+ HDprintf(" Parallel read test(1) failed -- skipping tests.\n");
}
goto finish;
}
@@ -819,40 +849,12 @@ main( int argc, char **argv)
HDprintf(" -- Starting multi-group parallel read test.\n");
}
- /* ------ Test 2 of 2 ------
- * Create two more or less equal MPI groups to
- * initialize the test files and then verify that parallel
- * operations by independent group succeeds.
- */
-
- split_size = mpi_size / 2;
- which_group = (mpi_rank < split_size ? 0 : 1);
-
- if ( (MPI_Comm_split(MPI_COMM_WORLD,
- which_group,
- 0,
- &group_comm)) != MPI_SUCCESS) {
-
- HDfprintf(stderr, "FATAL: MPI_Comm_split returned an error\n");
- HDexit(FAIL);
- }
-
- nerrs += generate_test_file( group_comm, mpi_rank, which_group );
-
- /* abort tests if there were any errors in test file construction */
- if ( nerrs > 0 ) {
- if ( mpi_rank == 0 ) {
- HDprintf(" Test file construction failed -- skipping tests.\n");
- }
- goto finish;
- }
-
/* run the 2nd set of tests */
nerrs += test_parallel_read(group_comm, mpi_rank, which_group);
if ( nerrs > 0 ) {
if ( mpi_rank == 0 ) {
- HDprintf(" Multi-group read test failed\n");
+ HDprintf(" Multi-group read test(2) failed\n");
}
goto finish;
}
@@ -897,6 +899,6 @@ finish:
MPI_Finalize();
/* cannot just return (nerrs) because exit code is limited to 1byte */
- return((nerrs > 0) ? FAIL : SUCCEED );
+ return((nerrs > 0) ? EXIT_FAILURE : EXIT_SUCCESS );
} /* main() */
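
For reference, the two-group structure the revised test relies on comes down to a single
MPI_Comm_split call. The standalone sketch below mirrors the variable names used in
t_pread.c (split_size, which_group, group_comm) but only illustrates the split; it is not
the test itself, and the printf at the end stands in for the per-group file generation
and read steps.

    /* Sketch of the communicator split used by t_pread.c: MPI_COMM_WORLD is
     * divided into two roughly equal groups, each of which then drives its
     * own file generation and parallel read test. */
    #include <mpi.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char **argv)
    {
        int mpi_rank, mpi_size, split_size, which_group;
        MPI_Comm group_comm;

        if (MPI_Init(&argc, &argv) != MPI_SUCCESS)
            return EXIT_FAILURE;

        MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
        MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

        /* Ranks below the midpoint form group 0, the rest form group 1. */
        split_size  = mpi_size / 2;
        which_group = (mpi_rank < split_size) ? 0 : 1;

        if (MPI_Comm_split(MPI_COMM_WORLD, which_group, 0 /* key */,
                           &group_comm) != MPI_SUCCESS) {
            fprintf(stderr, "MPI_Comm_split failed\n");
            MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
        }

        /* Each group would generate and read its own test file here. */
        printf("world rank %d of %d is in group %d\n",
               mpi_rank, mpi_size, which_group);

        MPI_Comm_free(&group_comm);
        MPI_Finalize();
        return EXIT_SUCCESS;
    }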