summaryrefslogtreecommitdiffstats
path: root/testpar/t_pread.c
diff options
context:
space:
mode:
authormainzer <mainzer#hdfgroup.org>2017-10-05 22:25:57 (GMT)
committermainzer <mainzer#hdfgroup.org>2017-10-05 22:25:57 (GMT)
commitceab5a51766f3e32d02bc65d59374c79910483bd (patch)
tree957473f2ecf2c41cf0a140538bb76166c05555d2 /testpar/t_pread.c
parentda0f8223df7d37f3ae31da286a86fe62d58f6ea0 (diff)
downloadhdf5-ceab5a51766f3e32d02bc65d59374c79910483bd.zip
hdf5-ceab5a51766f3e32d02bc65d59374c79910483bd.tar.gz
hdf5-ceab5a51766f3e32d02bc65d59374c79910483bd.tar.bz2
Edits to the file open optimization and associated test
code to bring them closer to the HDF5 library's unwritten coding standards. Also bug fix to repair a hang in testphdf5. Tested parallel/debug on Charis and Jelly, parallel/production on Jelly.
Diffstat (limited to 'testpar/t_pread.c')
-rw-r--r--testpar/t_pread.c835
1 files changed, 666 insertions, 169 deletions
diff --git a/testpar/t_pread.c b/testpar/t_pread.c
index 4512185..ecc7360 100644
--- a/testpar/t_pread.c
+++ b/testpar/t_pread.c
@@ -1,196 +1,693 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-#include "mpi.h"
-#include "hdf5.h"
+/*
+ * Collective file open optimization tests
+ *
+ */
-static char *random_hdf5_text =
- "Now is the time for all first-time-users of HDF5 to read their manual or go thru the tutorials!\n\
-While you\'re at it, now is also the time to read up on MPI-IO.";
-static char *datafile_relocated = "relocated_super.h5";
+#include "h5test.h"
+#include "testpar.h"
+
+#define NFILENAME 3
+const char *FILENAMES[NFILENAME + 1]={"t_pread_data_file",
+ "reloc_t_pread_data_file",
+ "prefix_file",
+ NULL};
+#define FILENAME_BUF_SIZE 1024
+
+#define COUNT 1000
+
hbool_t pass = true;
+static const char *random_hdf5_text =
+"Now is the time for all first-time-users of HDF5 to read their \
+manual or go thru the tutorials!\n\
+While you\'re at it, now is also the time to read up on MPI-IO.";
+static int generate_test_file(int mpi_rank, int mpi_size);
+static int test_parallel_read(int mpi_rank);
-static void
+
+/*-------------------------------------------------------------------------
+ * Function: generate_test_file
+ *
+ * Purpose:     Collectively create an HDF5 file containing a single
+ *              dataset written by all ranks, then (on rank 0) use h5jam
+ *              to prepend a text user block, producing the relocated
+ *              data file used to exercise the file open optimization.
+ *
+ *
+ * Return: Success: 0
+ *
+ * Failure: 1
+ *
+ * Programmer: Richard Warren
+ * 10/1/17
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
generate_test_file( int mpi_rank, int mpi_size )
{
- FILE *header;
- char *datafile_base = "mytestfile.h5";
- char *prologue_file = "hdf5_readme.txt";
- hid_t file_id, memspace, filespace, attr_id, fapl_id, dxpl_id, dset_id;
- hsize_t i, offset, count = 1000;
- hsize_t dims[1] = {0};
- float nextValue, data_slice[count];
-
- pass = true;
-
- nextValue = (float)(mpi_rank * count);
- for(i=0; i<count; i++) {
- data_slice[i] = nextValue;
- nextValue += 1;
- }
+ FILE *header;
+ const char *fcn_name = "generate_test_file()";
+ const char *failure_mssg = NULL;
+ char data_filename[FILENAME_BUF_SIZE];
+ char reloc_data_filename[FILENAME_BUF_SIZE];
+ char prolog_filename[FILENAME_BUF_SIZE];
+ int local_failure = 0;
+ int global_failures = 0;
+ hsize_t count = COUNT;
+ hsize_t i;
+ hsize_t offset;
+ hsize_t dims[1] = {0};
+ hid_t file_id;
+ hid_t memspace;
+ hid_t filespace;
+ hid_t fapl_id;
+ hid_t dxpl_id;
+ hid_t dset_id;
+ float nextValue;
+ float *data_slice = NULL;
- /* create the file (parallel) */
- fapl_id = H5Pcreate(H5P_FILE_ACCESS);
- H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL);
- file_id = H5Fcreate(datafile_base, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
- if ( file_id < 0 ) {
- pass = false;
- HDfprintf(stderr, "FATAL: H5Fcreate failed!\n");
- }
+ pass = true;
- if ( pass ) {
- dxpl_id = H5Pcreate(H5P_DATASET_XFER);
- H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
- }
+ if ( mpi_rank == 0 ) {
- dims[0] = count;
- memspace = H5Screate_simple(1, dims, NULL);
- dims[0] *= mpi_size;
- filespace = H5Screate_simple(1, dims, NULL);
- offset = mpi_rank * count;
- H5Sselect_hyperslab(filespace, H5S_SELECT_SET, &offset, NULL, &count, NULL);
-
- dset_id = H5Dcreate2(file_id, "dataset0", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- if ( dset_id < 0 ) {
- pass = false;
- HDfprintf(stderr, "FATAL: H5Dcreate2 failed!\n");
- }
+ HDfprintf(stdout, "Constructing test files...");
+ }
- if ( pass ) {
- H5Dwrite(dset_id, H5T_NATIVE_FLOAT, memspace, filespace, dxpl_id, data_slice);
- }
- H5Dclose(dset_id);
- H5Sclose(memspace);
- H5Sclose(filespace);
-
- /* finished creating a data file */
- H5Fclose(file_id);
- H5Pclose(dxpl_id);
-
- if ( mpi_rank > 0 ) return;
-
- /* ---- mpi_rank 0 ------*/
- header = fopen( prologue_file, "w+");
- if (header == NULL) {
- pass = false;
- HDfprintf(stderr, "FATAL: Unable to create a simple txt file\n");
- return;
- }
- else {
- size_t bytes_written, bytes_to_write = strlen(random_hdf5_text);
- bytes_written = fwrite( random_hdf5_text, 1, bytes_to_write , header);
- if (bytes_written == 0) {
- pass = false;
- HDfprintf(stderr, "FATAL: Unable to write a simple txt file\n");
- }
- fclose(header);
- }
+ /* setup the file names */
+ if ( pass ) {
+ HDassert(FILENAMES[0]);
- if ( pass ) {
- char cmd[256];
- sprintf(cmd, "../tools/src/h5jam/h5jam -i %s -u %s -o %s",
- datafile_base, prologue_file, datafile_relocated);
- system(cmd);
- unlink(datafile_base);
- unlink(prologue_file);
- }
-}
+ if ( h5_fixname(FILENAMES[0], H5P_DEFAULT, data_filename,
+ sizeof(data_filename)) == NULL ) {
+ pass = FALSE;
+ failure_mssg = "h5_fixname(0) failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ HDassert(FILENAMES[1]);
+
+ if ( h5_fixname(FILENAMES[1], H5P_DEFAULT, reloc_data_filename,
+ sizeof(reloc_data_filename)) == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname(1) failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ HDassert(FILENAMES[2]);
+
+ if ( h5_fixname(FILENAMES[2], H5P_DEFAULT, prolog_filename,
+ sizeof(prolog_filename)) == NULL ) {
+ pass = FALSE;
+ failure_mssg = "h5_fixname(2) failed.\n";
+ }
+ }
+
+ /* setup data to write */
+ if ( pass ) {
+ if ( (data_slice = (float *)HDmalloc(COUNT * sizeof(float))) == NULL ) {
+ pass = FALSE;
+ failure_mssg = "malloc of data_slice failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ nextValue = (float)(mpi_rank * COUNT);
+
+ for(i=0; i<COUNT; i++) {
+ data_slice[i] = nextValue;
+ nextValue += 1;
+ }
+ }
+
+ /* setup FAPL */
+ if ( pass ) {
+ if ( (fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_FILE_ACCESS) failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( (H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Pset_fapl_mpio() failed\n";
+ }
+ }
+
+ /* create the data file */
+ if ( pass ) {
+ if ( (file_id = H5Fcreate(data_filename, H5F_ACC_TRUNC,
+ H5P_DEFAULT, fapl_id)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Fcreate() failed.\n";
+ }
+ }
+
+ /* create and write the dataset */
+ if ( pass ) {
+ if ( (dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Pset_dxpl_mpio() failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ dims[0] = COUNT;
+ if ( (memspace = H5Screate_simple(1, dims, NULL)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple(1, dims, NULL) failed (1).\n";
+ }
+ }
+
+ if ( pass ) {
+ dims[0] *= (hsize_t)mpi_size;
+ if ( (filespace = H5Screate_simple(1, dims, NULL)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple(1, dims, NULL) failed (2).\n";
+ }
+ }
+
+ if ( pass ) {
+ offset = (hsize_t)mpi_rank * (hsize_t)COUNT;
+ if ( (H5Sselect_hyperslab(filespace, H5S_SELECT_SET, &offset,
+ NULL, &count, NULL)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Sselect_hyperslab() failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( (dset_id = H5Dcreate2(file_id, "dataset0", H5T_NATIVE_FLOAT,
+ filespace, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Dcreate2() failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( (H5Dwrite(dset_id, H5T_NATIVE_FLOAT, memspace,
+ filespace, dxpl_id, data_slice)) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Dwrite() failed.\n";
+ }
+ }
+
+ /* close file, etc. */
+ if ( pass ) {
+ if ( H5Dclose(dset_id) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Dclose(dset_id) failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( H5Sclose(memspace) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Sclose(memspace) failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( H5Sclose(filespace) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Sclose(filespace) failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( H5Fclose(file_id) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Fclose(file_id) failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( H5Pclose(dxpl_id) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Pclose(dxpl_id) failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( H5Pclose(fapl_id) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Pclose(fapl_id) failed.\n";
+ }
+ }
+
+ /* add a userblock to the head of the datafile.
+ * We will use this for a functional test of the
+ * file open optimization.
+ *
+ * Also delete files that are no longer needed.
+ */
+ if ( mpi_rank == 0 ) {
+
+ size_t bytes_to_write;
+
+ bytes_to_write = strlen(random_hdf5_text);
+
+ if ( pass ) {
+ if ( (header = HDfopen(prolog_filename, "w+")) == NULL ) {
+ pass = FALSE;
+ failure_mssg = "HDfopen(prolog_filename, \"w+\") failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ bytes_to_write = strlen(random_hdf5_text);
+ if ( HDfwrite(random_hdf5_text, 1, bytes_to_write, header) !=
+ bytes_to_write ) {
+ pass = FALSE;
+ failure_mssg = "Unable to write header file.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( HDfclose(header) != 0 ) {
+ pass = FALSE;
+ failure_mssg = "HDfclose() failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ char cmd[256];
+
+ HDsprintf(cmd, "../tools/src/h5jam/h5jam -i %s -u %s -o %s",
+ data_filename, prolog_filename, reloc_data_filename);
+
+ if ( system(cmd) != 0 ) {
+ pass = FALSE;
+ failure_mssg = "invocation of h5jam failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ HDremove(prolog_filename);
+ HDremove(data_filename);
+ }
+ }
+
+ /* collect results from other processes.
+ * Only overwrite the failure message if no previous error
+ * has been detected
+ */
+ local_failure = ( pass ? 0 : 1 );
+
+ if ( MPI_Allreduce(&local_failure, &global_failures, 1,
+ MPI_INT, MPI_SUM, MPI_COMM_WORLD) != MPI_SUCCESS ) {
+ if ( pass ) {
+ pass = FALSE;
+ failure_mssg = "MPI_Allreduce() failed.\n";
+ }
+ } else if ( ( pass ) && ( global_failures > 0 ) ) {
+ pass = FALSE;
+ failure_mssg = "One or more processes report failure.\n";
+ }
+
+ /* report results */
+ if ( mpi_rank == 0 ) {
+ if ( pass ) {
+ HDfprintf(stdout, "Done.\n");
+ } else {
+ HDfprintf(stdout, "FAILED.\n");
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n",
+ fcn_name, failure_mssg);
+ }
+ }
+ /* free data_slice if it has been allocated */
+ if ( data_slice != NULL ) {
+ HDfree(data_slice);
+ data_slice = NULL;
+ }
-static void
-test_parallel_read( int mpi_rank, int mpi_size )
+ return(! pass);
+
+} /* generate_test_file() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_parallel_read
+ *
+ * Purpose:     Open the relocated data file (which carries a user
+ *              block) in parallel, read each rank's hyperslab of
+ *              "dataset0", and verify the expected values -- a
+ *              functional test of the collective file open optimization.
+ *
+ *
+ * Return: Success: 0
+ *
+ * Failure: 1
+ *
+ * Programmer: Richard Warren
+ * 10/1/17
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_parallel_read(int mpi_rank)
{
- int status, errors = 0;
- hid_t access_plist = -1, dataset = -1;
- hid_t file_id = -1, memspace = -1, dataspace = -1;
- hsize_t i, offset, count = 1000;
- hsize_t dims[1] = {0};
- float nextValue, data_slice[count];
- herr_t ret;
-
- access_plist = H5Pcreate(H5P_FILE_ACCESS);
- if (access_plist >= 0) {
- ret = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL);
- } else pass = false;
- if (ret >= 0) {
- file_id = H5Fopen(datafile_relocated,H5F_ACC_RDONLY,access_plist);
- } else pass = false;
- if (file_id >= 0) {
- dataset = H5Dopen2(file_id, "dataset0", H5P_DEFAULT);
- } else pass = false;
- if (dataset >= 0) {
- dims[0] = count;
- memspace = H5Screate_simple(1, dims, NULL);
- } else pass = false;
- if ( memspace >= 0 ) {
- dataspace = H5Dget_space(dataset);
- } else pass = false;
- if ( dataspace >= 0 ) {
- offset = mpi_rank * count;
- ret = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, &offset, NULL, &count, NULL);
- } else pass = false;
- if ( ret >= 0 ) {
- ret = H5Dread(dataset, H5T_NATIVE_FLOAT, memspace, dataspace, H5P_DEFAULT, data_slice);
- } else pass = false;
- if (ret >= 0) {
- nextValue = (float)(mpi_rank * count);
- for (i=0; i < count; i++) {
- if (data_slice[i] != nextValue) pass = false;
- nextValue += 1;
- }
- } else pass = false;
-
- status = ( pass ? 0 : -1 );
- MPI_Allreduce( &status, &errors, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
-
- if ( mpi_rank == 0)
- HDfprintf(stderr, "H5Fopen/H5Dread/data_validation %s\n", ((errors == 0) ? "succeeded" : "FAILED"));
-
- H5Pclose(access_plist);
- H5Dclose(dataset);
- H5Fclose(file_id);
-
- /* Cleanup */
- unlink(datafile_relocated);
-
- return;
-}
+ const char *failure_mssg;
+ const char *fcn_name = "test_parallel_read()";
+ char reloc_data_filename[FILENAME_BUF_SIZE];
+ int local_failure = 0;
+ int global_failures = 0;
+ hid_t fapl_id;
+ hid_t file_id;
+ hid_t dset_id;
+ hid_t memspace = -1;
+ hid_t filespace = -1;
+ hsize_t i;
+ hsize_t offset;
+ hsize_t count = COUNT;
+ hsize_t dims[1] = {0};
+ float nextValue;
+ float *data_slice = NULL;
+
+ pass = TRUE;
+
+ if ( mpi_rank == 0 ) {
+
+ TESTING("parallel file open test 1");
+ }
+
+ /* allocate space for the data_slice array */
+ if ( pass ) {
+ if ( (data_slice = (float *)HDmalloc(COUNT * sizeof(float))) == NULL ) {
+ pass = FALSE;
+ failure_mssg = "malloc of data_slice failed.\n";
+ }
+ }
+
+
+ /* construct the file name */
+ if ( pass ) {
+ HDassert(FILENAMES[1]);
+
+ if ( h5_fixname(FILENAMES[1], H5P_DEFAULT, reloc_data_filename,
+ sizeof(reloc_data_filename)) == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname(1) failed.\n";
+ }
+ }
+
+ /* setup FAPL */
+ if ( pass ) {
+ if ( (fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_FILE_ACCESS) failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( (H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Pset_fapl_mpio() failed\n";
+ }
+ }
+
+ /* open the file -- should have user block, exercising the optimization */
+ if ( pass ) {
+ if ( (file_id = H5Fopen(reloc_data_filename,
+ H5F_ACC_RDONLY, fapl_id)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Fopen() failed\n";
+ }
+ }
+
+ /* open the data set */
+ if ( pass ) {
+ if ( (dset_id = H5Dopen2(file_id, "dataset0", H5P_DEFAULT)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Dopen2() failed\n";
+ }
+ }
+
+ /* setup memspace */
+ if ( pass ) {
+ dims[0] = count;
+ if ( (memspace = H5Screate_simple(1, dims, NULL)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple(1, dims, NULL) failed\n";
+ }
+ }
+
+ /* setup filespace */
+ if ( pass ) {
+ if ( (filespace = H5Dget_space(dset_id)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Dget_space(dataset) failed\n";
+ }
+ }
+
+ if ( pass ) {
+ offset = (hsize_t)mpi_rank * count;
+ if ( (H5Sselect_hyperslab(filespace, H5S_SELECT_SET,
+ &offset, NULL, &count, NULL)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Sselect_hyperslab() failed\n";
+ }
+ }
+
+ /* read this process's section of the data */
+ if ( pass ) {
+ if ( (H5Dread(dset_id, H5T_NATIVE_FLOAT, memspace,
+ filespace, H5P_DEFAULT, data_slice)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Dread() failed\n";
+ }
+ }
+
+ /* verify the data */
+ if ( pass ) {
+ nextValue = (float)((hsize_t)mpi_rank * count);
+ i = 0;
+ while ( ( pass ) && ( i < count ) ) {
+ /* what we really want is data_slice[i] != nextValue --
+ * the following is a circumlocution to shut up the
+ * the compiler.
+ */
+ if ( ( data_slice[i] > nextValue ) ||
+ ( data_slice[i] < nextValue ) ) {
+ pass = FALSE;
+ failure_mssg = "Unexpected dset contents.\n";
+ }
+ nextValue += 1;
+ i++;
+ }
+ }
+
+ /* close file, etc. */
+ if ( pass ) {
+ if ( H5Dclose(dset_id) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Dclose(dset_id) failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( H5Sclose(memspace) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Sclose(memspace) failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( H5Sclose(filespace) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Sclose(filespace) failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( H5Fclose(file_id) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Fclose(file_id) failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( H5Pclose(fapl_id) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Pclose(fapl_id) failed.\n";
+ }
+ }
+
+ /* collect results from other processes.
+ * Only overwrite the failure message if no previous error
+ * has been detected
+ */
+ local_failure = ( pass ? 0 : 1 );
+
+ if ( MPI_Allreduce( &local_failure, &global_failures, 1,
+ MPI_INT, MPI_SUM, MPI_COMM_WORLD) != MPI_SUCCESS ) {
+ if ( pass ) {
+ pass = FALSE;
+ failure_mssg = "MPI_Allreduce() failed.\n";
+ }
+ } else if ( ( pass ) && ( global_failures > 0 ) ) {
+ pass = FALSE;
+ failure_mssg = "One or more processes report failure.\n";
+ }
+ /* report results and finish cleanup */
+ if ( mpi_rank == 0 ) {
+ if ( pass ) {
+ PASSED();
+ } else {
+ H5_FAILED();
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n",
+ fcn_name, failure_mssg);
+ }
+
+ HDremove(reloc_data_filename);
+ }
+
+ /* free data_slice if it has been allocated */
+ if ( data_slice != NULL ) {
+ HDfree(data_slice);
+ data_slice = NULL;
+ }
+
+
+ return( ! pass );
+
+} /* test_parallel_read() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose:     Driver for the collective file open optimization tests:
+ *              initializes MPI, generates the test files, runs the
+ *              parallel read test, and reports aggregate results.
+ *
+ *
+ * WARNING: This test uses fork() and execve(), and
+ * therefore will not run on Windows.
+ *
+ * Return: Success: 0
+ *
+ * Failure: 1
+ *
+ * Programmer: Richard Warren
+ * 10/1/17
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
int
main( int argc, char **argv)
{
- int status, errors, mpi_rank, mpi_size;
+ int nerrs = 0;
+ int mpi_rank;
+ int mpi_size;
- if ((status = MPI_Init(&argc, &argv)) != MPI_SUCCESS) {
- HDfprintf(stderr, "FATAL: Unable to initialize MPI\n");
- exit(1);
- }
- if ((status = MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank)) != MPI_SUCCESS) {
- HDfprintf(stderr, "FATAL: MPI_Comm_rank returned an error\n");
- exit(2);
- }
- if ((status = MPI_Comm_size(MPI_COMM_WORLD, &mpi_size)) != MPI_SUCCESS) {
- HDfprintf(stderr, "FATAL: MPI_Comm_size returned an error\n");
- exit(2);
- }
+ if ( (MPI_Init(&argc, &argv)) != MPI_SUCCESS) {
+ HDfprintf(stderr, "FATAL: Unable to initialize MPI\n");
+ exit(1);
+ }
- generate_test_file( mpi_rank, mpi_size );
- status = ( pass ? 0 : -1 );
+ if ( (MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank)) != MPI_SUCCESS) {
+ HDfprintf(stderr, "FATAL: MPI_Comm_rank returned an error\n");
+ exit(2);
+ }
- /* Synch all ranks before attempting the parallel read */
- if ( MPI_Allreduce( &status, &errors, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ) != MPI_SUCCESS) {
- pass = false;
- if (mpi_rank == 0) HDfprintf(stderr, "FATAL: MPI_Allreduce returned an error\n");
- }
+ if ( (MPI_Comm_size(MPI_COMM_WORLD, &mpi_size)) != MPI_SUCCESS) {
+ HDfprintf(stderr, "FATAL: MPI_Comm_size returned an error\n");
+ exit(2);
+ }
+
+ H5open();
+
+ if ( mpi_rank == 0 ) {
+ HDfprintf(stdout, "========================================\n");
+ HDfprintf(stdout, "Collective file open optimization tests\n");
+ HDfprintf(stdout, " mpi_size = %d\n", mpi_size);
+ HDfprintf(stdout, "========================================\n");
+ }
+
+ if ( mpi_size < 2 ) {
- if ( errors == 0 ) {
- test_parallel_read( mpi_rank, mpi_size );
+ if ( mpi_rank == 0 ) {
+
+ HDprintf(" Need at least 2 processes. Exiting.\n");
+ }
+ goto finish;
+ }
+
+ /* create the test files & verify that the process
+ * succeeded. If not, abort the remaining tests as
+ * they depend on the test files.
+ */
+
+ nerrs += generate_test_file( mpi_rank, mpi_size );
+
+ /* abort tests if there were any errors in test file construction */
+ if ( nerrs > 0 ) {
+ if ( mpi_rank == 0 ) {
+ HDprintf(" Test file construction failed -- skipping tests.\n");
+ }
+ goto finish;
}
- MPI_Finalize();
- return 0;
-}
+ /* run the tests */
+ nerrs += test_parallel_read(mpi_rank);
+
+finish:
+
+ /* make sure all processes are finished before final report, cleanup
+ * and exit.
+ */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if ( mpi_rank == 0 ) { /* only process 0 reports */
+ const char *header = "Collective file open optimization tests";
+
+ HDfprintf(stdout, "===================================\n");
+ if ( nerrs > 0 ) {
+
+ HDfprintf(stdout, "***%s detected %d failures***\n", header, nerrs);
+ }
+ else {
+ HDfprintf(stdout, "%s finished with no failures\n", header);
+ }
+ HDfprintf(stdout, "===================================\n");
+ }
+
+ /* close HDF5 library */
+ H5close();
+
+ /* MPI_Finalize must be called AFTER H5close which may use MPI calls */
+ MPI_Finalize();
+
+ /* cannot just return (nerrs) because exit code is limited to 1byte */
+ return(nerrs > 0);
+
+} /* main() */