author     Allen Byrne <byrn@hdfgroup.org>   2018-02-14 16:08:09 (GMT)
committer  Allen Byrne <byrn@hdfgroup.org>   2018-02-14 16:08:09 (GMT)
commit     205d33f332bc6974b7698160ac6d399a36c93fcf (patch)
tree       30cacad5e2e2614c388fbb4772f9beee87cef567 /testpar/t_pread.c
parent     66ea3b8fb9fdff0a0ffa6ffd318ad5fc2c9a8f4b (diff)
Cleanup overuse of include files
Diffstat (limited to 'testpar/t_pread.c')
-rw-r--r--   testpar/t_pread.c   73
1 file changed, 36 insertions, 37 deletions
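
The substantive change in this commit is the removal of the direct #include "h5test.h" from t_pread.c; every other hunk below only trims trailing whitespace. A minimal sketch of the include relationship that makes the removal safe, assuming (as the commit title implies) that testpar.h already pulls in h5test.h transitively; the header contents shown here are illustrative, not copied from the real files:

```c
/* testpar.h -- parallel-test support header (simplified, assumed view) */
#include "h5test.h"   /* serial test utilities (h5_fixname(), etc.) */
#include <mpi.h>      /* MPI declarations used by the parallel tests */

/* t_pread.c after this commit: the single include below is enough,
 * because h5test.h already arrives transitively through testpar.h. */
#include "testpar.h"
```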
diff --git a/testpar/t_pread.c b/testpar/t_pread.c
index a527503..aac5bee 100644
--- a/testpar/t_pread.c
+++ b/testpar/t_pread.c
@@ -16,7 +16,6 @@
  *
  */
 
-#include "h5test.h"
 #include "testpar.h"
 
 /* The collection of files is included below to aid
@@ -42,7 +41,7 @@ const char *FILENAMES[NFILENAME + 1]={"t_pread_data_file",
 #define COUNT 1000
 
 hbool_t pass = true;
-static const char *random_hdf5_text = 
+static const char *random_hdf5_text =
 "Now is the time for all first-time-users of HDF5 to read their \
 manual or go thru the tutorials!\n\
 While you\'re at it, now is also the time to read up on MPI-IO.";
@@ -58,7 +57,7 @@ static int test_parallel_read(MPI_Comm comm, int mpi_rank, int group);
 static char *test_argv0 = NULL;
 extern char *dirname(char *path); /* Avoids additional includes */
 
- 
+
 /*-------------------------------------------------------------------------
  * Function:    generate_test_file
  *
@@ -79,7 +78,7 @@ extern char *dirname(char *path); /* Avoids additional includes */
  *              In the overall scheme of running the test, we'll call
  *              this function twice: first as a collection of all MPI
  *              processes and then a second time with the processes split
- *              more or less in half. Each sub group will operate 
+ *              more or less in half. Each sub group will operate
  *              collectively on their assigned file.  This split into
  *              subgroups validates that parallel groups can successfully
  *              open and read data independantly from the other parallel
@@ -93,7 +92,7 @@ extern char *dirname(char *path); /* Avoids additional includes */
  *              10/1/17
  *
  * Modifications:
- * 
+ *
  *-------------------------------------------------------------------------
  */
 static int
@@ -110,14 +109,14 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
     int group_size;
     int group_rank;
     int local_failure = 0;
-    int global_failures = 0; 
+    int global_failures = 0;
     hsize_t count = COUNT;
     hsize_t i;
     hsize_t offset;
     hsize_t dims[1] = {0};
     hid_t file_id   = -1;
-    hid_t memspace  = -1; 
-    hid_t filespace = -1; 
+    hid_t memspace  = -1;
+    hid_t filespace = -1;
     hid_t fapl_id   = -1;
     hid_t dxpl_id   = -1;
     hid_t dset_id   = -1;
@@ -153,7 +152,7 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
     if ( pass ) {
         if ( comm == MPI_COMM_WORLD ) { /* Test 1 */
             file_index = 0;
-        } 
+        }
         else if ( group_id == 0 ) { /* Test 2 group 0 */
             file_index = 3;
         }
@@ -161,8 +160,8 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
             file_index = 6;
         }
 
-        /* The 'group_filename' is just a temp variable and 
-         * is used to call into the h5_fixname function. No 
+        /* The 'group_filename' is just a temp variable and
+         * is used to call into the h5_fixname function. No
          * need to worry that we reassign it for each file!
          */
         group_filename = FILENAMES[file_index];
@@ -234,9 +233,9 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
         }
     }
 
-    /* create the data file */ 
+    /* create the data file */
     if ( pass ) {
-        if ( (file_id = H5Fcreate(data_filename, H5F_ACC_TRUNC, 
+        if ( (file_id = H5Fcreate(data_filename, H5F_ACC_TRUNC,
                                   H5P_DEFAULT, fapl_id)) < 0 ) {
             pass = FALSE;
             failure_mssg = "H5Fcreate() failed.\n";
@@ -276,7 +275,7 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
     if ( pass ) {
         offset = (hsize_t)group_rank * (hsize_t)COUNT;
 
-        if ( (H5Sselect_hyperslab(filespace, H5S_SELECT_SET, &offset, 
+        if ( (H5Sselect_hyperslab(filespace, H5S_SELECT_SET, &offset,
                                   NULL, &count, NULL)) < 0 ) {
             pass = FALSE;
             failure_mssg = "H5Sselect_hyperslab() failed.\n";
@@ -284,8 +283,8 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
     }
 
     if ( pass ) {
-        if ( (dset_id = H5Dcreate2(file_id, "dataset0", H5T_NATIVE_FLOAT, 
-                                   filespace, H5P_DEFAULT, H5P_DEFAULT, 
+        if ( (dset_id = H5Dcreate2(file_id, "dataset0", H5T_NATIVE_FLOAT,
+                                   filespace, H5P_DEFAULT, H5P_DEFAULT,
                                    H5P_DEFAULT)) < 0 ) {
             pass = false;
             failure_mssg = "H5Dcreate2() failed.\n";
@@ -293,7 +292,7 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
     }
 
     if ( pass ) {
-        if ( (H5Dwrite(dset_id, H5T_NATIVE_FLOAT, memspace, 
+        if ( (H5Dwrite(dset_id, H5T_NATIVE_FLOAT, memspace,
                        filespace, dxpl_id, data_slice)) < 0 ) {
             pass = false;
             failure_mssg = "H5Dwrite() failed.\n";
@@ -344,15 +343,15 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
     }
 
     /* Add a userblock to the head of the datafile.
-     * We will use this to for a functional test of the 
+     * We will use this to for a functional test of the
      * file open optimization.  This is superblock
     * relocation is done by the rank 0 process associated
     * with the communicator being used.  For test 1, we
     * utilize MPI_COMM_WORLD, so group_rank 0 is the
     * same as mpi_rank 0.  For test 2 which utilizes
     * two groups resulting from an MPI_Comm_split, we
-     * will have parallel groups and hence two 
-     * group_rank(0) processes. Each parallel group 
+     * will have parallel groups and hence two
+     * group_rank(0) processes. Each parallel group
     * will create a unique file with different text
     * headers and different data.
     *
@@ -361,7 +360,7 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
 
     if ( group_rank == 0 ) {
         const char *text_to_write;
-        size_t bytes_to_write; 
+        size_t bytes_to_write;
 
         if (group_id == 0)
             text_to_write = random_hdf5_text;
@@ -406,7 +405,7 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
          * if h5jam is co-located here. Otherwise, the autotools
         * put things into directories, hence the relative path.
         */
-        if (test_argv0 != NULL) { 
+        if (test_argv0 != NULL) {
            HDstrncpy(exe_path, test_argv0, sizeof(exe_path));
            if ( (exe_dirname = (char *)dirname(exe_path)) != NULL) {
                HDsprintf(cmd, "%s/h5jam", exe_dirname);
@@ -430,13 +429,13 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
     }
 
     /* collect results from other processes.
-     * Only overwrite the failure message if no preveious error 
+     * Only overwrite the failure message if no preveious error
     * has been detected
     */
     local_failure = ( pass ? 0 : 1 );
 
     /* This is a global all reduce (NOT group specific) */
-    if ( MPI_Allreduce(&local_failure, &global_failures, 1, 
+    if ( MPI_Allreduce(&local_failure, &global_failures, 1,
                        MPI_INT, MPI_SUM, MPI_COMM_WORLD) != MPI_SUCCESS ) {
         if ( pass ) {
             pass = FALSE;
@@ -468,7 +467,7 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
 
 } /* generate_test_file() */
 
- 
+
 /*-------------------------------------------------------------------------
  * Function:    test_parallel_read
  *
@@ -511,7 +510,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
     const char *group_filename = NULL;
     char reloc_data_filename[FILENAME_BUF_SIZE];
     int local_failure = 0;
-    int global_failures = 0; 
+    int global_failures = 0;
     int group_size;
     int group_rank;
     hid_t fapl_id   = -1;
@@ -597,7 +596,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
 
     /* open the file -- should have user block, exercising the optimization */
     if ( pass ) {
-        if ( (file_id = H5Fopen(reloc_data_filename, 
+        if ( (file_id = H5Fopen(reloc_data_filename,
                                 H5F_ACC_RDONLY, fapl_id)) < 0 ) {
             pass = FALSE;
             failure_mssg = "H5Fopen() failed\n";
@@ -631,7 +630,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
     if ( pass ) {
         offset = (hsize_t)group_rank * count;
 
-        if ( (H5Sselect_hyperslab(filespace, H5S_SELECT_SET, 
+        if ( (H5Sselect_hyperslab(filespace, H5S_SELECT_SET,
                                   &offset, NULL, &count, NULL)) < 0 ) {
             pass = FALSE;
             failure_mssg = "H5Sselect_hyperslab() failed\n";
@@ -640,14 +639,14 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
 
     /* read this processes section of the data */
     if ( pass ) {
-        if ( (H5Dread(dset_id, H5T_NATIVE_FLOAT, memspace, 
+        if ( (H5Dread(dset_id, H5T_NATIVE_FLOAT, memspace,
                       filespace, H5P_DEFAULT, data_slice)) < 0 ) {
             pass = FALSE;
             failure_mssg = "H5Dread() failed\n";
         }
     }
-    
-    /* verify the data */ 
+
+    /* verify the data */
     if ( pass ) {
         nextValue = (float)((hsize_t)mpi_rank * count);
         i = 0;
@@ -708,7 +707,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
      */
     local_failure = ( pass ? 0 : 1 );
 
-    if ( MPI_Allreduce( &local_failure, &global_failures, 1, 
+    if ( MPI_Allreduce( &local_failure, &global_failures, 1,
                         MPI_INT, MPI_SUM, MPI_COMM_WORLD) != MPI_SUCCESS ) {
         if ( pass ) {
             pass = FALSE;
@@ -739,11 +738,11 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
         }
     }
 
-    return( ! pass ); 
+    return( ! pass );
 
 } /* test_parallel_read() */
 
- 
+
 /*-------------------------------------------------------------------------
  * Function:    main
  *
@@ -830,7 +829,7 @@ main( int argc, char **argv)
 
     /* ------  Create two (2) MPI groups  ------
      *
-     * We split MPI_COMM_WORLD into 2 more or less equal sized 
+     * We split MPI_COMM_WORLD into 2 more or less equal sized
     * groups. The resulting communicators will be used to generate
     * two HDF files which in turn will be opened in parallel and the
     * contents verified in the second read test below.
@@ -858,7 +857,7 @@ main( int argc, char **argv)
         }
         goto finish;
     }
- 
+
     /* We generate the file used for test 2 */
     nerrs += generate_test_file( group_comm, mpi_rank, which_group );
 
@@ -917,7 +916,7 @@ finish:
         HDfprintf(stdout, "===================================\n");
         if ( nerrs > 0 ) {
-            
+
             HDfprintf(stdout, "***%s detected %d failures***\n",
                       header, nerrs);
         }
         else {
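
For readers unfamiliar with the two-group scheme the comments in main() describe, the split itself comes down to a single MPI_Comm_split call keyed on rank. Below is a self-contained sketch of just that step, with all HDF5 calls omitted; variable names such as which_group and group_comm mirror the test's, but the program is an illustration rather than the test code itself:

```c
/* Minimal illustration of the two-group split used by t_pread.c's main():
 * ranks in the lower half of MPI_COMM_WORLD get group 0, the rest group 1,
 * and each half receives its own communicator for collective I/O on its file. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int      mpi_rank, mpi_size, which_group;
    int      group_rank, group_size;
    MPI_Comm group_comm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

    /* "more or less equal sized" halves: 0 for the lower half, 1 otherwise */
    which_group = (mpi_rank < (mpi_size / 2)) ? 0 : 1;

    /* color = which_group, key = mpi_rank preserves the original rank order */
    MPI_Comm_split(MPI_COMM_WORLD, which_group, mpi_rank, &group_comm);

    MPI_Comm_rank(group_comm, &group_rank);
    MPI_Comm_size(group_comm, &group_size);
    printf("world rank %d -> group %d, rank %d of %d\n",
           mpi_rank, which_group, group_rank, group_size);

    MPI_Comm_free(&group_comm);
    MPI_Finalize();
    return 0;
}
```

Run under mpiexec with any number of ranks: each communicator ends up with roughly half of MPI_COMM_WORLD and its own rank 0, which is why the test produces two independent "group_rank 0" processes, each writing the userblock for its own file.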