Diffstat (limited to 'testpar/t_pread.c')
-rw-r--r-- | testpar/t_pread.c | 422
1 files changed, 401 insertions, 21 deletions
diff --git a/testpar/t_pread.c b/testpar/t_pread.c
index 0905d44..74feeb6 100644
--- a/testpar/t_pread.c
+++ b/testpar/t_pread.c
@@ -17,6 +17,7 @@
  */
 
 #include "testpar.h"
+#include "H5Dprivate.h"
 
 /* The collection of files is included below to aid
  * an external "cleanup" process if required.
@@ -34,6 +35,8 @@ const char *FILENAMES[NFILENAME + 1]={"reloc_t_pread_data_file",
 
 #define COUNT 1000
 
+#define LIMIT_NPROC 6
+
 hbool_t pass = true;
 static const char *random_hdf5_text =
 "Now is the time for all first-time-users of HDF5 to read their \
@@ -46,7 +49,7 @@ completely foolproof is to underestimate the ingenuity of complete\n\
 fools.\n";
 
 static int generate_test_file(MPI_Comm comm, int mpi_rank, int group);
-static int test_parallel_read(MPI_Comm comm, int mpi_rank, int group);
+static int test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group);
 
 static char *test_argv0 = NULL;
 
@@ -108,6 +111,9 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
     hid_t fapl_id = -1;
     hid_t dxpl_id = -1;
     hid_t dset_id = -1;
+    hid_t dset_id_ch = -1;
+    hid_t dcpl_id = H5P_DEFAULT;
+    hsize_t chunk[1];
     float nextValue;
     float *data_slice = NULL;
 
@@ -272,6 +278,55 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
         }
     }
 
+
+    /* create a chunked dataset */
+    chunk[0] = COUNT/8;
+
+    if ( pass ) {
+        if ( (dcpl_id = H5Pcreate (H5P_DATASET_CREATE)) < 0 ) {
+            pass = false;
+            failure_mssg = "H5Pcreate() failed.\n";
+        }
+    }
+
+    if ( pass ) {
+        if ( (H5Pset_chunk (dcpl_id, 1, chunk) ) < 0 ) {
+            pass = false;
+            failure_mssg = "H5Pset_chunk() failed.\n";
+        }
+    }
+
+    if ( pass ) {
+
+        if ( (dset_id_ch = H5Dcreate2(file_id, "dataset0_chunked", H5T_NATIVE_FLOAT,
+                                      filespace, H5P_DEFAULT, dcpl_id,
+                                      H5P_DEFAULT)) < 0 ) {
+            pass = false;
+            failure_mssg = "H5Dcreate2() failed.\n";
+        }
+    }
+
+    if ( pass ) {
+        if ( (H5Dwrite(dset_id_ch, H5T_NATIVE_FLOAT, memspace,
+                       filespace, dxpl_id, data_slice)) < 0 ) {
+            pass = false;
+            failure_mssg = "H5Dwrite() failed.\n";
+        }
+    }
+    if ( pass || (dcpl_id != -1)) {
+        if ( H5Pclose(dcpl_id) < 0 ) {
+            pass = false;
+            failure_mssg = "H5Pclose(dcpl_id) failed.\n";
+        }
+    }
+
+    if ( pass || (dset_id_ch != -1)) {
+        if ( H5Dclose(dset_id_ch) < 0 ) {
+            pass = false;
+            failure_mssg = "H5Dclose(dset_id_ch) failed.\n";
+        }
+    }
+
     /* close file, etc. */
     if ( pass || (dset_id != -1)) {
         if ( H5Dclose(dset_id) < 0 ) {
@@ -413,7 +468,7 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
  * Function:    test_parallel_read
  *
  * Purpose:     This actually tests the superblock optimization
- *              and covers the two primary cases we're interested in.
+ *              and covers the three primary cases we're interested in.
  *              1). That HDF5 files can be opened in parallel by
  *              the rank 0 process and that the superblock
  *              offset is correctly broadcast to the other
@@ -423,6 +478,10 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
  *              subgroups of MPI_COMM_WORLD and that each
  *              subgroup operates as described in (1) to
  *              collectively read the data.
+ *              3). Testing proc0-read-and-MPI_Bcast using
+ *              sub-communicators, and reading into
+ *              a memory space that is different from the
+ *              file space, and chunked datasets.
  *
  *              The global MPI rank is used for reading and
  *              writing data for process specific data in the
@@ -444,7 +503,7 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
  *-------------------------------------------------------------------------
  */
 static int
-test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
+test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
 {
     const char *failure_mssg;
     const char *fcn_name = "test_parallel_read()";
@@ -457,8 +516,13 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
     hid_t fapl_id = -1;
     hid_t file_id = -1;
     hid_t dset_id = -1;
+    hid_t dset_id_ch = -1;
+    hid_t dxpl_id = H5P_DEFAULT;
     hid_t memspace = -1;
     hid_t filespace = -1;
+    hid_t filetype = -1;
+    size_t filetype_size;
+    hssize_t dset_size;
     hsize_t i;
     hsize_t offset;
     hsize_t count = COUNT;
@@ -552,6 +616,14 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
         }
     }
 
+    /* open the chunked data set */
+    if ( pass ) {
+        if ( (dset_id_ch = H5Dopen2(file_id, "dataset0_chunked", H5P_DEFAULT)) < 0 ) {
+            pass = FALSE;
+            failure_mssg = "H5Dopen2() failed\n";
+        }
+    }
+
     /* setup memspace */
     if ( pass ) {
         dims[0] = count;
@@ -606,14 +678,6 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
         }
     }
 
-    /* close file, etc. */
-    if ( pass || (dset_id != -1) ) {
-        if ( H5Dclose(dset_id) < 0 ) {
-            pass = false;
-            failure_mssg = "H5Dclose(dset_id) failed.\n";
-        }
-    }
-
     if ( pass || (memspace != -1) ) {
         if ( H5Sclose(memspace) < 0 ) {
             pass = false;
@@ -628,6 +692,330 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
         }
     }
 
+    /* free data_slice if it has been allocated */
+    if ( data_slice != NULL ) {
+        HDfree(data_slice);
+        data_slice = NULL;
+    }
+
+    /*
+     * Test reading proc0-read-and-bcast with sub-communicators
+     */
+
+    /* Don't test with more than LIMIT_NPROC processes to avoid memory issues */
+
+    if( group_size <= LIMIT_NPROC ) {
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+        hbool_t prop_value;
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+        if ( (filespace = H5Dget_space(dset_id )) < 0 ) {
+            pass = FALSE;
+            failure_mssg = "H5Dget_space failed.\n";
+        }
+
+        if ( (dset_size = H5Sget_simple_extent_npoints(filespace)) < 0 ) {
+            pass = FALSE;
+            failure_mssg = "H5Sget_simple_extent_npoints failed.\n";
+        }
+
+        if ( (filetype = H5Dget_type(dset_id)) < 0 ) {
+            pass = FALSE;
+            failure_mssg = "H5Dget_type failed.\n";
+        }
+
+        if ( (filetype_size = H5Tget_size(filetype)) == 0 ) {
+            pass = FALSE;
+            failure_mssg = "H5Tget_size failed.\n";
+        }
+
+        if ( H5Tclose(filetype) < 0 ) {
+            pass = FALSE;
+            failure_mssg = "H5Tclose failed.\n";
+        };
+
+        if ( (data_slice = (float *)HDmalloc((size_t)dset_size*filetype_size)) == NULL ) {
+            pass = FALSE;
+            failure_mssg = "malloc of data_slice failed.\n";
+        }
+
+        if ( pass ) {
+            if ( (dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0 ) {
+                pass = FALSE;
+                failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.\n";
+            }
+        }
+
+        if ( pass ) {
+            if ( (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE)) < 0 ) {
+                pass = FALSE;
+                failure_mssg = "H5Pset_dxpl_mpio() failed.\n";
+            }
+        }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+        if ( pass ) {
+            prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+            if(H5Pinsert2(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value,
+                          NULL, NULL, NULL, NULL, NULL, NULL) < 0) {
+                pass = FALSE;
+                failure_mssg = "H5Pinsert2() failed\n";
+            }
+        }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+        /* read H5S_ALL section */
+        if ( pass ) {
+            if ( (H5Dread(dset_id, H5T_NATIVE_FLOAT, H5S_ALL,
+                          H5S_ALL, dxpl_id, data_slice)) < 0 ) {
+                pass = FALSE;
+                failure_mssg = "H5Dread() failed\n";
+            }
+        }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+        if ( pass ) {
+            prop_value = FALSE;
+            if(H5Pget(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+                pass = FALSE;
+                failure_mssg = "H5Pget() failed\n";
+            }
+            if (pass) {
+                if(prop_value != TRUE) {
+                    pass = FALSE;
+                    failure_mssg = "rank 0 Bcast optimization was mistakenly not performed\n";
+                }
+            }
+        }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+        /* verify the data */
+        if ( pass ) {
+
+            if ( comm == MPI_COMM_WORLD )       /* test 1 */
+                nextValue = 0;
+            else if ( group_id == 0 )           /* test 2 group 0 */
+                nextValue = 0;
+            else                                /* test 2 group 1 */
+                nextValue = (float)((hsize_t)( mpi_size / 2 )*count);
+
+            i = 0;
+            while ( ( pass ) && ( i < (hsize_t)dset_size ) ) {
+                /* what we really want is data_slice[i] != nextValue --
+                 * the following is a circumlocution to shut up the
+                 * the compiler.
+                 */
+                if ( ( data_slice[i] > nextValue ) ||
+                     ( data_slice[i] < nextValue ) ) {
+                    pass = FALSE;
+                    failure_mssg = "Unexpected dset contents.\n";
+                }
+                nextValue += 1;
+                i++;
+            }
+        }
+
+        /* read H5S_ALL section for the chunked dataset */
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+        if ( pass ) {
+            prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+            if(H5Pset(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+                pass = FALSE;
+                failure_mssg = "H5Pset() failed\n";
+            }
+        }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+        for ( i = 0; i < (hsize_t)dset_size; i++) {
+            data_slice[i] = 0;
+        }
+        if ( pass ) {
+            if ( (H5Dread(dset_id_ch, H5T_NATIVE_FLOAT, H5S_ALL,
+                          H5S_ALL, dxpl_id, data_slice)) < 0 ) {
+                pass = FALSE;
+                failure_mssg = "H5Dread() failed\n";
+            }
+        }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+        if ( pass ) {
+            prop_value = FALSE;
+            if(H5Pget(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+                pass = FALSE;
+                failure_mssg = "H5Pget() failed\n";
+            }
+            if (pass) {
+                if(prop_value == TRUE) {
+                    pass = FALSE;
+                    failure_mssg = "rank 0 Bcast optimization was mistakenly performed for chunked dataset\n";
+                }
+            }
+        }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+        /* verify the data */
+        if ( pass ) {
+
+            if ( comm == MPI_COMM_WORLD )       /* test 1 */
+                nextValue = 0;
+            else if ( group_id == 0 )           /* test 2 group 0 */
+                nextValue = 0;
+            else                                /* test 2 group 1 */
+                nextValue = (float)((hsize_t)( mpi_size / 2 )*count);
+
+            i = 0;
+            while ( ( pass ) && ( i < (hsize_t)dset_size ) ) {
+                /* what we really want is data_slice[i] != nextValue --
+                 * the following is a circumlocution to shut up the
+                 * the compiler.
+                 */
+                if ( ( data_slice[i] > nextValue ) ||
+                     ( data_slice[i] < nextValue ) ) {
+                    pass = FALSE;
+                    failure_mssg = "Unexpected chunked dset contents.\n";
+                }
+                nextValue += 1;
+                i++;
+            }
+        }
+
+        if ( pass || (filespace != -1) ) {
+            if ( H5Sclose(filespace) < 0 ) {
+                pass = false;
+                failure_mssg = "H5Sclose(filespace) failed.\n";
+            }
+        }
+
+        /* free data_slice if it has been allocated */
+        if ( data_slice != NULL ) {
+            HDfree(data_slice);
+            data_slice = NULL;
+        }
+
+        /*
+         * Read an H5S_ALL filespace into a hyperslab defined memory space
+         */
+
+        if ( (data_slice = (float *)HDmalloc((size_t)(dset_size*2)*filetype_size)) == NULL ) {
+            pass = FALSE;
+            failure_mssg = "malloc of data_slice failed.\n";
+        }
+
+        /* setup memspace */
+        if ( pass ) {
+            dims[0] = (hsize_t)dset_size*2;
+            if ( (memspace = H5Screate_simple(1, dims, NULL)) < 0 ) {
+                pass = FALSE;
+                failure_mssg = "H5Screate_simple(1, dims, NULL) failed\n";
+            }
+        }
+        if ( pass ) {
+            offset = (hsize_t)dset_size;
+            if ( (H5Sselect_hyperslab(memspace, H5S_SELECT_SET,
+                                      &offset, NULL, &offset, NULL)) < 0 ) {
+                pass = FALSE;
+                failure_mssg = "H5Sselect_hyperslab() failed\n";
+            }
+        }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+        if ( pass ) {
+            prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+            if(H5Pset(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+                pass = FALSE;
+                failure_mssg = "H5Pset() failed\n";
+            }
+        }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+        /* read this processes section of the data */
+        if ( pass ) {
+            if ( (H5Dread(dset_id, H5T_NATIVE_FLOAT, memspace,
+                          H5S_ALL, dxpl_id, data_slice)) < 0 ) {
+                pass = FALSE;
+                failure_mssg = "H5Dread() failed\n";
+            }
+        }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+        if ( pass ) {
+            prop_value = FALSE;
+            if(H5Pget(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+                pass = FALSE;
+                failure_mssg = "H5Pget() failed\n";
+            }
+            if (pass) {
+                if(prop_value != TRUE) {
+                    pass = FALSE;
+                    failure_mssg = "rank 0 Bcast optimization was mistakenly not performed\n";
+                }
+            }
+        }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+        /* verify the data */
+        if ( pass ) {
+
+            if ( comm == MPI_COMM_WORLD )       /* test 1 */
+                nextValue = 0;
+            else if ( group_id == 0 )           /* test 2 group 0 */
+                nextValue = 0;
+            else                                /* test 2 group 1 */
+                nextValue = (float)((hsize_t)(mpi_size / 2)*count);
+
+            i = (hsize_t)dset_size;
+            while ( ( pass ) && ( i < (hsize_t)dset_size ) ) {
+                /* what we really want is data_slice[i] != nextValue --
+                 * the following is a circumlocution to shut up the
+                 * the compiler.
+                 */
+                if ( ( data_slice[i] > nextValue ) ||
+                     ( data_slice[i] < nextValue ) ) {
+                    pass = FALSE;
+                    failure_mssg = "Unexpected dset contents.\n";
+                }
+                nextValue += 1;
+                i++;
+            }
+        }
+
+        if ( pass || (memspace != -1) ) {
+            if ( H5Sclose(memspace) < 0 ) {
+                pass = false;
+                failure_mssg = "H5Sclose(memspace) failed.\n";
+            }
+        }
+
+        /* free data_slice if it has been allocated */
+        if ( data_slice != NULL ) {
+            HDfree(data_slice);
+            data_slice = NULL;
+        }
+
+        if ( pass || (dxpl_id != -1) ) {
+            if ( H5Pclose(dxpl_id) < 0 ) {
+                pass = false;
+                failure_mssg = "H5Pclose(dxpl_id) failed.\n";
+            }
+        }
+    }
+
+    /* close file, etc. */
+    if ( pass || (dset_id != -1) ) {
+        if ( H5Dclose(dset_id) < 0 ) {
+            pass = false;
+            failure_mssg = "H5Dclose(dset_id) failed.\n";
+        }
+    }
+
+    if ( pass || (dset_id_ch != -1) ) {
+        if ( H5Dclose(dset_id_ch) < 0 ) {
+            pass = false;
+            failure_mssg = "H5Dclose(dset_id_ch) failed.\n";
+        }
+    }
+
     if ( pass || (file_id != -1) ) {
         if ( H5Fclose(file_id) < 0 ) {
             pass = false;
@@ -668,17 +1056,9 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
             HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n",
                       fcn_name, failure_mssg);
         }
-
         HDremove(reloc_data_filename);
     }
 
-    /* free data_slice if it has been allocated */
-    if ( data_slice != NULL ) {
-        HDfree(data_slice);
-        data_slice = NULL;
-    }
-
-
     return( ! pass );
 
 } /* test_parallel_read() */
@@ -803,7 +1183,7 @@ main( int argc, char **argv)
     }
 
     /* Now read the generated test file (stil using MPI_COMM_WORLD) */
-    nerrs += test_parallel_read( MPI_COMM_WORLD, mpi_rank, which_group);
+    nerrs += test_parallel_read( MPI_COMM_WORLD, mpi_rank, mpi_size, which_group);
 
     if ( nerrs > 0 ) {
         if ( mpi_rank == 0 ) {
@@ -819,7 +1199,7 @@ main( int argc, char **argv)
     }
 
    /* run the 2nd set of tests */
-    nerrs += test_parallel_read(group_comm, mpi_rank, which_group);
+    nerrs += test_parallel_read(group_comm, mpi_rank, mpi_size, which_group);
 
     if ( nerrs > 0 ) {
         if ( mpi_rank == 0 ) {
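For context, the sketch below illustrates, outside of the test itself, the kind of access pattern the proc0-read-and-MPI_Bcast optimization targets: every rank opens the file through an MPI-IO file access property list and issues a collective H5Dread of the whole contiguous dataset with H5S_ALL selections in both memory and file. This is a minimal, hedged example rather than code taken from t_pread.c; the file name "t_pread_example.h5" and dataset name "dataset0" are placeholders, and error checking is omitted for brevity.

/*
 * Sketch only: collective whole-dataset read where the library may choose
 * to have rank 0 read the data and MPI_Bcast it to the remaining ranks.
 * File and dataset names below are placeholders, not those used by the test.
 */
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include "hdf5.h"

int
main(int argc, char **argv)
{
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;
    hid_t    fapl_id, file_id, dset_id, dxpl_id, fspace_id;
    hssize_t npoints;
    float   *buf = NULL;

    MPI_Init(&argc, &argv);

    /* all ranks open the file through an MPI-IO file access property list */
    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl_id, comm, info);
    file_id = H5Fopen("t_pread_example.h5", H5F_ACC_RDONLY, fapl_id);

    /* size the read buffer from the dataset's dataspace */
    dset_id   = H5Dopen2(file_id, "dataset0", H5P_DEFAULT);
    fspace_id = H5Dget_space(dset_id);
    npoints   = H5Sget_simple_extent_npoints(fspace_id);
    buf       = (float *)malloc((size_t)npoints * sizeof(float));

    /* collective transfer: every rank reads the full dataset (H5S_ALL in
     * both memory and file), the case the rank-0-and-broadcast path covers */
    dxpl_id = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
    H5Dread(dset_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, dxpl_id, buf);

    printf("read %lld values\n", (long long)npoints);

    free(buf);
    H5Pclose(dxpl_id);
    H5Sclose(fspace_id);
    H5Dclose(dset_id);
    H5Fclose(file_id);
    H5Pclose(fapl_id);
    MPI_Finalize();
    return 0;
}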