 testpar/t_coll_chunk.c | 119
 testpar/t_dset.c       |  56
 testpar/t_mdset.c      |  32
 testpar/t_span_tree.c  |  64
 testpar/testphdf5.c    |  19
 testpar/testphdf5.h    |   3
 6 files changed, 206 insertions(+), 87 deletions(-)
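Summary of the change, in one place: every parallel test that previously only requested collective transfer now also honors a new global, dxfer_coll_type. When the suite is started in the new mode (DXFER_INDEPENDENT_IO, added in testphdf5.c/testphdf5.h below), the transfer property list is still set to H5FD_MPIO_COLLECTIVE, but H5Pset_dxpl_mpio_collective_opt() asks the MPI-IO driver to perform the low-level I/O individually inside the collective call. A minimal sketch of that recurring setup follows; the DXFER_* names mirror the patch, while the helper itself is hypothetical and reduces error handling to returning a negative id.

/* Hypothetical helper illustrating the setup repeated at every I/O site below. */
#include "hdf5.h"

#define DXFER_COLLECTIVE_IO  0x1   /* mirrors testpar/testphdf5.h (added below) */
#define DXFER_INDEPENDENT_IO 0x2

static hid_t
create_xfer_plist(int dxfer_coll_type)
{
    hid_t  xfer_plist = H5Pcreate(H5P_DATASET_XFER);
    herr_t status;

    if (xfer_plist < 0)
        return -1;

    /* The tests always request collective transfer mode... */
    status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);

    /* ...and in the new mode they ask the MPI-IO driver to do the actual
     * I/O independently inside the collective call. */
    if (status >= 0 && dxfer_coll_type == DXFER_INDEPENDENT_IO)
        status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);

    if (status < 0) {
        H5Pclose(xfer_plist);
        return -1;
    }
    return xfer_plist;
}

Every H5Dwrite/H5Dread in the files below is still called by all ranks; only the MPI file access underneath changes.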
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index 1476fc7..087f8bd 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -35,7 +35,7 @@ static void coll_chunktest(const char* filename,int chunk_factor,int select_fact
/*-------------------------------------------------------------------------
* Function: coll_chunk1
*
- * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
            selection with a single chunk
*
* Return: Success: 0
@@ -52,7 +52,7 @@ static void coll_chunktest(const char* filename,int chunk_factor,int select_fact
/* ------------------------------------------------------------------------
* Descriptions for the selection: One big singluar selection inside one chunk
- * Two dimensions,
+ * Two dimensions,
*
* dim1 = SPACE_DIM1(5760)*mpi_size
* dim2 = SPACE_DIM2(3)
@@ -82,7 +82,7 @@ coll_chunk1(void)
/*-------------------------------------------------------------------------
* Function: coll_chunk2
*
- * Purpose: Wrapper to test the collective chunk IO for regular DISJOINT
+ * Purpose: Wrapper to test the collective chunk IO for regular DISJOINT
            selection with a single chunk
*
* Return: Success: 0
@@ -99,7 +99,7 @@ coll_chunk1(void)
/* ------------------------------------------------------------------------
* Descriptions for the selection: many disjoint selections inside one chunk
- * Two dimensions,
+ * Two dimensions,
*
* dim1 = SPACE_DIM1*mpi_size(5760)
* dim2 = SPACE_DIM2(3)
@@ -111,7 +111,7 @@ coll_chunk1(void)
* count1 = SPACE_DIM2/stride(3/3 = 1)
* start0 = mpi_rank*SPACE_DIM1
* start1 = 0
- *
+ *
* ------------------------------------------------------------------------
*/
void
@@ -129,7 +129,7 @@ coll_chunk2(void)
/*-------------------------------------------------------------------------
* Function: coll_chunk3
*
- * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
            selection with at least number of 2*mpi_size chunks
*
* Return: Success: 0
@@ -158,7 +158,7 @@ coll_chunk2(void)
* count1 = SPACE_DIM2(3)
* start0 = mpi_rank*SPACE_DIM1
* start1 = 0
- *
+ *
* ------------------------------------------------------------------------
*/
@@ -180,7 +180,7 @@ coll_chunk3(void)
/*-------------------------------------------------------------------------
* Function: coll_chunk4
*
- * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
            selection with at least number of 2*mpi_size chunks
*
* Return: Success: 0
@@ -209,7 +209,7 @@ coll_chunk3(void)
* count1 = SPACE_DIM2(3)
* start0 = mpi_rank*SPACE_DIM1
* start1 = 0
- *
+ *
* ------------------------------------------------------------------------
*/
@@ -230,7 +230,7 @@ coll_chunk4(void)
/*-------------------------------------------------------------------------
* Function: coll_chunk4
*
- * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
            selection with at least number of 2*mpi_size chunks
*
* Return: Success: 0
@@ -259,7 +259,7 @@ coll_chunk4(void)
* count1 = SPACE_DIM2(3)
* start0 = mpi_rank*SPACE_DIM1
* start1 = 0
- *
+ *
* ------------------------------------------------------------------------
*/
@@ -280,7 +280,7 @@ coll_chunk5(void)
/*-------------------------------------------------------------------------
* Function: coll_chunk6
*
- * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
            selection with at least number of 2*mpi_size chunks
*
* Return: Success: 0
@@ -309,7 +309,7 @@ coll_chunk5(void)
* count1 = SPACE_DIM2(3)
* start0 = mpi_rank*SPACE_DIM1
* start1 = 0
- *
+ *
* ------------------------------------------------------------------------
*/
@@ -330,7 +330,7 @@ coll_chunk6(void)
/*-------------------------------------------------------------------------
* Function: coll_chunk7
*
- * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
            selection with at least number of 2*mpi_size chunks
*
* Return: Success: 0
@@ -359,7 +359,7 @@ coll_chunk6(void)
* count1 = SPACE_DIM2(3)
* start0 = mpi_rank*SPACE_DIM1
* start1 = 0
- *
+ *
* ------------------------------------------------------------------------
*/
@@ -380,7 +380,7 @@ coll_chunk7(void)
/*-------------------------------------------------------------------------
* Function: coll_chunk8
*
- * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
            selection with at least number of 2*mpi_size chunks
*
* Return: Success: 0
@@ -409,7 +409,7 @@ coll_chunk7(void)
* count1 = SPACE_DIM2(3)
* start0 = mpi_rank*SPACE_DIM1
* start1 = 0
- *
+ *
* ------------------------------------------------------------------------
*/
@@ -430,7 +430,7 @@ coll_chunk8(void)
/*-------------------------------------------------------------------------
* Function: coll_chunk9
*
- * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
            selection with at least number of 2*mpi_size chunks
*
* Return: Success: 0
@@ -459,7 +459,7 @@ coll_chunk8(void)
* count1 = SPACE_DIM2(3)
* start0 = mpi_rank*SPACE_DIM1
* start1 = 0
- *
+ *
* ------------------------------------------------------------------------
*/
@@ -480,7 +480,7 @@ coll_chunk9(void)
/*-------------------------------------------------------------------------
* Function: coll_chunk10
*
- * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
            selection with at least number of 2*mpi_size chunks
*
* Return: Success: 0
@@ -509,7 +509,7 @@ coll_chunk9(void)
* count1 = SPACE_DIM2(3)
* start0 = mpi_rank*SPACE_DIM1
* start1 = 0
- *
+ *
* ------------------------------------------------------------------------
*/
@@ -534,7 +534,7 @@ coll_chunk10(void)
* Purpose: The real testing routine for regular selection of collective chunking storage
           testing both write and read,
-          If anything fails, it may be read or write. There is no
+          If anything fails, it may be read or write. There is no
           separation test between read and write.
*
* Return: Success: 0
@@ -613,7 +613,7 @@ coll_chunktest(const char* filename,
chunk_dims[0] = dims[0]/chunk_factor;
/* to decrease the testing time, maintain bigger chunk size */
-
+
(chunk_factor == 1) ? (chunk_dims[1] = SPACE_DIM2) : (chunk_dims[1] = SPACE_DIM2/2);
status = H5Pset_chunk(crp_plist, 2, chunk_dims);
VRFY((status >= 0),"chunk creation property list succeeded");
@@ -639,6 +639,10 @@ coll_chunktest(const char* filename,
status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((status>= 0),"MPIO collective transfer property succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+   status = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+   VRFY((status>= 0),"set independent IO collectively succeeded");
+ }
switch(api_option){
case API_LINK_HARD:
@@ -671,8 +675,8 @@ coll_chunktest(const char* filename,
break;
default:
;
- }
-
+ }
+
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
if(facc_type == FACC_MPIO) {
switch(api_option){
@@ -683,44 +687,44 @@ coll_chunktest(const char* filename,
VRFY((status >= 0),"testing property list inserted succeeded");
break;
case API_MULTI_HARD:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
status = H5Pinsert(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME,H5D_XFER_COLL_CHUNK_SIZE,&prop_value,
NULL,NULL,NULL,NULL,NULL,NULL);
VRFY((status >= 0),"testing property list inserted succeeded");
break;
case API_LINK_TRUE:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
status = H5Pinsert(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME,H5D_XFER_COLL_CHUNK_SIZE,&prop_value,
NULL,NULL,NULL,NULL,NULL,NULL);
VRFY((status >= 0),"testing property list inserted succeeded");
break;
case API_LINK_FALSE:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
status = H5Pinsert(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME,H5D_XFER_COLL_CHUNK_SIZE,&prop_value,
NULL,NULL,NULL,NULL,NULL,NULL);
VRFY((status >= 0),"testing property list inserted succeeded");
-
+
break;
case API_MULTI_COLL:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
status = H5Pinsert(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME,H5D_XFER_COLL_CHUNK_SIZE,&prop_value,
NULL,NULL,NULL,NULL,NULL,NULL);
VRFY((status >= 0),"testing property list inserted succeeded");
-
+
break;
case API_MULTI_IND:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
status = H5Pinsert(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME,H5D_XFER_COLL_CHUNK_SIZE,&prop_value,
NULL,NULL,NULL,NULL,NULL,NULL);
VRFY((status >= 0),"testing property list inserted succeeded");
-
+
break;
default:
;
- }
+ }
}
#endif
-
+
/* write data collectively */
status = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, file_dataspace,
xfer_plist, data_array1);
@@ -761,7 +765,7 @@ coll_chunktest(const char* filename,
break;
default:
;
- }
+ }
}
#endif
@@ -779,7 +783,7 @@ coll_chunktest(const char* filename,
if (data_array1) HDfree(data_array1);
-
+
/* Use collective read to verify the correctness of collective write. */
/* allocate memory for data buffer */
@@ -820,6 +824,11 @@ coll_chunktest(const char* filename,
status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((status>= 0),"MPIO collective transfer property succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+   status = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+   VRFY((status>= 0),"set independent IO collectively succeeded");
+ }
+
status = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, file_dataspace,
xfer_plist, data_array1);
@@ -851,12 +860,12 @@ coll_chunktest(const char* filename,
/* Set up the selection */
static void
-ccslab_set(int mpi_rank,
- int mpi_size,
- hsize_t start[],
+ccslab_set(int mpi_rank,
+ int mpi_size,
+ hsize_t start[],
hsize_t count[],
- hsize_t stride[],
- hsize_t block[],
+ hsize_t stride[],
+ hsize_t block[],
int mode)
{
@@ -905,7 +914,7 @@ ccslab_set(int mpi_rank,
case BYROW_SELECTUNBALANCE:
/* The first one-third of the number of processes only
select top half of the domain, The rest will select the bottom
- half of the domain. */
+ half of the domain. */
block[0] = 1;
count[0] = 2;
@@ -915,9 +924,9 @@ ccslab_set(int mpi_rank,
start[1] = 0;
stride[1] = 1;
if((mpi_rank *3)<(mpi_size*2)) start[0] = mpi_rank;
- else start[0] = 1 + SPACE_DIM1*mpi_size/2 + (mpi_rank-2*mpi_size/3);
+ else start[0] = 1 + SPACE_DIM1*mpi_size/2 + (mpi_rank-2*mpi_size/3);
break;
-
+
case BYROW_SELECTINCHUNK:
/* Each process will only select one chunk */
@@ -959,10 +968,10 @@ ccslab_set(int mpi_rank,
* Assume dimension rank is 2.
*/
static void
-ccdataset_fill(hsize_t start[],
+ccdataset_fill(hsize_t start[],
hsize_t stride[],
- hsize_t count[],
- hsize_t block[],
+ hsize_t count[],
+ hsize_t block[],
DATATYPE * dataset)
{
DATATYPE *dataptr = dataset;
@@ -994,8 +1003,8 @@ ccdataset_fill(hsize_t start[],
* Print the first block of the content of the dataset.
*/
static void
-ccdataset_print(hsize_t start[],
- hsize_t block[],
+ccdataset_print(hsize_t start[],
+ hsize_t block[],
DATATYPE * dataset)
{
@@ -1025,11 +1034,11 @@ ccdataset_print(hsize_t start[],
* Print the content of the dataset.
*/
static int
-ccdataset_vrfy(hsize_t start[],
- hsize_t count[],
- hsize_t stride[],
- hsize_t block[],
- DATATYPE *dataset,
+ccdataset_vrfy(hsize_t start[],
+ hsize_t count[],
+ hsize_t stride[],
+ hsize_t block[],
+ DATATYPE *dataset,
DATATYPE *original)
{
hsize_t i, j,k1,k2;
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index 15c562d..b7fe368 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -616,6 +616,11 @@ dataset_writeAll(void)
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+   ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+   VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
/* write data collectively */
MESG("writeAll by Row");
@@ -680,6 +685,11 @@ dataset_writeAll(void)
VRFY((xfer_plist >= 0), "");
ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+   ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+   VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
/* write data independently */
ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
@@ -746,6 +756,11 @@ dataset_writeAll(void)
VRFY((xfer_plist >= 0), "");
ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+   ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+   VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
/* write data collectively */
MESG("writeAll with none");
@@ -806,6 +821,12 @@ dataset_writeAll(void)
VRFY((xfer_plist >= 0), "");
ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+   ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+   VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+
/* write data collectively */
MESG("writeAll with scalar dataspace");
@@ -947,6 +968,11 @@ dataset_readAll(void)
VRFY((xfer_plist >= 0), "");
ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+   ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+   VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
/* read data collectively */
ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
@@ -1010,6 +1036,11 @@ dataset_readAll(void)
VRFY((xfer_plist >= 0), "");
ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+   ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+   VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
/* read data collectively */
ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
@@ -1794,6 +1825,11 @@ extend_writeAll(void)
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+   ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+   VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
/* write data collectively */
ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
@@ -1829,6 +1865,11 @@ extend_writeAll(void)
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+   ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+   VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
/* Try write to dataset2 beyond its current dim sizes. Should fail. */
/* Temporary turn off auto error reporting */
@@ -1999,6 +2040,11 @@ extend_readAll(void)
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+   ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+   VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
/* read data collectively */
ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
@@ -2041,6 +2087,11 @@ extend_readAll(void)
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+   ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+   VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
/* read data collectively */
ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
@@ -2195,6 +2246,11 @@ compress_readAll(void)
ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+   ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+   VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
/* Try reading the data */
ret=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index 225aa8a..6828ea5 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -223,6 +223,11 @@ void compact_dataset(void)
VRFY((dxpl >= 0), "");
ret=H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+   ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO);
+   VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
/* Recalculate data to write. Each process writes the same data. */
for (i = 0; i < size; i++)
@@ -248,6 +253,11 @@ void compact_dataset(void)
VRFY((dxpl >= 0), "");
ret=H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+   ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO);
+   VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
dataset = H5Dopen(iof, dname);
VRFY((dataset >= 0), "H5Dcreate succeeded");
@@ -320,6 +330,11 @@ void null_dataset(void)
VRFY((dxpl >= 0), "");
ret=H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+   ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO);
+   VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
/* Write "nothing" to the dataset (with type conversion) */
ret=H5Dwrite (dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, &uval);
@@ -349,7 +364,12 @@ void null_dataset(void)
VRFY((dxpl >= 0), "");
ret=H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+   ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO);
+   VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
dataset = H5Dopen(iof, dname);
VRFY((dataset >= 0), "H5Dcreate succeeded");
@@ -624,6 +644,11 @@ void dataset_fillvalue(void)
ret=H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+   ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO);
+   VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
/* Fill write buffer with some values */
twdata=wdata;
@@ -1636,8 +1661,13 @@ void io_mode_confusion(void)
mpi_rank, fcn_name);
status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
- VRFY(( status >= 0 ), "H5Pset_dxpl_mpio() failed");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+   status = H5Pset_dxpl_mpio_collective_opt(plist_id,H5FD_MPIO_INDIVIDUAL_IO);
+   VRFY((status>= 0),"set independent IO collectively succeeded");
+ }
+
+
if ( verbose )
diff --git a/testpar/t_span_tree.c b/testpar/t_span_tree.c
index 8c60da3..56127a0 100644
--- a/testpar/t_span_tree.c
+++ b/testpar/t_span_tree.c
@@ -45,7 +45,7 @@ static void coll_read_test(int chunk_factor);
/*-------------------------------------------------------------------------
* Function: coll_irregular_cont_write
*
- * Purpose: Wrapper to test the collectively irregular hyperslab write in
+ * Purpose: Wrapper to test the collectively irregular hyperslab write in
            contiguous storage
*
* Return: Success: 0
@@ -72,7 +72,7 @@ coll_irregular_cont_write(void)
/*-------------------------------------------------------------------------
* Function: coll_irregular_cont_read
*
- * Purpose: Wrapper to test the collectively irregular hyperslab read in
+ * Purpose: Wrapper to test the collectively irregular hyperslab read in
            contiguous storage
*
* Return: Success: 0
@@ -98,7 +98,7 @@ coll_irregular_cont_read(void)
/*-------------------------------------------------------------------------
* Function: coll_irregular_simple_chunk_write
*
- * Purpose: Wrapper to test the collectively irregular hyperslab write in
+ * Purpose: Wrapper to test the collectively irregular hyperslab write in
            chunk storage(1 chunk)
*
* Return: Success: 0
@@ -206,7 +206,7 @@ coll_irregular_complex_chunk_read(void)
* Purpose: To test the collectively irregular hyperslab write in chunk storage
* Input: number of chunks on each dimension
- if number is equal to 0, contiguous storage
+ if number is equal to 0, contiguous storage
* Return: Success: 0
*
* Failure: -1
@@ -282,7 +282,7 @@ void coll_write_test(int chunk_factor)
mdim[1] = MSPACE_DIM2*mpi_size;
fsdim[0] = FSPACE_DIM1;
fsdim[1] = FSPACE_DIM2*mpi_size;
-
+
vector = (int*)HDmalloc(sizeof(int)*mdim1[0]*mpi_size);
matrix_out = (int*)HDmalloc(sizeof(int)*mdim[0]*mdim[1]*mpi_size);
matrix_out1 = (int*)HDmalloc(sizeof(int)*mdim[0]*mdim[1]*mpi_size);
@@ -341,7 +341,7 @@ void coll_write_test(int chunk_factor)
/* The First selection for FILE
*
* block (3,2)
- * stride(4,3)
+ * stride(4,3)
* count (1,768/mpi_size)
* start (0,1+768*3*mpi_rank/mpi_size)
*
@@ -360,10 +360,10 @@ void coll_write_test(int chunk_factor)
VRFY((ret >= 0),"hyperslab selection succeeded");
/* The Second selection for FILE
- *
+ *
* block (3,768)
* stride (1,1)
- * count (1,1)
+ * count (1,1)
* start (4,768*mpi_rank/mpi_size)
*
*/
@@ -414,6 +414,11 @@ void coll_write_test(int chunk_factor)
ret = H5Pset_dxpl_mpio(dxfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0),"MPIO data transfer property list succeed");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+   ret = H5Pset_dxpl_mpio_collective_opt(dxfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+   VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
/* collective write */
ret = H5Dwrite(datasetc, H5T_NATIVE_INT, mspaceid1, fspaceid,
dxfer_plist, vector);
@@ -454,11 +459,11 @@ void coll_write_test(int chunk_factor)
* Open the file.
*/
- /***
-
- For testing collective hyperslab selection write
+ /***
+
+ For testing collective hyperslab selection write
In this test, we are using independent read to check
- the correctedness of collective write compared with
+ the correctedness of collective write compared with
independent write,
In order to throughly test this feature, we choose
@@ -496,7 +501,7 @@ void coll_write_test(int chunk_factor)
/* The First selection for FILE to read
*
* block (1,1)
- * stride(1.1)
+ * stride(1.1)
* count (3,768/mpi_size)
* start (1,2+768*mpi_rank/mpi_size)
*
@@ -522,7 +527,7 @@ void coll_write_test(int chunk_factor)
/* The Second selection for FILE to read
*
* block (1,1)
- * stride(1.1)
+ * stride(1.1)
* count (3,1536/mpi_size)
* start (2,4+1536*mpi_rank/mpi_size)
*
@@ -560,7 +565,7 @@ void coll_write_test(int chunk_factor)
* Only the starting point is different.
* The first selection
* block (1,1)
- * stride(1.1)
+ * stride(1.1)
* count (3,768/mpi_size)
* start (0,768*mpi_rank/mpi_size)
*
@@ -585,7 +590,7 @@ void coll_write_test(int chunk_factor)
* Only the starting point is different.
* The second selection
* block (1,1)
- * stride(1,1)
+ * stride(1,1)
* count (3,1536/mpi_size)
* start (1,2+1536*mpi_rank/mpi_size)
*
@@ -616,7 +621,7 @@ void coll_write_test(int chunk_factor)
H5P_DEFAULT, matrix_out);
VRFY((ret >= 0),"H5D independent read succeed");
-
+
ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid,
H5P_DEFAULT, matrix_out1);
VRFY((ret >= 0),"H5D independent read succeed");
@@ -627,7 +632,7 @@ void coll_write_test(int chunk_factor)
if(matrix_out[i]!=matrix_out1[i]) ret = -1;
if(ret < 0) break;
}
-
+
VRFY((ret >= 0),"H5D irregular collective write succeed");
/*
@@ -670,7 +675,7 @@ void coll_write_test(int chunk_factor)
* Purpose: To test the collectively irregular hyperslab read in chunk storage
* Input: number of chunks on each dimension
- if number is equal to 0, contiguous storage
+ if number is equal to 0, contiguous storage
* Return: Success: 0
*
* Failure: -1
@@ -679,8 +684,8 @@ void coll_write_test(int chunk_factor)
* Dec 2nd, 2004
*
* Modifications: Oct 18th, 2005
- * Note: This test must be used with the correpsonding
- coll_write_test.
+ * Note: This test must be used with the correpsonding
+ coll_write_test.
*-------------------------------------------------------------------------
*/
void coll_read_test(int chunk_factor)
@@ -700,7 +705,7 @@ void coll_read_test(int chunk_factor)
dataset on the disk */
#endif
- hsize_t mdim[2];
+ hsize_t mdim[2];
hsize_t start[2]; /* Start of hyperslab */
hsize_t stride[2]; /* Stride of hyperslab */
hsize_t count[2]; /* Block count */
@@ -733,7 +738,7 @@ void coll_read_test(int chunk_factor)
/* Initialize the buffer */
-
+
mdim[0] = MSPACE_DIM1;
mdim[1] = MSPACE_DIM2*mpi_size;
matrix_out =(int*)HDmalloc(sizeof(int)*MSPACE_DIM1*MSPACE_DIM2*mpi_size);
@@ -766,7 +771,7 @@ void coll_read_test(int chunk_factor)
/* The First selection for FILE to read
*
* block (1,1)
- * stride(1.1)
+ * stride(1.1)
* count (3,768/mpi_size)
* start (1,2+768*mpi_rank/mpi_size)
*
@@ -786,7 +791,7 @@ void coll_read_test(int chunk_factor)
/* The Second selection for FILE to read
*
* block (1,1)
- * stride(1.1)
+ * stride(1.1)
* count (3,1536/mpi_size)
* start (2,4+1536*mpi_rank/mpi_size)
*
@@ -815,7 +820,7 @@ void coll_read_test(int chunk_factor)
* Only the starting point is different.
* The first selection
* block (1,1)
- * stride(1.1)
+ * stride(1.1)
* count (3,768/mpi_size)
* start (0,768*mpi_rank/mpi_size)
*
@@ -838,7 +843,7 @@ void coll_read_test(int chunk_factor)
* Only the starting point is different.
* The second selection
* block (1,1)
- * stride(1,1)
+ * stride(1,1)
* count (3,1536/mpi_size)
* start (1,2+1536*mpi_rank/mpi_size)
*
@@ -871,6 +876,11 @@ void coll_read_test(int chunk_factor)
ret = H5Pset_dxpl_mpio(dxfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0),"MPIO data transfer property list succeed");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+   ret = H5Pset_dxpl_mpio_collective_opt(dxfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+   VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
/* Collective read */
ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1,
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index 8aa3d0d..4b0d5b3 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -32,6 +32,7 @@
int ndatasets = 300; /* number of datasets to create*/
int ngroups = 512; /* number of groups to create in root
* group. */
int facc_type = FACC_MPIO; /*Test file access type */
+int dxfer_coll_type = DXFER_COLLECTIVE_IO;
H5E_auto_t old_func; /* previous error handler */
void *old_client_data; /* previous error handler arg.*/
@@ -169,6 +170,9 @@ parse_options(int argc, char **argv)
case 'p': /* Use the MPI-POSIX driver access */
facc_type = FACC_MPIPOSIX;
break;
+ case 'i': /* Collective MPI-IO access with independent IO */
+   dxfer_coll_type = DXFER_INDEPENDENT_IO;
+   break;
case '2': /* Use the split-file driver with MPIO access */
/* Can use $HDF5_METAPREFIX to define the */
/* meta-file-prefix. */
@@ -412,10 +416,10 @@ int main(int argc, char **argv)
"independent group and dataset read", &collngroups_params);
/* By default, do not run big dataset on WIN32. */
#ifdef WIN32
- AddTest("-bigdset", big_dataset, NULL,
+ AddTest("-bigdset", big_dataset, NULL,
"big dataset test", PARATESTFILE);
#else
- AddTest("bigdset", big_dataset, NULL,
+ AddTest("bigdset", big_dataset, NULL,
"big dataset test", PARATESTFILE);
#endif
AddTest("fill", dataset_fillvalue, NULL,
@@ -453,8 +457,8 @@ int main(int argc, char **argv)
AddTest((mpi_size < 3)? "-cchunk10" : "cchunk10",
coll_chunk10,NULL,
"multiple chunk collective IO transferring to independent IO",PARATESTFILE);
-
-
+
+
/* irregular collective IO tests*/
AddTest("ccontw",
@@ -532,6 +536,13 @@ int main(int argc, char **argv)
"===================================\n");
}
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS){
+   printf("===================================\n"
+          " Using Independent I/O with file set view to replace collective I/O \n"
+          "===================================\n");
+ }
+
+
/* Perform requested testing */
PerformTests();
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index d74d492..686a030 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -54,6 +54,8 @@ enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD,
#define FACC_MULTI 0x4 /* Multi File */
#define FACC_MPIPOSIX 0x8 /* MPIPOSIX */
+#define DXFER_COLLECTIVE_IO 0x1 /* Collective IO*/
+#define DXFER_INDEPENDENT_IO 0x2 /* Independent IO collectively */
/*Constants for collective chunk definitions */
#define SPACE_DIM1 24
#define SPACE_DIM2 4
@@ -188,6 +190,7 @@ extern int nerrors; /*errors count */
extern H5E_auto_t old_func; /* previous error handler */
extern void *old_client_data; /*previous error handler arg.*/
extern int facc_type; /*Test file access type */
+extern int dxfer_coll_type;
/* Test program prototypes */
void multiple_dset_write(void);
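For context, here is a self-contained sketch of a complete parallel write that uses the same transfer-list settings end to end. This program is not part of the patch: the file name and dataset name are made up, the 1.6-style H5Dcreate call follows the old API used by the test code above, and error checking is omitted for brevity. Running the test suite itself with the new -i switch selects the same mode for all of the tests touched above.

/* Illustrative standalone program, assuming an MPI-enabled HDF5 build. */
#include <mpi.h>
#include "hdf5.h"

int main(int argc, char **argv)
{
    int mpi_rank, mpi_size;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

    /* File access property list: MPI-IO driver, one shared file */
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    hid_t file = H5Fcreate("indiv_io_demo.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

    /* One integer per rank */
    hsize_t dims[1] = {(hsize_t)mpi_size};
    hid_t   fspace  = H5Screate_simple(1, dims, NULL);
    hid_t   dset    = H5Dcreate(file, "data", H5T_NATIVE_INT, fspace, H5P_DEFAULT);

    /* Each rank selects its own element of the file dataspace */
    hsize_t start[1] = {(hsize_t)mpi_rank}, count[1] = {1};
    H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, NULL, count, NULL);
    hid_t mspace = H5Screate_simple(1, count, NULL);

    /* Collective transfer mode, but individual low-level MPI file I/O */
    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);

    int value = mpi_rank;
    H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, dxpl, &value);  /* still called by all ranks */

    H5Pclose(dxpl);
    H5Sclose(mspace);
    H5Sclose(fspace);
    H5Dclose(dset);
    H5Fclose(file);
    H5Pclose(fapl);
    MPI_Finalize();
    return 0;
}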