-rw-r--r--  testpar/t_coll_chunk.c  | 440
-rw-r--r--  testpar/testphdf5.c     |  28
-rw-r--r--  testpar/testphdf5.h     |  25
3 files changed, 484 insertions, 9 deletions
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index 49f0dfd..9a8dd93 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -29,7 +29,7 @@ static void ccdataset_print(hsize_t start[],hsize_t block[],DATATYPE*dataset);
static int ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[],
hsize_t block[], DATATYPE *dataset, DATATYPE *original);
-static void coll_chunktest(const char* filename,int chunk_factor,int select_factor);
+static void coll_chunktest(const char* filename,int chunk_factor,int select_factor,int api_option);
/*-------------------------------------------------------------------------
@@ -74,7 +74,7 @@ coll_chunk1(void)
const char *filename;
filename = GetTestParameters();
- coll_chunktest(filename,1,BYROW_CONT);
+ coll_chunktest(filename,1,BYROW_CONT,API_NONE);
}
@@ -121,7 +121,7 @@ coll_chunk2(void)
const char *filename;
filename = GetTestParameters();
- coll_chunktest(filename,1,BYROW_DISCONT);
+ coll_chunktest(filename,1,BYROW_DISCONT,API_NONE);
}
@@ -173,7 +173,7 @@ coll_chunk3(void)
MPI_Comm_size(comm,&mpi_size);
filename = GetTestParameters();
- coll_chunktest(filename,mpi_size,BYROW_CONT);
+ coll_chunktest(filename,mpi_size,BYROW_CONT,API_NONE);
}
@@ -223,7 +223,307 @@ coll_chunk4(void)
MPI_Comm comm = MPI_COMM_WORLD;
filename = GetTestParameters();
- coll_chunktest(filename,1,BYROW_SELECTNONE);
+ coll_chunktest(filename,1,BYROW_SELECTNONE,API_NONE);
+
+}
+
+/*-------------------------------------------------------------------------
+ * Function:	coll_chunk5
+ *
+ * Purpose:	Wrapper to test collective chunk IO, forcing linked-chunk IO
+ *          (API_LINK_HARD), for a regular JOINT selection with at least
+ *          2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Description of the selection: a single selection across many chunks
+ * Two dimensions, number of chunks = 2*mpi_size
+ *
+ * dim1 = SPACE_DIM1*mpi_size
+ * dim2 = SPACE_DIM2
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1
+ * count1 = SPACE_DIM2
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk5(void)
+{
+
+ const char *filename;
+
+ filename = GetTestParameters();
+ coll_chunktest(filename,4,BYROW_SELECTUNBALANCE,API_LINK_HARD);
+
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk6
+ *
+ * Purpose:	Wrapper to test collective chunk IO, forcing multi-chunk IO
+ *          (API_MULTI_HARD), for a regular JOINT selection with at least
+ *          2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Description of the selection: a single selection across many chunks
+ * Two dimensions, number of chunks = 2*mpi_size
+ *
+ * dim1 = SPACE_DIM1*mpi_size
+ * dim2 = SPACE_DIM2
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1
+ * count1 = SPACE_DIM2
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk6(void)
+{
+
+ const char *filename;
+
+ filename = GetTestParameters();
+ coll_chunktest(filename,4,BYROW_SELECTUNBALANCE,API_MULTI_HARD);
+
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk7
+ *
+ * Purpose:	Wrapper to test collective chunk IO, using linked-chunk IO with
+ *          optimization (API_LINK_TRUE), for a regular JOINT selection with
+ *          at least 2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Description of the selection: a single selection across many chunks
+ * Two dimensions, number of chunks = 2*mpi_size
+ *
+ * dim1 = SPACE_DIM1*mpi_size
+ * dim2 = SPACE_DIM2
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1
+ * count1 = SPACE_DIM2
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk7(void)
+{
+
+ const char *filename;
+
+ filename = GetTestParameters();
+ coll_chunktest(filename,4,BYROW_SELECTUNBALANCE,API_LINK_TRUE);
+
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk8
+ *
+ * Purpose:	Wrapper to test collective chunk IO, with linked-chunk IO
+ *          transferring to multi-chunk IO (API_LINK_FALSE), for a regular
+ *          JOINT selection with at least 2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Description of the selection: a single selection across many chunks
+ * Two dimensions, number of chunks = 2*mpi_size
+ *
+ * dim1 = SPACE_DIM1*mpi_size
+ * dim2 = SPACE_DIM2
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1
+ * count1 = SPACE_DIM2
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk8(void)
+{
+
+ const char *filename;
+
+ filename = GetTestParameters();
+ coll_chunktest(filename,4,BYROW_SELECTUNBALANCE,API_LINK_FALSE);
+
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk9
+ *
+ * Purpose:	Wrapper to test collective chunk IO, using multi-chunk collective
+ *          IO with optimization (API_MULTI_COLL), for a regular JOINT
+ *          selection with at least 2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Description of the selection: a single selection across many chunks
+ * Two dimensions, number of chunks = 2*mpi_size
+ *
+ * dim1 = SPACE_DIM1*mpi_size
+ * dim2 = SPACE_DIM2
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1
+ * count1 = SPACE_DIM2
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk9(void)
+{
+
+ const char *filename;
+
+ filename = GetTestParameters();
+ coll_chunktest(filename,4,BYROW_SELECTUNBALANCE,API_MULTI_COLL);
+
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk10
+ *
+ * Purpose:	Wrapper to test collective chunk IO, with multi-chunk IO
+ *          transferring to independent IO (API_MULTI_IND), for a regular
+ *          JOINT selection with at least 2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Description of the selection: a single selection across many chunks
+ * Two dimensions, number of chunks = 2*mpi_size
+ *
+ * dim1 = SPACE_DIM1*mpi_size
+ * dim2 = SPACE_DIM2
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1
+ * count1 = SPACE_DIM2
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk10(void)
+{
+
+ const char *filename;
+
+ filename = GetTestParameters();
+ coll_chunktest(filename,4,BYROW_SELECTINCHUNK,API_MULTI_IND);
}
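The per-rank selection spelled out in the headers above maps onto a single H5Sselect_hyperslab call. A minimal sketch, assuming an already-created file dataspace of (SPACE_DIM1*mpi_size) x SPACE_DIM2; the helper name is hypothetical and error handling is elided:

    #include "hdf5.h"

    #define SPACE_DIM1 24  /* values mirror testphdf5.h */
    #define SPACE_DIM2 4

    /* Hypothetical helper: select this rank's block of rows, as described
     * in the comment blocks above. */
    static herr_t
    select_byrow_slab(hid_t file_dataspace, int mpi_rank)
    {
        hsize_t start[2], stride[2], count[2], block[2];

        start[0]  = (hsize_t)mpi_rank * SPACE_DIM1; /* start0 = mpi_rank*SPACE_DIM1 */
        start[1]  = 0;                              /* start1 = 0 */
        stride[0] = stride[1] = 1;                  /* stride = 1 in all dimensions */
        count[0]  = SPACE_DIM1;                     /* count0 = SPACE_DIM1 */
        count[1]  = SPACE_DIM2;                     /* count1 = SPACE_DIM2 */
        block[0]  = block[1] = 1;                   /* block = 1 in all dimensions */

        return H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET,
                                   start, stride, count, block);
    }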
@@ -253,7 +553,8 @@ coll_chunk4(void)
static void
coll_chunktest(const char* filename,
int chunk_factor,
- int select_factor) {
+ int select_factor,
+ int api_option) {
hid_t file,dataset, file_dataspace;
hid_t acc_plist,xfer_plist,crp_plist;
@@ -264,6 +565,10 @@ coll_chunktest(const char* filename,
hsize_t start[RANK],count[RANK],stride[RANK],block[RANK];
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ unsigned prop_value;
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
hbool_t use_gpfs = FALSE;
int mpi_size,mpi_rank;
@@ -335,11 +640,131 @@ coll_chunktest(const char* filename,
status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((status>= 0),"MPIO collective transfer property succeeded");
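+   /* Exercise the requested collective-chunk optimization API before the write. */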
+ switch(api_option){
+ case API_LINK_HARD:
+ status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_OPT_ONE_IO);
+ VRFY((status>= 0),"collective chunk optimization succeeded");
+ break;
+ case API_MULTI_HARD:
+ status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_OPT_MULTI_IO);
+      VRFY((status>= 0),"collective chunk optimization succeeded");
+ break;
+ case API_LINK_TRUE:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,2);
+ VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
+ break;
+ case API_LINK_FALSE:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,6);
+ VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
+ break;
+ case API_MULTI_COLL:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,8);/* make sure it is using multi-chunk IO */
+ VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
+ status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,50);
+ VRFY((status>= 0),"collective chunk optimization set chunk ratio succeeded");
+ break;
+ case API_MULTI_IND:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,8);/* make sure it is using multi-chunk IO */
+ VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
+ status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,100);
+ VRFY((status>= 0),"collective chunk optimization set chunk ratio succeeded");
+ break;
+ default:
+ ;
+ }
+
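+   /* With an instrumented build, plant a one-shot test property in the
+    * transfer plist; the library is expected to reset it to zero when the
+    * corresponding chunk-IO path is taken, which the H5Pget checks after
+    * H5Dwrite verify. */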
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ if(facc_type == FACC_MPIO) {
+ switch(api_option){
+ case API_LINK_HARD:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_HARD_NAME,H5D_XFER_COLL_CHUNK_SIZE,&prop_value,
+ NULL,NULL,NULL,NULL,NULL,NULL);
+ VRFY((status >= 0),"testing property list inserted succeeded");
+ break;
+ case API_MULTI_HARD:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME,H5D_XFER_COLL_CHUNK_SIZE,&prop_value,
+ NULL,NULL,NULL,NULL,NULL,NULL);
+ VRFY((status >= 0),"testing property list inserted succeeded");
+ break;
+ case API_LINK_TRUE:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME,H5D_XFER_COLL_CHUNK_SIZE,&prop_value,
+ NULL,NULL,NULL,NULL,NULL,NULL);
+ VRFY((status >= 0),"testing property list inserted succeeded");
+ break;
+ case API_LINK_FALSE:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME,H5D_XFER_COLL_CHUNK_SIZE,&prop_value,
+ NULL,NULL,NULL,NULL,NULL,NULL);
+ VRFY((status >= 0),"testing property list inserted succeeded");
+            break;
+ case API_MULTI_COLL:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME,H5D_XFER_COLL_CHUNK_SIZE,&prop_value,
+ NULL,NULL,NULL,NULL,NULL,NULL);
+ VRFY((status >= 0),"testing property list inserted succeeded");
+            break;
+ case API_MULTI_IND:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME,H5D_XFER_COLL_CHUNK_SIZE,&prop_value,
+ NULL,NULL,NULL,NULL,NULL,NULL);
+ VRFY((status >= 0),"testing property list inserted succeeded");
+            break;
+ default:
+ ;
+ }
+ }
+#endif
+
/* write data collectively */
status = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, file_dataspace,
xfer_plist, data_array1);
VRFY((status >= 0),"dataset write succeeded");
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ if(facc_type == FACC_MPIO) {
+ switch(api_option){
+ case API_LINK_HARD:
+ status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_HARD_NAME,&prop_value);
+ VRFY((status >= 0),"testing property list get succeeded");
+ VRFY((prop_value == 0),"API to set LINK COLLECTIVE IO without optimization succeeded");
+ break;
+ case API_MULTI_HARD:
+ status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME,&prop_value);
+ VRFY((status >= 0),"testing property list get succeeded");
+ VRFY((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO without optimization succeeded");
+ break;
+ case API_LINK_TRUE:
+ status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME,&prop_value);
+ VRFY((status >= 0),"testing property list get succeeded");
+ VRFY((prop_value == 0),"API to set LINK COLLECTIVE IO with true optimization succeeded");
+ break;
+ case API_LINK_FALSE:
+ status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME,&prop_value);
+ VRFY((status >= 0),"testing property list get succeeded");
+ VRFY((prop_value == 0),"API to set LINK IO transferring to multi-chunk IO succeeded");
+ break;
+ case API_MULTI_COLL:
+ status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME,&prop_value);
+ VRFY((status >= 0),"testing property list get succeeded");
+ VRFY((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded");
+ break;
+ case API_MULTI_IND:
+ status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME,&prop_value);
+ VRFY((status >= 0),"testing property list get succeeded");
+ VRFY((prop_value == 0),"API to set MULTI-CHUNK IO transferring to independent IO succeeded");
+ break;
+ default:
+ ;
+ }
+ }
+#endif
+
status = H5Dclose(dataset);
VRFY((status >= 0),"");
@@ -490,7 +915,8 @@ ccslab_set(int mpi_rank,
start[1] = 0;
stride[1] = 1;
if((mpi_rank *3)<(mpi_size*2)) start[0] = mpi_rank;
- else start[0] = 1 + SPACE_DIM1*mpi_size/2 + (mpi_rank-2*mpi_size/3); break;
+ else start[0] = 1 + SPACE_DIM1*mpi_size/2 + (mpi_rank-2*mpi_size/3);
+ break;
case BYROW_SELECTINCHUNK:
/* Each process will only select one chunk */
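Taken together, the new cases exercise a simple user-side pattern on the transfer property list: request collective MPI-IO, then steer the chunk optimization. A minimal sketch of the API_MULTI_COLL variant; dataset, file_dataspace, and data_array1 are assumed to be open and valid, and the thresholds simply echo the test values:

    hid_t  xfer_plist;
    herr_t status;

    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
    status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
    /* A high chunk-number threshold steers the write to multi-chunk IO. */
    status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8);
    /* Percentage threshold deciding collective vs. independent IO per chunk. */
    status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 50);

    status = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, file_dataspace,
                      xfer_plist, data_array1);
    H5Pclose(xfer_plist);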
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index a65ef6d..9b56b24 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -427,9 +427,35 @@ int main(int argc, char **argv)
coll_chunk2,NULL, "noncontiguous collective chunk io",PARATESTFILE);
AddTest("cchunk3",
coll_chunk3,NULL, "multi-chunk collective chunk io",PARATESTFILE);
- AddTest("cchunk4",
+ AddTest("cchunk4",
coll_chunk4,NULL, "collective chunk io with partial non-selection ",PARATESTFILE);
+    if((mpi_size < 3) && MAINPROCESS) {
+        printf("Collective chunk IO optimization APIs ");
+        printf("need at least 3 processes to participate\n");
+        printf("Collective chunk IO API tests will be skipped\n");
+    }
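+    /* A test registered under a name with a leading '-' stays in the list
+     * but is marked inactive, so the cchunk5-10 cases are skipped when
+     * fewer than 3 processes are available. */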
+    AddTest((mpi_size < 3)? "-cchunk5" : "cchunk5",
+ coll_chunk5,NULL,
+ "linked chunk collective IO without optimization",PARATESTFILE);
+ AddTest((mpi_size < 3)? "-cchunk6" : "cchunk6",
+ coll_chunk6,NULL,
+ "multi-chunk collective IO without optimization",PARATESTFILE);
+ AddTest((mpi_size < 3)? "-cchunk7" : "cchunk7",
+ coll_chunk7,NULL,
+ "linked chunk collective IO with optimization",PARATESTFILE);
+ AddTest((mpi_size < 3)? "-cchunk8" : "cchunk8",
+ coll_chunk8,NULL,
+ "linked chunk collective IO transferring to multi-chunk",PARATESTFILE);
+ AddTest((mpi_size < 3)? "-cchunk9" : "cchunk9",
+ coll_chunk9,NULL,
+ "multiple chunk collective IO with optimization",PARATESTFILE);
+ AddTest((mpi_size < 3)? "-cchunk10" : "cchunk10",
+ coll_chunk10,NULL,
+ "multiple chunk collective IO transferring to independent IO",PARATESTFILE);
+
/* irregular collective IO tests*/
AddTest("ccontw",
coll_irregular_cont_write,NULL,
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index 7c47e0a..d74d492 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -19,6 +19,10 @@
#include "testpar.h"
+enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD,
+ API_MULTI_HARD,API_LINK_TRUE,API_LINK_FALSE,
+ API_MULTI_COLL,API_MULTI_IND};
+
#ifndef FALSE
#define FALSE 0
#endif
@@ -52,12 +56,25 @@
/*Constants for collective chunk definitions */
#define SPACE_DIM1 24
-#define SPACE_DIM2 3
+#define SPACE_DIM2 4
#define BYROW_CONT 1
#define BYROW_DISCONT 2
#define BYROW_SELECTNONE 3
#define BYROW_SELECTUNBALANCE 4
#define BYROW_SELECTINCHUNK 5
+
+#define DIM0_NUM_CHUNK 4
+#define DIM1_NUM_CHUNK 2
+#define LINK_TRUE_NUM_CHUNK 2
+#define LINK_FALSE_NUM_CHUNK 6
+#define MULTI_TRUE_PERCENT 50
+#define LINK_TRUE_CHUNK_NAME "h5_link_chunk_true"
+#define LINK_FALSE_CHUNK_NAME "h5_link_chunk_false"
+#define LINK_HARD_CHUNK_NAME "h5_link_chunk_hard"
+#define MULTI_HARD_CHUNK_NAME "h5_multi_chunk_hard"
+#define MULTI_COLL_CHUNK_NAME "h5_multi_chunk_coll"
+#define MULTI_INDP_CHUNK_NAME "h5_multi_chunk_indp"
+
#define DSET_COLLECTIVE_CHUNK_NAME "coll_chunk_name"
@@ -199,6 +216,12 @@ void coll_chunk1(void);
void coll_chunk2(void);
void coll_chunk3(void);
void coll_chunk4(void);
+void coll_chunk5(void);
+void coll_chunk6(void);
+void coll_chunk7(void);
+void coll_chunk8(void);
+void coll_chunk9(void);
+void coll_chunk10(void);
void coll_irregular_cont_read(void);
void coll_irregular_cont_write(void);
void coll_irregular_simple_chunk_read(void);