path: root/testpar
author      MuQun Yang <ymuqun@hdfgroup.org>    2004-08-12 05:02:59 (GMT)
committer   MuQun Yang <ymuqun@hdfgroup.org>    2004-08-12 05:02:59 (GMT)
commit      4dfae9e8975c6331387dd3b4574ee5f177bdfafc (patch)
tree        bf576c2a56a01db285bffce509d8b98bcb84d9c1 /testpar
parent      03ddea6cdaf07719b42ac60f2effdcdae3ef1d96 (diff)
download    hdf5-4dfae9e8975c6331387dd3b4574ee5f177bdfafc.zip
            hdf5-4dfae9e8975c6331387dd3b4574ee5f177bdfafc.tar.gz
            hdf5-4dfae9e8975c6331387dd3b4574ee5f177bdfafc.tar.bz2
[svn-r9072]

Purpose:
To make the collective chunk IO test more general.

Description:
The previous collective chunk IO test only works with fewer than 4 processors. In case people would like to test with more processors, more general tests need to be used.

Solution:
The following changes have been made:
1. Change the way the discontiguous hyperslab selection is built so that it works with a larger number of processors.
2. Increase the size of the data array.

Now I can test with 12 processors; however, the fourth test is very slow for the independent IO write and read.

Platforms tested:
eirene (only parallel test codes are modified, no need to test on other platforms).

Misc. update:
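
Note on the new selection: the arithmetic behind the reworked BYROW_DISCONT hyperslab can be seen in the ccslab_set() hunk below. As a rough illustration only (this standalone program is not part of the patch; mpi_size = 12 is taken from the commit message), the following C snippet evaluates the same count[] expressions with the enlarged dimensions:

/* Illustration only, not part of the patch: evaluates the count[] expressions
 * used by the new BYROW_DISCONT selection with the enlarged dimensions from
 * testphdf5.h and the 12-process run mentioned in the commit message. */
#include <stdio.h>

#define SPACE_DIM1 288
#define SPACE_DIM2 288

int main(void)
{
    int mpi_size = 12;                 /* example process count from the commit message */
    int block0 = 1, block1 = 1;        /* new selection: single-element blocks */
    int stride0 = 3, stride1 = 3;      /* every third element in each dimension */

    /* Same expressions as the patched ccslab_set(): each rank owns a strip of
     * SPACE_DIM1/mpi_size rows and the selection tiles that strip. */
    int count0 = (SPACE_DIM1 / mpi_size) / (stride0 * block0);
    int count1 = SPACE_DIM2 / (stride1 * block1);

    printf("rows per rank     : %d\n", SPACE_DIM1 / mpi_size);             /* 24    */
    printf("count[0], count[1]: %d, %d\n", count0, count1);                /* 8, 96 */
    printf("elements per rank : %d\n", count0 * count1 * block0 * block1); /* 768   */
    return 0;
}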
Diffstat (limited to 'testpar')
-rw-r--r--  testpar/t_coll_chunk.c | 23
-rw-r--r--  testpar/testphdf5.h    |  4
2 files changed, 21 insertions(+), 6 deletions(-)
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index 4b32bfa..680940f 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -78,8 +78,11 @@ coll_chunk3(void)
{
char *filename;
+ int mpi_size;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Comm_size(comm,&mpi_size);
filename = (char *) GetTestParameters();
- coll_chunktest(filename,4,BYROW_CONT);
+ coll_chunktest(filename,mpi_size,BYROW_CONT);
}
@@ -88,8 +91,11 @@ coll_chunk4(void)
{
char *filename;
+ int mpi_size;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Comm_size(comm,&mpi_size);
filename = (char *) GetTestParameters();
- coll_chunktest(filename,4,BYROW_DISCONT);
+ coll_chunktest(filename,mpi_size*2,BYROW_DISCONT);
}
@@ -202,7 +208,7 @@ coll_chunktest(char* filename,int chunk_factor,int select_factor) {
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_NAME,&prop_value);
VRFY((status >= 0),"testing property list get succeeded");
- if(chunk_factor == 4 && select_factor == BYROW_DISCONT) { /* suppose to use independent */
+ if(chunk_factor == mpi_size*2 && select_factor == BYROW_DISCONT) { /* suppose to use independent */
VRFY((prop_value == 0), "H5Dwrite shouldn't use MPI Collective IO call");
}
else {
@@ -280,7 +286,7 @@ coll_chunktest(char* filename,int chunk_factor,int select_factor) {
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_NAME,&prop_value);
VRFY((status >= 0),"testing property list get succeeded");
- if(chunk_factor == 4 && select_factor == BYROW_DISCONT) { /* suppose to use independent */
+ if(chunk_factor == mpi_size*2 && select_factor == BYROW_DISCONT) { /* suppose to use independent */
VRFY((prop_value == 0), "H5Dread shouldn't use MPI Collective IO call");
}
else {
@@ -333,12 +339,21 @@ ccslab_set(int mpi_rank, int mpi_size, hssize_t start[], hsize_t count[],
break;
case BYROW_DISCONT:
/* Each process takes several disjoint blocks. */
+ /*
block[0] = 2;
block[1] = 2;
stride[0] = 3;
stride[1] = 6;
count[0] = 2;
count[1] = 3;
+ */
+
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 3;
+ stride[1] = 3;
+ count[0] = (SPACE_DIM1/mpi_size)/(stride[0]*block[0]);
+ count[1] = (SPACE_DIM2)/(stride[1]*block[1]);
start[0] = SPACE_DIM1/mpi_size*mpi_rank;
start[1] = 0;
if (VERBOSE_MED) printf("slab_set BYROW_DISCONT\n");
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index 359eb958..b295317 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -113,8 +113,8 @@
#define FACC_MPIPOSIX 0x8 /* MPIPOSIX */
/*Constants for collective chunk definitions */
-#define SPACE_DIM1 24
-#define SPACE_DIM2 24
+#define SPACE_DIM1 288
+#define SPACE_DIM2 288
#define BYROW_CONT 1
#define BYROW_DISCONT 2
#define DSET_COLLECTIVE_CHUNK_NAME "coll_chunk_name"
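
Why the dataspace grew from 24x24 to 288x288: with the old fixed selection, the dim-0 span of one rank's hyperslab (stride*(count-1) + block = 3*1 + 2 = 5 rows) stops fitting inside its strip of SPACE_DIM1/mpi_size rows once the process count passes roughly 4, whereas the new selection derives count[0] from the strip height. The check below is a back-of-the-envelope sketch of that reasoning only (an assumption that the selection must fit inside each rank's strip; this program is not part of the patch):

/* Illustration only, not part of the patch: compares the old fixed selection
 * (block=2, stride=3, count=2, SPACE_DIM1=24) with the new derived selection
 * (block=1, stride=3, SPACE_DIM1=288) for a range of process counts. */
#include <stdio.h>

static int old_fits(int mpi_size)
{
    int rows_per_rank = 24 / mpi_size;          /* old SPACE_DIM1 = 24       */
    int span = 3 * (2 - 1) + 2;                 /* stride*(count-1) + block  */
    return rows_per_rank >= span;
}

static int new_fits(int mpi_size)
{
    int rows_per_rank = 288 / mpi_size;         /* new SPACE_DIM1 = 288      */
    return rows_per_rank >= 3 * 1;              /* at least one stride*block */
}

int main(void)
{
    for (int np = 1; np <= 12; np++)
        printf("np=%2d  old:%s  new:%s\n", np,
               old_fits(np) ? "ok" : "too small",
               new_fits(np) ? "ok" : "too small");
    return 0;
}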