author     MuQun Yang <ymuqun@hdfgroup.org>    2005-08-25 02:45:25 (GMT)
committer  MuQun Yang <ymuqun@hdfgroup.org>    2005-08-25 02:45:25 (GMT)
commit     4a6e1b6ba9287ba1ea84e9c09950aac5be09a59e (patch)
tree       55a6d75c6d4ad24c4092365b3dcb2b7e3260090e /testpar
parent     0f03c5590a3b728e136b77c0a8fa6670cc7fe5b4 (diff)
[svn-r11294] Purpose:
bug fix
Description:
The previous version always tested with the MPI-IO driver, even when the MPI-POSIX driver was set.
Solution:
Add an option to test the MPI-POSIX driver in the first round of the collective chunk IO test.
Platforms tested:
IRIX64, AIX 5.1, Linux 2.4
Misc. update:
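The fix replaces the hard-coded H5Pcreate/H5Pset_fapl_mpio sequence with the test harness helper create_faccess_plist(), so the first-round collective chunk test honors whichever parallel file driver was requested. The real helper is defined elsewhere in the parallel test suite; the code below is only a minimal sketch of the idea, assuming facc_type carries a driver selector (the FACC_MPIO/FACC_MPIPOSIX names and the _sketch suffix are placeholders, not taken from this diff, and H5Pset_fapl_mpiposix is the MPI-POSIX driver API of that HDF5 1.6-era release):

#include <mpi.h>
#include "hdf5.h"

/* Illustrative driver selectors; the real harness defines its own
 * facc_type values (these names are assumptions, not from this diff). */
#define FACC_MPIO     1
#define FACC_MPIPOSIX 2

/* Sketch of a file-access property list helper in the spirit of
 * create_faccess_plist(): choose the parallel VFD from facc_type
 * instead of hard-coding H5Pset_fapl_mpio. */
static hid_t
create_faccess_plist_sketch(MPI_Comm comm, MPI_Info info,
                            int facc_type, hbool_t use_gpfs)
{
    hid_t  acc_plist;
    herr_t ret;

    acc_plist = H5Pcreate(H5P_FILE_ACCESS);
    if (acc_plist < 0)
        return -1;

    if (facc_type == FACC_MPIPOSIX)
        /* MPI-POSIX driver; use_gpfs toggles GPFS hints */
        ret = H5Pset_fapl_mpiposix(acc_plist, comm, use_gpfs);
    else
        /* default: MPI-IO driver */
        ret = H5Pset_fapl_mpio(acc_plist, comm, info);

    if (ret < 0) {
        H5Pclose(acc_plist);
        return -1;
    }
    return acc_plist;
}

With such a helper, coll_chunktest() obtains the property list for the requested driver in one call, which is what the replaced hunks in the diff below do with create_faccess_plist(comm, info, facc_type, use_gpfs).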
Diffstat (limited to 'testpar')
-rw-r--r--  testpar/t_coll_chunk.c  55
1 file changed, 3 insertions, 52 deletions
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index 765124d..8eb282f 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -105,6 +105,7 @@ coll_chunktest(const char* filename,int chunk_factor,int select_factor) {
hid_t file,dataset, file_dataspace;
hid_t acc_plist,xfer_plist,crp_plist;
+ hbool_t use_gpfs = FALSE;
hsize_t dims[RANK], chunk_dims[RANK];
int* data_array1 = NULL;
int* data_origin1 = NULL;
@@ -123,13 +124,9 @@ coll_chunktest(const char* filename,int chunk_factor,int select_factor) {
MPI_Comm_rank(comm,&mpi_rank);
/* Create the data space */
- acc_plist = H5Pcreate(H5P_FILE_ACCESS);
+ acc_plist = create_faccess_plist(comm,info,facc_type,use_gpfs);
VRFY((acc_plist >= 0),"");
-
- status = H5Pset_fapl_mpio(acc_plist,comm,info);
- VRFY((acc_plist >= 0),"MPIO creation property list succeeded");
-
file = H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_plist);
VRFY((file >= 0),"H5Fcreate succeeded");
@@ -197,33 +194,12 @@ coll_chunktest(const char* filename,int chunk_factor,int select_factor) {
status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((status>= 0),"MPIO collective transfer property succeeded");
-#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
-#ifdef H5_WANT_H5_V1_6_COMPAT
- status = H5Pinsert(xfer_plist,H5D_XFER_COLL_CHUNK_NAME,H5D_XFER_COLL_CHUNK_SIZE,&prop_value,
- NULL,NULL,NULL,NULL,NULL);
-#else /* H5_WANT_H5_V1_6_COMPAT */
- status = H5Pinsert(xfer_plist,H5D_XFER_COLL_CHUNK_NAME,H5D_XFER_COLL_CHUNK_SIZE,&prop_value,
- NULL,NULL,NULL,NULL,NULL,NULL);
-#endif /* H5_WANT_H5_V1_6_COMPAT */
- VRFY((status >= 0),"testing property list inserted succeeded");
-#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
/* write data collectively */
status = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, file_dataspace,
xfer_plist, data_array1);
VRFY((status >= 0),"dataset write succeeded");
-#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_NAME,&prop_value);
- VRFY((status >= 0),"testing property list get succeeded");
- if(chunk_factor == mpi_size*2 && select_factor == BYROW_DISCONT) { /* suppose to use independent */
- VRFY((prop_value == 0), "H5Dwrite shouldn't use MPI Collective IO call");
- }
- else {
- VRFY((prop_value == 1), "H5Dwrite didn't use MPI Collective IO call");
- }
-#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
status = H5Dclose(dataset);
VRFY((status >= 0),"");
@@ -251,11 +227,7 @@ coll_chunktest(const char* filename,int chunk_factor,int select_factor) {
data_origin1 = (int *)malloc(SPACE_DIM1*SPACE_DIM2*sizeof(int));
VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
- /* Create the data space */
- acc_plist = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((acc_plist >= 0),"");
-
- status = H5Pset_fapl_mpio(acc_plist,comm,info);
+ acc_plist = create_faccess_plist(comm, info, facc_type, use_gpfs);
VRFY((acc_plist >= 0),"MPIO creation property list succeeded");
file = H5Fopen(filename,H5F_ACC_RDONLY,acc_plist);
@@ -283,30 +255,9 @@ coll_chunktest(const char* filename,int chunk_factor,int select_factor) {
VRFY((xfer_plist >= 0),"");
status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((status>= 0),"MPIO collective transfer property succeeded");
-#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
-#ifdef H5_WANT_H5_V1_6_COMPAT
- status = H5Pinsert(xfer_plist,H5D_XFER_COLL_CHUNK_NAME,H5D_XFER_COLL_CHUNK_SIZE,&prop_value,
- NULL,NULL,NULL,NULL,NULL);
-#else /* H5_WANT_H5_V1_6_COMPAT */
- status = H5Pinsert(xfer_plist,H5D_XFER_COLL_CHUNK_NAME,H5D_XFER_COLL_CHUNK_SIZE,&prop_value,
- NULL,NULL,NULL,NULL,NULL,NULL);
-#endif /* H5_WANT_H5_V1_6_COMPAT */
- VRFY((status >= 0),"testing property list inserted succeeded");
-#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
status = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, file_dataspace,
xfer_plist, data_array1);
VRFY((status >=0),"dataset read succeeded");
-#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_NAME,&prop_value);
- VRFY((status >= 0),"testing property list get succeeded");
- if(chunk_factor == mpi_size*2 && select_factor == BYROW_DISCONT) { /* suppose to use independent */
- VRFY((prop_value == 0), "H5Dread shouldn't use MPI Collective IO call");
- }
- else {
- VRFY((prop_value == 1), "H5Dread didn't use MPI Collective IO call");
- }
-#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
/* verify the read data with original expected data */