| author | MuQun Yang <myang6@hdfgroup.org> | 2008-09-16 15:03:45 (GMT) |
|---|---|---|
| committer | MuQun Yang <myang6@hdfgroup.org> | 2008-09-16 15:03:45 (GMT) |
| commit | 8bc0d5ed9019a681e1ea20c24264415d01c1cf2a (patch) | |
| tree | 83a7e7a5e68a86b624ba13f11de0255bf8a15725 | |
| parent | e7ff2aafa5c5dd8203f472a7703bd41bb7238420 (diff) | |
[svn-r15627] This check-in fixes a bug in the parallel HDF5 test suite.
When --enable-debug is turned on, a special code block guarded by the H5_HAVE_INSTRUMENTED_LIBRARY macro inside HDF5 is executed to check whether the collective chunk IO test cases run with the correct settings (one link, multiple chunk, etc.). However, when complicated derived datatypes are not supported by some MPI-IO packages, the library has to switch from one-link IO (with or without the optimization) to multiple-chunk IO (with or without the optimization). The current test suite does not know about this switch and generates a false assertion failure message.
This check-in fixes the problem by providing a second property that lets the test detect the switch and avoid the false failure message.
Tested at abe (NCSA Linux cluster) and kagiso.
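For readers following the fix, the sketch below illustrates the pattern the patch relies on; it is not the committed test code. The helper names (register_fallback_flag, verify_link_hard_path) are made up for illustration, and the internal H5D_XFER_COLL_CHUNK_* property names from H5Dprivate.h are assumed to be visible, as they are inside the test suite when HDF5 is built with --enable-debug (H5_HAVE_INSTRUMENTED_LIBRARY).

```c
/* Sketch of the fallback check added by this patch; assumes the internal
 * H5D_XFER_COLL_CHUNK_* names from H5Dprivate.h are visible (instrumented
 * debug build).  VRFY-style aborts are replaced by plain returns. */
#include "hdf5.h"

/* Register the secondary "link IO fell back to multi-chunk IO" flag on the
 * transfer property list, next to the existing per-case instrumentation
 * property (the API_LINK_HARD one in this sketch). */
static herr_t
register_fallback_flag(hid_t xfer_plist)
{
    unsigned prop_value = H5D_XFER_COLL_CHUNK_FIX;

    return H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_TO_MULTI,
                      H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
                      NULL, NULL, NULL, NULL, NULL, NULL);
}

/* After the collective H5Dwrite(): a value of 0 on the primary property
 * means the one-link path really ran.  Otherwise the fallback property must
 * be 1, i.e. the library legitimately switched to multi-chunk IO because the
 * MPI-IO layer could not handle the complicated derived datatype. */
static int
verify_link_hard_path(hid_t xfer_plist)
{
    unsigned prop_value;

    if(H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &prop_value) < 0)
        return -1;
    if(prop_value == 0)
        return 0;                      /* requested one-link path was taken */

    if(H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_TO_MULTI, &prop_value) < 0)
        return -1;
    return (prop_value == 1) ? 0 : -1; /* anything else is a genuine failure */
}
```

The committed diff below applies this same two-step check to both the API_LINK_HARD and API_LINK_TRUE cases.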
-rw-r--r-- | testpar/t_coll_chunk.c | 24 |
1 file changed, 22 insertions, 2 deletions
```diff
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index 8f4fdf2..8da2e12 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -686,6 +686,12 @@ coll_chunktest(const char* filename,
       status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
                           NULL, NULL, NULL, NULL, NULL, NULL);
       VRFY((status >= 0),"testing property list inserted succeeded");
+
+      prop_value = H5D_XFER_COLL_CHUNK_FIX;
+      status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_TO_MULTI, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
+                          NULL, NULL, NULL, NULL, NULL, NULL);
+      VRFY((status >= 0),"testing property list inserted succeeded");
+
       break;
 
     case API_MULTI_HARD:
@@ -700,6 +706,12 @@ coll_chunktest(const char* filename,
       status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
                           NULL, NULL, NULL, NULL, NULL, NULL);
       VRFY((status >= 0),"testing property list inserted succeeded");
+
+      prop_value = H5D_XFER_COLL_CHUNK_FIX;
+      status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_TO_MULTI_OPT, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
+                          NULL, NULL, NULL, NULL, NULL, NULL);
+      VRFY((status >= 0),"testing property list inserted succeeded");
+
       break;
 
     case API_LINK_FALSE:
@@ -740,7 +752,11 @@ coll_chunktest(const char* filename,
     case API_LINK_HARD:
       status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_HARD_NAME,&prop_value);
       VRFY((status >= 0),"testing property list get succeeded");
-      VRFY((prop_value == 0),"API to set LINK COLLECTIVE IO without optimization succeeded");
+      if(prop_value != 0) { /* double check if the option is switched to multiple chunk internally. */
+        status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_TO_MULTI, &prop_value);
+        VRFY((status >= 0),"testing property list get succeeded");
+        VRFY((prop_value == 1),"API to set LINK COLLECTIVE IO without optimization succeeded");
+      }
       break;
     case API_MULTI_HARD:
       status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME,&prop_value);
@@ -750,7 +766,11 @@ coll_chunktest(const char* filename,
     case API_LINK_TRUE:
       status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME,&prop_value);
       VRFY((status >= 0),"testing property list get succeeded");
-      VRFY((prop_value == 0),"API to set LINK COLLECTIVE IO with true optimization succeeded");
+      if(prop_value != 0) { /* double check if the option is switched to multiple chunk internally. */
+        status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_TO_MULTI_OPT, &prop_value);
+        VRFY((status >= 0),"testing property list get succeeded");
+        VRFY((prop_value == 1),"API to set LINK COLLECTIVE IO without optimization succeeded");
+      }
       break;
     case API_LINK_FALSE:
       status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME,&prop_value);
```