author     MuQun Yang <ymuqun@hdfgroup.org>  2004-07-21 23:40:27 (GMT)
committer  MuQun Yang <ymuqun@hdfgroup.org>  2004-07-21 23:40:27 (GMT)
commit     1232d53a3209efbdfab649b66efa8b57d4a836d5 (patch)
tree       c2744b59f2f008552bf80f13cef6f749b06f9697 /src/H5Dio.c
parent     09fd2788613838777dbdbb451464abe62466a45e (diff)
[svn-r8923] Purpose:
To add a special section of code for collective chunk I/O tests.

Description:
    The current patch for collective chunk I/O support in HDF5 can only handle
    some special (though common in many applications) cases. Inside the source
    code we check carefully that other cases do not fall into this category and
    do not use collective I/O. We would also like our test programs to verify
    that those collective conditions were met. Parallel HDF5 currently handles
    such collective I/O requests in a special way: if the library finds it
    cannot do collective I/O, it silently switches to independent I/O. So there
    is no good way to check that the library is doing what it should without
    "hacking" into the HDF5 source code for testing purposes. The "hacking"
    must not affect normal library operation and should be easy to pull out
    once a more general collective I/O algorithm works. Following Quincey's
    suggestion, we used the HDF5 property APIs to do the job.

Solution:
    The approach has three parts:
    1) In the test program, insert a property into the data transfer property
       list and set a default value for it.
    2) Inside H5Dio.c, when the library finds that it cannot do collective I/O
       with chunked storage, it changes that default value.
    3) The test program then rechecks the value after H5Dwrite or H5Dread to
       verify that the current collective I/O case did the right thing.
    (A sketch of the test-program side is given after this commit message.)

Note:
    The test does not stop when it finds that the library is not doing the
    right thing; it will probably finish normally. For now the test program
    just prints an error message. This should be changed later.

Platforms tested:
    copper, arabica, eirene

Misc. update:
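As a rough illustration of the test-program side described in the Solution above, here is a minimal sketch. This is not code from this commit: the function name check_collective_chunk_io, its arguments, and the omitted error handling are assumptions; only the property name "__test__ccfoo___" and the overall insert/recheck scheme come from this change. H5Pinsert2 is the modern name of the generic property-insert routine (the 2004 code base used H5Pinsert), and a parallel (MPI) HDF5 build is assumed.

#include <stdio.h>
#include "hdf5.h"

#define TEST_CC_PROP "__test__ccfoo___"   /* property name checked in H5Dio.c */

/* Sketch only: the dataset, dataspaces, and buffer are assumed to exist. */
int
check_collective_chunk_io(hid_t dset, hid_t memspace, hid_t filespace,
                          const int *buf)
{
    int   prop_value = 1;                 /* default: assume collective I/O is used */
    hid_t dxpl       = H5Pcreate(H5P_DATASET_XFER);

    /* Request collective transfer on the data transfer property list */
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

    /* Insert the temporary test property with its default value */
    H5Pinsert2(dxpl, TEST_CC_PROP, sizeof(int), &prop_value,
               NULL, NULL, NULL, NULL, NULL, NULL);

    if (H5Dwrite(dset, H5T_NATIVE_INT, memspace, filespace, dxpl, buf) < 0)
        return -1;

    /* H5D_write() resets the property to 0 if it silently fell back to
     * independent I/O, so recheck the value after the write. */
    if (H5Pget(dxpl, TEST_CC_PROP, &prop_value) < 0)
        return -1;
    if (prop_value == 0)
        printf("Error: collective chunk I/O fell back to independent I/O\n");

    H5Pclose(dxpl);
    return 0;                             /* test continues even on failure */
}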
Diffstat (limited to 'src/H5Dio.c')
-rw-r--r--  src/H5Dio.c  46
1 files changed, 46 insertions, 0 deletions
diff --git a/src/H5Dio.c b/src/H5Dio.c
index 1c00c79..9a1403f 100644
--- a/src/H5Dio.c
+++ b/src/H5Dio.c
@@ -639,6 +639,8 @@ H5D_read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
hbool_t use_par_opt_io=FALSE; /* Whether the 'optimized' I/O routines will be parallel */
#ifdef H5_HAVE_PARALLEL
hbool_t xfer_mode_changed=FALSE; /* Whether the transfer mode was changed */
+ int prop_value,new_value;    /* Values of the collective chunk I/O test property */
+ htri_t check_prop;           /* Whether the test property exists in the DXPL */
#endif /*H5_HAVE_PARALLEL*/
H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache; /* Data transfer property cache */
@@ -749,6 +751,26 @@ H5D_read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, FAIL, "unable to convert from file to memory data space")
#ifdef H5_HAVE_PARALLEL
+ /**** Test for collective chunk I/O.
+      Note: the following code should be removed once a more general
+      collective chunk I/O algorithm is applied.
+ */
+
+ if(dataset->layout.type == H5D_CHUNKED) { /* only check for chunked storage */
+ check_prop = H5Pexist(dxpl_id,"__test__ccfoo___");
+ if(check_prop < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to check property list");
+ if(check_prop > 0) {
+ if(H5Pget(dxpl_id,"__test__ccfoo___",&prop_value)<0)
+ HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to get property value");
+ if(!use_par_opt_io) {
+ new_value = 0;
+ if(H5Pset(dxpl_id,"__test__ccfoo___",&new_value)<0)
+ HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to set property value");
+ }
+ }
+ }
+ /* end Test for collective chunk I/O */
/* Don't reset the transfer mode if we can't or won't use it */
if(!use_par_opt_io || !H5T_path_noop(tpath))
H5D_io_assist_mpio(dxpl_id, dxpl_cache, &xfer_mode_changed);
@@ -835,6 +857,8 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
hbool_t use_par_opt_io=FALSE; /* Whether the 'optimized' I/O routines will be parallel */
#ifdef H5_HAVE_PARALLEL
hbool_t xfer_mode_changed=FALSE; /* Whether the transfer mode was changed */
+ int prop_value,new_value;    /* Values of the collective chunk I/O test property */
+ htri_t check_prop;           /* Whether the test property exists in the DXPL */
#endif /*H5_HAVE_PARALLEL*/
H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache; /* Data transfer property cache */
@@ -965,6 +989,28 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, FAIL, "unable to convert from memory to file data space")
#ifdef H5_HAVE_PARALLEL
+ /**** Test for collective chunk I/O.
+      Note: the following code should be removed once a more general
+      collective chunk I/O algorithm is applied.
+ */
+
+ if(dataset->layout.type == H5D_CHUNKED) { /* only check for chunked storage */
+
+ check_prop = H5Pexist(dxpl_id,"__test__ccfoo___");
+ if(check_prop < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to check property list");
+ if(check_prop > 0) {
+ if(H5Pget(dxpl_id,"__test__ccfoo___",&prop_value)<0)
+ HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to get property value");
+ if(!use_par_opt_io) {
+ new_value = 0;
+ if(H5Pset(dxpl_id,"__test__ccfoo___",&new_value)<0)
+ HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to set property value");
+ }
+ }
+ }
+
+ /* end Test for collective chunk I/O */
/* Don't reset the transfer mode if we can't or won't use it */
if(!use_par_opt_io || !H5T_path_noop(tpath))
H5D_io_assist_mpio(dxpl_id, dxpl_cache, &xfer_mode_changed);