path: root/testpar/t_dset.c
author     Quincey Koziol <koziol@hdfgroup.org>   2003-02-10 17:26:09 (GMT)
committer  Quincey Koziol <koziol@hdfgroup.org>   2003-02-10 17:26:09 (GMT)
commit     24d8506dd564c5cc0fdebb5ebdfaec7bda5a7435 (patch)
tree       6b2eb3bb1e782c40718204882428e6471b6281ac /testpar/t_dset.c
parent     738661ab9f409b8d961ba1402d6c4dd5f99ecb43 (diff)
[svn-r6387] Purpose:
    Bug Fix
Description:
    The metadata cache in parallel I/O can cause hangs in applications which perform independent I/O on chunked datasets, because the metadata cache can attempt to flush out dirty metadata from only a single process, instead of collectively from all processes.
Solution:
    Pass a dataset transfer property list down from every API function which could possibly trigger metadata I/O. Then, split the metadata cache into two sets of entries to allow dirty metadata to be set aside when a hash table collision occurs during independent I/O.
Platforms tested:
    Tested h5committest {arabica (fortran), eirene (fortran, C++), modi4 (parallel, fortran)}
    FreeBSD 4.7 (sleipnir) serial & parallel
Misc. update:
    Updated release_docs/RELEASE
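For context, the failure mode this commit targets arises from a usage pattern like the following sketch. It is illustrative only and not part of the patch; `dset`, `memspace`, `filespace`, and `buf` are assumed to have been created elsewhere. Each rank writes independently to a chunked dataset, and the chunk metadata touched along the way could dirty the cache on only some processes, which is where the hang described above could occur.

    /* Illustrative sketch (not from this patch): independent raw-data I/O
     * on a chunked dataset, the scenario the commit message describes.
     * `dset`, `memspace`, `filespace`, and `buf` are assumed to exist. */
    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);  /* each rank writes on its own */
    H5Dwrite(dset, H5T_NATIVE_INT, memspace, filespace, dxpl, buf);
    H5Pclose(dxpl);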
Diffstat (limited to 'testpar/t_dset.c')
-rw-r--r--  testpar/t_dset.c  21
1 file changed, 19 insertions, 2 deletions
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index a65a3ae..eea1bd3 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -589,9 +589,9 @@ dataset_writeAll(char *filename)
/* set up the collective transfer properties list */
xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
/* write data collectively */
MESG("writeAll by Row");
@@ -1097,6 +1097,23 @@ extend_writeInd(char *filename)
acc_tpl = create_faccess_plist(comm, info, facc_type);
VRFY((acc_tpl >= 0), "");
+/* Reduce the number of metadata cache slots, so that there are cache
+ * collisions during the raw data I/O on the chunked dataset. This stresses
+ * the metadata cache and tests for cache bugs. -QAK
+ */
+{
+ int mdc_nelmts;
+ size_t rdcc_nelmts;
+ size_t rdcc_nbytes;
+ double rdcc_w0;
+
+ ret=H5Pget_cache(acc_tpl,&mdc_nelmts,&rdcc_nelmts,&rdcc_nbytes,&rdcc_w0);
+ VRFY((ret >= 0), "H5Pget_cache succeeded");
+ mdc_nelmts=4;
+ ret=H5Pset_cache(acc_tpl,mdc_nelmts,rdcc_nelmts,rdcc_nbytes,rdcc_w0);
+ VRFY((ret >= 0), "H5Pset_cache succeeded");
+}
+
/* create the file collectively */
fid=H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_tpl);
VRFY((fid >= 0), "H5Fcreate succeeded");
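Outside the test, the same cache-shrinking trick looks roughly like the sketch below. This is an illustration rather than part of the patch: the file name, communicator, and lack of error checking are assumptions, and the H5Pget_cache/H5Pset_cache signatures follow the ones used in the hunk above, which differ in later HDF5 releases.

    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);   /* parallel file access */

    int    mdc_nelmts;
    size_t rdcc_nelmts, rdcc_nbytes;
    double rdcc_w0;

    /* Query the current cache settings, then cut the metadata cache down to
     * four slots so hash-table collisions (and flushes) occur during
     * chunked raw-data I/O. */
    H5Pget_cache(fapl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0);
    mdc_nelmts = 4;
    H5Pset_cache(fapl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);

    hid_t fid = H5Fcreate("stress_mdc.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    H5Pclose(fapl);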