summaryrefslogtreecommitdiffstats
path: root/testpar/testphdf5.c
diff options
context:
space:
mode:
authorQuincey Koziol <koziol@hdfgroup.org>2004-07-13 18:42:50 (GMT)
committerQuincey Koziol <koziol@hdfgroup.org>2004-07-13 18:42:50 (GMT)
commite240c00154d986f20e8c7c0158a222324ec10d68 (patch)
tree620633d183bd557ab86a7a2e9ebbf9850d984638 /testpar/testphdf5.c
parent0b2827f9ced045e36e4a88e16cfef7ca9250c89e (diff)
downloadhdf5-e240c00154d986f20e8c7c0158a222324ec10d68.zip
hdf5-e240c00154d986f20e8c7c0158a222324ec10d68.tar.gz
hdf5-e240c00154d986f20e8c7c0158a222324ec10d68.tar.bz2
[svn-r8868] Purpose:
Bug fix Description: Fix error in chunked dataset I/O where data written out wasn't read correctly from a chunked, extendible dataset after the dataset was extended. Also, fix parallel I/O tests to gather error results from all processes, in order to detect errors that only occur on one process. Solution: Bypass chunk cache for reads as well as writes, if parallel I/O driver is used and file is opened for writing. Platforms tested: FreeBSD 4.10 (sleipnir) w/parallel Too minor to require h5committest
Diffstat (limited to 'testpar/testphdf5.c')
-rw-r--r--testpar/testphdf5.c29
1 files changed, 19 insertions, 10 deletions
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index 7000936..8c533aa 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -12,8 +12,6 @@
* access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-/* $Id$ */
-
/*
* Main driver of the Parallel HDF5 tests
*/
@@ -433,6 +431,8 @@ int main(int argc, char **argv)
"dataset collective write", filenames[1]);
AddTest("indwriteext", extend_writeInd, NULL,
"extendible dataset independent write", filenames[2]);
+ AddTest("indwriteext2", extend_writeInd2, NULL,
+ "extendible dataset independent write #2", filenames[2]);
AddTest("collwriteext", extend_writeAll, NULL,
"extendible dataset collective write", filenames[2]);
@@ -479,7 +479,7 @@ int main(int argc, char **argv)
if (CleanUp && !getenv("HDF5_NOCLEANUP"))
TestCleanup();
- nerrors = GetTestNumErrs();
+ nerrors += GetTestNumErrs();
}
@@ -523,6 +523,9 @@ int main(int argc, char **argv)
MPI_BANNER("extendible dataset independent write...");
extend_writeInd(filenames[2]);
+ MPI_BANNER("extendible dataset independent write #2...");
+ extend_writeInd2(filenames[2]);
+
MPI_BANNER("extendible dataset collective write...");
extend_writeAll(filenames[2]);
}
@@ -589,22 +592,28 @@ finish:
* and exit.
*/
MPI_Barrier(MPI_COMM_WORLD);
+
+ /* Gather errors from all processes */
+ {
+ int temp;
+ MPI_Reduce(&nerrors, &temp, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);
+ if(mpi_rank==0)
+ nerrors=temp;
+ }
+
if (MAINPROCESS){ /* only process 0 reports */
printf("===================================\n");
- if (nerrors){
+ if (nerrors)
printf("***PHDF5 tests detected %d errors***\n", nerrors);
- }
- else{
+ else
printf("PHDF5 tests finished with no errors\n");
- }
printf("===================================\n");
}
- if (dowrite){
+ if (dowrite)
h5_cleanup(FILENAME, fapl);
- } else {
+ else
/* h5_cleanup would have closed fapl. Now must do it explicitly */
H5Pclose(fapl);
- }
/* close HDF5 library */
H5close();