summary | refs | log | tree | commit | diff | stats
path: root/testpar/testphdf5.c
diff options
context:
space:
mode:
author: Quincey Koziol <koziol@hdfgroup.org> 2004-07-13 18:42:47 (GMT)
committer: Quincey Koziol <koziol@hdfgroup.org> 2004-07-13 18:42:47 (GMT)
commit: 803bb3e532c0c2ff26f6b7cc115a8c6f33ea00f5 (patch)
tree: 6e71ca792f570ce7280c5338e9e7b9f5f7fdc003 /testpar/testphdf5.c
parent: 0a8d8c54b249b81c58e4ab7d6481d737e2857c7a (diff)
download: hdf5-803bb3e532c0c2ff26f6b7cc115a8c6f33ea00f5.zip
download: hdf5-803bb3e532c0c2ff26f6b7cc115a8c6f33ea00f5.tar.gz
download: hdf5-803bb3e532c0c2ff26f6b7cc115a8c6f33ea00f5.tar.bz2
[svn-r8867] Purpose:
Bug fix.
Description: Fix error in chunked dataset I/O where data written out wasn't read correctly from a chunked, extendible dataset after the dataset was extended. Also, fix parallel I/O tests to gather error results from all processes, in order to detect errors that only occur on one process.
Solution: Bypass chunk cache for reads as well as writes, if parallel I/O driver is used and file is opened for writing.
Platforms tested: FreeBSD 4.10 (sleipnir) w/parallel. Too minor to require h5committest.
Diffstat (limited to 'testpar/testphdf5.c')
-rw-r--r-- testpar/testphdf5.c | 29
1 file changed, 19 insertions(+), 10 deletions(-)
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index 990095e..551a02a 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -12,8 +12,6 @@
* access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-/* $Id$ */
-
/*
* Main driver of the Parallel HDF5 tests
*/
@@ -434,6 +432,8 @@ int main(int argc, char **argv)
"dataset collective write", filenames[1]);
AddTest("indwriteext", extend_writeInd, NULL,
"extendible dataset independent write", filenames[2]);
+ AddTest("indwriteext2", extend_writeInd2, NULL,
+ "extendible dataset independent write #2", filenames[2]);
AddTest("collwriteext", extend_writeAll, NULL,
"extendible dataset collective write", filenames[2]);
@@ -482,7 +482,7 @@ int main(int argc, char **argv)
if (CleanUp && !getenv("HDF5_NOCLEANUP"))
TestCleanup();
- nerrors = GetTestNumErrs();
+ nerrors += GetTestNumErrs();
}
@@ -526,6 +526,9 @@ int main(int argc, char **argv)
MPI_BANNER("extendible dataset independent write...");
extend_writeInd(filenames[2]);
+ MPI_BANNER("extendible dataset independent write #2...");
+ extend_writeInd2(filenames[2]);
+
MPI_BANNER("extendible dataset collective write...");
extend_writeAll(filenames[2]);
}
@@ -600,22 +603,28 @@ finish:
* and exit.
*/
MPI_Barrier(MPI_COMM_WORLD);
+
+ /* Gather errors from all processes */
+ {
+ int temp;
+ MPI_Reduce(&nerrors, &temp, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);
+ if(mpi_rank==0)
+ nerrors=temp;
+ }
+
if (MAINPROCESS){ /* only process 0 reports */
printf("===================================\n");
- if (nerrors){
+ if (nerrors)
printf("***PHDF5 tests detected %d errors***\n", nerrors);
- }
- else{
+ else
printf("PHDF5 tests finished with no errors\n");
- }
printf("===================================\n");
}
- if (dowrite){
+ if (dowrite)
h5_cleanup(FILENAME, fapl);
- } else {
+ else
/* h5_cleanup would have closed fapl. Now must do it explicitedly */
H5Pclose(fapl);
- }
/* close HDF5 library */
H5close();