path: root/fortran/testpar/ptest.f90
author     Scot Breitenfeld <brtnfld@hdfgroup.org>    2016-02-19 14:24:11 (GMT)
committer  Scot Breitenfeld <brtnfld@hdfgroup.org>    2016-02-19 14:24:11 (GMT)
commit     1b2c30753d214214e67f131322757fe7c6520d1f (patch)
tree       a71922ec63f76080856b452fe128ae589150d4b9 /fortran/testpar/ptest.f90
parent     70ad55b1052e018acd90b22ab44260a8a1721e0b (diff)
download   hdf5-1b2c30753d214214e67f131322757fe7c6520d1f.zip
           hdf5-1b2c30753d214214e67f131322757fe7c6520d1f.tar.gz
           hdf5-1b2c30753d214214e67f131322757fe7c6520d1f.tar.bz2
[svn-r29155] HDFFV-9652: Add fortran wrappers/test for collective metadata functions
Tested: h5committest.new
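
The collective metadata wrappers referenced by HDFFV-9652 are applied to a file access property list before a file is created or opened in parallel. A minimal sketch of that usage follows, assuming the new Fortran wrappers h5pset_all_coll_metadata_ops_f and h5pset_coll_metadata_write_f take a property list identifier, a LOGICAL flag, and an error argument; the file name and surrounding setup are illustrative only and are not part of this commit.

  PROGRAM coll_md_example
    USE hdf5
    USE MPI
    IMPLICIT NONE
    INTEGER(HID_T) :: fapl_id, file_id
    INTEGER :: hdferror, mpierror
    CALL mpi_init(mpierror)
    CALL h5open_f(hdferror)
    ! file access property list using the MPI-IO driver
    CALL h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferror)
    CALL h5pset_fapl_mpio_f(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL, hdferror)
    ! request collective metadata reads and writes (wrappers added by this change,
    ! assumed signature: property list, LOGICAL flag, error code)
    CALL h5pset_all_coll_metadata_ops_f(fapl_id, .TRUE., hdferror)
    CALL h5pset_coll_metadata_write_f(fapl_id, .TRUE., hdferror)
    ! create a file with the prepared access property list (illustrative name)
    CALL h5fcreate_f("coll_md.h5", H5F_ACC_TRUNC_F, file_id, hdferror, access_prp=fapl_id)
    CALL h5fclose_f(file_id, hdferror)
    CALL h5pclose_f(fapl_id, hdferror)
    CALL h5close_f(hdferror)
    CALL mpi_finalize(mpierror)
  END PROGRAM coll_md_example
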
Diffstat (limited to 'fortran/testpar/ptest.f90')
-rw-r--r--  fortran/testpar/ptest.f90  119
1 file changed, 44 insertions(+), 75 deletions(-)
diff --git a/fortran/testpar/ptest.f90 b/fortran/testpar/ptest.f90
index 69594b0..82dcc09 100644
--- a/fortran/testpar/ptest.f90
+++ b/fortran/testpar/ptest.f90
@@ -13,29 +13,35 @@
! access to either file, you may request a copy from help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
-!//////////////////////////////////////////////////////////
+!
! main program for parallel HDF5 Fortran tests
-!//////////////////////////////////////////////////////////
+!
PROGRAM parallel_test
USE hdf5
+ USE MPI
+ USE TH5_MISC
IMPLICIT NONE
- INCLUDE 'mpif.h'
INTEGER :: mpierror ! MPI hdferror flag
INTEGER :: hdferror ! HDF hdferror flag
- LOGICAL :: do_collective ! use collective MPI I/O
- LOGICAL :: do_chunk ! use chunking
- INTEGER :: nerrors = 0 ! number of errors
+ INTEGER :: ret_total_error = 0 ! number of errors in subroutine
+ INTEGER :: total_error = 0 ! sum of the number of errors
INTEGER :: mpi_size ! number of processes in the group of communicator
INTEGER :: mpi_rank ! rank of the calling process in the communicator
INTEGER :: length = 12000 ! length of array
-
- !//////////////////////////////////////////////////////////
+ INTEGER :: i,j
+ ! use collective MPI I/O
+ LOGICAL, DIMENSION(1:2) :: do_collective = (/.FALSE.,.TRUE./)
+ CHARACTER(LEN=11), DIMENSION(1:2) :: chr_collective =(/"independent", "collective "/)
+ ! use chunking
+ LOGICAL, DIMENSION(1:2) :: do_chunk = (/.FALSE.,.TRUE./)
+ CHARACTER(LEN=10), DIMENSION(1:2) :: chr_chunk =(/"contiguous", "chunk "/)
+
+ !
! initialize MPI
- !//////////////////////////////////////////////////////////
-
+ !
CALL mpi_init(mpierror)
IF (mpierror .NE. MPI_SUCCESS) THEN
WRITE(*,*) "MPI_INIT *FAILED*"
@@ -48,74 +54,40 @@ PROGRAM parallel_test
IF (mpierror .NE. MPI_SUCCESS) THEN
WRITE(*,*) "MPI_COMM_SIZE *FAILED* Process = ", mpi_rank
ENDIF
- !//////////////////////////////////////////////////////////
+ !
! initialize the HDF5 fortran interface
- !//////////////////////////////////////////////////////////
-
+ !
CALL h5open_f(hdferror)
-
- !//////////////////////////////////////////////////////////
- ! test write/read dataset by hyperslabs with independent MPI I/O
- !//////////////////////////////////////////////////////////
-
- IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading dataset by hyperslabs (contiguous layout, independent MPI I/O)'
-
- do_collective = .FALSE.
- do_chunk = .FALSE.
- CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
-
- !//////////////////////////////////////////////////////////
- ! test write/read dataset by hyperslabs with collective MPI I/O
- !//////////////////////////////////////////////////////////
-
- IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading dataset by hyperslabs (contiguous layout, collective MPI I/O)'
-
- do_collective = .TRUE.
- do_chunk = .FALSE.
- CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
-
- !//////////////////////////////////////////////////////////
- ! test write/read dataset by hyperslabs with independent MPI I/O
- !//////////////////////////////////////////////////////////
-
- IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading dataset by hyperslabs (chunk layout, independent MPI I/O)'
-
- do_collective = .FALSE.
- do_chunk = .TRUE.
- CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
-
- !//////////////////////////////////////////////////////////
- ! test write/read dataset by hyperslabs with collective MPI I/O
- !//////////////////////////////////////////////////////////
-
- IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading dataset by hyperslabs (chunk layout, collective MPI I/O)'
-
- do_collective = .TRUE.
- do_chunk = .TRUE.
- CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
-
- !//////////////////////////////////////////////////////////
+ !
+ ! test write/read dataset by hyperslabs (contiguous/chunk) with independent/collective MPI I/O
+ !
+ DO i = 1, 2
+ DO j = 1, 2
+ ret_total_error = 0
+ CALL hyper(length, do_collective(j), do_chunk(i), mpi_size, mpi_rank, ret_total_error)
+ IF(mpi_rank==0) CALL write_test_status(ret_total_error, &
+ "Writing/reading dataset by hyperslabs ("//TRIM(chr_chunk(i))//" layout, "//TRIM(chr_collective(j))//" MPI I/O)", &
+ total_error)
+ ENDDO
+ ENDDO
+
+ !
! test write/read several datasets (independent MPI I/O)
- !//////////////////////////////////////////////////////////
-
- IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading several datasets (contiguous layout, independent MPI I/O)'
-
- do_collective = .FALSE.
- do_chunk = .FALSE.
- CALL multiple_dset_write(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
+ !
+ ret_total_error = 0
+ CALL multiple_dset_write(length, do_collective(1), do_chunk(1), mpi_size, mpi_rank, ret_total_error)
+ IF(mpi_rank==0) CALL write_test_status(ret_total_error, &
+ 'Writing/reading several datasets (contiguous layout, independent MPI I/O)', total_error)
-
- !//////////////////////////////////////////////////////////
+ !
! close HDF5 interface
- !//////////////////////////////////////////////////////////
-
+ !
CALL h5close_f(hdferror)
- !//////////////////////////////////////////////////////////
+ !
! close MPI
- !//////////////////////////////////////////////////////////
-
- IF (nerrors == 0) THEN
+ !
+ IF (total_error == 0) THEN
CALL mpi_finalize(mpierror)
IF (mpierror .NE. MPI_SUCCESS) THEN
WRITE(*,*) "MPI_FINALIZE *FAILED* Process = ", mpi_rank
@@ -127,10 +99,7 @@ PROGRAM parallel_test
WRITE(*,*) "MPI_ABORT *FAILED* Process = ", mpi_rank
ENDIF
ENDIF
-
- !//////////////////////////////////////////////////////////
+ !
! end main program
- !//////////////////////////////////////////////////////////
-
+ !
END PROGRAM parallel_test
-