path: root/fortran/testpar/ptest.f90
author    Scot Breitenfeld <brtnfld@hdfgroup.org>  2008-05-21 19:55:50 (GMT)
committer Scot Breitenfeld <brtnfld@hdfgroup.org>  2008-05-21 19:55:50 (GMT)
commit    8332e5b7d393e72f343180632bd3c067f627aa38 (patch)
tree      2175ee05100cd824670b480bd26f9fc7bc8e50c7 /fortran/testpar/ptest.f90
parent    8394d07bb1e614f9d376483f281ea74c6827ac6e (diff)
[svn-r15064] Description:
Removed extra MPI calls from the test subroutines by passing the MPI variables (mpi_size, mpi_rank) into them instead of re-querying MPI in each one. Added checks for MPI errors. Cleaned up formatting.
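The callee-side half of that change is not visible in this file's diff, which shows only the updated call sites. As a minimal sketch, assuming the test subroutines previously queried MPI themselves, the pattern presumably went from the commented form to the parameterized form below; the body and INTENT declarations shown for hyper are assumptions, only its new argument list is confirmed by the calls in the diff:

! Before (assumed): each test subroutine re-queried MPI on entry.
!   SUBROUTINE hyper(length, do_collective, do_chunk, nerrors)
!     CALL mpi_comm_size(MPI_COMM_WORLD, mpi_size, mpierror)
!     CALL mpi_comm_rank(MPI_COMM_WORLD, mpi_rank, mpierror)
!     ...

! After: the main program queries MPI once, checks each call against
! MPI_SUCCESS, and hands the results to every test subroutine.
SUBROUTINE hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
  USE hdf5
  IMPLICIT NONE
  INCLUDE 'mpif.h'
  INTEGER, INTENT(in)    :: length        ! length of array
  LOGICAL, INTENT(in)    :: do_collective ! use collective MPI I/O
  LOGICAL, INTENT(in)    :: do_chunk      ! use chunking
  INTEGER, INTENT(in)    :: mpi_size      ! number of processes in the communicator
  INTEGER, INTENT(in)    :: mpi_rank      ! rank of the calling process
  INTEGER, INTENT(inout) :: nerrors       ! cumulative error count
  ! ... test body unchanged ...
END SUBROUTINE hyper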
Diffstat (limited to 'fortran/testpar/ptest.f90')
-rw-r--r--  fortran/testpar/ptest.f90 | 212
1 file changed, 114 insertions(+), 98 deletions(-)
diff --git a/fortran/testpar/ptest.f90 b/fortran/testpar/ptest.f90
index 80f4091..6f6fb2e 100644
--- a/fortran/testpar/ptest.f90
+++ b/fortran/testpar/ptest.f90
@@ -17,103 +17,119 @@
! main program for parallel HDF5 Fortran tests
!//////////////////////////////////////////////////////////
-program parallel_test
-use hdf5
-implicit none
-include 'mpif.h'
-
-integer :: mpierror ! MPI hdferror flag
-integer :: hdferror ! HDF hdferror flag
-logical :: do_collective ! use collective MPI I/O
-logical :: do_chunk ! use chunking
-integer :: nerrors = 0 ! number of errors
-integer :: mpi_rank ! rank of the calling process in the communicator
-integer :: lenght = 12000 ! lenght of array
+PROGRAM parallel_test
+ USE hdf5
+ IMPLICIT NONE
+ INCLUDE 'mpif.h'
+
+ INTEGER :: mpierror ! MPI hdferror flag
+ INTEGER :: hdferror ! HDF hdferror flag
+ LOGICAL :: do_collective ! use collective MPI I/O
+ LOGICAL :: do_chunk ! use chunking
+ INTEGER :: nerrors = 0 ! number of errors
+ INTEGER :: mpi_size ! number of processes in the group of communicator
+ INTEGER :: mpi_rank ! rank of the calling process in the communicator
+ INTEGER :: length = 12000 ! length of array
+
+ !//////////////////////////////////////////////////////////
+ ! initialize MPI
+ !//////////////////////////////////////////////////////////
+
+ CALL mpi_init(mpierror)
+ IF (mpierror .NE. MPI_SUCCESS) THEN
+ WRITE(*,*) "MPI_INIT *FAILED*"
+ ENDIF
+ CALL mpi_comm_rank( MPI_COMM_WORLD, mpi_rank, mpierror )
+ IF (mpierror .NE. MPI_SUCCESS) THEN
+ WRITE(*,*) "MPI_COMM_RANK *FAILED* Process = ", mpi_rank
+ ENDIF
+ CALL mpi_comm_size( MPI_COMM_WORLD, mpi_size, mpierror )
+ IF (mpierror .NE. MPI_SUCCESS) THEN
+ WRITE(*,*) "MPI_COMM_SIZE *FAILED* Process = ", mpi_rank
+ ENDIF
+ !//////////////////////////////////////////////////////////
+ ! initialize the HDF5 fortran interface
+ !//////////////////////////////////////////////////////////
+
+ CALL h5open_f(hdferror)
+
+ !//////////////////////////////////////////////////////////
+ ! test write/read dataset by hyperslabs with independent MPI I/O
+ !//////////////////////////////////////////////////////////
+
+ IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading dataset by hyperslabs (contiguous layout, independent MPI I/O)'
+
+ do_collective = .FALSE.
+ do_chunk = .FALSE.
+ CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
+
+ !//////////////////////////////////////////////////////////
+ ! test write/read dataset by hyperslabs with collective MPI I/O
+ !//////////////////////////////////////////////////////////
+
+ IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading dataset by hyperslabs (contiguous layout, collective MPI I/O)'
+
+ do_collective = .TRUE.
+ do_chunk = .FALSE.
+ CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
+
+ !//////////////////////////////////////////////////////////
+ ! test write/read dataset by hyperslabs with independent MPI I/O
+ !//////////////////////////////////////////////////////////
+
+ IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading dataset by hyperslabs (chunk layout, independent MPI I/O)'
+
+ do_collective = .FALSE.
+ do_chunk = .TRUE.
+ CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
+
+ !//////////////////////////////////////////////////////////
+ ! test write/read dataset by hyperslabs with collective MPI I/O
+ !//////////////////////////////////////////////////////////
+
+ IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading dataset by hyperslabs (chunk layout, collective MPI I/O)'
+
+ do_collective = .TRUE.
+ do_chunk = .TRUE.
+ CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
+
+ !//////////////////////////////////////////////////////////
+ ! test write/read several datasets (independent MPI I/O)
+ !//////////////////////////////////////////////////////////
+
+ IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading several datasets (contiguous layout, independent MPI I/O)'
+
+ do_collective = .FALSE.
+ do_chunk = .FALSE.
+ CALL multiple_dset_write(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
+
+
+ !//////////////////////////////////////////////////////////
+ ! close HDF5 interface
+ !//////////////////////////////////////////////////////////
+
+ CALL h5close_f(hdferror)
+
+ !//////////////////////////////////////////////////////////
+ ! close MPI
+ !//////////////////////////////////////////////////////////
+
+ IF (nerrors == 0) THEN
+ CALL mpi_finalize(mpierror)
+ IF (mpierror .NE. MPI_SUCCESS) THEN
+ WRITE(*,*) "MPI_FINALIZE *FAILED* Process = ", mpi_rank
+ ENDIF
+ ELSE
+ WRITE(*,*) 'Errors detected in process ', mpi_rank
+ CALL mpi_abort(MPI_COMM_WORLD, 1, mpierror)
+ IF (mpierror .NE. MPI_SUCCESS) THEN
+ WRITE(*,*) "MPI_ABORT *FAILED* Process = ", mpi_rank
+ ENDIF
+ ENDIF
+
+ !//////////////////////////////////////////////////////////
+ ! end main program
+ !//////////////////////////////////////////////////////////
-!//////////////////////////////////////////////////////////
-! initialize MPI
-!//////////////////////////////////////////////////////////
-
-call mpi_init(mpierror)
-call mpi_comm_rank( MPI_COMM_WORLD, mpi_rank, mpierror )
-
-!//////////////////////////////////////////////////////////
-! initialize the HDF5 fortran interface
-!//////////////////////////////////////////////////////////
-
-call h5open_f(hdferror)
-
-!//////////////////////////////////////////////////////////
-! test write/read dataset by hyperslabs with independent MPI I/O
-!//////////////////////////////////////////////////////////
-
-if (mpi_rank == 0) write(*,*) 'Writing/reading dataset by hyperslabs (contiguous layout, independent MPI I/O)'
-
-do_collective = .false.
-do_chunk = .false.
-call hyper(lenght,do_collective,do_chunk,nerrors)
-
-!//////////////////////////////////////////////////////////
-! test write/read dataset by hyperslabs with collective MPI I/O
-!//////////////////////////////////////////////////////////
-
-if (mpi_rank == 0) write(*,*) 'Writing/reading dataset by hyperslabs (contiguous layout, collective MPI I/O)'
-
-do_collective = .true.
-do_chunk = .false.
-call hyper(lenght,do_collective,do_chunk,nerrors)
-
-!//////////////////////////////////////////////////////////
-! test write/read dataset by hyperslabs with independent MPI I/O
-!//////////////////////////////////////////////////////////
-
-if (mpi_rank == 0) write(*,*) 'Writing/reading dataset by hyperslabs (chunk layout, independent MPI I/O)'
-
-do_collective = .false.
-do_chunk = .true.
-call hyper(lenght,do_collective,do_chunk,nerrors)
-
-!//////////////////////////////////////////////////////////
-! test write/read dataset by hyperslabs with collective MPI I/O
-!//////////////////////////////////////////////////////////
-
-if (mpi_rank == 0) write(*,*) 'Writing/reading dataset by hyperslabs (chunk layout, collective MPI I/O)'
-
-do_collective = .true.
-do_chunk = .true.
-call hyper(lenght,do_collective,do_chunk,nerrors)
-
-!//////////////////////////////////////////////////////////
-! test write/read several datasets (independent MPI I/O)
-!//////////////////////////////////////////////////////////
-
-if (mpi_rank == 0) write(*,*) 'Writing/reading several datasets (contiguous layout, independent MPI I/O)'
-
-do_collective = .false.
-do_chunk = .false.
-call multiple_dset_write(lenght,do_collective,do_chunk,nerrors)
-
-
-!//////////////////////////////////////////////////////////
-! close HDF5 interface
-!//////////////////////////////////////////////////////////
-
-call h5close_f(hdferror)
-
-!//////////////////////////////////////////////////////////
-! close MPI
-!//////////////////////////////////////////////////////////
-
-if (nerrors == 0) then
- call mpi_finalize(mpierror)
-else
- write(*,*) 'Errors detected in process ', mpi_rank
- call mpi_abort(MPI_COMM_WORLD, 1, mpierror)
-endif
-
-!//////////////////////////////////////////////////////////
-! end main program
-!//////////////////////////////////////////////////////////
-
-end program parallel_test
+END PROGRAM parallel_test
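The IF (mpierror .NE. MPI_SUCCESS) test that the new code repeats after each MPI call is the pattern this commit introduces. Factored into a helper it would look like the sketch below; check_mpi is hypothetical and not part of the commit, which keeps the checks inline:

! Hypothetical helper illustrating the inlined check: report a
! failed MPI call together with the rank of the reporting process.
SUBROUTINE check_mpi(mpierror, label, mpi_rank)
  IMPLICIT NONE
  INCLUDE 'mpif.h'
  INTEGER, INTENT(in)          :: mpierror ! return code of the last MPI call
  CHARACTER(LEN=*), INTENT(in) :: label    ! name of the MPI call, e.g. "MPI_INIT"
  INTEGER, INTENT(in)          :: mpi_rank ! rank of the reporting process
  IF (mpierror .NE. MPI_SUCCESS) THEN
     WRITE(*,*) label, " *FAILED* Process = ", mpi_rank
  ENDIF
END SUBROUTINE check_mpi

! Usage, mirroring the inline checks added above:
!   CALL mpi_comm_rank(MPI_COMM_WORLD, mpi_rank, mpierror)
!   CALL check_mpi(mpierror, "MPI_COMM_RANK", mpi_rank)

Keeping the checks inline, as the commit does, avoids an extra subroutine in a harness this small; the helper form only pays off once the same check recurs across many test files.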