! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
! Copyright by The HDF Group. *
! All rights reserved. *
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
! the COPYING file, which can be found at the root of the source code *
! distribution tree, or in https://www.hdfgroup.org/licenses. *
! If you do not have access to either file, you may request a copy from *
! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! main program for parallel HDF5 Fortran tests
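! The driver is launched as an MPI program (for example with mpiexec) on
! several processes; rank 0 reports the status of each test and the run
! aborts if any process accumulates errors.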
!
PROGRAM parallel_test
USE HDF5
USE MPI
USE TH5_MISC
IMPLICIT NONE
INTEGER :: mpierror ! MPI error flag
INTEGER :: hdferror ! HDF5 error flag
INTEGER :: ret_total_error = 0 ! number of errors from a single test
INTEGER :: total_error = 0 ! total number of errors across all tests
INTEGER :: mpi_size ! number of processes in the group of communicator
INTEGER :: mpi_rank ! rank of the calling process in the communicator
INTEGER :: length = 12000 ! length of array
INTEGER :: i, j, sum ! loop counters; sum holds the reduced error count
! use collective MPI I/O
LOGICAL, DIMENSION(1:2) :: do_collective = (/.FALSE.,.TRUE./)
CHARACTER(LEN=11), DIMENSION(1:2) :: chr_collective =(/"independent", "collective "/)
! use chunking
LOGICAL, DIMENSION(1:2) :: do_chunk = (/.FALSE.,.TRUE./)
CHARACTER(LEN=10), DIMENSION(1:2) :: chr_chunk =(/"contiguous", "chunk     "/)
!
! initialize MPI
!
CALL mpi_init(mpierror)
IF (mpierror .NE. MPI_SUCCESS) THEN
WRITE(*,*) "MPI_INIT *FAILED*"
ENDIF
CALL mpi_comm_rank( MPI_COMM_WORLD, mpi_rank, mpierror )
IF (mpierror .NE. MPI_SUCCESS) THEN
WRITE(*,*) "MPI_COMM_RANK *FAILED* Process = ", mpi_rank
ENDIF
CALL mpi_comm_size( MPI_COMM_WORLD, mpi_size, mpierror )
IF (mpierror .NE. MPI_SUCCESS) THEN
WRITE(*,*) "MPI_COMM_SIZE *FAILED* Process = ", mpi_rank
ENDIF
!
! initialize the HDF5 fortran interface
!
CALL h5open_f(hdferror)
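!
! rank 0 alone prints the test banner and the per-test status lines
!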
IF(mpi_rank==0) CALL write_test_header("COMPREHENSIVE PARALLEL FORTRAN TESTS")
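!
! test passing an MPI communicator and info object through the Fortran 2003
! and Fortran 2008 interfaces
!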
ret_total_error = 0
CALL mpi_param_03(ret_total_error)
IF(mpi_rank==0) CALL write_test_status(ret_total_error, &
'Testing MPI communicator and info (F03)', total_error)
ret_total_error = 0
CALL mpi_param_08(ret_total_error)
IF(mpi_rank==0) CALL write_test_status(ret_total_error, &
'Testing MPI communicator and info (F08)', total_error)
!
! test write/read dataset by hyperslabs (contiguous/chunk) with independent/collective MPI I/O
!
DO i = 1, 2
DO j = 1, 2
ret_total_error = 0
CALL hyper(length, do_collective(j), do_chunk(i), mpi_size, mpi_rank, ret_total_error)
IF(mpi_rank==0) CALL write_test_status(ret_total_error, &
"Writing/reading dataset by hyperslabs ("//TRIM(chr_chunk(i))//" layout, "//TRIM(chr_collective(j))//" MPI I/O)", &
total_error)
ENDDO
ENDDO
!
! test write/read several datasets (independent MPI I/O)
!
ret_total_error = 0
CALL multiple_dset_write(length, do_collective(1), do_chunk(1), mpi_size, mpi_rank, ret_total_error)
IF(mpi_rank==0) CALL write_test_status(ret_total_error, &
'Writing/reading several datasets (contiguous layout, independent MPI I/O)', total_error)
!
! test write/read multiple hyperslab datasets
!
DO i = 1, 2
DO j = 1, 2
ret_total_error = 0
CALL pmultiple_dset_hyper_rw(do_collective(j), do_chunk(i), mpi_size, mpi_rank, ret_total_error)
IF(mpi_rank==0) CALL write_test_status(ret_total_error, &
"Writing/reading multiple datasets by hyperslab ("//TRIM(chr_chunk(i))//" layout, "&
//TRIM(chr_collective(j))//" MPI I/O)", total_error)
ENDDO
ENDDO
!
! close HDF5 interface
!
CALL h5close_f(hdferror)
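!
! combine the per-rank error counts so that every process agrees on the result
!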
CALL MPI_ALLREDUCE(total_error, sum, 1, MPI_INTEGER, MPI_SUM, MPI_COMM_WORLD, mpierror)
IF(mpi_rank==0) CALL write_test_footer()
!
! close MPI
!
IF (sum == 0) THEN
CALL mpi_finalize(mpierror)
IF (mpierror .NE. MPI_SUCCESS) THEN
WRITE(*,*) "MPI_FINALIZE *FAILED* Process = ", mpi_rank
ENDIF
ELSE
WRITE(*,*) 'Errors detected in process ', mpi_rank
CALL mpi_abort(MPI_COMM_WORLD, 1, mpierror)
IF (mpierror .NE. MPI_SUCCESS) THEN
WRITE(*,*) "MPI_ABORT *FAILED* Process = ", mpi_rank
ENDIF
ENDIF
!
! end main program
!
END PROGRAM parallel_test