! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * 
!   Copyright by The HDF Group.                                               *
!   Copyright by the Board of Trustees of the University of Illinois.         *
!   All rights reserved.                                                      *
!                                                                             *
!   This file is part of HDF5.  The full HDF5 copyright notice, including     *
!   terms governing use, modification, and redistribution, is contained in    *
!   the files COPYING and Copyright.html.  COPYING can be found at the root   *
!   of the source code distribution tree; Copyright.html can be found at the  *
!   root level of an installed copy of the electronic HDF5 document set and   *
!   is linked from the top-level documents page.  It can also be found at     *
!   http://hdfgroup.org/HDF5/doc/Copyright.html.  If you do not have          *
!   access to either file, you may request a copy from help@hdfgroup.org.     *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * 

!//////////////////////////////////////////////////////////
! main program for parallel HDF5 Fortran tests
!//////////////////////////////////////////////////////////

program parallel_test
use hdf5
implicit none
include 'mpif.h'

integer :: mpierror                             ! MPI error flag
integer :: hdferror                             ! HDF5 error flag
logical :: do_collective                        ! use collective MPI I/O
logical :: do_chunk                             ! use chunking
integer :: nerrors = 0                          ! number of errors
integer :: mpi_rank                             ! rank of the calling process in the communicator
integer :: length = 12000                       ! length of the test array

!//////////////////////////////////////////////////////////
! initialize MPI
!//////////////////////////////////////////////////////////

call mpi_init(mpierror)
call mpi_comm_rank( MPI_COMM_WORLD, mpi_rank, mpierror )
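
! optional guard (a defensive addition, not part of the original test
! logic): both MPI calls above report status through mpierror, and
! MPI_SUCCESS is defined in mpif.h
if (mpierror /= MPI_SUCCESS) then
 write(*,*) 'MPI initialization failed'
 call mpi_abort(MPI_COMM_WORLD, 1, mpierror)
endif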

!//////////////////////////////////////////////////////////
! initialize the HDF5 fortran interface
!//////////////////////////////////////////////////////////

call h5open_f(hdferror)
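
! optional guard: h5open_f returns 0 on success and a negative value
! on failure, so count a failed library start-up like any other error
if (hdferror /= 0) nerrors = nerrors + 1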

!//////////////////////////////////////////////////////////
! test write/read dataset by hyperslabs (contiguous layout, independent MPI I/O)
!//////////////////////////////////////////////////////////

if (mpi_rank == 0) write(*,*) 'Writing/reading dataset by hyperslabs (contiguous layout, independent MPI I/O)'

do_collective = .false.
do_chunk      = .false.
call hyper(length,do_collective,do_chunk,nerrors)
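! (hyper is an external subroutine of this test suite; presumably
! do_collective selects the MPI transfer mode on the dataset transfer
! property list, H5FD_MPIO_COLLECTIVE_F vs. H5FD_MPIO_INDEPENDENT_F,
! and do_chunk switches the dataset between chunked and contiguous
! layout)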

!//////////////////////////////////////////////////////////
! test write/read dataset by hyperslabs (contiguous layout, collective MPI I/O)
!//////////////////////////////////////////////////////////

if (mpi_rank == 0) write(*,*) 'Writing/reading dataset by hyperslabs (contiguous layout, collective MPI I/O)'

do_collective = .true.
do_chunk      = .false.
call hyper(length,do_collective,do_chunk,nerrors)

!//////////////////////////////////////////////////////////
! test write/read dataset by hyperslabs (chunk layout, independent MPI I/O)
!//////////////////////////////////////////////////////////

if (mpi_rank == 0) write(*,*) 'Writing/reading dataset by hyperslabs (chunk layout, independent MPI I/O)'

do_collective = .false.
do_chunk      = .true.
call hyper(length,do_collective,do_chunk,nerrors)

!//////////////////////////////////////////////////////////
! test write/read dataset by hyperslabs (chunk layout, collective MPI I/O)
!//////////////////////////////////////////////////////////

if (mpi_rank == 0) write(*,*) 'Writing/reading dataset by hyperslabs (chunk layout, collective MPI I/O)'

do_collective = .true.
do_chunk      = .true.
call hyper(length,do_collective,do_chunk,nerrors)

!//////////////////////////////////////////////////////////
! test write/read several datasets (contiguous layout, independent MPI I/O)
!//////////////////////////////////////////////////////////

if (mpi_rank == 0) write(*,*) 'Writing/reading several datasets (contiguous layout, independent MPI I/O)'

do_collective = .false.
do_chunk      = .false.
call multiple_dset_write(length,do_collective,do_chunk,nerrors)

!//////////////////////////////////////////////////////////
! close HDF5 interface
!//////////////////////////////////////////////////////////

call h5close_f(hdferror)
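
! optional guard: a failed h5close_f should fail the test run as well
if (hdferror /= 0) nerrors = nerrors + 1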

!//////////////////////////////////////////////////////////
! close MPI
!//////////////////////////////////////////////////////////

if (nerrors == 0) then
 call mpi_finalize(mpierror)
else
 write(*,*) 'Errors detected in process ', mpi_rank
 call mpi_abort(MPI_COMM_WORLD, 1, mpierror)
endif

!//////////////////////////////////////////////////////////
! end main program 
!//////////////////////////////////////////////////////////

end program parallel_test