-rw-r--r--  fortran/src/H5Pff.F90               183
-rw-r--r--  fortran/src/hdf5_fortrandll.def.in    4
-rw-r--r--  fortran/test/tf.F90                   2
-rw-r--r--  fortran/testpar/hyper.f90           143
-rw-r--r--  fortran/testpar/ptest.f90           119
5 files changed, 330 insertions(+), 121 deletions(-)
diff --git a/fortran/src/H5Pff.F90 b/fortran/src/H5Pff.F90
index 6c6abe4..e66a7f2 100644
--- a/fortran/src/H5Pff.F90
+++ b/fortran/src/H5Pff.F90
@@ -7320,6 +7320,188 @@ SUBROUTINE h5pset_attr_phase_change_f(ocpl_id, max_compact, min_dense, hdferr)
hdferr = h5pget_mpio_actual_io_mode_c(dxpl_id, actual_io_mode)
END SUBROUTINE h5pget_mpio_actual_io_mode_f
+
+!****s* H5P/h5pset_all_coll_metadata_ops_f
+! NAME
+! h5pset_all_coll_metadata_ops_f
+!
+! PURPOSE
+! Sets whether HDF5 metadata read operations issued through this access property
+! list are required to be collective or may be independent. When collective access
+! is required, the HDF5 library can optimize the metadata reads, improving
+! performance. The default setting is independent (.FALSE.).
+!
+! INPUTS
+! plist_id      - File access property list identifier.
+! is_collective - Indicates whether metadata reads are required to be collective.
+! OUTPUTS
+! hdferr        - Returns 0 if successful and -1 if it fails.
+!
+! AUTHOR
+! M. Scot Breitenfeld
+! Feb, 10 2016
+!
+! HISTORY
+!
+! SOURCE
+ SUBROUTINE h5pset_all_coll_metadata_ops_f(plist_id, is_collective, hdferr)
+ IMPLICIT NONE
+ INTEGER(HID_T) , INTENT(IN) :: plist_id
+ LOGICAL, INTENT(IN) :: is_collective
+ INTEGER, INTENT(OUT) :: hdferr
+!*****
+ LOGICAL(C_BOOL) :: c_is_collective
+
+ INTERFACE
+ INTEGER FUNCTION h5pset_all_coll_metadata_ops(plist_id, is_collective) BIND(C, NAME='H5Pset_all_coll_metadata_ops')
+ IMPORT :: HID_T, C_BOOL
+ IMPLICIT NONE
+ INTEGER(HID_T) , INTENT(IN), VALUE :: plist_id
+ LOGICAL(C_BOOL), INTENT(IN), VALUE :: is_collective
+ END FUNCTION h5pset_all_coll_metadata_ops
+ END INTERFACE
+
+ ! Transfer value of Fortran LOGICAL to C c_bool type
+ c_is_collective = is_collective
+
+ hdferr = INT(H5Pset_all_coll_metadata_ops(plist_id, c_is_collective))
+
+ END SUBROUTINE h5pset_all_coll_metadata_ops_f
+
+!****s* H5P/h5pget_all_coll_metadata_ops_f
+! NAME
+! h5pget_all_coll_metadata_ops_f
+!
+! PURPOSE
+! Retrieves the collective metadata read setting from the file access property list.
+!
+! INPUTS
+! plist_id      - File access property list identifier.
+! OUTPUTS
+! is_collective - Indicates whether metadata reads are required to be collective.
+! hdferr        - Returns 0 if successful and -1 if it fails.
+!
+! AUTHOR
+! M. Scot Breitenfeld
+! Feb, 10 2016
+!
+! HISTORY
+!
+! SOURCE
+ SUBROUTINE h5pget_all_coll_metadata_ops_f(plist_id, is_collective, hdferr)
+
+ IMPLICIT NONE
+ INTEGER(HID_T) , INTENT(IN) :: plist_id
+ LOGICAL, INTENT(OUT) :: is_collective
+ INTEGER, INTENT(OUT) :: hdferr
+!*****
+ LOGICAL(C_BOOL) :: c_is_collective
+
+ INTERFACE
+ INTEGER FUNCTION h5pget_all_coll_metadata_ops(plist_id, is_collective) BIND(C, NAME='H5Pget_all_coll_metadata_ops')
+ IMPORT :: HID_T, C_BOOL
+ IMPLICIT NONE
+ INTEGER(HID_T) , INTENT(IN), VALUE :: plist_id
+ LOGICAL(C_BOOL), INTENT(OUT) :: is_collective
+ END FUNCTION h5pget_all_coll_metadata_ops
+ END INTERFACE
+
+ hdferr = INT(H5Pget_all_coll_metadata_ops(plist_id, c_is_collective))
+
+ ! Transfer value of C c_bool type to Fortran LOGICAL
+ is_collective = c_is_collective
+
+ END SUBROUTINE h5pget_all_coll_metadata_ops_f
+
+!****s* H5P/h5pset_coll_metadata_write_f
+! NAME
+! h5pset_coll_metadata_write_f
+!
+! PURPOSE
+! Sets metadata writes to collective or independent. The default setting is
+! independent (.FALSE.).
+!
+! INPUTS
+! plist_id      - File access property list identifier.
+! is_collective - Indicates whether metadata writes are collective.
+! OUTPUTS
+! hdferr        - Returns 0 if successful and -1 if it fails.
+!
+! AUTHOR
+! M. Scot Breitenfeld
+! Feb, 10 2016
+!
+! HISTORY
+!
+! SOURCE
+ SUBROUTINE h5pset_coll_metadata_write_f(plist_id, is_collective, hdferr)
+ IMPLICIT NONE
+ INTEGER(HID_T) , INTENT(IN) :: plist_id
+ LOGICAL, INTENT(IN) :: is_collective
+ INTEGER, INTENT(OUT) :: hdferr
+!*****
+ LOGICAL(C_BOOL) :: c_is_collective
+
+ INTERFACE
+ INTEGER FUNCTION h5pset_coll_metadata_write(plist_id, is_collective) BIND(C, NAME='H5Pset_coll_metadata_write')
+ IMPORT :: HID_T, C_BOOL
+ IMPLICIT NONE
+ INTEGER(HID_T) , INTENT(IN), VALUE :: plist_id
+ LOGICAL(C_BOOL), INTENT(IN), VALUE :: is_collective
+ END FUNCTION h5pset_coll_metadata_write
+ END INTERFACE
+
+ ! Transfer value of Fortran LOGICAL to C c_bool type
+ c_is_collective = is_collective
+
+ hdferr = INT(H5Pset_coll_metadata_write(plist_id, c_is_collective))
+
+ END SUBROUTINE h5pset_coll_metadata_write_f
+
+!****s* H5P/h5pget_coll_metadata_write_f
+! NAME
+! h5pget_coll_metadata_write_f
+!
+! PURPOSE
+! Retrieves the collective metadata write setting from the file access property list.
+!
+! INPUTS
+! plist_id      - File access property list identifier.
+! OUTPUTS
+! is_collective - Indicates whether metadata writes are collective.
+! hdferr        - Returns 0 if successful and -1 if it fails.
+!
+! AUTHOR
+! M. Scot Breitenfeld
+! Feb, 10 2016
+!
+! HISTORY
+!
+! SOURCE
+ SUBROUTINE h5pget_coll_metadata_write_f(plist_id, is_collective, hdferr)
+
+ IMPLICIT NONE
+ INTEGER(HID_T) , INTENT(IN) :: plist_id
+ LOGICAL, INTENT(OUT) :: is_collective
+ INTEGER, INTENT(OUT) :: hdferr
+!*****
+ LOGICAL(C_BOOL) :: c_is_collective
+
+ INTERFACE
+ INTEGER FUNCTION h5pget_coll_metadata_write(plist_id, is_collective) BIND(C, NAME='H5Pget_coll_metadata_write')
+ IMPORT :: HID_T, C_BOOL
+ IMPLICIT NONE
+ INTEGER(HID_T) , INTENT(IN), VALUE :: plist_id
+ LOGICAL(C_BOOL), INTENT(OUT) :: is_collective
+ END FUNCTION h5pget_coll_metadata_write
+ END INTERFACE
+
+ hdferr = INT(H5Pget_coll_metadata_write(plist_id, c_is_collective))
+
+ ! Transfer value of C c_bool type to Fortran LOGICAL
+ is_collective = c_is_collective
+
+ END SUBROUTINE h5pget_coll_metadata_write_f
+
#endif
!
@@ -7836,6 +8018,7 @@ SUBROUTINE h5pget_virtual_dsetname_f(dcpl_id, index, name, hdferr, name_len)
END SUBROUTINE h5pget_virtual_dsetname_f
+
END MODULE H5P
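
For orientation, here is a minimal usage sketch of the four new wrappers on a file
access property list. It assumes an MPI-enabled HDF5 build; the program name, file
name, and bare-bones error handling are illustrative only and not part of this change.

  PROGRAM coll_md_example
    USE HDF5
    USE MPI
    IMPLICIT NONE
    INTEGER(HID_T) :: fapl_id, file_id
    LOGICAL        :: is_coll
    INTEGER        :: hdferr, mpierror

    CALL MPI_Init(mpierror)
    CALL h5open_f(hdferr)

    ! File access property list driven by MPI-IO
    CALL h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferr)
    CALL h5pset_fapl_mpio_f(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL, hdferr)

    ! Require collective metadata reads and enable collective metadata writes
    CALL h5pset_all_coll_metadata_ops_f(fapl_id, .TRUE., hdferr)
    CALL h5pset_coll_metadata_write_f(fapl_id, .TRUE., hdferr)

    ! Read both settings back; is_coll should now be .TRUE. in each case
    CALL h5pget_all_coll_metadata_ops_f(fapl_id, is_coll, hdferr)
    CALL h5pget_coll_metadata_write_f(fapl_id, is_coll, hdferr)

    CALL h5fcreate_f("example.h5", H5F_ACC_TRUNC_F, file_id, hdferr, access_prp=fapl_id)

    CALL h5fclose_f(file_id, hdferr)
    CALL h5pclose_f(fapl_id, hdferr)
    CALL h5close_f(hdferr)
    CALL MPI_Finalize(mpierror)
  END PROGRAM coll_md_example

Note that the wrappers convert through a LOGICAL(C_BOOL) temporary internally, so
callers pass ordinary default LOGICAL arguments, as above.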
diff --git a/fortran/src/hdf5_fortrandll.def.in b/fortran/src/hdf5_fortrandll.def.in
index 42b5642..3a5a91f 100644
--- a/fortran/src/hdf5_fortrandll.def.in
+++ b/fortran/src/hdf5_fortrandll.def.in
@@ -334,6 +334,10 @@ H5P_mp_H5PGET_VIRTUAL_DSETNAME_F
@H5_NOPAREXP@H5P_mp_H5PSET_DXPL_MPIO_F
@H5_NOPAREXP@H5P_mp_H5PGET_DXPL_MPIO_F
@H5_NOPAREXP@H5P_mp_H5PGET_MPIO_ACTUAL_IO_MODE_F
+@H5_NOPAREXP@H5P_mp_H5PSET_ALL_COLL_METADATA_OPS_F
+@H5_NOPAREXP@H5P_mp_H5PGET_ALL_COLL_METADATA_OPS_F
+@H5_NOPAREXP@H5P_mp_H5PSET_COLL_METADATA_WRITE_F
+@H5_NOPAREXP@H5P_mp_H5PGET_COLL_METADATA_WRITE_F
; H5R
H5R_mp_H5RCREATE_OBJECT_F
H5R_mp_H5RCREATE_REGION_F
diff --git a/fortran/test/tf.F90 b/fortran/test/tf.F90
index 7d67f30..f8629ba 100644
--- a/fortran/test/tf.F90
+++ b/fortran/test/tf.F90
@@ -89,7 +89,7 @@ CONTAINS
error_string = skip
ENDIF
- WRITE(*, fmt = '(A, T72, A)') test_title, error_string
+ WRITE(*, fmt = '(A, T80, A)') test_title, error_string
IF(test_result.GT.0) total_error = total_error + test_result
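
The Tn edit descriptor tabs to column n before the next item is written; tabbing to
a column already passed would overwrite the end of the test title. The parallel-test
status lines introduced below produce titles of 72 characters or more, so the
PASSED/FAILED column moves from 72 to 80. A one-line illustration, with a title
taken from the new tests:

  WRITE(*, fmt='(A, T80, A)') &
       'Writing/reading dataset by hyperslabs (chunk layout, collective MPI I/O)', 'PASSED'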
diff --git a/fortran/testpar/hyper.f90 b/fortran/testpar/hyper.f90
index a2e2e07..28c0b53 100644
--- a/fortran/testpar/hyper.f90
+++ b/fortran/testpar/hyper.f90
@@ -14,9 +14,9 @@
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
-!//////////////////////////////////////////////////////////
+!
! writes/reads dataset by hyperslabs
-!//////////////////////////////////////////////////////////
+!
SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors)
USE HDF5
@@ -52,14 +52,15 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors)
CHARACTER(len=80) :: filename ! filename
INTEGER :: i
INTEGER :: actual_io_mode ! The type of I/O performed by this process
-
- !//////////////////////////////////////////////////////////
+ LOGICAL :: is_coll
+ LOGICAL :: is_coll_true = .TRUE.
+ !
! initialize the array data between the processes (3)
! for the 12 size array we get
! p0 = 1,2,3,4
! p1 = 5,6,7,8
! p2 = 9,10,11,12
- !//////////////////////////////////////////////////////////
+ !
ALLOCATE(wbuf(0:length-1),stat=hdferror)
IF (hdferror /= 0) THEN
@@ -81,17 +82,16 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors)
wbuf(i) = i
ENDDO
- !//////////////////////////////////////////////////////////
+ !
! HDF5 I/O
- !//////////////////////////////////////////////////////////
+ !
dims(1) = length
cdims(1) = length/mpi_size ! define chunks as the number of processes
- !//////////////////////////////////////////////////////////
+ !
! setup file access property list with parallel I/O access
- !//////////////////////////////////////////////////////////
-
+ !
CALL h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferror)
CALL check("h5pcreate_f", hdferror, nerrors)
@@ -106,14 +106,67 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors)
nerrors = nerrors + 1
ENDIF
- !//////////////////////////////////////////////////////////
+ !
! create the file collectively
- !//////////////////////////////////////////////////////////
-
+ !
CALL h5_fixname_f("parf1", filename, fapl_id, hdferror)
- CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, hdferror, access_prp = fapl_id)
- CALL check("h5fcreate_f", hdferror, nerrors)
+ IF(do_collective)THEN
+ ! verify settings for file access properties
+
+ ! Collective metadata writes
+ CALL h5pget_coll_metadata_write_f(fapl_id, is_coll, hdferror)
+ CALL check("h5pget_coll_metadata_write_f", hdferror, nerrors)
+ IF(is_coll .NEQV. .FALSE.)THEN
+ PRINT*, "Incorrect property setting for coll metadata writes"
+ nerrors = nerrors + 1
+ ENDIF
+
+ ! Collective metadata read API calling requirement
+ CALL h5pget_all_coll_metadata_ops_f(fapl_id, is_coll, hdferror)
+ CALL check("h5pget_all_coll_metadata_ops_f", hdferror, nerrors)
+ IF(is_coll .NEQV. .FALSE.)THEN
+ PRINT*, "Incorrect property setting for coll metadata API calls requirement"
+ nerrors = nerrors + 1
+ ENDIF
+
+ ! Collective metadata writes
+ CALL h5pset_coll_metadata_write_f(fapl_id, .TRUE., hdferror)
+ CALL check("h5pset_coll_metadata_write_f", hdferror, nerrors)
+ ! Collective metadata READ API calling requirement
+ CALL h5pset_all_coll_metadata_ops_f(fapl_id, is_coll_true, hdferror)
+ CALL check("h5pset_all_coll_metadata_ops_f", hdferror, nerrors)
+
+ CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, hdferror, access_prp = fapl_id)
+ CALL check("h5fcreate_f", hdferror, nerrors)
+
+ ! close fapl and retrieve it from file
+ CALL h5pclose_f(fapl_id, hdferror)
+ CALL check("h5pclose_f", hdferror, nerrors)
+ CALL h5fget_access_plist_f(file_id, fapl_id, hdferror)
+ CALL check("h5fget_access_plist_f", hdferror, nerrors)
+
+ ! verify settings for file access properties
+
+ ! Collective metadata writes
+ CALL h5pget_coll_metadata_write_f(fapl_id, is_coll, hdferror)
+ CALL check("h5pget_coll_metadata_write_f", hdferror, nerrors)
+ IF(is_coll .NEQV. .TRUE.)THEN
+ PRINT*, "Incorrect property setting for coll metadata writes"
+ nerrors = nerrors + 1
+ ENDIF
+
+ ! Collective metadata read API calling requirement
+ CALL h5pget_all_coll_metadata_ops_f(fapl_id, is_coll, hdferror)
+ CALL check("h5pget_all_coll_metadata_ops_f", hdferror, nerrors)
+ IF(is_coll .NEQV. .TRUE.)THEN
+ PRINT*, "Incorrect property setting for coll metadata API calls requirement"
+ nerrors = nerrors + 1
+ ENDIF
+ ELSE
+ CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, hdferror, access_prp = fapl_id)
+ CALL check("h5fcreate_f", hdferror, nerrors)
+ ENDIF
CALL h5screate_simple_f(1, dims, fspace_id, hdferror)
CALL check("h5screate_simple_f", hdferror, nerrors)
@@ -121,9 +174,9 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors)
CALL h5screate_simple_f(1, dims, mspace_id, hdferror)
CALL check("h5screate_simple_f", hdferror, nerrors)
- !//////////////////////////////////////////////////////////
+ !
! modify dataset creation properties to enable chunking
- !//////////////////////////////////////////////////////////
+ !
CALL h5pcreate_f(H5P_DATASET_CREATE_F, dcpl_id, hdferror)
CALL check("h5pcreate_f", hdferror, nerrors)
@@ -133,38 +186,38 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors)
CALL check("h5pset_chunk_f", hdferror, nerrors)
ENDIF
- !//////////////////////////////////////////////////////////
+ !
! create the dataset
- !//////////////////////////////////////////////////////////
+ !
CALL h5dcreate_f(file_id, "dset", H5T_NATIVE_INTEGER, fspace_id, dset_id, hdferror, dcpl_id)
CALL check("h5dcreate_f", hdferror, nerrors)
- !//////////////////////////////////////////////////////////
+ !
! define hyperslab
- !//////////////////////////////////////////////////////////
+ !
counti(1) = icount
start(1) = istart
- !//////////////////////////////////////////////////////////
+ !
! select hyperslab in memory
- !//////////////////////////////////////////////////////////
+ !
CALL h5sselect_hyperslab_f(mspace_id, H5S_SELECT_SET_F, start, counti, hdferror)
CALL check("h5sselect_hyperslab_f", hdferror, nerrors)
- !//////////////////////////////////////////////////////////
+ !
! select hyperslab in the file
- !//////////////////////////////////////////////////////////
+ !
CALL h5sselect_hyperslab_f(fspace_id, H5S_SELECT_SET_F, start, counti, hdferror)
CALL check("h5sselect_hyperslab_f", hdferror, nerrors)
- !//////////////////////////////////////////////////////////
+ !
! create a property list for collective dataset write
- !//////////////////////////////////////////////////////////
+ !
CALL h5pcreate_f(H5P_DATASET_XFER_F, dxpl_id, hdferror)
CALL check("h5pcreate_f", hdferror, nerrors)
@@ -174,9 +227,9 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors)
CALL check("h5pset_dxpl_mpio_f", hdferror, nerrors)
ENDIF
- !//////////////////////////////////////////////////////////
+ !
! write dataset
- !//////////////////////////////////////////////////////////
+ !
CALL h5dwrite_f(dset_id,H5T_NATIVE_INTEGER,wbuf,dims,hdferror,file_space_id=fspace_id,mem_space_id=mspace_id,xfer_prp=dxpl_id)
CALL check("h5dwrite_f", hdferror, nerrors)
@@ -200,9 +253,9 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors)
ENDIF
ENDIF
- !//////////////////////////////////////////////////////////
+ !
! close HDF5 I/O
- !//////////////////////////////////////////////////////////
+ !
CALL h5pclose_f(fapl_id, hdferror)
CALL check("h5pclose_f", hdferror, nerrors)
@@ -225,9 +278,9 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors)
CALL h5fclose_f(file_id, hdferror)
CALL check("h5fclose_f", hdferror, nerrors)
- !//////////////////////////////////////////////////////////
+ !
! reopen file with read access
- !//////////////////////////////////////////////////////////
+ !
CALL h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferror)
CALL check("h5pcreate_f", hdferror, nerrors)
@@ -247,23 +300,23 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors)
CALL h5dopen_f(file_id, "dset", dset_id, hdferror)
CALL check("h5pcreate_f", hdferror, nerrors)
- !//////////////////////////////////////////////////////////
+ !
! select hyperslab in memory
- !//////////////////////////////////////////////////////////
+ !
CALL h5sselect_hyperslab_f(mspace_id, H5S_SELECT_SET_F, start, counti, hdferror)
CALL check("h5pcreate_f", hdferror, nerrors)
- !//////////////////////////////////////////////////////////
+ !
! select hyperslab in the file
- !//////////////////////////////////////////////////////////
+ !
CALL h5sselect_hyperslab_f(fspace_id, H5S_SELECT_SET_F, start, counti, hdferror)
CALL check("h5pcreate_f", hdferror, nerrors)
- !//////////////////////////////////////////////////////////
+ !
! create a property list for collective dataset read
- !//////////////////////////////////////////////////////////
+ !
CALL h5pcreate_f(H5P_DATASET_XFER_F, dxpl_id, hdferror)
CALL check("h5pcreate_f", hdferror, nerrors)
@@ -273,16 +326,16 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors)
CALL check("h5pcreate_f", hdferror, nerrors)
ENDIF
- !//////////////////////////////////////////////////////////
+ !
! read dataset
- !//////////////////////////////////////////////////////////
+ !
CALL h5dread_f(dset_id,H5T_NATIVE_INTEGER,rbuf,dims,hdferror,file_space_id=fspace_id,mem_space_id=mspace_id,xfer_prp=dxpl_id)
CALL check("h5pcreate_f", hdferror, nerrors)
- !//////////////////////////////////////////////////////////
+ !
! close HDF5 I/O
- !//////////////////////////////////////////////////////////
+ !
CALL h5pclose_f(fapl_id, hdferror)
CALL check("h5pcreate_f", hdferror, nerrors)
@@ -302,9 +355,9 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors)
CALL h5fclose_f(file_id, hdferror)
CALL check("h5pcreate_f", hdferror, nerrors)
- !//////////////////////////////////////////////////////////
+ !
! compare read and write data. each process compares a subset of the array
- !//////////////////////////////////////////////////////////
+ !
DO i = istart, iend-1
IF( wbuf(i) /= rbuf(i)) THEN
diff --git a/fortran/testpar/ptest.f90 b/fortran/testpar/ptest.f90
index 69594b0..82dcc09 100644
--- a/fortran/testpar/ptest.f90
+++ b/fortran/testpar/ptest.f90
@@ -13,29 +13,35 @@
! access to either file, you may request a copy from help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
-!//////////////////////////////////////////////////////////
+!
! main program for parallel HDF5 Fortran tests
-!//////////////////////////////////////////////////////////
+!
PROGRAM parallel_test
USE hdf5
+ USE MPI
+ USE TH5_MISC
IMPLICIT NONE
- INCLUDE 'mpif.h'
INTEGER :: mpierror ! MPI error flag
INTEGER :: hdferror ! HDF error flag
- LOGICAL :: do_collective ! use collective MPI I/O
- LOGICAL :: do_chunk ! use chunking
- INTEGER :: nerrors = 0 ! number of errors
+ INTEGER :: ret_total_error = 0 ! number of errors in subroutine
+ INTEGER :: total_error = 0 ! sum of the number of errors
INTEGER :: mpi_size ! number of processes in the group of communicator
INTEGER :: mpi_rank ! rank of the calling process in the communicator
INTEGER :: length = 12000 ! length of array
-
- !//////////////////////////////////////////////////////////
+ INTEGER :: i,j
+ ! use collective MPI I/O
+ LOGICAL, DIMENSION(1:2) :: do_collective = (/.FALSE.,.TRUE./)
+ CHARACTER(LEN=11), DIMENSION(1:2) :: chr_collective =(/"independent", "collective "/)
+ ! use chunking
+ LOGICAL, DIMENSION(1:2) :: do_chunk = (/.FALSE.,.TRUE./)
+ CHARACTER(LEN=10), DIMENSION(1:2) :: chr_chunk =(/"contiguous", "chunk     "/)
+
+ !
! initialize MPI
- !//////////////////////////////////////////////////////////
-
+ !
CALL mpi_init(mpierror)
IF (mpierror .NE. MPI_SUCCESS) THEN
WRITE(*,*) "MPI_INIT *FAILED*"
@@ -48,74 +54,40 @@ PROGRAM parallel_test
IF (mpierror .NE. MPI_SUCCESS) THEN
WRITE(*,*) "MPI_COMM_SIZE *FAILED* Process = ", mpi_rank
ENDIF
- !//////////////////////////////////////////////////////////
+ !
! initialize the HDF5 fortran interface
- !//////////////////////////////////////////////////////////
-
+ !
CALL h5open_f(hdferror)
-
- !//////////////////////////////////////////////////////////
- ! test write/read dataset by hyperslabs with independent MPI I/O
- !//////////////////////////////////////////////////////////
-
- IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading dataset by hyperslabs (contiguous layout, independent MPI I/O)'
-
- do_collective = .FALSE.
- do_chunk = .FALSE.
- CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
-
- !//////////////////////////////////////////////////////////
- ! test write/read dataset by hyperslabs with collective MPI I/O
- !//////////////////////////////////////////////////////////
-
- IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading dataset by hyperslabs (contiguous layout, collective MPI I/O)'
-
- do_collective = .TRUE.
- do_chunk = .FALSE.
- CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
-
- !//////////////////////////////////////////////////////////
- ! test write/read dataset by hyperslabs with independent MPI I/O
- !//////////////////////////////////////////////////////////
-
- IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading dataset by hyperslabs (chunk layout, independent MPI I/O)'
-
- do_collective = .FALSE.
- do_chunk = .TRUE.
- CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
-
- !//////////////////////////////////////////////////////////
- ! test write/read dataset by hyperslabs with collective MPI I/O
- !//////////////////////////////////////////////////////////
-
- IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading dataset by hyperslabs (chunk layout, collective MPI I/O)'
-
- do_collective = .TRUE.
- do_chunk = .TRUE.
- CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
-
- !//////////////////////////////////////////////////////////
+ !
+ ! test write/read dataset by hyperslabs (contiguous/chunk) with independent/collective MPI I/O
+ !
+ DO i = 1, 2
+ DO j = 1, 2
+ ret_total_error = 0
+ CALL hyper(length, do_collective(j), do_chunk(i), mpi_size, mpi_rank, ret_total_error)
+ IF(mpi_rank==0) CALL write_test_status(ret_total_error, &
+ "Writing/reading dataset by hyperslabs ("//TRIM(chr_chunk(i))//" layout, "//TRIM(chr_collective(j))//" MPI I/O)", &
+ total_error)
+ ENDDO
+ ENDDO
+
+ !
! test write/read several datasets (independent MPI I/O)
- !//////////////////////////////////////////////////////////
-
- IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading several datasets (contiguous layout, independent MPI I/O)'
-
- do_collective = .FALSE.
- do_chunk = .FALSE.
- CALL multiple_dset_write(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
+ !
+ ret_total_error = 0
+ CALL multiple_dset_write(length, do_collective(1), do_chunk(1), mpi_size, mpi_rank, ret_total_error)
+ IF(mpi_rank==0) CALL write_test_status(ret_total_error, &
+ 'Writing/reading several datasets (contiguous layout, independent MPI I/O)', total_error)
-
- !//////////////////////////////////////////////////////////
+ !
! close HDF5 interface
- !//////////////////////////////////////////////////////////
-
+ !
CALL h5close_f(hdferror)
- !//////////////////////////////////////////////////////////
+ !
! close MPI
- !//////////////////////////////////////////////////////////
-
- IF (nerrors == 0) THEN
+ !
+ IF (total_error == 0) THEN
CALL mpi_finalize(mpierror)
IF (mpierror .NE. MPI_SUCCESS) THEN
WRITE(*,*) "MPI_FINALIZE *FAILED* Process = ", mpi_rank
@@ -127,10 +99,7 @@ PROGRAM parallel_test
WRITE(*,*) "MPI_ABORT *FAILED* Process = ", mpi_rank
ENDIF
ENDIF
-
- !//////////////////////////////////////////////////////////
+ !
! end main program
- !//////////////////////////////////////////////////////////
-
+ !
END PROGRAM parallel_test
-