Diffstat (limited to 'fortran')
-rw-r--r-- | fortran/testpar/hyper.f90 | 457
-rw-r--r-- | fortran/testpar/mdset.f90 | 487
-rw-r--r-- | fortran/testpar/ptest.f90 | 212
3 files changed, 583 insertions, 573 deletions
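
For orientation before the full diff: apart from converting the Fortran keywords to upper case and correcting the spelling of "lenght" to "length", the substantive change is that hyper() and multiple_dset_write() no longer call mpi_comm_rank/mpi_comm_size themselves; ptest.f90 now queries the rank and size once, checks the MPI return codes, and passes both values down as arguments. Below is a minimal sketch of that calling convention, not the actual test code; the names driver_sketch and test_sketch are illustrative, and the routine body is reduced to the per-rank slice computation that both tests share.

! Minimal sketch (illustrative names) of the calling convention introduced
! by this patch: MPI rank/size are obtained once in the driver and passed
! to the test routine instead of being queried inside it.
PROGRAM driver_sketch
  IMPLICIT NONE
  INCLUDE 'mpif.h'
  INTEGER :: mpierror, mpi_size, mpi_rank
  INTEGER :: nerrors = 0

  CALL mpi_init(mpierror)
  CALL mpi_comm_rank(MPI_COMM_WORLD, mpi_rank, mpierror)
  CALL mpi_comm_size(MPI_COMM_WORLD, mpi_size, mpierror)

  ! rank and size travel as arguments, matching the new
  ! hyper()/multiple_dset_write() interfaces
  CALL test_sketch(12000, .FALSE., .FALSE., mpi_size, mpi_rank, nerrors)

  CALL mpi_finalize(mpierror)
END PROGRAM driver_sketch

SUBROUTINE test_sketch(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
  IMPLICIT NONE
  INTEGER, INTENT(in)    :: length         ! array length
  LOGICAL, INTENT(in)    :: do_collective  ! use collective I/O
  LOGICAL, INTENT(in)    :: do_chunk       ! use chunking
  INTEGER, INTENT(in)    :: mpi_size       ! size of MPI_COMM_WORLD
  INTEGER, INTENT(in)    :: mpi_rank       ! rank of the calling process
  INTEGER, INTENT(inout) :: nerrors        ! error counter
  INTEGER :: icount, istart, iend

  icount = length/mpi_size   ! elements handled by each process
  istart = mpi_rank*icount   ! start of this rank's slice
  iend   = istart + icount   ! one past the end of the slice (loops run to iend-1)
  ! ... the actual tests open the HDF5 file here and write/read this slice ...
END SUBROUTINE test_sketch
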
diff --git a/fortran/testpar/hyper.f90 b/fortran/testpar/hyper.f90 index fc0d1ff..a6d72dd 100644 --- a/fortran/testpar/hyper.f90 +++ b/fortran/testpar/hyper.f90 @@ -18,286 +18,283 @@ ! writes/reads dataset by hyperslabs !////////////////////////////////////////////////////////// -subroutine hyper(lenght,do_collective,do_chunk,nerrors) -use hdf5 -implicit none -include 'mpif.h' - -integer, intent(in) :: lenght ! array lenght -logical, intent(in) :: do_collective ! use collective I/O -logical, intent(in) :: do_chunk ! use chunking -integer, intent(inout) :: nerrors ! number of errors -integer :: mpierror ! MPI hdferror flag -integer :: hdferror ! HDF hdferror flag -integer :: mpi_size ! number of processes in the group of communicator -integer :: mpi_rank ! rank of the calling process in the communicator -integer(hsize_t), dimension(1) :: dims ! dataset dimensions -integer(hsize_t), dimension(1) :: cdims ! chunk dimensions -integer, allocatable :: wbuf(:) ! write buffer -integer, allocatable :: rbuf(:) ! read buffer -integer(hsize_t), dimension(1) :: counti ! hyperslab selection -integer(hsize_t), dimension(1) :: start ! hyperslab selection -integer(hid_t) :: fapl_id ! file access identifier -integer(hid_t) :: dxpl_id ! dataset transfer property list -integer(hid_t) :: dcpl_id ! dataset creation property list -integer(hid_t) :: file_id ! file identifier -integer(hid_t) :: dset_id ! dataset identifier -integer(hid_t) :: fspace_id ! file space identifier -integer(hid_t) :: mspace_id ! memory space identifier -integer(hid_t) :: driver_id ! low-level file driver identifier -integer :: istart ! start position in array -integer :: iend ! end position in array -integer :: icount ! number of elements in array -character(len=80) :: filename ! filename -integer :: i - -call mpi_comm_rank( MPI_COMM_WORLD, mpi_rank, mpierror ) -call mpi_comm_size( MPI_COMM_WORLD, mpi_size, mpierror ) +SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors) + USE hdf5 + IMPLICIT NONE + INCLUDE 'mpif.h' + + INTEGER, INTENT(in) :: length ! array length + LOGICAL, INTENT(in) :: do_collective ! use collective I/O + LOGICAL, INTENT(in) :: do_chunk ! use chunking + INTEGER, INTENT(in) :: mpi_size ! number of processes in the group of communicator + INTEGER, INTENT(in) :: mpi_rank ! rank of the calling process in the communicator + INTEGER, INTENT(inout) :: nerrors ! number of errors + INTEGER :: mpierror ! MPI hdferror flag + INTEGER :: hdferror ! HDF hdferror flag + INTEGER(hsize_t), DIMENSION(1) :: dims ! dataset dimensions + INTEGER(hsize_t), DIMENSION(1) :: cdims ! chunk dimensions + INTEGER, ALLOCATABLE :: wbuf(:) ! write buffer + INTEGER, ALLOCATABLE :: rbuf(:) ! read buffer + INTEGER(hsize_t), DIMENSION(1) :: counti ! hyperslab selection + INTEGER(hsize_t), DIMENSION(1) :: start ! hyperslab selection + INTEGER(hid_t) :: fapl_id ! file access identifier + INTEGER(hid_t) :: dxpl_id ! dataset transfer property list + INTEGER(hid_t) :: dcpl_id ! dataset creation property list + INTEGER(hid_t) :: file_id ! file identifier + INTEGER(hid_t) :: dset_id ! dataset identifier + INTEGER(hid_t) :: fspace_id ! file space identifier + INTEGER(hid_t) :: mspace_id ! memory space identifier + INTEGER(hid_t) :: driver_id ! low-level file driver identifier + INTEGER :: istart ! start position in array + INTEGER :: iend ! end position in array + INTEGER :: icount ! number of elements in array + CHARACTER(len=80) :: filename ! filename + INTEGER :: i + + !////////////////////////////////////////////////////////// + ! 
initialize the array data between the processes (3) + ! for the 12 size array we get + ! p0 = 1,2,3,4 + ! p1 = 5,6,7,8 + ! p2 = 9,10,11,12 + !////////////////////////////////////////////////////////// + + ALLOCATE(wbuf(0:length-1),stat=hdferror) + IF (hdferror /= 0) THEN + WRITE(*,*) 'allocate error' + RETURN + ENDIF + + ALLOCATE(rbuf(0:length-1),stat=hdferror) + IF (hdferror /= 0) THEN + WRITE(*,*) 'allocate error' + RETURN + ENDIF + + icount = length/mpi_size ! divide the array by the number of processes + istart = mpi_rank*icount ! start position + iend = istart + icount ! end position + + DO i = istart, iend-1 + wbuf(i) = i + ENDDO + + !////////////////////////////////////////////////////////// + ! HDF5 I/O + !////////////////////////////////////////////////////////// + + dims(1) = length + cdims(1) = length/mpi_size ! define chunks as the number of processes + + !////////////////////////////////////////////////////////// + ! setup file access property list with parallel I/O access + !////////////////////////////////////////////////////////// + + CALL h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) + + CALL h5pset_fapl_mpio_f(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL, hdferror) + CALL check("h5pset_fapl_mpio_f", hdferror, nerrors) + + CALL h5pget_driver_f(fapl_id, driver_id, hdferror) + CALL check("h5pget_driver_f", hdferror, nerrors) + + IF( driver_id /= H5FD_MPIO_F) THEN + WRITE(*,*) "Wrong driver information returned" + nerrors = nerrors + 1 + ENDIF + + !////////////////////////////////////////////////////////// + ! create the file collectively + !////////////////////////////////////////////////////////// + + CALL h5_fixname_f("parf1", filename, fapl_id, hdferror) + + CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, hdferror, access_prp = fapl_id) + CALL check("h5fcreate_f", hdferror, nerrors) + + CALL h5screate_simple_f(1, dims, fspace_id, hdferror) + CALL check("h5screate_simple_f", hdferror, nerrors) + + CALL h5screate_simple_f(1, dims, mspace_id, hdferror) + CALL check("h5screate_simple_f", hdferror, nerrors) + + !////////////////////////////////////////////////////////// + ! modify dataset creation properties to enable chunking + !////////////////////////////////////////////////////////// -!////////////////////////////////////////////////////////// -! initialize the array data between the processes (3) -! for the 12 size array we get -! p0 = 1,2,3,4 -! p1 = 5,6,7,8 -! p2 = 9,10,11,12 -!////////////////////////////////////////////////////////// + CALL h5pcreate_f(H5P_DATASET_CREATE_F, dcpl_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -allocate(wbuf(0:lenght-1),stat=hdferror) -if (hdferror /= 0) then - write(*,*) 'allocate error' - return -endif + IF (do_chunk) THEN + CALL h5pset_chunk_f(dcpl_id, 1, cdims, hdferror) + CALL check("h5pset_chunk_f", hdferror, nerrors) + ENDIF -allocate(rbuf(0:lenght-1),stat=hdferror) -if (hdferror /= 0) then - write(*,*) 'allocate error' - return -endif + !////////////////////////////////////////////////////////// + ! create the dataset + !////////////////////////////////////////////////////////// -icount = lenght/mpi_size ! divide the array by the number of processes -istart = mpi_rank*icount ! start position -iend = istart + icount ! 
end position + CALL h5dcreate_f(file_id, "dset", H5T_NATIVE_INTEGER, fspace_id, dset_id, hdferror, dcpl_id) + CALL check("h5dcreate_f", hdferror, nerrors) -do i = istart, iend-1 - wbuf(i) = i -enddo + !////////////////////////////////////////////////////////// + ! define hyperslab + !////////////////////////////////////////////////////////// -!////////////////////////////////////////////////////////// -! HDF5 I/O -!////////////////////////////////////////////////////////// + counti(1) = icount + start(1) = istart -dims(1) = lenght -cdims(1) = lenght/mpi_size ! define chunks as the number of processes + !////////////////////////////////////////////////////////// + ! select hyperslab in memory + !////////////////////////////////////////////////////////// -!////////////////////////////////////////////////////////// -! setup file access property list with parallel I/O access -!////////////////////////////////////////////////////////// + CALL h5sselect_hyperslab_f(mspace_id, H5S_SELECT_SET_F, start, counti, hdferror) + CALL check("h5sselect_hyperslab_f", hdferror, nerrors) -call h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) + !////////////////////////////////////////////////////////// + ! select hyperslab in the file + !////////////////////////////////////////////////////////// -call h5pset_fapl_mpio_f(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL, hdferror) -call check("h5pset_fapl_mpio_f", hdferror, nerrors) + CALL h5sselect_hyperslab_f(fspace_id, H5S_SELECT_SET_F, start, counti, hdferror) + CALL check("h5sselect_hyperslab_f", hdferror, nerrors) -call h5pget_driver_f(fapl_id, driver_id, hdferror) -call check("h5pget_driver_f", hdferror, nerrors) - -if( driver_id /= H5FD_MPIO_F) then - write(*,*) "Wrong driver information returned" - nerrors = nerrors + 1 -endif -!////////////////////////////////////////////////////////// -! create the file collectively -!////////////////////////////////////////////////////////// + !////////////////////////////////////////////////////////// + ! create a property list for collective dataset write + !////////////////////////////////////////////////////////// -call h5_fixname_f("parf1", filename, fapl_id, hdferror) + CALL h5pcreate_f(H5P_DATASET_XFER_F, dxpl_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -call h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, hdferror, access_prp = fapl_id) -call check("h5fcreate_f", hdferror, nerrors) + IF (do_collective) THEN + CALL h5pset_dxpl_mpio_f(dxpl_id, H5FD_MPIO_COLLECTIVE_F, hdferror) + CALL check("h5pset_dxpl_mpio_f", hdferror, nerrors) + ENDIF -call h5screate_simple_f(1, dims, fspace_id, hdferror) -call check("h5screate_simple_f", hdferror, nerrors) + !////////////////////////////////////////////////////////// + ! write dataset + !////////////////////////////////////////////////////////// -call h5screate_simple_f(1, dims, mspace_id, hdferror) -call check("h5screate_simple_f", hdferror, nerrors) + CALL h5dwrite_f(dset_id,H5T_NATIVE_INTEGER,wbuf,dims,hdferror,file_space_id=fspace_id,mem_space_id=mspace_id,xfer_prp=dxpl_id) + CALL check("h5dwrite_f", hdferror, nerrors) -!////////////////////////////////////////////////////////// -! modify dataset creation properties to enable chunking -!////////////////////////////////////////////////////////// -call h5pcreate_f(H5P_DATASET_CREATE_F, dcpl_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) + !////////////////////////////////////////////////////////// + ! 
close HDF5 I/O + !////////////////////////////////////////////////////////// -if (do_chunk) then - call h5pset_chunk_f(dcpl_id, 1, cdims, hdferror) - call check("h5pset_chunk_f", hdferror, nerrors) -endif + CALL h5pclose_f(fapl_id, hdferror) + CALL check("h5pclose_f", hdferror, nerrors) -!////////////////////////////////////////////////////////// -! create the dataset -!////////////////////////////////////////////////////////// + CALL h5pclose_f(dcpl_id, hdferror) + CALL check("h5pclose_f", hdferror, nerrors) -call h5dcreate_f(file_id, "dset", H5T_NATIVE_INTEGER, fspace_id, dset_id, hdferror, dcpl_id) -call check("h5dcreate_f", hdferror, nerrors) + CALL h5pclose_f(dxpl_id, hdferror) + CALL check("h5pclose_f", hdferror, nerrors) -!////////////////////////////////////////////////////////// -! define hyperslab -!////////////////////////////////////////////////////////// + CALL h5sclose_f(mspace_id, hdferror) + CALL check("h5sclose_f", hdferror, nerrors) -counti(1) = icount -start(1) = istart + CALL h5sclose_f(fspace_id, hdferror) + CALL check("h5sclose_f", hdferror, nerrors) -!////////////////////////////////////////////////////////// -! select hyperslab in memory -!////////////////////////////////////////////////////////// + CALL h5dclose_f(dset_id, hdferror) + CALL check("h5dclose_f", hdferror, nerrors) -call h5sselect_hyperslab_f(mspace_id, H5S_SELECT_SET_F, start, counti, hdferror) -call check("h5sselect_hyperslab_f", hdferror, nerrors) + CALL h5fclose_f(file_id, hdferror) + CALL check("h5fclose_f", hdferror, nerrors) -!////////////////////////////////////////////////////////// -! select hyperslab in the file -!////////////////////////////////////////////////////////// + !////////////////////////////////////////////////////////// + ! reopen file with read access + !////////////////////////////////////////////////////////// -call h5sselect_hyperslab_f(fspace_id, H5S_SELECT_SET_F, start, counti, hdferror) -call check("h5sselect_hyperslab_f", hdferror, nerrors) + CALL h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) + CALL h5pset_fapl_mpio_f(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -!////////////////////////////////////////////////////////// -! create a property list for collective dataset write -!////////////////////////////////////////////////////////// + CALL h5fopen_f(filename, H5F_ACC_RDWR_F, file_id, hdferror, access_prp = fapl_id) + CALL check("h5pcreate_f", hdferror, nerrors) -call h5pcreate_f(H5P_DATASET_XFER_F, dxpl_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) - -if (do_collective) then - call h5pset_dxpl_mpio_f(dxpl_id, H5FD_MPIO_COLLECTIVE_F, hdferror) - call check("h5pset_dxpl_mpio_f", hdferror, nerrors) -endif + CALL h5screate_simple_f(1, dims, fspace_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -!////////////////////////////////////////////////////////// -! write dataset -!////////////////////////////////////////////////////////// + CALL h5screate_simple_f(1, dims, mspace_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -call h5dwrite_f(dset_id,H5T_NATIVE_INTEGER,wbuf,dims,hdferror,file_space_id=fspace_id,mem_space_id=mspace_id,xfer_prp=dxpl_id) -call check("h5dwrite_f", hdferror, nerrors) + CALL h5dopen_f(file_id, "dset", dset_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) + !////////////////////////////////////////////////////////// + ! 
select hyperslab in memory + !////////////////////////////////////////////////////////// -!////////////////////////////////////////////////////////// -! close HDF5 I/O -!////////////////////////////////////////////////////////// + CALL h5sselect_hyperslab_f(mspace_id, H5S_SELECT_SET_F, start, counti, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -call h5pclose_f(fapl_id, hdferror) -call check("h5pclose_f", hdferror, nerrors) + !////////////////////////////////////////////////////////// + ! select hyperslab in the file + !////////////////////////////////////////////////////////// -call h5pclose_f(dcpl_id, hdferror) -call check("h5pclose_f", hdferror, nerrors) + CALL h5sselect_hyperslab_f(fspace_id, H5S_SELECT_SET_F, start, counti, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -call h5pclose_f(dxpl_id, hdferror) -call check("h5pclose_f", hdferror, nerrors) + !////////////////////////////////////////////////////////// + ! create a property list for collective dataset read + !////////////////////////////////////////////////////////// -call h5sclose_f(mspace_id, hdferror) -call check("h5sclose_f", hdferror, nerrors) + CALL h5pcreate_f(H5P_DATASET_XFER_F, dxpl_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -call h5sclose_f(fspace_id, hdferror) -call check("h5sclose_f", hdferror, nerrors) + IF (do_collective) THEN + CALL h5pset_dxpl_mpio_f(dxpl_id, H5FD_MPIO_COLLECTIVE_F, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) + ENDIF -call h5dclose_f(dset_id, hdferror) -call check("h5dclose_f", hdferror, nerrors) + !////////////////////////////////////////////////////////// + ! read dataset + !////////////////////////////////////////////////////////// -call h5fclose_f(file_id, hdferror) -call check("h5fclose_f", hdferror, nerrors) + CALL h5dread_f(dset_id,H5T_NATIVE_INTEGER,rbuf,dims,hdferror,file_space_id=fspace_id,mem_space_id=mspace_id,xfer_prp=dxpl_id) + CALL check("h5pcreate_f", hdferror, nerrors) -!////////////////////////////////////////////////////////// -! reopen file with read access -!////////////////////////////////////////////////////////// + !////////////////////////////////////////////////////////// + ! close HDF5 I/O + !////////////////////////////////////////////////////////// -call h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) + CALL h5pclose_f(fapl_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -call h5pset_fapl_mpio_f(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL, hdferror) -call check("h5pcreate_f", hdferror, nerrors) + CALL h5pclose_f(dxpl_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -call h5fopen_f(filename, H5F_ACC_RDWR_F, file_id, hdferror, access_prp = fapl_id) -call check("h5pcreate_f", hdferror, nerrors) + CALL h5sclose_f(fspace_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -call h5screate_simple_f(1, dims, fspace_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) + CALL h5sclose_f(mspace_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -call h5screate_simple_f(1, dims, mspace_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) + CALL h5dclose_f(dset_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -call h5dopen_f(file_id, "dset", dset_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) + CALL h5fclose_f(file_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -!////////////////////////////////////////////////////////// -! 
select hyperslab in memory -!////////////////////////////////////////////////////////// - -call h5sselect_hyperslab_f(mspace_id, H5S_SELECT_SET_F, start, counti, hdferror) -call check("h5pcreate_f", hdferror, nerrors) - -!////////////////////////////////////////////////////////// -! select hyperslab in the file -!////////////////////////////////////////////////////////// - -call h5sselect_hyperslab_f(fspace_id, H5S_SELECT_SET_F, start, counti, hdferror) -call check("h5pcreate_f", hdferror, nerrors) - -!////////////////////////////////////////////////////////// -! create a property list for collective dataset read -!////////////////////////////////////////////////////////// - -call h5pcreate_f(H5P_DATASET_XFER_F, dxpl_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) - -if (do_collective) then - call h5pset_dxpl_mpio_f(dxpl_id, H5FD_MPIO_COLLECTIVE_F, hdferror) - call check("h5pcreate_f", hdferror, nerrors) -endif - -!////////////////////////////////////////////////////////// -! read dataset -!////////////////////////////////////////////////////////// - -call h5dread_f(dset_id,H5T_NATIVE_INTEGER,rbuf,dims,hdferror,file_space_id=fspace_id,mem_space_id=mspace_id,xfer_prp=dxpl_id) -call check("h5pcreate_f", hdferror, nerrors) - -!////////////////////////////////////////////////////////// -! close HDF5 I/O -!////////////////////////////////////////////////////////// - -call h5pclose_f(fapl_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) - -call h5pclose_f(dxpl_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) - -call h5sclose_f(fspace_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) - -call h5sclose_f(mspace_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) - -call h5dclose_f(dset_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) - -call h5fclose_f(file_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) - -!////////////////////////////////////////////////////////// -! compare read and write data. each process compares a subset of the array -!////////////////////////////////////////////////////////// + !////////////////////////////////////////////////////////// + ! compare read and write data. each process compares a subset of the array + !////////////////////////////////////////////////////////// -do i = istart, iend-1 - if( wbuf(i) /= rbuf(i)) then - write(*,*) 'buffers differs at ', i, rbuf(i), wbuf(i) - nerrors = nerrors + 1 - endif -enddo + DO i = istart, iend-1 + IF( wbuf(i) /= rbuf(i)) THEN + WRITE(*,*) 'buffers differs at ', i, rbuf(i), wbuf(i) + nerrors = nerrors + 1 + ENDIF + ENDDO -deallocate(wbuf) -deallocate(rbuf) + DEALLOCATE(wbuf) + DEALLOCATE(rbuf) -end subroutine hyper +END SUBROUTINE hyper diff --git a/fortran/testpar/mdset.f90 b/fortran/testpar/mdset.f90 index 0281934..dcc7210 100644 --- a/fortran/testpar/mdset.f90 +++ b/fortran/testpar/mdset.f90 @@ -18,304 +18,301 @@ ! writes/reads dataset by hyperslabs !////////////////////////////////////////////////////////// -subroutine multiple_dset_write(lenght,do_collective,do_chunk,nerrors) -use hdf5 -implicit none -include 'mpif.h' - -integer, intent(in) :: lenght ! array lenght -logical, intent(in) :: do_collective ! use collective I/O -logical, intent(in) :: do_chunk ! use chunking -integer, intent(inout) :: nerrors ! number of errors -integer :: mpierror ! MPI hdferror flag -integer :: hdferror ! HDF hdferror flag -integer :: mpi_size ! number of processes in the group of communicator -integer :: mpi_rank ! 
rank of the calling process in the communicator -integer(hsize_t), dimension(1) :: dims ! dataset dimensions -integer(hsize_t), dimension(1) :: cdims ! chunk dimensions -integer, allocatable :: wbuf(:) ! write buffer -integer, allocatable :: rbuf(:) ! read buffer -integer(hsize_t), dimension(1) :: counti ! hyperslab selection -integer(hsize_t), dimension(1) :: start ! hyperslab selection -integer(hid_t) :: fapl_id ! file access identifier -integer(hid_t) :: dxpl_id ! dataset transfer property list -integer(hid_t) :: dcpl_id ! dataset creation property list -integer(hid_t) :: file_id ! file identifier -integer(hid_t) :: dset_id ! dataset identifier -integer(hid_t) :: fspace_id ! file space identifier -integer(hid_t) :: mspace_id ! memory space identifier -integer(hid_t) :: driver_id ! low-level file driver identifier -integer :: istart ! start position in array -integer :: iend ! end position in array -integer :: icount ! number of elements in array -character(len=80) :: filename ! filename -character(len=80) :: dsetname ! dataset name -integer :: n, i - -call mpi_comm_rank( MPI_COMM_WORLD, mpi_rank, mpierror ) -call mpi_comm_size( MPI_COMM_WORLD, mpi_size, mpierror ) +SUBROUTINE multiple_dset_write(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors) + USE hdf5 + IMPLICIT NONE + INCLUDE 'mpif.h' + + INTEGER, INTENT(in) :: length ! array length + LOGICAL, INTENT(in) :: do_collective ! use collective I/O + LOGICAL, INTENT(in) :: do_chunk ! use chunking + INTEGER, INTENT(in) :: mpi_size ! number of processes in the group of communicator + INTEGER, INTENT(in) :: mpi_rank ! rank of the calling process in the communicator + INTEGER, INTENT(inout) :: nerrors ! number of errors + INTEGER :: mpierror ! MPI hdferror flag + INTEGER :: hdferror ! HDF hdferror flag + INTEGER(hsize_t), DIMENSION(1) :: dims ! dataset dimensions + INTEGER(hsize_t), DIMENSION(1) :: cdims ! chunk dimensions + INTEGER, ALLOCATABLE :: wbuf(:) ! write buffer + INTEGER, ALLOCATABLE :: rbuf(:) ! read buffer + INTEGER(hsize_t), DIMENSION(1) :: counti ! hyperslab selection + INTEGER(hsize_t), DIMENSION(1) :: start ! hyperslab selection + INTEGER(hid_t) :: fapl_id ! file access identifier + INTEGER(hid_t) :: dxpl_id ! dataset transfer property list + INTEGER(hid_t) :: dcpl_id ! dataset creation property list + INTEGER(hid_t) :: file_id ! file identifier + INTEGER(hid_t) :: dset_id ! dataset identifier + INTEGER(hid_t) :: fspace_id ! file space identifier + INTEGER(hid_t) :: mspace_id ! memory space identifier + INTEGER(hid_t) :: driver_id ! low-level file driver identifier + INTEGER :: istart ! start position in array + INTEGER :: iend ! end position in array + INTEGER :: icount ! number of elements in array + CHARACTER(len=80) :: filename ! filename + CHARACTER(len=80) :: dsetname ! dataset name + INTEGER :: n, i + + !////////////////////////////////////////////////////////// + ! initialize the array data between the processes (3) + ! for the 12 size array we get + ! p0 = 1,2,3,4 + ! p1 = 5,6,7,8 + ! p2 = 9,10,11,12 + !////////////////////////////////////////////////////////// + + ALLOCATE(wbuf(0:length-1),stat=hdferror) + IF (hdferror /= 0) THEN + WRITE(*,*) 'allocate error' + RETURN + ENDIF + + ALLOCATE(rbuf(0:length-1),stat=hdferror) + IF (hdferror /= 0) THEN + WRITE(*,*) 'allocate error' + RETURN + ENDIF + + icount = length/mpi_size ! divide the array by the number of processes + istart = mpi_rank*icount ! start position + iend = istart + icount ! 
end position + + !////////////////////////////////////////////////////////// + ! HDF5 I/O + !////////////////////////////////////////////////////////// + + dims(1) = length + cdims(1) = length/mpi_size ! define chunks as the number of processes + + !////////////////////////////////////////////////////////// + ! setup file access property list with parallel I/O access + !////////////////////////////////////////////////////////// + + CALL h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) + + CALL h5pset_fapl_mpio_f(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL, hdferror) + CALL check("h5pset_fapl_mpio_f", hdferror, nerrors) + + CALL h5pget_driver_f(fapl_id, driver_id, hdferror) + CALL check("h5pget_driver_f", hdferror, nerrors) + + IF( driver_id /= H5FD_MPIO_F) THEN + WRITE(*,*) "Wrong driver information returned" + nerrors = nerrors + 1 + ENDIF + + !////////////////////////////////////////////////////////// + ! create the file collectively + !////////////////////////////////////////////////////////// + + CALL h5_fixname_f("parf2", filename, fapl_id, hdferror) + + CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, hdferror, access_prp = fapl_id) + CALL check("h5fcreate_f", hdferror, nerrors) + + CALL h5screate_simple_f(1, dims, fspace_id, hdferror) + CALL check("h5screate_simple_f", hdferror, nerrors) + + CALL h5screate_simple_f(1, dims, mspace_id, hdferror) + CALL check("h5screate_simple_f", hdferror, nerrors) + + !////////////////////////////////////////////////////////// + ! modify dataset creation properties to enable chunking + !////////////////////////////////////////////////////////// + + CALL h5pcreate_f(H5P_DATASET_CREATE_F, dcpl_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) + + IF (do_chunk) THEN + CALL h5pset_chunk_f(dcpl_id, 1, cdims, hdferror) + CALL check("h5pset_chunk_f", hdferror, nerrors) + ENDIF + + !////////////////////////////////////////////////////////// + ! create a property list for collective dataset write + !////////////////////////////////////////////////////////// -!////////////////////////////////////////////////////////// -! initialize the array data between the processes (3) -! for the 12 size array we get -! p0 = 1,2,3,4 -! p1 = 5,6,7,8 -! p2 = 9,10,11,12 -!////////////////////////////////////////////////////////// + CALL h5pcreate_f(H5P_DATASET_XFER_F, dxpl_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -allocate(wbuf(0:lenght-1),stat=hdferror) -if (hdferror /= 0) then - write(*,*) 'allocate error' - return -endif + IF (do_collective) THEN + CALL h5pset_dxpl_mpio_f(dxpl_id, H5FD_MPIO_COLLECTIVE_F, hdferror) + CALL check("h5pset_dxpl_mpio_f", hdferror, nerrors) + ENDIF -allocate(rbuf(0:lenght-1),stat=hdferror) -if (hdferror /= 0) then - write(*,*) 'allocate error' - return -endif + !////////////////////////////////////////////////////////// + ! define hyperslab + !////////////////////////////////////////////////////////// -icount = lenght/mpi_size ! divide the array by the number of processes -istart = mpi_rank*icount ! start position -iend = istart + icount ! end position + counti(1) = icount + start(1) = istart -!////////////////////////////////////////////////////////// -! HDF5 I/O -!////////////////////////////////////////////////////////// + !////////////////////////////////////////////////////////// + ! select hyperslab in memory + !////////////////////////////////////////////////////////// -dims(1) = lenght -cdims(1) = lenght/mpi_size ! 
define chunks as the number of processes + CALL h5sselect_hyperslab_f(mspace_id, H5S_SELECT_SET_F, start, counti, hdferror) + CALL check("h5sselect_hyperslab_f", hdferror, nerrors) -!////////////////////////////////////////////////////////// -! setup file access property list with parallel I/O access -!////////////////////////////////////////////////////////// + !////////////////////////////////////////////////////////// + ! select hyperslab in the file + !////////////////////////////////////////////////////////// -call h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) + CALL h5sselect_hyperslab_f(fspace_id, H5S_SELECT_SET_F, start, counti, hdferror) + CALL check("h5sselect_hyperslab_f", hdferror, nerrors) -call h5pset_fapl_mpio_f(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL, hdferror) -call check("h5pset_fapl_mpio_f", hdferror, nerrors) + !////////////////////////////////////////////////////////// + ! create and write the datasets + !////////////////////////////////////////////////////////// -call h5pget_driver_f(fapl_id, driver_id, hdferror) -call check("h5pget_driver_f", hdferror, nerrors) - -if( driver_id /= H5FD_MPIO_F) then - write(*,*) "Wrong driver information returned" - nerrors = nerrors + 1 -endif + DO n = 1, 300 -!////////////////////////////////////////////////////////// -! create the file collectively -!////////////////////////////////////////////////////////// - -call h5_fixname_f("parf2", filename, fapl_id, hdferror) + ! direct the output of the write statement to unit "dsetname" + WRITE(dsetname,*) 'dataset', n -call h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, hdferror, access_prp = fapl_id) -call check("h5fcreate_f", hdferror, nerrors) + ! create this dataset + CALL h5dcreate_f(file_id, dsetname, H5T_NATIVE_INTEGER, fspace_id, dset_id, hdferror, dcpl_id) + CALL check("h5dcreate_f", hdferror, nerrors) -call h5screate_simple_f(1, dims, fspace_id, hdferror) -call check("h5screate_simple_f", hdferror, nerrors) + DO i = istart, iend-1 + wbuf(i) = n + mpi_rank + ENDDO -call h5screate_simple_f(1, dims, mspace_id, hdferror) -call check("h5screate_simple_f", hdferror, nerrors) + ! write this dataset + CALL h5dwrite_f(dset_id,H5T_NATIVE_INTEGER,wbuf,dims,hdferror,file_space_id=fspace_id,mem_space_id=mspace_id,xfer_prp=dxpl_id) + CALL check("h5dwrite_f", hdferror, nerrors) -!////////////////////////////////////////////////////////// -! modify dataset creation properties to enable chunking -!////////////////////////////////////////////////////////// + ! close this dataset + CALL h5dclose_f(dset_id, hdferror) + CALL check("h5dclose_f", hdferror, nerrors) -call h5pcreate_f(H5P_DATASET_CREATE_F, dcpl_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) + ENDDO -if (do_chunk) then - call h5pset_chunk_f(dcpl_id, 1, cdims, hdferror) - call check("h5pset_chunk_f", hdferror, nerrors) -endif + !////////////////////////////////////////////////////////// + ! close HDF5 I/O + !////////////////////////////////////////////////////////// -!////////////////////////////////////////////////////////// -! 
create a property list for collective dataset write -!////////////////////////////////////////////////////////// + CALL h5pclose_f(fapl_id, hdferror) + CALL check("h5pclose_f", hdferror, nerrors) -call h5pcreate_f(H5P_DATASET_XFER_F, dxpl_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) - -if (do_collective) then - call h5pset_dxpl_mpio_f(dxpl_id, H5FD_MPIO_COLLECTIVE_F, hdferror) - call check("h5pset_dxpl_mpio_f", hdferror, nerrors) -endif + CALL h5pclose_f(dcpl_id, hdferror) + CALL check("h5pclose_f", hdferror, nerrors) -!////////////////////////////////////////////////////////// -! define hyperslab -!////////////////////////////////////////////////////////// + CALL h5pclose_f(dxpl_id, hdferror) + CALL check("h5pclose_f", hdferror, nerrors) -counti(1) = icount -start(1) = istart + CALL h5sclose_f(mspace_id, hdferror) + CALL check("h5sclose_f", hdferror, nerrors) -!////////////////////////////////////////////////////////// -! select hyperslab in memory -!////////////////////////////////////////////////////////// + CALL h5sclose_f(fspace_id, hdferror) + CALL check("h5sclose_f", hdferror, nerrors) -call h5sselect_hyperslab_f(mspace_id, H5S_SELECT_SET_F, start, counti, hdferror) -call check("h5sselect_hyperslab_f", hdferror, nerrors) + CALL h5fclose_f(file_id, hdferror) + CALL check("h5fclose_f", hdferror, nerrors) -!////////////////////////////////////////////////////////// -! select hyperslab in the file -!////////////////////////////////////////////////////////// + !////////////////////////////////////////////////////////// + ! reopen file with read access + !////////////////////////////////////////////////////////// -call h5sselect_hyperslab_f(fspace_id, H5S_SELECT_SET_F, start, counti, hdferror) -call check("h5sselect_hyperslab_f", hdferror, nerrors) + CALL h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -!////////////////////////////////////////////////////////// -! create and write the datasets -!////////////////////////////////////////////////////////// + CALL h5pset_fapl_mpio_f(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -do n = 1, 300 + CALL h5fopen_f(filename, H5F_ACC_RDWR_F, file_id, hdferror, access_prp = fapl_id) + CALL check("h5pcreate_f", hdferror, nerrors) -! direct the output of the write statement to unit "dsetname" - write(dsetname,*) 'dataset', n + CALL h5screate_simple_f(1, dims, fspace_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -! create this dataset - call h5dcreate_f(file_id, dsetname, H5T_NATIVE_INTEGER, fspace_id, dset_id, hdferror, dcpl_id) - call check("h5dcreate_f", hdferror, nerrors) + CALL h5screate_simple_f(1, dims, mspace_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) - do i = istart, iend-1 - wbuf(i) = n + mpi_rank - enddo + !////////////////////////////////////////////////////////// + ! select hyperslab in memory + !////////////////////////////////////////////////////////// -! write this dataset - call h5dwrite_f(dset_id,H5T_NATIVE_INTEGER,wbuf,dims,hdferror,file_space_id=fspace_id,mem_space_id=mspace_id,xfer_prp=dxpl_id) - call check("h5dwrite_f", hdferror, nerrors) + CALL h5sselect_hyperslab_f(mspace_id, H5S_SELECT_SET_F, start, counti, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -! close this dataset - call h5dclose_f(dset_id, hdferror) - call check("h5dclose_f", hdferror, nerrors) + !////////////////////////////////////////////////////////// + ! 
select hyperslab in the file + !////////////////////////////////////////////////////////// -enddo + CALL h5sselect_hyperslab_f(fspace_id, H5S_SELECT_SET_F, start, counti, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -!////////////////////////////////////////////////////////// -! close HDF5 I/O -!////////////////////////////////////////////////////////// + !////////////////////////////////////////////////////////// + ! create a property list for collective dataset read + !////////////////////////////////////////////////////////// -call h5pclose_f(fapl_id, hdferror) -call check("h5pclose_f", hdferror, nerrors) + CALL h5pcreate_f(H5P_DATASET_XFER_F, dxpl_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -call h5pclose_f(dcpl_id, hdferror) -call check("h5pclose_f", hdferror, nerrors) + IF (do_collective) THEN + CALL h5pset_dxpl_mpio_f(dxpl_id, H5FD_MPIO_COLLECTIVE_F, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) + ENDIF -call h5pclose_f(dxpl_id, hdferror) -call check("h5pclose_f", hdferror, nerrors) + !////////////////////////////////////////////////////////// + ! read dataset + !////////////////////////////////////////////////////////// -call h5sclose_f(mspace_id, hdferror) -call check("h5sclose_f", hdferror, nerrors) + DO n = 1, 300 -call h5sclose_f(fspace_id, hdferror) -call check("h5sclose_f", hdferror, nerrors) + ! direct the output of the write statement to unit "dsetname" + WRITE(dsetname,*) 'dataset', n -call h5fclose_f(file_id, hdferror) -call check("h5fclose_f", hdferror, nerrors) + ! create this dataset + CALL h5dopen_f(file_id, dsetname, dset_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -!////////////////////////////////////////////////////////// -! reopen file with read access -!////////////////////////////////////////////////////////// + ! read this dataset + CALL h5dread_f(dset_id,H5T_NATIVE_INTEGER,rbuf,dims,hdferror,file_space_id=fspace_id,mem_space_id=mspace_id,xfer_prp=dxpl_id) + CALL check("h5pcreate_f", hdferror, nerrors) -call h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) + ! close this dataset + CALL h5dclose_f(dset_id, hdferror) + CALL check("h5dclose_f", hdferror, nerrors) -call h5pset_fapl_mpio_f(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL, hdferror) -call check("h5pcreate_f", hdferror, nerrors) + DO i = istart, iend-1 + wbuf(i) = n + mpi_rank + ENDDO -call h5fopen_f(filename, H5F_ACC_RDWR_F, file_id, hdferror, access_prp = fapl_id) -call check("h5pcreate_f", hdferror, nerrors) + ! compare read and write data. each process compares a subset of the array + DO i = istart, iend-1 + IF( wbuf(i) /= rbuf(i)) THEN + WRITE(*,*) 'buffers differs at ', i, rbuf(i), wbuf(i) + nerrors = nerrors + 1 + ENDIF + ENDDO -call h5screate_simple_f(1, dims, fspace_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) + ENDDO -call h5screate_simple_f(1, dims, mspace_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) -!////////////////////////////////////////////////////////// -! select hyperslab in memory -!////////////////////////////////////////////////////////// - -call h5sselect_hyperslab_f(mspace_id, H5S_SELECT_SET_F, start, counti, hdferror) -call check("h5pcreate_f", hdferror, nerrors) - -!////////////////////////////////////////////////////////// -! 
select hyperslab in the file -!////////////////////////////////////////////////////////// - -call h5sselect_hyperslab_f(fspace_id, H5S_SELECT_SET_F, start, counti, hdferror) -call check("h5pcreate_f", hdferror, nerrors) - -!////////////////////////////////////////////////////////// -! create a property list for collective dataset read -!////////////////////////////////////////////////////////// - -call h5pcreate_f(H5P_DATASET_XFER_F, dxpl_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) - -if (do_collective) then - call h5pset_dxpl_mpio_f(dxpl_id, H5FD_MPIO_COLLECTIVE_F, hdferror) - call check("h5pcreate_f", hdferror, nerrors) -endif - -!////////////////////////////////////////////////////////// -! read dataset -!////////////////////////////////////////////////////////// - -do n = 1, 300 - -! direct the output of the write statement to unit "dsetname" - write(dsetname,*) 'dataset', n - -! create this dataset - call h5dopen_f(file_id, dsetname, dset_id, hdferror) - call check("h5pcreate_f", hdferror, nerrors) - -! read this dataset - call h5dread_f(dset_id,H5T_NATIVE_INTEGER,rbuf,dims,hdferror,file_space_id=fspace_id,mem_space_id=mspace_id,xfer_prp=dxpl_id) - call check("h5pcreate_f", hdferror, nerrors) - -! close this dataset - call h5dclose_f(dset_id, hdferror) - call check("h5dclose_f", hdferror, nerrors) - - do i = istart, iend-1 - wbuf(i) = n + mpi_rank - enddo - -! compare read and write data. each process compares a subset of the array - do i = istart, iend-1 - if( wbuf(i) /= rbuf(i)) then - write(*,*) 'buffers differs at ', i, rbuf(i), wbuf(i) - nerrors = nerrors + 1 - endif - enddo - -enddo - - -!////////////////////////////////////////////////////////// -! close HDF5 I/O -!////////////////////////////////////////////////////////// + !////////////////////////////////////////////////////////// + ! close HDF5 I/O + !////////////////////////////////////////////////////////// -call h5pclose_f(fapl_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) + CALL h5pclose_f(fapl_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -call h5pclose_f(dxpl_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) + CALL h5pclose_f(dxpl_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -call h5sclose_f(fspace_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) + CALL h5sclose_f(fspace_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -call h5sclose_f(mspace_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) + CALL h5sclose_f(mspace_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -call h5fclose_f(file_id, hdferror) -call check("h5pcreate_f", hdferror, nerrors) + CALL h5fclose_f(file_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) -deallocate(wbuf) -deallocate(rbuf) + DEALLOCATE(wbuf) + DEALLOCATE(rbuf) -end subroutine multiple_dset_write +END SUBROUTINE multiple_dset_write diff --git a/fortran/testpar/ptest.f90 b/fortran/testpar/ptest.f90 index 80f4091..6f6fb2e 100644 --- a/fortran/testpar/ptest.f90 +++ b/fortran/testpar/ptest.f90 @@ -17,103 +17,119 @@ ! main program for parallel HDF5 Fortran tests !////////////////////////////////////////////////////////// -program parallel_test -use hdf5 -implicit none -include 'mpif.h' - -integer :: mpierror ! MPI hdferror flag -integer :: hdferror ! HDF hdferror flag -logical :: do_collective ! use collective MPI I/O -logical :: do_chunk ! use chunking -integer :: nerrors = 0 ! number of errors -integer :: mpi_rank ! 
rank of the calling process in the communicator -integer :: lenght = 12000 ! lenght of array +PROGRAM parallel_test + USE hdf5 + IMPLICIT NONE + INCLUDE 'mpif.h' + + INTEGER :: mpierror ! MPI hdferror flag + INTEGER :: hdferror ! HDF hdferror flag + LOGICAL :: do_collective ! use collective MPI I/O + LOGICAL :: do_chunk ! use chunking + INTEGER :: nerrors = 0 ! number of errors + INTEGER :: mpi_size ! number of processes in the group of communicator + INTEGER :: mpi_rank ! rank of the calling process in the communicator + INTEGER :: length = 12000 ! length of array + + !////////////////////////////////////////////////////////// + ! initialize MPI + !////////////////////////////////////////////////////////// + + CALL mpi_init(mpierror) + IF (mpierror .NE. MPI_SUCCESS) THEN + WRITE(*,*) "MPI_INIT *FAILED*" + ENDIF + CALL mpi_comm_rank( MPI_COMM_WORLD, mpi_rank, mpierror ) + IF (mpierror .NE. MPI_SUCCESS) THEN + WRITE(*,*) "MPI_COMM_RANK *FAILED* Process = ", mpi_rank + ENDIF + CALL mpi_comm_size( MPI_COMM_WORLD, mpi_size, mpierror ) + IF (mpierror .NE. MPI_SUCCESS) THEN + WRITE(*,*) "MPI_COMM_SIZE *FAILED* Process = ", mpi_rank + ENDIF + !////////////////////////////////////////////////////////// + ! initialize the HDF5 fortran interface + !////////////////////////////////////////////////////////// + + CALL h5open_f(hdferror) + + !////////////////////////////////////////////////////////// + ! test write/read dataset by hyperslabs with independent MPI I/O + !////////////////////////////////////////////////////////// + + IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading dataset by hyperslabs (contiguous layout, independent MPI I/O)' + + do_collective = .FALSE. + do_chunk = .FALSE. + CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors) + + !////////////////////////////////////////////////////////// + ! test write/read dataset by hyperslabs with collective MPI I/O + !////////////////////////////////////////////////////////// + + IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading dataset by hyperslabs (contiguous layout, collective MPI I/O)' + + do_collective = .TRUE. + do_chunk = .FALSE. + CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors) + + !////////////////////////////////////////////////////////// + ! test write/read dataset by hyperslabs with independent MPI I/O + !////////////////////////////////////////////////////////// + + IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading dataset by hyperslabs (chunk layout, independent MPI I/O)' + + do_collective = .FALSE. + do_chunk = .TRUE. + CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors) + + !////////////////////////////////////////////////////////// + ! test write/read dataset by hyperslabs with collective MPI I/O + !////////////////////////////////////////////////////////// + + IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading dataset by hyperslabs (chunk layout, collective MPI I/O)' + + do_collective = .TRUE. + do_chunk = .TRUE. + CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors) + + !////////////////////////////////////////////////////////// + ! test write/read several datasets (independent MPI I/O) + !////////////////////////////////////////////////////////// + + IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading several datasets (contiguous layout, independent MPI I/O)' + + do_collective = .FALSE. + do_chunk = .FALSE. 
+ CALL multiple_dset_write(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors) + + + !////////////////////////////////////////////////////////// + ! close HDF5 interface + !////////////////////////////////////////////////////////// + + CALL h5close_f(hdferror) + + !////////////////////////////////////////////////////////// + ! close MPI + !////////////////////////////////////////////////////////// + + IF (nerrors == 0) THEN + CALL mpi_finalize(mpierror) + IF (mpierror .NE. MPI_SUCCESS) THEN + WRITE(*,*) "MPI_FINALIZE *FAILED* Process = ", mpi_rank + ENDIF + ELSE + WRITE(*,*) 'Errors detected in process ', mpi_rank + CALL mpi_abort(MPI_COMM_WORLD, 1, mpierror) + IF (mpierror .NE. MPI_SUCCESS) THEN + WRITE(*,*) "MPI_ABORT *FAILED* Process = ", mpi_rank + ENDIF + ENDIF + + !////////////////////////////////////////////////////////// + ! end main program + !////////////////////////////////////////////////////////// -!////////////////////////////////////////////////////////// -! initialize MPI -!////////////////////////////////////////////////////////// - -call mpi_init(mpierror) -call mpi_comm_rank( MPI_COMM_WORLD, mpi_rank, mpierror ) - -!////////////////////////////////////////////////////////// -! initialize the HDF5 fortran interface -!////////////////////////////////////////////////////////// - -call h5open_f(hdferror) - -!////////////////////////////////////////////////////////// -! test write/read dataset by hyperslabs with independent MPI I/O -!////////////////////////////////////////////////////////// - -if (mpi_rank == 0) write(*,*) 'Writing/reading dataset by hyperslabs (contiguous layout, independent MPI I/O)' - -do_collective = .false. -do_chunk = .false. -call hyper(lenght,do_collective,do_chunk,nerrors) - -!////////////////////////////////////////////////////////// -! test write/read dataset by hyperslabs with collective MPI I/O -!////////////////////////////////////////////////////////// - -if (mpi_rank == 0) write(*,*) 'Writing/reading dataset by hyperslabs (contiguous layout, collective MPI I/O)' - -do_collective = .true. -do_chunk = .false. -call hyper(lenght,do_collective,do_chunk,nerrors) - -!////////////////////////////////////////////////////////// -! test write/read dataset by hyperslabs with independent MPI I/O -!////////////////////////////////////////////////////////// - -if (mpi_rank == 0) write(*,*) 'Writing/reading dataset by hyperslabs (chunk layout, independent MPI I/O)' - -do_collective = .false. -do_chunk = .true. -call hyper(lenght,do_collective,do_chunk,nerrors) - -!////////////////////////////////////////////////////////// -! test write/read dataset by hyperslabs with collective MPI I/O -!////////////////////////////////////////////////////////// - -if (mpi_rank == 0) write(*,*) 'Writing/reading dataset by hyperslabs (chunk layout, collective MPI I/O)' - -do_collective = .true. -do_chunk = .true. -call hyper(lenght,do_collective,do_chunk,nerrors) - -!////////////////////////////////////////////////////////// -! test write/read several datasets (independent MPI I/O) -!////////////////////////////////////////////////////////// - -if (mpi_rank == 0) write(*,*) 'Writing/reading several datasets (contiguous layout, independent MPI I/O)' - -do_collective = .false. -do_chunk = .false. -call multiple_dset_write(lenght,do_collective,do_chunk,nerrors) - - -!////////////////////////////////////////////////////////// -! 
close HDF5 interface -!////////////////////////////////////////////////////////// - -call h5close_f(hdferror) - -!////////////////////////////////////////////////////////// -! close MPI -!////////////////////////////////////////////////////////// - -if (nerrors == 0) then - call mpi_finalize(mpierror) -else - write(*,*) 'Errors detected in process ', mpi_rank - call mpi_abort(MPI_COMM_WORLD, 1, mpierror) -endif - -!////////////////////////////////////////////////////////// -! end main program -!////////////////////////////////////////////////////////// - -end program parallel_test +END PROGRAM parallel_test |
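
As a usage note: with the default length of 12000 and, say, 3 MPI processes (launched with something along the lines of mpirun -np 3 and the test executable; the exact invocation depends on the build and the MPI installation), each rank writes and verifies a contiguous 4000-element slice of every dataset. A rank that finishes with nerrors == 0 ends through mpi_finalize; a rank that recorded errors reports its rank and calls mpi_abort, terminating the whole job.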