author    Scot Breitenfeld <brtnfld@hdfgroup.org>  2024-01-03 16:55:17 (GMT)
committer GitHub <noreply@github.com>              2024-01-03 16:55:17 (GMT)
commit    8f1a93f1a208055c9b872a06be28a20e72f8f488 (patch)
tree      4791557ccb219f56111fa52a96462d319d1e4473 /HDF5Examples
parent    812be1a3bdbc2fca620f85b3d0dc2f7216a9d463 (diff)
New Fortran Examples added (#3916)

* Added subfiling example
* Added filtered writes with no selection example
Diffstat (limited to 'HDF5Examples')
-rw-r--r--  HDF5Examples/CMakeLists.txt                                    |   2
-rw-r--r--  HDF5Examples/FORTRAN/H5G/h5_version.h.in                       |  23
-rw-r--r--  HDF5Examples/FORTRAN/H5PAR/CMakeLists.txt                      |   1
-rw-r--r--  HDF5Examples/FORTRAN/H5PAR/Fortran_sourcefiles.cmake           |  11
-rw-r--r--  HDF5Examples/FORTRAN/H5PAR/ph5_f90_dataset.F90                 |   3
-rw-r--r--  HDF5Examples/FORTRAN/H5PAR/ph5_f90_file_create.F90             |   2
-rw-r--r--  HDF5Examples/FORTRAN/H5PAR/ph5_f90_filtered_writes_no_sel.F90  | 354
-rw-r--r--  HDF5Examples/FORTRAN/H5PAR/ph5_f90_hyperslab_by_chunk.F90      |   3
-rw-r--r--  HDF5Examples/FORTRAN/H5PAR/ph5_f90_hyperslab_by_col.F90        |   3
-rw-r--r--  HDF5Examples/FORTRAN/H5PAR/ph5_f90_hyperslab_by_pattern.F90    |   2
-rw-r--r--  HDF5Examples/FORTRAN/H5PAR/ph5_f90_hyperslab_by_row.F90        |   2
-rw-r--r--  HDF5Examples/FORTRAN/H5PAR/ph5_f90_subfiling.F90               | 521
12 files changed, 894 insertions(+), 33 deletions(-)
diff --git a/HDF5Examples/CMakeLists.txt b/HDF5Examples/CMakeLists.txt
index 6f8b53c..63adad6 100644
--- a/HDF5Examples/CMakeLists.txt
+++ b/HDF5Examples/CMakeLists.txt
@@ -159,7 +159,7 @@ if (${H5_LIBVER_DIR} GREATER 16)
endif ()
configure_file (${H5EX_F90_SRC_DIR}/H5D/h5_version.h.in ${PROJECT_BINARY_DIR}/FORTRAN/H5D/h5_version.h @ONLY)
- configure_file (${H5EX_F90_SRC_DIR}/H5G/h5_version.h.in ${PROJECT_BINARY_DIR}/FORTRAN/H5G/h5_version.h @ONLY)
+ configure_file (${H5EX_F90_SRC_DIR}/H5D/h5_version.h.in ${PROJECT_BINARY_DIR}/FORTRAN/H5G/h5_version.h @ONLY)
else ()
set (HDF_BUILD_FORTRAN OFF CACHE BOOL "Build examples FORTRAN support" FORCE)
endif ()
diff --git a/HDF5Examples/FORTRAN/H5G/h5_version.h.in b/HDF5Examples/FORTRAN/H5G/h5_version.h.in
deleted file mode 100644
index 6827675..0000000
--- a/HDF5Examples/FORTRAN/H5G/h5_version.h.in
+++ /dev/null
@@ -1,23 +0,0 @@
-! Version numbers
-!
-! For major interface/format changes
-!
-#define H5_VERS_MAJOR @H5_VERS_MAJOR@
-!
-! For minor interface/format changes
-!
-#define H5_VERS_MINOR @H5_VERS_MINOR@
-!
-! For tweaks, bug-fixes, or development
-!
-#define H5_VERS_RELEASE @H5_VERS_RELEASE@
-
-! macros for comparing versions
-
-#define H5_VERSION_GE(Maj, Min, Rel) \
- (((H5_VERS_MAJOR == Maj) && (H5_VERS_MINOR == Min) && (H5_VERS_RELEASE >= Rel)) || \
- ((H5_VERS_MAJOR == Maj) && (H5_VERS_MINOR > Min)) || (H5_VERS_MAJOR > Maj))
-
-#define H5_VERSION_LE(Maj, Min, Rel) \
- (((H5_VERS_MAJOR == Maj) && (H5_VERS_MINOR == Min) && (H5_VERS_RELEASE <= Rel)) || \
- ((H5_VERS_MAJOR == Maj) && (H5_VERS_MINOR < Min)) || (H5_VERS_MAJOR < Maj))
diff --git a/HDF5Examples/FORTRAN/H5PAR/CMakeLists.txt b/HDF5Examples/FORTRAN/H5PAR/CMakeLists.txt
index d3124a1..866f3ef 100644
--- a/HDF5Examples/FORTRAN/H5PAR/CMakeLists.txt
+++ b/HDF5Examples/FORTRAN/H5PAR/CMakeLists.txt
@@ -12,6 +12,7 @@ project (HDF5Examples_FORTRAN_H5PAR Fortran)
INCLUDE_DIRECTORIES (
${CMAKE_Fortran_MODULE_DIRECTORY}${HDF_MOD_EXT}
${PROJECT_BINARY_DIR}
+ ${HDF5_F90_BINARY_DIR}
${CMAKE_LIBRARY_OUTPUT_DIRECTORY}
)
diff --git a/HDF5Examples/FORTRAN/H5PAR/Fortran_sourcefiles.cmake b/HDF5Examples/FORTRAN/H5PAR/Fortran_sourcefiles.cmake
index 39c8940..af2bb57 100644
--- a/HDF5Examples/FORTRAN/H5PAR/Fortran_sourcefiles.cmake
+++ b/HDF5Examples/FORTRAN/H5PAR/Fortran_sourcefiles.cmake
@@ -9,3 +9,14 @@ set (examples
ph5_f90_hyperslab_by_pattern
ph5_f90_hyperslab_by_chunk
)
+
+if (HDF5_ENABLE_SUBFILING_VFD)
+ set (examples ${examples}
+ ph5_f90_subfiling
+ )
+endif()
+if (HDF5_VERSION_STRING VERSION_GREATER_EQUAL "1.14.4")
+ set (examples ${examples}
+ ph5_f90_filtered_writes_no_sel
+ )
+endif()
diff --git a/HDF5Examples/FORTRAN/H5PAR/ph5_f90_dataset.F90 b/HDF5Examples/FORTRAN/H5PAR/ph5_f90_dataset.F90
index 9819ab3..f7e4185 100644
--- a/HDF5Examples/FORTRAN/H5PAR/ph5_f90_dataset.F90
+++ b/HDF5Examples/FORTRAN/H5PAR/ph5_f90_dataset.F90
@@ -1,10 +1,9 @@
PROGRAM DATASET
USE HDF5 ! This module contains all necessary modules
+ USE MPI
IMPLICIT NONE
-
- INCLUDE 'mpif.h'
CHARACTER(LEN=10), PARAMETER :: filename = "sds.h5" ! File name
CHARACTER(LEN=8), PARAMETER :: dsetname = "IntArray" ! Dataset name
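Every program touched below follows the same modernization: the legacy INCLUDE 'mpif.h' is replaced by USE MPI ahead of IMPLICIT NONE. Purely for illustration (not part of the patch), here is a minimal sketch of the resulting skeleton; the program and variable names are hypothetical, and only calls that appear elsewhere in this diff are used.

PROGRAM SKELETON              ! hypothetical name, for illustration only
  USE HDF5                    ! HDF5 Fortran interfaces
  USE MPI                     ! MPI module replaces INCLUDE 'mpif.h'
  IMPLICIT NONE

  INTEGER(HID_T) :: fapl_id, file_id
  INTEGER        :: mpierror, hdferror

  CALL MPI_Init(mpierror)

  CALL h5open_f(hdferror)                                  ! initialize HDF5 Fortran interfaces

  CALL H5Pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferror)   ! FAPL with MPI-IO access
  CALL H5Pset_fapl_mpio_f(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL, hdferror)

  CALL H5Fcreate_f("sds.h5", H5F_ACC_TRUNC_F, file_id, hdferror, access_prp=fapl_id)

  ! ... dataset creation and collective I/O as in the examples in this diff ...

  CALL H5Fclose_f(file_id, hdferror)
  CALL H5Pclose_f(fapl_id, hdferror)
  CALL h5close_f(hdferror)                                 ! close HDF5 Fortran interfaces
  CALL MPI_Finalize(mpierror)
END PROGRAM SKELETON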
diff --git a/HDF5Examples/FORTRAN/H5PAR/ph5_f90_file_create.F90 b/HDF5Examples/FORTRAN/H5PAR/ph5_f90_file_create.F90
index 7944b5a..b5aa090 100644
--- a/HDF5Examples/FORTRAN/H5PAR/ph5_f90_file_create.F90
+++ b/HDF5Examples/FORTRAN/H5PAR/ph5_f90_file_create.F90
@@ -5,10 +5,10 @@
PROGRAM FILE_CREATE
USE HDF5 ! This module contains all necessary modules
+ USE MPI
IMPLICIT NONE
- INCLUDE 'mpif.h'
CHARACTER(LEN=10), PARAMETER :: filename = "sds.h5" ! File name
INTEGER(HID_T) :: file_id ! File identifier
diff --git a/HDF5Examples/FORTRAN/H5PAR/ph5_f90_filtered_writes_no_sel.F90 b/HDF5Examples/FORTRAN/H5PAR/ph5_f90_filtered_writes_no_sel.F90
new file mode 100644
index 0000000..ffec2fb
--- /dev/null
+++ b/HDF5Examples/FORTRAN/H5PAR/ph5_f90_filtered_writes_no_sel.F90
@@ -0,0 +1,354 @@
+!
+! Example of using the parallel HDF5 library to collectively write to
+! datasets with filters applied to them when one or more MPI ranks do not
+! have data to contribute to the dataset.
+!
+! If the HDF5_NOCLEANUP environment variable is set, the file that
+! this example creates will not be removed as the example finishes.
+!
+! A parallel file prefix is needed because, in general, the current
+! working directory in which compilation is done is not suitable for
+! parallel I/O and there is no standard pathname for parallel file
+! systems. In some cases, the parallel file name may even need a
+! parallel file type prefix such as: "pfs:/GF/...". Therefore, this
+! example parses the HDF5_PARAPREFIX environment variable for a prefix,
+! if one is needed.
+
+MODULE filter
+ USE HDF5
+ USE MPI
+
+ IMPLICIT NONE
+
+ CHARACTER(LEN=29), PARAMETER :: EXAMPLE_FILE = "ph5_filtered_writes_no_sel.h5"
+ INTEGER , PARAMETER :: EXAMPLE_DSET_DIMS = 2
+ CHARACTER(LEN=4) , PARAMETER :: EXAMPLE_DSET_NAME = "DSET"
+ INTEGER , PARAMETER :: EXAMPLE_DSET_CHUNK_DIM_SIZE = 10
+ INTEGER , PARAMETER :: PATH_MAX = 512
+
+ ! Global variables
+ INTEGER :: mpi_rank, mpi_size
+
+CONTAINS
+ !
+ ! Routine to set an HDF5 filter on the given DCPL
+ !
+ SUBROUTINE set_filter(dcpl_id)
+
+ IMPLICIT NONE
+ INTEGER(HID_T) :: dcpl_id
+ LOGICAL :: filter_avail
+ INTEGER :: status
+
+ !
+ ! Check if 'deflate' filter is available
+ !
+ CALL H5Zfilter_avail_f(H5Z_FILTER_DEFLATE_F, filter_avail, status)
+ IF(status .LT. 0)THEN
+ RETURN
+ ELSE IF(filter_avail)THEN
+ !
+ ! Set 'deflate' filter with reasonable
+ ! compression level on DCPL
+
+ CALL H5Pset_deflate_f(dcpl_id, 6, status)
+ ELSE
+ !
+ ! Set Fletcher32 checksum filter on DCPL
+ ! since it is always available in HDF5
+ CALL H5Pset_fletcher32_f(dcpl_id, status)
+ ENDIF
+ END SUBROUTINE set_filter
+ !
+ ! Routine to fill a data buffer with data. Assumes
+ ! dimension rank is 2 and data is stored contiguously.
+
+
+ SUBROUTINE fill_databuf(start, count, stride, wdata)
+
+ IMPLICIT NONE
+ INTEGER(HSIZE_T), DIMENSION(*) :: start, count, stride
+ INTEGER, DIMENSION(*) :: wdata
+ INTEGER(HSIZE_T) :: i, j, icnt
+
+ ! Use MPI rank value for data
+ icnt = 1
+ DO i = 1, COUNT(1)
+ DO j = 1, COUNT(2)
+ wdata(icnt) = mpi_rank
+ icnt = icnt + 1
+ ENDDO
+ ENDDO
+
+ END SUBROUTINE fill_databuf
+ !
+ ! Cleanup created files
+ !
+ SUBROUTINE cleanup(filename)
+
+ IMPLICIT NONE
+ CHARACTER(*) :: filename
+
+ LOGICAL :: do_cleanup
+ INTEGER :: status
+
+ CALL get_environment_variable("HDF5_NOCLEANUP", STATUS=status)
+ IF(status.NE.0)THEN
+ CALL MPI_File_delete(filename, MPI_INFO_NULL, status)
+ ENDIF
+
+ END SUBROUTINE cleanup
+ !
+ ! Routine to write to a dataset in a fashion
+ ! where no chunks in the dataset are written
+ ! to by more than 1 MPI rank. This will
+ ! generally give the best performance as the
+ ! MPI ranks will need the least amount of
+ ! inter-process communication.
+
+ SUBROUTINE write_dataset_some_no_sel(file_id, dxpl_id)
+
+ IMPLICIT NONE
+ INTEGER(HID_T) :: file_id, dxpl_id
+
+ INTEGER, DIMENSION(1:EXAMPLE_DSET_CHUNK_DIM_SIZE, 4*EXAMPLE_DSET_CHUNK_DIM_SIZE), TARGET :: wdata
+ INTEGER(hsize_t), DIMENSION(1:EXAMPLE_DSET_DIMS) :: dataset_dims
+ INTEGER(hsize_t), DIMENSION(1:EXAMPLE_DSET_DIMS) :: chunk_dims
+ INTEGER(hsize_t), DIMENSION(1:EXAMPLE_DSET_DIMS) :: start
+ INTEGER(hsize_t), DIMENSION(1:EXAMPLE_DSET_DIMS) :: stride
+ INTEGER(hsize_t), DIMENSION(1:EXAMPLE_DSET_DIMS) :: count
+ LOGICAL :: no_selection = .FALSE.
+ INTEGER(hid_t) :: dset_id
+ INTEGER(hid_t) :: dcpl_id
+ INTEGER(hid_t) :: file_dataspace
+ INTEGER(hid_t) :: sel_type
+ TYPE(C_PTR) :: f_ptr
+ INTEGER :: status
+
+ !
+ ! ------------------------------------
+ ! Setup Dataset Creation Property List
+ ! ------------------------------------
+
+ CALL H5Pcreate_f(H5P_DATASET_CREATE_F, dcpl_id, status)
+
+ !
+ ! REQUIRED: Dataset chunking must be enabled to
+ ! apply a data filter to the dataset.
+ ! Chunks in the dataset are of size
+ ! EXAMPLE_DSET_CHUNK_DIM_SIZE x EXAMPLE_DSET_CHUNK_DIM_SIZE.
+
+ chunk_dims(1) = EXAMPLE_DSET_CHUNK_DIM_SIZE
+ chunk_dims(2) = EXAMPLE_DSET_CHUNK_DIM_SIZE
+ CALL H5Pset_chunk_f(dcpl_id, EXAMPLE_DSET_DIMS, chunk_dims, status)
+
+ ! Set filter to be applied to created datasets
+ CALL set_filter(dcpl_id)
+
+ !
+ ! ------------------------------------
+ ! Define the dimensions of the dataset
+ ! and create it
+ ! ------------------------------------
+
+ ! Create a dataset composed of 4 chunks
+ ! per MPI rank. The first dataset dimension
+ ! scales according to the number of MPI ranks.
+ ! The second dataset dimension stays fixed
+ ! according to the chunk size.
+
+ dataset_dims(1) = EXAMPLE_DSET_CHUNK_DIM_SIZE * mpi_size
+ dataset_dims(2) = 4 * EXAMPLE_DSET_CHUNK_DIM_SIZE
+
+ CALL H5Screate_simple_f(EXAMPLE_DSET_DIMS, dataset_dims, file_dataspace, status)
+
+ ! Create the dataset
+ CALL H5Dcreate_f(file_id, EXAMPLE_DSET_NAME, H5T_NATIVE_INTEGER, file_dataspace, dset_id, status, dcpl_id=dcpl_id)
+
+ !
+ ! ------------------------------------
+ ! Setup selection in the dataset for
+ ! each MPI rank
+ ! ------------------------------------
+
+ !
+ ! Odd-numbered MPI ranks do not
+ ! contribute any data to the dataset.
+
+ IF(MOD(mpi_rank, 2) .NE. 0) no_selection = .TRUE.
+
+ IF(no_selection)THEN
+ !
+ ! MPI ranks not contributing data to
+ ! the dataset should call H5Sselect_none
+ ! on the file dataspace that will be
+ ! passed to H5Dwrite.
+
+ CALL H5Sselect_none_f(file_dataspace, status)
+ sel_type = H5S_BLOCK_F
+ ELSE
+ !
+ ! Even MPI ranks contribute data to
+ ! the dataset. Each MPI rank's selection
+ ! covers a single chunk in the first dataset
+ ! dimension. Each MPI rank's selection
+ ! covers 4 chunks in the second dataset
+ ! dimension. This leads to each contributing
+ ! MPI rank writing to 4 chunks of the dataset.
+
+ start(1) = mpi_rank * EXAMPLE_DSET_CHUNK_DIM_SIZE
+ start(2) = 0
+ stride(1) = 1
+ stride(2) = 1
+ count(1) = EXAMPLE_DSET_CHUNK_DIM_SIZE
+ count(2) = 4 * EXAMPLE_DSET_CHUNK_DIM_SIZE
+
+ CALL H5Sselect_hyperslab_f(file_dataspace, H5S_SELECT_SET_F, start, count, status, stride=stride)
+
+ sel_type = H5S_ALL_F
+ !
+ ! --------------------------------------
+ ! Fill data buffer with MPI rank's rank
+ ! value to make it easy to see which
+ ! part of the dataset each rank wrote to
+ ! --------------------------------------
+
+ CALL fill_databuf(start, count, stride, wdata)
+ ENDIF
+
+ !
+ ! ---------------------------------
+ ! Write to the dataset collectively
+ ! ---------------------------------
+ f_ptr = C_LOC(wdata)
+ CALL H5Dwrite_f(dset_id, H5T_NATIVE_INTEGER, f_ptr, status, &
+ mem_space_id=sel_type, file_space_id=file_dataspace, xfer_prp=dxpl_id)
+
+ !
+ ! --------------
+ ! Close HDF5 IDs
+ ! --------------
+
+ CALL H5Sclose_f(file_dataspace,status)
+ CALL H5Pclose_f(dcpl_id,status)
+ CALL H5Dclose_f(dset_id,status)
+
+ END SUBROUTINE write_dataset_some_no_sel
+ END MODULE filter
+
+ PROGRAM main
+
+ USE filter
+ IMPLICIT NONE
+
+ INTEGER :: comm = MPI_COMM_WORLD
+ INTEGER :: info = MPI_INFO_NULL
+ INTEGER(hid_t) :: file_id
+ INTEGER(hid_t) :: fapl_id
+ INTEGER(hid_t) :: dxpl_id
+ CHARACTER(LEN=PATH_MAX) :: par_prefix
+ CHARACTER(LEN=PATH_MAX) :: filename
+ INTEGER :: status
+
+ CALL MPI_Init(status)
+ CALL MPI_Comm_size(comm, mpi_size, status)
+ CALL MPI_Comm_rank(comm, mpi_rank, status)
+
+ !
+ ! Initialize HDF5 library and Fortran interfaces.
+ !
+ CALL h5open_f(status)
+ !
+ ! ----------------------------------
+ ! Start parallel access to HDF5 file
+ ! ----------------------------------
+
+ ! Setup File Access Property List with parallel I/O access
+ CALL H5Pcreate_f(H5P_FILE_ACCESS_F, fapl_id, status)
+ CALL H5Pset_fapl_mpio_f(fapl_id, comm, info, status)
+
+ !
+ ! OPTIONAL: Set collective metadata reads on FAPL to allow
+ ! parallel writes to filtered datasets to perform
+ ! better at scale. While not strictly necessary,
+ ! this is generally recommended.
+
+ CALL H5Pset_all_coll_metadata_ops_f(fapl_id, .TRUE., status)
+
+ !
+ ! OPTIONAL: Set the latest file format version for HDF5 in
+ ! order to gain access to different dataset chunk
+ ! index types and better data encoding methods.
+ ! While not strictly necessary, this is generally
+ ! recommended.
+
+ CALL H5Pset_libver_bounds_f(fapl_id, H5F_LIBVER_LATEST_F, H5F_LIBVER_LATEST_F, status)
+
+ ! Parse any parallel prefix and create filename
+ par_prefix(:) = ""
+ CALL get_environment_variable("HDF5_PARAPREFIX", VALUE=par_prefix, STATUS=status)
+ filename = TRIM(par_prefix)//EXAMPLE_FILE
+
+ ! Create HDF5 file
+ CALL H5Fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, status, access_prp = fapl_id)
+
+ !
+ ! --------------------------------------
+ ! Setup Dataset Transfer Property List
+ ! with collective I/O
+ ! --------------------------------------
+
+
+ CALL H5Pcreate_f(H5P_DATASET_XFER_F, dxpl_id, status)
+
+ !
+ ! REQUIRED: Setup collective I/O for the dataset
+ ! write operations. Parallel writes to
+ ! filtered datasets MUST be collective,
+ ! even if some ranks have no data to
+ ! contribute to the write operation.
+
+ CALL H5Pset_dxpl_mpio_f(dxpl_id, H5FD_MPIO_COLLECTIVE_F, status)
+
+ !
+ ! --------------------------------
+ ! Create and write to the dataset
+ ! --------------------------------
+
+ !
+ ! Write to a dataset in a fashion where no
+ ! chunks in the dataset are written to by
+ ! more than 1 MPI rank and some MPI ranks
+ ! have nothing to contribute to the dataset.
+ ! In this case, the MPI ranks that have no
+ ! data to contribute must still participate
+ ! in the collective H5Dwrite call, but should
+ ! call H5Sselect_none on the file dataspace
+ ! passed to the H5Dwrite call.
+
+ CALL write_dataset_some_no_sel(file_id, dxpl_id)
+
+ !
+ ! ------------------
+ ! Close all HDF5 IDs
+ ! ------------------
+
+ CALL H5Pclose_f(dxpl_id, status)
+ CALL H5Pclose_f(fapl_id, status)
+ CALL H5Fclose_f(file_id, status)
+ !
+ ! Close FORTRAN interfaces and HDF5 library.
+ !
+ CALL h5close_f(status)
+
+ IF(mpi_rank .EQ. 0) WRITE(*,"(A)") "PHDF5 example finished with no errors"
+
+ !
+ ! ------------------------------------
+ ! Cleanup created HDF5 file and finish
+ ! ------------------------------------
+ CALL cleanup(filename)
+
+ CALL MPI_Finalize(status)
+
+END PROGRAM main
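For illustration only (outside this patch): a serial read-back sketch for the file produced by ph5_f90_filtered_writes_no_sel.F90. The program name and local variables are hypothetical; the file name, dataset name, and HDF5 calls are the ones used in the diff above.

PROGRAM CHECK_FILTERED_WRITES          ! hypothetical verification program
  USE HDF5
  USE ISO_C_BINDING
  IMPLICIT NONE

  CHARACTER(LEN=29), PARAMETER :: filename = "ph5_filtered_writes_no_sel.h5"
  INTEGER, DIMENSION(:,:), ALLOCATABLE, TARGET :: rdata
  INTEGER(HSIZE_T), DIMENSION(1:2) :: dims, maxdims
  INTEGER(HID_T) :: file_id, dset_id, space_id
  TYPE(C_PTR)    :: f_ptr
  INTEGER        :: status

  CALL h5open_f(status)

  ! Open the file and dataset created by ph5_f90_filtered_writes_no_sel
  CALL H5Fopen_f(filename, H5F_ACC_RDONLY_F, file_id, status)
  CALL H5Dopen_f(file_id, "DSET", dset_id, status)

  ! Query the dataset dimensions and read the whole dataset
  CALL H5Dget_space_f(dset_id, space_id, status)
  CALL H5Sget_simple_extent_dims_f(space_id, dims, maxdims, status)
  ALLOCATE(rdata(dims(1), dims(2)))
  f_ptr = C_LOC(rdata)
  CALL H5Dread_f(dset_id, H5T_NATIVE_INTEGER, f_ptr, status)

  ! Each written element holds the MPI rank that wrote it; blocks along the
  ! first dimension belonging to odd ranks were never written and read back
  ! as the dataset fill value (0 by default for integers).
  WRITE(*,*) "First column of DSET:", rdata(:,1)

  DEALLOCATE(rdata)
  CALL H5Sclose_f(space_id, status)
  CALL H5Dclose_f(dset_id, status)
  CALL H5Fclose_f(file_id, status)
  CALL h5close_f(status)
END PROGRAM CHECK_FILTERED_WRITES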
diff --git a/HDF5Examples/FORTRAN/H5PAR/ph5_f90_hyperslab_by_chunk.F90 b/HDF5Examples/FORTRAN/H5PAR/ph5_f90_hyperslab_by_chunk.F90
index c74e55d..7be9389 100644
--- a/HDF5Examples/FORTRAN/H5PAR/ph5_f90_hyperslab_by_chunk.F90
+++ b/HDF5Examples/FORTRAN/H5PAR/ph5_f90_hyperslab_by_chunk.F90
@@ -4,11 +4,10 @@
PROGRAM DATASET_BY_CHUNK
USE HDF5 ! This module contains all necessary modules
-! USE MPI
+ USE MPI
IMPLICIT NONE
- include 'mpif.h'
CHARACTER(LEN=11), PARAMETER :: filename = "sds_chnk.h5" ! File name
CHARACTER(LEN=8), PARAMETER :: dsetname = "IntArray" ! Dataset name
diff --git a/HDF5Examples/FORTRAN/H5PAR/ph5_f90_hyperslab_by_col.F90 b/HDF5Examples/FORTRAN/H5PAR/ph5_f90_hyperslab_by_col.F90
index dc92667..affb799 100644
--- a/HDF5Examples/FORTRAN/H5PAR/ph5_f90_hyperslab_by_col.F90
+++ b/HDF5Examples/FORTRAN/H5PAR/ph5_f90_hyperslab_by_col.F90
@@ -1,7 +1,6 @@
!
-! Number of processes is assumed to be 1 or multiples of 2 (1,2,4,6,8)
+! Number of processes is assumed to be 1 or powers of 2 (2,4,8)
!
-
PROGRAM DATASET_BY_COL
USE HDF5 ! This module contains all necessary modules
diff --git a/HDF5Examples/FORTRAN/H5PAR/ph5_f90_hyperslab_by_pattern.F90 b/HDF5Examples/FORTRAN/H5PAR/ph5_f90_hyperslab_by_pattern.F90
index dd02c63..c7e8da1 100644
--- a/HDF5Examples/FORTRAN/H5PAR/ph5_f90_hyperslab_by_pattern.F90
+++ b/HDF5Examples/FORTRAN/H5PAR/ph5_f90_hyperslab_by_pattern.F90
@@ -5,10 +5,10 @@
PROGRAM DATASET_BY_PATTERN
USE HDF5 ! This module contains all necessary modules
+ USE MPI
IMPLICIT NONE
- include 'mpif.h'
CHARACTER(LEN=10), PARAMETER :: filename = "sds_pat.h5" ! File name
CHARACTER(LEN=8), PARAMETER :: dsetname = "IntArray" ! Dataset name
diff --git a/HDF5Examples/FORTRAN/H5PAR/ph5_f90_hyperslab_by_row.F90 b/HDF5Examples/FORTRAN/H5PAR/ph5_f90_hyperslab_by_row.F90
index f66da2a..66d5b25 100644
--- a/HDF5Examples/FORTRAN/H5PAR/ph5_f90_hyperslab_by_row.F90
+++ b/HDF5Examples/FORTRAN/H5PAR/ph5_f90_hyperslab_by_row.F90
@@ -4,10 +4,10 @@
PROGRAM DATASET_BY_ROW
USE HDF5 ! This module contains all necessary modules
+ USE MPI
IMPLICIT NONE
- include 'mpif.h'
CHARACTER(LEN=10), PARAMETER :: filename = "sds_row.h5" ! File name
CHARACTER(LEN=8), PARAMETER :: dsetname = "IntArray" ! Dataset name
diff --git a/HDF5Examples/FORTRAN/H5PAR/ph5_f90_subfiling.F90 b/HDF5Examples/FORTRAN/H5PAR/ph5_f90_subfiling.F90
new file mode 100644
index 0000000..fc30717
--- /dev/null
+++ b/HDF5Examples/FORTRAN/H5PAR/ph5_f90_subfiling.F90
@@ -0,0 +1,521 @@
+!
+! Example of using HDF5's Subfiling VFD to write to an
+! HDF5 file that is striped across multiple subfiles
+!
+! If the HDF5_NOCLEANUP environment variable is set, the
+! files that this example creates will not be removed as
+! the example finishes.
+!
+! In general, the current working directory in which compiling
+! is done, is not suitable for parallel I/O and there is no
+! standard pathname for parallel file systems. In some cases,
+! the parallel file name may even need some parallel file type
+! prefix such as: "pfs:/GF/...". Therefore, this example parses
+! the HDF5_PARAPREFIX environment variable for a prefix, if one
+! is needed.
+!
+
+MODULE subf
+
+ USE HDF5
+ USE MPI
+
+ CHARACTER(LEN=31), PARAMETER :: EXAMPLE_FILE = "h5_subfiling_default_example.h5"
+ CHARACTER(LEN=30), PARAMETER :: EXAMPLE_FILE2 = "h5_subfiling_custom_example.h5"
+ CHARACTER(LEN=33), PARAMETER :: EXAMPLE_FILE3 = "h5_subfiling_precreate_example.h5"
+
+ CHARACTER(LEN=4), PARAMETER :: EXAMPLE_DSET_NAME = "DSET"
+ INTEGER , PARAMETER :: EXAMPLE_DSET_DIMS = 2
+
+ ! Have each MPI rank write 16MiB of data
+ INTEGER, PARAMETER :: EXAMPLE_DSET_NY = 4194304
+
+CONTAINS
+
+ ! Cleanup created files
+
+ SUBROUTINE cleanup(filename, fapl_id)
+
+ IMPLICIT NONE
+ INTEGER(HID_T) :: fapl_id
+ CHARACTER(*) :: filename
+
+ LOGICAL :: do_cleanup
+ INTEGER :: status
+
+ CALL get_environment_variable("HDF5_NOCLEANUP", STATUS=status)
+ !IF(status.EQ.0) CALL H5Fdelete_f(filename, fapl_id, status)
+ IF(status.NE.0)THEN
+ OPEN(UNIT=15, IOSTAT=status, FILE=filename, STATUS='old')
+ IF(status .EQ. 0) CLOSE(15, STATUS='DELETE')
+ ENDIF
+
+ END SUBROUTINE cleanup
+
+ ! An example of using the HDF5 Subfiling VFD with
+ ! its default settings of 1 subfile per node, with
+ ! a stripe size of 32MiB
+
+ SUBROUTINE subfiling_write_default(fapl_id, mpi_size, mpi_rank)
+
+ IMPLICIT NONE
+ INTEGER(HID_T) :: fapl_id
+ INTEGER :: mpi_size
+ INTEGER :: mpi_rank
+
+ INTEGER, DIMENSION(:), ALLOCATABLE, TARGET :: wdata
+ INTEGER(hsize_t), DIMENSION(1:EXAMPLE_DSET_DIMS) :: dset_dims
+ INTEGER(hsize_t), DIMENSION(1:EXAMPLE_DSET_DIMS) :: start
+ INTEGER(hsize_t), DIMENSION(1:EXAMPLE_DSET_DIMS) :: count
+ INTEGER(hid_t) :: file_id
+ INTEGER(hid_t) :: subfiling_fapl
+ INTEGER(hid_t) :: dset_id
+ INTEGER(hid_t) :: filespace
+ CHARACTER(LEN=512) :: filename, par_prefix
+ INTEGER :: status
+ INTEGER(SIZE_T) :: i
+ TYPE(C_PTR) :: f_ptr
+
+ !
+ ! Make a copy of the FAPL so we don't disturb
+ ! it for the other examples
+ !
+ CALL H5Pcopy_f(fapl_id, subfiling_fapl, status)
+
+ !
+ ! Set Subfiling VFD on FAPL using default settings
+ ! (use IOC VFD, 1 IOC per node, 32MiB stripe size)
+ !
+ ! Note that all of Subfiling's configuration settings
+ ! can be adjusted with environment variables as well
+ ! in this case.
+ !
+
+ CALL H5Pset_fapl_subfiling_f(subfiling_fapl, status)
+
+ !
+ ! OPTIONAL: Set alignment of objects in HDF5 file to
+ ! be equal to the Subfiling stripe size.
+ ! Choosing a Subfiling stripe size and HDF5
+ ! object alignment value that are some
+ ! multiple of the disk block size can
+ ! generally help performance by ensuring
+ ! that I/O is well-aligned and doesn't
+ ! excessively cross stripe boundaries.
+ !
+ ! Note that this option can substantially
+ ! increase the size of the resulting HDF5
+ ! files, so it is a good idea to keep an eye
+ ! on this.
+ !
+
+ CALL H5Pset_alignment_f(subfiling_fapl, 0_HSIZE_T, 33554432_HSIZE_T, status) ! ALIGN to default 32MiB stripe size
+
+ ! Parse any parallel prefix and create filename
+ par_prefix(:) = ""
+ CALL get_environment_variable("HDF5_PARAPREFIX", VALUE=par_prefix, STATUS=status)
+ filename = TRIM(par_prefix)//EXAMPLE_FILE
+
+ ! Create a new file collectively
+ CALL H5Fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, status, access_prp = subfiling_fapl)
+
+ ! Create the dataspace for the dataset. The second
+ ! dimension varies with the number of MPI ranks
+ ! while the first dimension is fixed.
+
+ dset_dims(1) = EXAMPLE_DSET_NY
+ dset_dims(2) = mpi_size
+ CALL H5Screate_simple_f(EXAMPLE_DSET_DIMS, dset_dims, filespace, status)
+
+ ! Create the dataset with default properties
+
+ CALL H5Dcreate_f(file_id, EXAMPLE_DSET_NAME, H5T_NATIVE_INTEGER, filespace, dset_id, status)
+ ! Each MPI rank writes from a contiguous memory
+ ! region to the hyperslab in the file
+
+ start(1) = 0
+ start(2) = mpi_rank
+ count(1) = dset_dims(1)
+ count(2) = 1
+ CALL H5Sselect_hyperslab_f(filespace, H5S_SELECT_SET_F, start, count, status)
+
+ ! Initialize data buffer
+ ALLOCATE(wdata(COUNT(1)*COUNT(2)))
+ DO i = 1, COUNT(1)*COUNT(2)
+ wdata(i) = mpi_rank
+ ENDDO
+
+ ! Write to dataset
+ f_ptr = C_LOC(wdata)
+ CALL H5Dwrite_f(dset_id, H5T_NATIVE_INTEGER, f_ptr, status, mem_space_id=H5S_BLOCK_F, file_space_id=filespace)
+
+ ! Close/release resources.
+ DEALLOCATE(wdata)
+ CALL H5Dclose_f(dset_id, status)
+ CALL H5Sclose_f(filespace, status)
+
+ CALL H5Fclose_f(file_id, status)
+
+ CALL cleanup(EXAMPLE_FILE, subfiling_fapl)
+
+ CALL H5Pclose_f(subfiling_fapl, status)
+
+ END SUBROUTINE subfiling_write_default
+
+ !
+ ! An example of using the HDF5 Subfiling VFD with
+ ! custom settings
+ !
+
+ SUBROUTINE subfiling_write_custom(fapl_id, mpi_size, mpi_rank)
+
+ IMPLICIT NONE
+ INTEGER(HID_T) :: fapl_id
+ INTEGER :: mpi_size
+ INTEGER :: mpi_rank
+
+ INTEGER, DIMENSION(:), ALLOCATABLE, TARGET :: wdata
+
+ TYPE(H5FD_subfiling_config_t) :: subf_config
+ TYPE(H5FD_ioc_config_t) :: ioc_config
+ INTEGER(hsize_t), DIMENSION(1:EXAMPLE_DSET_DIMS) :: dset_dims
+ INTEGER(hsize_t), DIMENSION(1:EXAMPLE_DSET_DIMS) :: start
+ INTEGER(hsize_t), DIMENSION(1:EXAMPLE_DSET_DIMS) :: count
+ INTEGER(hid_t) :: file_id
+ INTEGER(hid_t) :: subfiling_fapl
+ INTEGER(hid_t) :: dset_id
+ INTEGER(hid_t) :: filespace
+ CHARACTER(LEN=512) :: filename, par_prefix
+ INTEGER :: status
+ INTEGER(SIZE_T) :: i
+ TYPE(C_PTR) :: f_ptr
+
+ ! Make a copy of the FAPL so we don't disturb
+ ! it for the other examples
+
+ CALL H5Pcopy_f(fapl_id, subfiling_fapl, status)
+
+ ! Get a default Subfiling and IOC configuration
+ CALL h5pget_fapl_subfiling_f(subfiling_fapl, subf_config, status)
+ CALL h5pget_fapl_ioc_f(subfiling_fapl,ioc_config, status)
+
+ ! Set Subfiling configuration to use a 1MiB
+ ! stripe size and the SELECT_IOC_EVERY_NTH_RANK
+ ! selection method. By default, without a setting
+ ! in the H5FD_SUBFILING_IOC_SELECTION_CRITERIA
+ ! environment variable, this will use every MPI
+ ! rank as an I/O concentrator.
+
+ subf_config%shared_cfg%stripe_size = 1048576
+ subf_config%shared_cfg%ioc_selection = SELECT_IOC_EVERY_NTH_RANK_F
+
+ ! Set IOC configuration to use 2 worker threads
+ ! per IOC instead of the default setting and
+ ! update IOC configuration with new subfiling
+ ! configuration.
+
+ ioc_config%thread_pool_size = 2
+
+ ! Set our new configuration on the IOC
+ ! FAPL used for Subfiling
+
+ CALL H5Pset_fapl_ioc_f(subf_config%ioc_fapl_id, status, ioc_config)
+
+ ! Finally, set our new Subfiling configuration
+ ! on the original FAPL
+
+ CALL H5Pset_fapl_subfiling_f(subfiling_fapl, status, subf_config)
+ !
+ ! OPTIONAL: Set alignment of objects in HDF5 file to
+ ! be equal to the Subfiling stripe size.
+ ! Choosing a Subfiling stripe size and HDF5
+ ! object alignment value that are some
+ ! multiple of the disk block size can
+ ! generally help performance by ensuring
+ ! that I/O is well-aligned and doesn't
+ ! excessively cross stripe boundaries.
+ !
+ ! Note that this option can substantially
+ ! increase the size of the resulting HDF5
+ ! files, so it is a good idea to keep an eye
+ ! on this.
+ !
+
+ CALL H5Pset_alignment_f(subfiling_fapl, 0_HSIZE_T, 33554432_HSIZE_T, status) ! ALIGN to default 32MiB stripe size
+
+ ! Parse any parallel prefix and create filename
+ par_prefix(:) = ""
+ CALL get_environment_variable("HDF5_PARAPREFIX", VALUE=par_prefix, STATUS=status)
+ filename = TRIM(par_prefix)//EXAMPLE_FILE2
+
+ ! Create a new file collectively
+ CALL H5Fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, status, access_prp = subfiling_fapl)
+
+ ! Create the dataspace for the dataset. The second
+ ! dimension varies with the number of MPI ranks
+ ! while the first dimension is fixed.
+
+ dset_dims(1) = EXAMPLE_DSET_NY
+ dset_dims(2) = mpi_size
+ CALL H5Screate_simple_f(EXAMPLE_DSET_DIMS, dset_dims, filespace, status)
+
+ ! Create the dataset with default properties
+
+ CALL H5Dcreate_f(file_id, EXAMPLE_DSET_NAME, H5T_NATIVE_INTEGER, filespace, dset_id, status)
+ ! Each MPI rank writes from a contiguous memory
+ ! region to the hyperslab in the file
+
+ start(1) = 0
+ start(2) = mpi_rank
+ count(1) = dset_dims(1)
+ count(2) = 1
+ CALL H5Sselect_hyperslab_f(filespace, H5S_SELECT_SET_F, start, count, status)
+
+ ! Initialize data buffer
+ ALLOCATE(wdata(COUNT(1)*COUNT(2)))
+ DO i = 1, COUNT(1)*COUNT(2)
+ wdata(i) = mpi_rank
+ ENDDO
+
+ ! Write to dataset
+ f_ptr = C_LOC(wdata)
+ CALL H5Dwrite_f(dset_id, H5T_NATIVE_INTEGER, f_ptr, status, mem_space_id=H5S_BLOCK_F, file_space_id=filespace)
+
+ ! Close/release resources.
+ DEALLOCATE(wdata)
+ CALL H5Dclose_f(dset_id, status)
+ CALL H5Sclose_f(filespace, status)
+
+ CALL H5Fclose_f(file_id, status)
+
+ CALL cleanup(EXAMPLE_FILE2, subfiling_fapl)
+
+ CALL H5Pclose_f(subfiling_fapl, status)
+
+ END SUBROUTINE subfiling_write_custom
+
+ !
+ ! An example of pre-creating an HDF5 file on MPI rank
+ ! 0 when using the HDF5 Subfiling VFD. In this case,
+ ! the subfiling stripe count must be set so that rank
+ ! 0 knows how many subfiles to pre-create.
+
+ SUBROUTINE subfiling_write_precreate(fapl_id, mpi_size, mpi_rank)
+
+ IMPLICIT NONE
+ INTEGER(HID_T) :: fapl_id
+ INTEGER :: mpi_size
+ INTEGER :: mpi_rank
+
+ INTEGER, DIMENSION(:), ALLOCATABLE, TARGET :: wdata
+ TYPE(H5FD_subfiling_config_t) :: subf_config
+ INTEGER(hsize_t), DIMENSION(1:EXAMPLE_DSET_DIMS) :: dset_dims
+ INTEGER(hsize_t), DIMENSION(1:EXAMPLE_DSET_DIMS) :: start
+ INTEGER(hsize_t), DIMENSION(1:EXAMPLE_DSET_DIMS) :: count
+ INTEGER(hid_t) :: file_id
+ INTEGER(hid_t) :: subfiling_fapl
+ INTEGER(hid_t) :: dset_id
+ INTEGER(hid_t) :: filespace
+ CHARACTER(LEN=512) :: filename, par_prefix
+ INTEGER :: status
+ INTEGER(SIZE_T) :: i
+ TYPE(C_PTR) :: f_ptr
+
+ ! Make a copy of the FAPL so we don't disturb
+ ! it for the other examples
+
+ CALL H5Pcopy_f(fapl_id, subfiling_fapl, status)
+
+ ! Get a default Subfiling and IOC configuration
+ CALL h5pget_fapl_subfiling_f(subfiling_fapl, subf_config, status)
+
+ !
+ ! Set the Subfiling stripe count so that rank
+ ! 0 knows how many subfiles the logical HDF5
+ ! file should consist of. In this case, use
+ ! 5 subfiles with a default stripe size of
+ ! 32MiB.
+
+ subf_config%shared_cfg%stripe_count = 5
+ !
+ ! OPTIONAL: Set alignment of objects in HDF5 file to
+ ! be equal to the Subfiling stripe size.
+ ! Choosing a Subfiling stripe size and HDF5
+ ! object alignment value that are some
+ ! multiple of the disk block size can
+ ! generally help performance by ensuring
+ ! that I/O is well-aligned and doesn't
+ ! excessively cross stripe boundaries.
+ !
+ ! Note that this option can substantially
+ ! increase the size of the resulting HDF5
+ ! files, so it is a good idea to keep an eye
+ ! on this.
+ !
+
+ CALL H5Pset_alignment_f(subfiling_fapl, 0_HSIZE_T, 1048576_HSIZE_T, status) ! Align to custom 1MiB stripe size
+
+ ! Parse any parallel prefix and create filename
+ par_prefix(:) = ""
+ CALL get_environment_variable("HDF5_PARAPREFIX", VALUE=par_prefix, STATUS=status)
+ filename = TRIM(par_prefix)//EXAMPLE_FILE3
+
+ ! Set dataset dimensionality
+ dset_dims(1) = EXAMPLE_DSET_NY
+ dset_dims(2) = mpi_size
+
+ IF (mpi_rank .EQ. 0) THEN
+ !
+ ! Make sure only this rank opens the file
+ !
+ CALL H5Pset_mpi_params_f(subfiling_fapl, MPI_COMM_SELF, MPI_INFO_NULL, status)
+
+ !
+ ! Set the Subfiling VFD on our FAPL using
+ ! our custom configuration
+ !
+ CALL H5Pset_fapl_subfiling_f(subfiling_fapl, status, subf_config)
+
+ !
+ ! Create a new file on rank 0
+ !
+ CALL H5Fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, status, access_prp = subfiling_fapl)
+
+ ! Create the dataspace for the dataset. The second
+ ! dimension varies with the number of MPI ranks
+ ! while the first dimension is fixed.
+ !
+ CALL H5Screate_simple_f(EXAMPLE_DSET_DIMS, dset_dims, filespace, status)
+
+ ! Create the dataset with default properties
+
+ CALL H5Dcreate_f(file_id, EXAMPLE_DSET_NAME, H5T_NATIVE_INTEGER, filespace, dset_id, status)
+
+ ! Initialize data buffer
+ ALLOCATE(wdata(dset_dims(1)*dset_dims(2)))
+ DO i = 1, dset_dims(1)*dset_dims(2)
+ wdata(i) = i
+ ENDDO
+
+ !
+ ! Rank 0 writes to the whole dataset
+ !
+ f_ptr = C_LOC(wdata)
+ CALL H5Dwrite_f(dset_id, H5T_NATIVE_INTEGER, f_ptr, status, mem_space_id=H5S_BLOCK_F, file_space_id=filespace)
+
+ !
+ ! Close/release resources.
+ !
+ DEALLOCATE(wdata)
+ CALL H5Dclose_f(dset_id, status)
+ CALL H5Sclose_f(filespace, status)
+
+ CALL H5Fclose_f(file_id, status)
+ ENDIF
+
+ CALL MPI_Barrier(MPI_COMM_WORLD, status)
+
+ !
+ ! Use all MPI ranks to re-open the file and
+ ! read back the dataset that was created
+ !
+ CALL H5Pset_mpi_params_f(subfiling_fapl, MPI_COMM_WORLD, MPI_INFO_NULL, status)
+
+ !
+ ! Use the same subfiling configuration as rank 0
+ ! used to create the file
+ !
+ CALL H5Pset_fapl_subfiling_f(subfiling_fapl, status, subf_config)
+
+ !
+ ! Re-open the file on all ranks
+ !
+
+ CALL H5Fopen_f(filename, H5F_ACC_RDONLY_F, file_id, status, access_prp=subfiling_fapl)
+
+ !
+ ! Open the dataset that was created
+ !
+ CALL H5Dopen_f(file_id, EXAMPLE_DSET_NAME, dset_id, status)
+
+ !
+ ! Initialize data buffer
+ !
+
+ ALLOCATE(wdata(dset_dims(1)*dset_dims(2)))
+ !
+ ! Read the dataset on all ranks
+ !
+ f_ptr = C_LOC(wdata)
+ CALL H5Dread_f(dset_id, H5T_NATIVE_INTEGER, f_ptr, status, mem_space_id=H5S_BLOCK_F, file_space_id=H5S_ALL_F)
+
+ DEALLOCATE(wdata)
+
+ CALL H5Dclose_f(dset_id, status)
+ CALL H5Fclose_f(file_id, status)
+
+ CALL cleanup(EXAMPLE_FILE3, subfiling_fapl)
+
+ CALL H5Pclose_f(subfiling_fapl, status)
+
+ END SUBROUTINE subfiling_write_precreate
+
+END MODULE subf
+
+PROGRAM main
+
+ USE SUBF
+ IMPLICIT NONE
+
+ INTEGER :: comm = MPI_COMM_WORLD
+ INTEGER :: info = MPI_INFO_NULL
+ INTEGER(HID_T) :: fapl_id
+ INTEGER :: mpi_size
+ INTEGER :: mpi_rank
+ INTEGER :: required
+ INTEGER :: provided
+ INTEGER :: status
+
+ ! HDF5 Subfiling VFD requires MPI_Init_thread with MPI_THREAD_MULTIPLE
+ required = MPI_THREAD_MULTIPLE
+ provided = 0
+ CALL mpi_init_thread(required, provided, status)
+ IF (provided .NE. required) THEN
+ WRITE(*,*) "MPI doesn't support MPI_Init_thread with MPI_THREAD_MULTIPLE *FAILED*"
+ CALL MPI_Abort(comm, -1, status)
+ ENDIF
+
+ CALL MPI_Comm_size(comm, mpi_size, status)
+ CALL MPI_Comm_rank(comm, mpi_rank, status)
+
+ !
+ ! Initialize HDF5 library and Fortran interfaces.
+ !
+ CALL h5open_f(status)
+
+ !
+ ! Set up File Access Property List with MPI
+ ! parameters for the Subfiling VFD to use
+ CALL h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, status)
+ CALL H5Pset_mpi_params_f(fapl_id, comm, info, status)
+
+ ! Use Subfiling VFD with default settings
+ CALL subfiling_write_default(fapl_id, mpi_size, mpi_rank)
+
+ ! Use Subfiling VFD with custom settings
+ CALL subfiling_write_custom(fapl_id, mpi_size, mpi_rank)
+
+ ! Use Subfiling VFD to precreate the HDF5 file on MPI rank 0
+ CALL subfiling_write_precreate(fapl_id, mpi_size, mpi_rank)
+
+ CALL H5Pclose_f(fapl_id, status)
+ !
+ ! Close FORTRAN interfaces and HDF5 library.
+ !
+ CALL h5close_f(status)
+
+ IF(mpi_rank .EQ. 0) WRITE(*,"(A)") "PHDF5 example finished with no errors"
+
+ CALL MPI_Finalize(status)
+
+END PROGRAM main
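As a recap, also outside this patch: the FAPL round trip used by both the custom and pre-create subfiling examples (get the default configuration, adjust the shared settings, set it back) can be condensed into a sketch like the following. The wrapper subroutine and its argument names are hypothetical; the derived type, fields, and calls are those exercised above, and the sketch assumes HDF5 was built with the Subfiling VFD enabled.

! Minimal sketch: configure a FAPL for Subfiling with a custom layout.
! Assumes HDF5 was built with the Subfiling VFD; the wrapper is hypothetical.
SUBROUTINE make_subfiling_fapl(comm, info, stripe_size, stripe_count, fapl_id, status)
  USE HDF5
  USE MPI
  IMPLICIT NONE
  INTEGER,          INTENT(IN)  :: comm, info
  INTEGER(HSIZE_T), INTENT(IN)  :: stripe_size   ! bytes per stripe
  INTEGER,          INTENT(IN)  :: stripe_count  ! number of subfiles
  INTEGER(HID_T),   INTENT(OUT) :: fapl_id
  INTEGER,          INTENT(OUT) :: status

  TYPE(H5FD_subfiling_config_t) :: subf_config

  ! FAPL with the MPI parameters the Subfiling VFD should use
  CALL H5Pcreate_f(H5P_FILE_ACCESS_F, fapl_id, status)
  CALL H5Pset_mpi_params_f(fapl_id, comm, info, status)

  ! Start from the default Subfiling configuration, then adjust it
  CALL H5Pget_fapl_subfiling_f(fapl_id, subf_config, status)
  subf_config%shared_cfg%stripe_size  = stripe_size
  subf_config%shared_cfg%stripe_count = stripe_count

  ! Install the modified configuration on the FAPL
  CALL H5Pset_fapl_subfiling_f(fapl_id, status, subf_config)
END SUBROUTINE make_subfiling_fapl

A caller would then pass the returned fapl_id to H5Fcreate_f or H5Fopen_f exactly as the subfiling examples above do.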