path: root/HDF5Examples/C/H5PAR
author    Allen Byrne <50328838+byrnHDF@users.noreply.github.com>    2023-11-27 21:30:15 (GMT)
committer GitHub <noreply@github.com>    2023-11-27 21:30:15 (GMT)
commit    fc88fcde1091cf12c1e88c783a14ee0f1cffe31c (patch)
tree      91b88b62cd30ed37ee9227e43989e95035be43c3 /HDF5Examples/C/H5PAR
parent    a067bf71f57723d2dfca7dfe2ffd9ea502eccd4f (diff)
Develop merge examples (#3851)
* Merge examples repo into library
* Change grepTest to be more fault-tolerant
* Update examples macro file
* Exclude all Fortran examples from doxygen
Diffstat (limited to 'HDF5Examples/C/H5PAR')
-rw-r--r--   HDF5Examples/C/H5PAR/CMakeLists.txt                  64
-rw-r--r--   HDF5Examples/C/H5PAR/C_sourcefiles.cmake             16
-rw-r--r--   HDF5Examples/C/H5PAR/ph5_dataset.c                  101
-rw-r--r--   HDF5Examples/C/H5PAR/ph5_file_create.c               60
-rw-r--r--   HDF5Examples/C/H5PAR/ph5_filtered_writes.c          488
-rw-r--r--   HDF5Examples/C/H5PAR/ph5_filtered_writes_no_sel.c   369
-rw-r--r--   HDF5Examples/C/H5PAR/ph5_hyperslab_by_chunk.c       157
-rw-r--r--   HDF5Examples/C/H5PAR/ph5_hyperslab_by_col.c         140
-rw-r--r--   HDF5Examples/C/H5PAR/ph5_hyperslab_by_pattern.c     152
-rw-r--r--   HDF5Examples/C/H5PAR/ph5_hyperslab_by_row.c         119
-rw-r--r--   HDF5Examples/C/H5PAR/ph5_subfiling.c                551
11 files changed, 2217 insertions, 0 deletions
diff --git a/HDF5Examples/C/H5PAR/CMakeLists.txt b/HDF5Examples/C/H5PAR/CMakeLists.txt
new file mode 100644
index 0000000..4e2e297
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/CMakeLists.txt
@@ -0,0 +1,64 @@
+cmake_minimum_required (VERSION 3.12)
+PROJECT (H5PAR_C)
+
+#-----------------------------------------------------------------------------
+# Define Sources
+#-----------------------------------------------------------------------------
+include (C_sourcefiles.cmake)
+
+foreach (example_name ${examples})
+ add_executable (${EXAMPLE_VARNAME}_${example_name} ${PROJECT_SOURCE_DIR}/${example_name}.c)
+ target_compile_options(${EXAMPLE_VARNAME}_${example_name}
+ PRIVATE
+ "$<$<BOOL:${${EXAMPLE_VARNAME}_USE_16_API}>:-DH5_USE_16_API>"
+ "$<$<BOOL:${${EXAMPLE_VARNAME}_USE_18_API}>:-DH5_USE_18_API>"
+ "$<$<BOOL:${${EXAMPLE_VARNAME}_USE_110_API}>:-DH5_USE_110_API>"
+ "$<$<BOOL:${${EXAMPLE_VARNAME}_USE_112_API}>:-DH5_USE_112_API>"
+ "$<$<BOOL:${${EXAMPLE_VARNAME}_USE_114_API}>:-DH5_USE_114_API>"
+ "$<$<BOOL:${${EXAMPLE_VARNAME}_USE_116_API}>:-DH5_USE_116_API>"
+ )
+ target_include_directories (${EXAMPLE_VARNAME}_${example_name} PUBLIC ${MPI_C_INCLUDE_DIRS})
+ target_link_libraries (${EXAMPLE_VARNAME}_${example_name} ${H5EX_HDF5_LINK_LIBS})
+endforeach ()
+
+if (H5EX_BUILD_TESTING)
+ macro (ADD_GREP_TEST testname numprocs)
+ add_test (
+ NAME ${EXAMPLE_VARNAME}_${testname}-clearall
+ COMMAND ${CMAKE_COMMAND}
+ -E remove
+ ${testname}.h5
+ )
+ if (last_test)
+ set_tests_properties (${EXAMPLE_VARNAME}_${testname}-clearall PROPERTIES DEPENDS ${last_test})
+ endif ()
+ add_test (NAME ${EXAMPLE_VARNAME}_${testname} COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=${MPIEXEC_EXECUTABLE};${MPIEXEC_NUMPROC_FLAG};${mumprocs};${MPIEXEC_PREFLAGS};$<TARGET_FILE:${EXAMPLE_VARNAME}_${testname}>;${MPIEXEC_POSTFLAGS}"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=${testname}.out"
+ -D "TEST_REFERENCE:STRING=PHDF5 example finished with no errors"
+ -D "TEST_LIBRARY_DIRECTORY=${CMAKE_TEST_LIB_DIRECTORY}"
+ -P "${${EXAMPLE_PACKAGE_NAME}_RESOURCES_DIR}/grepTest.cmake"
+ )
+ set_tests_properties (${EXAMPLE_VARNAME}_${testname} PROPERTIES DEPENDS ${EXAMPLE_VARNAME}_${testname}-clearall)
+ set (last_test "${EXAMPLE_VARNAME}_${testname}")
+ endmacro ()
+
+ # Ensure that 24 is a multiple of the number of processes.
+ # The number 24 corresponds to SPACE1_DIM1 and SPACE1_DIM2 defined in ph5example.c
+ math(EXPR NUMPROCS "24 / ((24 + ${MPIEXEC_MAX_NUMPROCS} - 1) / ${MPIEXEC_MAX_NUMPROCS})")
+
+ foreach (example_name ${examples})
+ if (${example_name} STREQUAL "ph5_hyperslab_by_col")
+ ADD_GREP_TEST (${example_name} 2)
+ elseif (${example_name} STREQUAL "ph5_hyperslab_by_chunk" OR ${example_name} STREQUAL "ph5_hyperslab_by_pattern")
+ ADD_GREP_TEST (${example_name} 4)
+ else ()
+ ADD_GREP_TEST (${example_name} ${NUMPROCS})
+ endif ()
+ endforeach ()
+
+endif ()
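
The math(EXPR ...) call above picks a default test process count that divides 24 evenly and does not exceed MPIEXEC_MAX_NUMPROCS. A few worked evaluations of that integer arithmetic (the MPIEXEC_MAX_NUMPROCS values are illustrative assumptions, not taken from the commit):

    MPIEXEC_MAX_NUMPROCS = 8:  (24 + 8 - 1) / 8 = 3,  NUMPROCS = 24 / 3 = 8
    MPIEXEC_MAX_NUMPROCS = 6:  (24 + 6 - 1) / 6 = 4,  NUMPROCS = 24 / 4 = 6
    MPIEXEC_MAX_NUMPROCS = 5:  (24 + 5 - 1) / 5 = 5,  NUMPROCS = 24 / 5 = 4

In each case the result divides 24 and stays at or below the available process count, so the grep tests run with a decomposition that comes out even.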
diff --git a/HDF5Examples/C/H5PAR/C_sourcefiles.cmake b/HDF5Examples/C/H5PAR/C_sourcefiles.cmake
new file mode 100644
index 0000000..2e1ede2
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/C_sourcefiles.cmake
@@ -0,0 +1,16 @@
+#-----------------------------------------------------------------------------
+# Define Sources, one file per application
+#-----------------------------------------------------------------------------
+set (examples
+ ph5_filtered_writes
+ ph5_filtered_writes_no_sel
+ ph5_dataset
+ ph5_file_create
+ ph5_hyperslab_by_row
+ ph5_hyperslab_by_col
+ ph5_hyperslab_by_pattern
+ ph5_hyperslab_by_chunk
+)
+if (${HDF5_ENABLE_SUBFILING_VFD})
+ list (APPEND examples ph5_subfiling)
+endif ()
diff --git a/HDF5Examples/C/H5PAR/ph5_dataset.c b/HDF5Examples/C/H5PAR/ph5_dataset.c
new file mode 100644
index 0000000..9b8e8a8
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/ph5_dataset.c
@@ -0,0 +1,101 @@
+/*
+ * This example writes data to the HDF5 file.
+ * Number of processes is assumed to be 1 or a multiple of 2 (up to 8)
+ */
+
+#include "hdf5.h"
+#include "stdlib.h"
+
+#define H5FILE_NAME "SDS.h5"
+#define DATASETNAME "IntArray"
+#define NX 8 /* dataset dimensions */
+#define NY 5
+#define RANK 2
+
+int
+main(int argc, char **argv)
+{
+ /*
+ * HDF5 APIs definitions
+ */
+ hid_t file_id, dset_id; /* file and dataset identifiers */
+ hid_t filespace; /* file and memory dataspace identifiers */
+ hsize_t dimsf[] = {NX, NY}; /* dataset dimensions */
+ int *data; /* pointer to data buffer to write */
+ hid_t plist_id; /* property list identifier */
+ int i;
+ herr_t status;
+
+ /*
+ * MPI variables
+ */
+ int mpi_size, mpi_rank;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ /*
+ * Initialize MPI
+ */
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /*
+ * Initialize data buffer
+ */
+ data = (int *)malloc(sizeof(int) * dimsf[0] * dimsf[1]);
+ for (i = 0; i < dimsf[0] * dimsf[1]; i++) {
+ data[i] = i;
+ }
+ /*
+ * Set up file access property list with parallel I/O access
+ */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(plist_id, comm, info);
+
+ /*
+ * Create a new file collectively and release property list identifier.
+ */
+ file_id = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
+ H5Pclose(plist_id);
+
+ /*
+ * Create the dataspace for the dataset.
+ */
+ filespace = H5Screate_simple(RANK, dimsf, NULL);
+
+ /*
+ * Create the dataset with default properties and close filespace.
+ */
+ dset_id =
+ H5Dcreate(file_id, DATASETNAME, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ /*
+ * Create property list for collective dataset write.
+ */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+
+ /*
+ * To write dataset independently use
+ *
+ * H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_INDEPENDENT);
+ */
+
+ status = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, plist_id, data);
+ free(data);
+
+ /*
+ * Close/release resources.
+ */
+ H5Dclose(dset_id);
+ H5Sclose(filespace);
+ H5Pclose(plist_id);
+ H5Fclose(file_id);
+
+ if (mpi_rank == 0)
+ printf("PHDF5 example finished with no errors\n");
+
+ MPI_Finalize();
+
+ return 0;
+}
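
Since ph5_dataset.c only demonstrates the collective write, a minimal read-back sketch may be useful for comparison. It reuses the identifiers from the example above and assumes it is placed before the file is closed; it is an illustration, not part of the commit:

    /*
     * Collectively read the dataset back into a separate buffer,
     * using a collective transfer property list as for the write.
     */
    int *rdata = (int *)malloc(sizeof(int) * NX * NY);

    plist_id = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);

    dset_id = H5Dopen(file_id, DATASETNAME, H5P_DEFAULT);
    status  = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, plist_id, rdata);

    H5Dclose(dset_id);
    H5Pclose(plist_id);
    free(rdata);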
diff --git a/HDF5Examples/C/H5PAR/ph5_file_create.c b/HDF5Examples/C/H5PAR/ph5_file_create.c
new file mode 100644
index 0000000..a3bd0a8
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/ph5_file_create.c
@@ -0,0 +1,60 @@
+/*
+ * This example creates an HDF5 file.
+ */
+
+#include "hdf5.h"
+
+#define H5FILE_NAME "SDS_row.h5"
+
+int
+main(int argc, char **argv)
+{
+ /*
+ * HDF5 APIs definitions
+ */
+ hid_t file_id; /* file and dataset identifiers */
+ hid_t plist_id; /* property list identifier( access template) */
+ herr_t status;
+
+ /*
+ * MPI variables
+ */
+ int mpi_size, mpi_rank;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ /*
+ * Initialize MPI
+ */
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /*
+ * Set up file access property list with parallel I/O access
+ */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(plist_id, comm, info);
+
+ /*
+ * Create a new file collectively.
+ */
+ file_id = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
+
+ /*
+ * Close property list.
+ */
+ H5Pclose(plist_id);
+
+ /*
+ * Close the file.
+ */
+ H5Fclose(file_id);
+
+ if (mpi_rank == 0)
+ printf("PHDF5 example finished with no errors\n");
+
+ MPI_Finalize();
+
+ return 0;
+}
diff --git a/HDF5Examples/C/H5PAR/ph5_filtered_writes.c b/HDF5Examples/C/H5PAR/ph5_filtered_writes.c
new file mode 100644
index 0000000..104704a
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/ph5_filtered_writes.c
@@ -0,0 +1,488 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Example of using the parallel HDF5 library to write to datasets
+ * with filters applied to them.
+ *
+ * If the HDF5_NOCLEANUP environment variable is set, the file that
+ * this example creates will not be removed as the example finishes.
+ *
+ * A parallel file prefix is needed because, in general, the current
+ * working directory in which compilation is done is not suitable for
+ * parallel I/O and there is no standard pathname for parallel file
+ * systems. In some cases, the parallel file name may even need a
+ * parallel file type prefix such as: "pfs:/GF/...". Therefore, this
+ * example parses the HDF5_PARAPREFIX environment variable for a prefix,
+ * if one is needed.
+ */
+
+#include <stdlib.h>
+
+#include "hdf5.h"
+
+#if defined(H5_HAVE_PARALLEL) && defined(H5_HAVE_PARALLEL_FILTERED_WRITES)
+
+#define EXAMPLE_FILE "ph5_filtered_writes.h5"
+#define EXAMPLE_DSET1_NAME "DSET1"
+#define EXAMPLE_DSET2_NAME "DSET2"
+
+#define EXAMPLE_DSET_DIMS 2
+#define EXAMPLE_DSET_CHUNK_DIM_SIZE 10
+
+/* Dataset datatype */
+#define HDF5_DATATYPE H5T_NATIVE_INT
+typedef int C_DATATYPE;
+
+#ifndef PATH_MAX
+#define PATH_MAX 512
+#endif
+
+/* Global variables */
+int mpi_rank, mpi_size;
+
+/*
+ * Routine to set an HDF5 filter on the given DCPL
+ */
+static void
+set_filter(hid_t dcpl_id)
+{
+ htri_t filter_avail;
+
+ /*
+ * Check if 'deflate' filter is available
+ */
+ filter_avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE);
+ if (filter_avail < 0)
+ return;
+ else if (filter_avail) {
+ /*
+ * Set 'deflate' filter with reasonable
+ * compression level on DCPL
+ */
+ H5Pset_deflate(dcpl_id, 6);
+ }
+ else {
+ /*
+ * Set Fletcher32 checksum filter on DCPL
+ * since it is always available in HDF5
+ */
+ H5Pset_fletcher32(dcpl_id);
+ }
+}
+
+/*
+ * Routine to fill a data buffer with data. Assumes
+ * dimension rank is 2 and data is stored contiguous.
+ */
+void
+fill_databuf(hsize_t start[], hsize_t count[], hsize_t stride[], C_DATATYPE *data)
+{
+ C_DATATYPE *dataptr = data;
+ hsize_t i, j;
+
+ /* Use MPI rank value for data */
+ for (i = 0; i < count[0]; i++) {
+ for (j = 0; j < count[1]; j++) {
+ *dataptr++ = mpi_rank;
+ }
+ }
+}
+
+/* Cleanup created file */
+static void
+cleanup(char *filename)
+{
+ hbool_t do_cleanup = getenv("HDF5_NOCLEANUP") ? 0 : 1;
+
+ if (do_cleanup)
+ MPI_File_delete(filename, MPI_INFO_NULL);
+}
+
+/*
+ * Routine to write to a dataset in a fashion
+ * where no chunks in the dataset are written
+ * to by more than 1 MPI rank. This will
+ * generally give the best performance as the
+ * MPI ranks will need the least amount of
+ * inter-process communication.
+ */
+static void
+write_dataset_no_overlap(hid_t file_id, hid_t dxpl_id)
+{
+ C_DATATYPE data[EXAMPLE_DSET_CHUNK_DIM_SIZE][4 * EXAMPLE_DSET_CHUNK_DIM_SIZE];
+ hsize_t dataset_dims[EXAMPLE_DSET_DIMS];
+ hsize_t chunk_dims[EXAMPLE_DSET_DIMS];
+ hsize_t start[EXAMPLE_DSET_DIMS];
+ hsize_t stride[EXAMPLE_DSET_DIMS];
+ hsize_t count[EXAMPLE_DSET_DIMS];
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t file_dataspace = H5I_INVALID_HID;
+
+ /*
+ * ------------------------------------
+ * Setup Dataset Creation Property List
+ * ------------------------------------
+ */
+
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+
+ /*
+ * REQUIRED: Dataset chunking must be enabled to
+ * apply a data filter to the dataset.
+ * Chunks in the dataset are of size
+ * EXAMPLE_DSET_CHUNK_DIM_SIZE x EXAMPLE_DSET_CHUNK_DIM_SIZE.
+ */
+ chunk_dims[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ chunk_dims[1] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ H5Pset_chunk(dcpl_id, EXAMPLE_DSET_DIMS, chunk_dims);
+
+ /* Set filter to be applied to created datasets */
+ set_filter(dcpl_id);
+
+ /*
+ * ------------------------------------
+ * Define the dimensions of the dataset
+ * and create it
+ * ------------------------------------
+ */
+
+ /*
+ * Create a dataset composed of 4 chunks
+ * per MPI rank. The first dataset dimension
+ * scales according to the number of MPI ranks.
+ * The second dataset dimension stays fixed
+ * according to the chunk size.
+ */
+ dataset_dims[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE * mpi_size;
+ dataset_dims[1] = 4 * EXAMPLE_DSET_CHUNK_DIM_SIZE;
+
+ file_dataspace = H5Screate_simple(EXAMPLE_DSET_DIMS, dataset_dims, NULL);
+
+ /* Create the dataset */
+ dset_id = H5Dcreate2(file_id, EXAMPLE_DSET1_NAME, HDF5_DATATYPE, file_dataspace, H5P_DEFAULT, dcpl_id,
+ H5P_DEFAULT);
+
+ /*
+ * ------------------------------------
+ * Setup selection in the dataset for
+ * each MPI rank
+ * ------------------------------------
+ */
+
+ /*
+ * Each MPI rank's selection covers a
+ * single chunk in the first dataset
+ * dimension. Each MPI rank's selection
+ * covers 4 chunks in the second dataset
+ * dimension. This leads to each MPI rank
+ * writing to 4 chunks of the dataset.
+ */
+ start[0] = mpi_rank * EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ count[1] = 4 * EXAMPLE_DSET_CHUNK_DIM_SIZE;
+
+ H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
+
+ /*
+ * --------------------------------------
+ * Fill data buffer with MPI rank's rank
+ * value to make it easy to see which
+ * part of the dataset each rank wrote to
+ * --------------------------------------
+ */
+
+ fill_databuf(start, count, stride, &data[0][0]);
+
+ /*
+ * ---------------------------------
+ * Write to the dataset collectively
+ * ---------------------------------
+ */
+
+ H5Dwrite(dset_id, HDF5_DATATYPE, H5S_BLOCK, file_dataspace, dxpl_id, data);
+
+ /*
+ * --------------
+ * Close HDF5 IDs
+ * --------------
+ */
+
+ H5Sclose(file_dataspace);
+ H5Pclose(dcpl_id);
+ H5Dclose(dset_id);
+}
+
+/*
+ * Routine to write to a dataset in a fashion
+ * where every chunk in the dataset is written
+ * to by every MPI rank. This will generally
+ * give the worst performance as the MPI ranks
+ * will need the most amount of inter-process
+ * communication.
+ */
+static void
+write_dataset_overlap(hid_t file_id, hid_t dxpl_id)
+{
+ C_DATATYPE *data = NULL;
+ hsize_t dataset_dims[EXAMPLE_DSET_DIMS];
+ hsize_t chunk_dims[EXAMPLE_DSET_DIMS];
+ hsize_t start[EXAMPLE_DSET_DIMS];
+ hsize_t stride[EXAMPLE_DSET_DIMS];
+ hsize_t count[EXAMPLE_DSET_DIMS];
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t file_dataspace = H5I_INVALID_HID;
+
+ /*
+ * ------------------------------------
+ * Setup Dataset Creation Property List
+ * ------------------------------------
+ */
+
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+
+ /*
+ * REQUIRED: Dataset chunking must be enabled to
+ * apply a data filter to the dataset.
+ * Chunks in the dataset are of size
+ * mpi_size x EXAMPLE_DSET_CHUNK_DIM_SIZE.
+ */
+ chunk_dims[0] = mpi_size;
+ chunk_dims[1] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ H5Pset_chunk(dcpl_id, EXAMPLE_DSET_DIMS, chunk_dims);
+
+ /* Set filter to be applied to created datasets */
+ set_filter(dcpl_id);
+
+ /*
+ * ------------------------------------
+ * Define the dimensions of the dataset
+ * and create it
+ * ------------------------------------
+ */
+
+ /*
+ * Create a dataset composed of N chunks,
+ * where N is the number of MPI ranks. The
+ * first dataset dimension scales according
+ * to the number of MPI ranks. The second
+ * dataset dimension stays fixed according
+ * to the chunk size.
+ */
+ dataset_dims[0] = mpi_size * chunk_dims[0];
+ dataset_dims[1] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+
+ file_dataspace = H5Screate_simple(EXAMPLE_DSET_DIMS, dataset_dims, NULL);
+
+ /* Create the dataset */
+ dset_id = H5Dcreate2(file_id, EXAMPLE_DSET2_NAME, HDF5_DATATYPE, file_dataspace, H5P_DEFAULT, dcpl_id,
+ H5P_DEFAULT);
+
+ /*
+ * ------------------------------------
+ * Setup selection in the dataset for
+ * each MPI rank
+ * ------------------------------------
+ */
+
+ /*
+ * Each MPI rank's selection covers
+ * part of every chunk in the first
+ * dimension. Each MPI rank's selection
+ * covers all of every chunk in the
+ * second dimension. This leads to
+ * each MPI rank writing an equal
+ * amount of data to every chunk
+ * in the dataset.
+ */
+ start[0] = mpi_rank;
+ start[1] = 0;
+ stride[0] = chunk_dims[0];
+ stride[1] = 1;
+ count[0] = mpi_size;
+ count[1] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+
+ H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
+
+ /*
+ * --------------------------------------
+ * Fill data buffer with MPI rank's rank
+ * value to make it easy to see which
+ * part of the dataset each rank wrote to
+ * --------------------------------------
+ */
+
+ data = malloc(mpi_size * EXAMPLE_DSET_CHUNK_DIM_SIZE * sizeof(C_DATATYPE));
+
+ fill_databuf(start, count, stride, data);
+
+ /*
+ * ---------------------------------
+ * Write to the dataset collectively
+ * ---------------------------------
+ */
+
+ H5Dwrite(dset_id, HDF5_DATATYPE, H5S_BLOCK, file_dataspace, dxpl_id, data);
+
+ free(data);
+
+ /*
+ * --------------
+ * Close HDF5 IDs
+ * --------------
+ */
+
+ H5Sclose(file_dataspace);
+ H5Pclose(dcpl_id);
+ H5Dclose(dset_id);
+}
+
+int
+main(int argc, char **argv)
+{
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ char *par_prefix = NULL;
+ char filename[PATH_MAX];
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /*
+ * ----------------------------------
+ * Start parallel access to HDF5 file
+ * ----------------------------------
+ */
+
+ /* Setup File Access Property List with parallel I/O access */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(fapl_id, comm, info);
+
+ /*
+ * OPTIONAL: Set collective metadata reads on FAPL to allow
+ * parallel writes to filtered datasets to perform
+ * better at scale. While not strictly necessary,
+ * this is generally recommended.
+ */
+ H5Pset_all_coll_metadata_ops(fapl_id, true);
+
+ /*
+ * OPTIONAL: Set the latest file format version for HDF5 in
+ * order to gain access to different dataset chunk
+ * index types and better data encoding methods.
+ * While not strictly necessary, this is generally
+ * recommended.
+ */
+ H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+
+ /* Parse any parallel prefix and create filename */
+ par_prefix = getenv("HDF5_PARAPREFIX");
+
+ snprintf(filename, PATH_MAX, "%s%s%s", par_prefix ? par_prefix : "", par_prefix ? "/" : "", EXAMPLE_FILE);
+
+ /* Create HDF5 file */
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+
+ /*
+ * --------------------------------------
+ * Setup Dataset Transfer Property List
+ * with collective I/O
+ * --------------------------------------
+ */
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+
+ /*
+ * REQUIRED: Setup collective I/O for the dataset
+ * write operations. Parallel writes to
+ * filtered datasets MUST be collective,
+ * even if some ranks have no data to
+ * contribute to the write operation.
+ *
+ * Refer to the 'ph5_filtered_writes_no_sel'
+ * example to see how to setup a dataset
+ * write when one or more MPI ranks have
+ * no data to contribute to the write
+ * operation.
+ */
+ H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
+
+ /*
+ * --------------------------------
+ * Create and write to each dataset
+ * --------------------------------
+ */
+
+ /*
+ * Write to a dataset in a fashion where no
+ * chunks in the dataset are written to by
+ * more than 1 MPI rank. This will generally
+ * give the best performance as the MPI ranks
+ * will need the least amount of inter-process
+ * communication.
+ */
+ write_dataset_no_overlap(file_id, dxpl_id);
+
+ /*
+ * Write to a dataset in a fashion where
+ * every chunk in the dataset is written
+ * to by every MPI rank. This will generally
+ * give the worst performance as the MPI ranks
+ * will need the most amount of inter-process
+ * communication.
+ */
+ write_dataset_overlap(file_id, dxpl_id);
+
+ /*
+ * ------------------
+ * Close all HDF5 IDs
+ * ------------------
+ */
+
+ H5Pclose(dxpl_id);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+
+ printf("PHDF5 example finished with no errors\n");
+
+ /*
+ * ------------------------------------
+ * Cleanup created HDF5 file and finish
+ * ------------------------------------
+ */
+
+ cleanup(filename);
+
+ MPI_Finalize();
+
+ return 0;
+}
+
+#else
+
+int
+main(void)
+{
+ printf("HDF5 not configured with parallel support or parallel filtered writes are disabled!\n");
+ return 0;
+}
+
+#endif
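
One way to check whether the deflate filter actually reduced the written data (a sketch for illustration, not part of the commit) is to query the allocated storage after the collective write, for example inside write_dataset_no_overlap before dset_id is closed:

    /* Report how many bytes the filtered dataset occupies on disk */
    hsize_t storage_size = H5Dget_storage_size(dset_id);

    if (mpi_rank == 0)
        printf("DSET1 allocated storage: %llu bytes (unfiltered buffer is %llu bytes per rank)\n",
               (unsigned long long)storage_size, (unsigned long long)sizeof(data));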
diff --git a/HDF5Examples/C/H5PAR/ph5_filtered_writes_no_sel.c b/HDF5Examples/C/H5PAR/ph5_filtered_writes_no_sel.c
new file mode 100644
index 0000000..a4d9e16
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/ph5_filtered_writes_no_sel.c
@@ -0,0 +1,369 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Example of using the parallel HDF5 library to collectively write to
+ * datasets with filters applied to them when one or more MPI ranks do not
+ * have data to contribute to the dataset.
+ *
+ * If the HDF5_NOCLEANUP environment variable is set, the file that
+ * this example creates will not be removed as the example finishes.
+ *
+ * A parallel file prefix is needed because, in general, the current
+ * working directory in which compilation is done is not suitable for
+ * parallel I/O and there is no standard pathname for parallel file
+ * systems. In some cases, the parallel file name may even need a
+ * parallel file type prefix such as: "pfs:/GF/...". Therefore, this
+ * example parses the HDF5_PARAPREFIX environment variable for a prefix,
+ * if one is needed.
+ */
+
+#include <stdlib.h>
+
+#include "hdf5.h"
+
+#if defined(H5_HAVE_PARALLEL) && defined(H5_HAVE_PARALLEL_FILTERED_WRITES)
+
+#define EXAMPLE_FILE "ph5_filtered_writes_no_sel.h5"
+#define EXAMPLE_DSET_NAME "DSET"
+
+#define EXAMPLE_DSET_DIMS 2
+#define EXAMPLE_DSET_CHUNK_DIM_SIZE 10
+
+/* Dataset datatype */
+#define HDF5_DATATYPE H5T_NATIVE_INT
+typedef int C_DATATYPE;
+
+#ifndef PATH_MAX
+#define PATH_MAX 512
+#endif
+
+/* Global variables */
+int mpi_rank, mpi_size;
+
+/*
+ * Routine to set an HDF5 filter on the given DCPL
+ */
+static void
+set_filter(hid_t dcpl_id)
+{
+ htri_t filter_avail;
+
+ /*
+ * Check if 'deflate' filter is available
+ */
+ filter_avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE);
+ if (filter_avail < 0)
+ return;
+ else if (filter_avail) {
+ /*
+ * Set 'deflate' filter with reasonable
+ * compression level on DCPL
+ */
+ H5Pset_deflate(dcpl_id, 6);
+ }
+ else {
+ /*
+ * Set Fletcher32 checksum filter on DCPL
+ * since it is always available in HDF5
+ */
+ H5Pset_fletcher32(dcpl_id);
+ }
+}
+
+/*
+ * Routine to fill a data buffer with data. Assumes
+ * dimension rank is 2 and data is stored contiguous.
+ */
+void
+fill_databuf(hsize_t start[], hsize_t count[], hsize_t stride[], C_DATATYPE *data)
+{
+ C_DATATYPE *dataptr = data;
+ hsize_t i, j;
+
+ /* Use MPI rank value for data */
+ for (i = 0; i < count[0]; i++) {
+ for (j = 0; j < count[1]; j++) {
+ *dataptr++ = mpi_rank;
+ }
+ }
+}
+
+/* Cleanup created file */
+static void
+cleanup(char *filename)
+{
+ hbool_t do_cleanup = getenv("HDF5_NOCLEANUP") ? 0 : 1;
+
+ if (do_cleanup)
+ MPI_File_delete(filename, MPI_INFO_NULL);
+}
+
+/*
+ * Routine to write to a dataset in a fashion
+ * where no chunks in the dataset are written
+ * to by more than 1 MPI rank. This will
+ * generally give the best performance as the
+ * MPI ranks will need the least amount of
+ * inter-process communication.
+ */
+static void
+write_dataset_some_no_sel(hid_t file_id, hid_t dxpl_id)
+{
+ C_DATATYPE data[EXAMPLE_DSET_CHUNK_DIM_SIZE][4 * EXAMPLE_DSET_CHUNK_DIM_SIZE];
+ hsize_t dataset_dims[EXAMPLE_DSET_DIMS];
+ hsize_t chunk_dims[EXAMPLE_DSET_DIMS];
+ hsize_t start[EXAMPLE_DSET_DIMS];
+ hsize_t stride[EXAMPLE_DSET_DIMS];
+ hsize_t count[EXAMPLE_DSET_DIMS];
+ hbool_t no_selection;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t file_dataspace = H5I_INVALID_HID;
+
+ /*
+ * ------------------------------------
+ * Setup Dataset Creation Property List
+ * ------------------------------------
+ */
+
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+
+ /*
+ * REQUIRED: Dataset chunking must be enabled to
+ * apply a data filter to the dataset.
+ * Chunks in the dataset are of size
+ * EXAMPLE_DSET_CHUNK_DIM_SIZE x EXAMPLE_DSET_CHUNK_DIM_SIZE.
+ */
+ chunk_dims[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ chunk_dims[1] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ H5Pset_chunk(dcpl_id, EXAMPLE_DSET_DIMS, chunk_dims);
+
+ /* Set filter to be applied to created datasets */
+ set_filter(dcpl_id);
+
+ /*
+ * ------------------------------------
+ * Define the dimensions of the dataset
+ * and create it
+ * ------------------------------------
+ */
+
+ /*
+ * Create a dataset composed of 4 chunks
+ * per MPI rank. The first dataset dimension
+ * scales according to the number of MPI ranks.
+ * The second dataset dimension stays fixed
+ * according to the chunk size.
+ */
+ dataset_dims[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE * mpi_size;
+ dataset_dims[1] = 4 * EXAMPLE_DSET_CHUNK_DIM_SIZE;
+
+ file_dataspace = H5Screate_simple(EXAMPLE_DSET_DIMS, dataset_dims, NULL);
+
+ /* Create the dataset */
+ dset_id = H5Dcreate2(file_id, EXAMPLE_DSET_NAME, HDF5_DATATYPE, file_dataspace, H5P_DEFAULT, dcpl_id,
+ H5P_DEFAULT);
+
+ /*
+ * ------------------------------------
+ * Setup selection in the dataset for
+ * each MPI rank
+ * ------------------------------------
+ */
+
+ /*
+ * Odd rank value MPI ranks do not
+ * contribute any data to the dataset.
+ */
+ no_selection = (mpi_rank % 2) == 1;
+
+ if (no_selection) {
+ /*
+ * MPI ranks not contributing data to
+ * the dataset should call H5Sselect_none
+ * on the file dataspace that will be
+ * passed to H5Dwrite.
+ */
+ H5Sselect_none(file_dataspace);
+ }
+ else {
+ /*
+ * Even MPI ranks contribute data to
+ * the dataset. Each MPI rank's selection
+ * covers a single chunk in the first dataset
+ * dimension. Each MPI rank's selection
+ * covers 4 chunks in the second dataset
+ * dimension. This leads to each contributing
+ * MPI rank writing to 4 chunks of the dataset.
+ */
+ start[0] = mpi_rank * EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ count[1] = 4 * EXAMPLE_DSET_CHUNK_DIM_SIZE;
+
+ H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
+
+ /*
+ * --------------------------------------
+ * Fill data buffer with MPI rank's rank
+ * value to make it easy to see which
+ * part of the dataset each rank wrote to
+ * --------------------------------------
+ */
+
+ fill_databuf(start, count, stride, &data[0][0]);
+ }
+
+ /*
+ * ---------------------------------
+ * Write to the dataset collectively
+ * ---------------------------------
+ */
+
+ H5Dwrite(dset_id, HDF5_DATATYPE, no_selection ? H5S_ALL : H5S_BLOCK, file_dataspace, dxpl_id, data);
+
+ /*
+ * --------------
+ * Close HDF5 IDs
+ * --------------
+ */
+
+ H5Sclose(file_dataspace);
+ H5Pclose(dcpl_id);
+ H5Dclose(dset_id);
+}
+
+int
+main(int argc, char **argv)
+{
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ char *par_prefix = NULL;
+ char filename[PATH_MAX];
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /*
+ * ----------------------------------
+ * Start parallel access to HDF5 file
+ * ----------------------------------
+ */
+
+ /* Setup File Access Property List with parallel I/O access */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(fapl_id, comm, info);
+
+ /*
+ * OPTIONAL: Set collective metadata reads on FAPL to allow
+ * parallel writes to filtered datasets to perform
+ * better at scale. While not strictly necessary,
+ * this is generally recommended.
+ */
+ H5Pset_all_coll_metadata_ops(fapl_id, true);
+
+ /*
+ * OPTIONAL: Set the latest file format version for HDF5 in
+ * order to gain access to different dataset chunk
+ * index types and better data encoding methods.
+ * While not strictly necessary, this is generally
+ * recommended.
+ */
+ H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+
+ /* Parse any parallel prefix and create filename */
+ par_prefix = getenv("HDF5_PARAPREFIX");
+
+ snprintf(filename, PATH_MAX, "%s%s%s", par_prefix ? par_prefix : "", par_prefix ? "/" : "", EXAMPLE_FILE);
+
+ /* Create HDF5 file */
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+
+ /*
+ * --------------------------------------
+ * Setup Dataset Transfer Property List
+ * with collective I/O
+ * --------------------------------------
+ */
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+
+ /*
+ * REQUIRED: Setup collective I/O for the dataset
+ * write operations. Parallel writes to
+ * filtered datasets MUST be collective,
+ * even if some ranks have no data to
+ * contribute to the write operation.
+ */
+ H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
+
+ /*
+ * --------------------------------
+ * Create and write to the dataset
+ * --------------------------------
+ */
+
+ /*
+ * Write to a dataset in a fashion where no
+ * chunks in the dataset are written to by
+ * more than 1 MPI rank and some MPI ranks
+ * have nothing to contribute to the dataset.
+ * In this case, the MPI ranks that have no
+ * data to contribute must still participate
+ * in the collective H5Dwrite call, but should
+ * call H5Sselect_none on the file dataspace
+ * passed to the H5Dwrite call.
+ */
+ write_dataset_some_no_sel(file_id, dxpl_id);
+
+ /*
+ * ------------------
+ * Close all HDF5 IDs
+ * ------------------
+ */
+
+ H5Pclose(dxpl_id);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+
+ printf("PHDF5 example finished with no errors\n");
+
+ /*
+ * ------------------------------------
+ * Cleanup created HDF5 file and finish
+ * ------------------------------------
+ */
+
+ cleanup(filename);
+
+ MPI_Finalize();
+
+ return 0;
+}
+
+#else
+
+int
+main(void)
+{
+ printf("HDF5 not configured with parallel support or parallel filtered writes are disabled!\n");
+ return 0;
+}
+
+#endif
diff --git a/HDF5Examples/C/H5PAR/ph5_hyperslab_by_chunk.c b/HDF5Examples/C/H5PAR/ph5_hyperslab_by_chunk.c
new file mode 100644
index 0000000..a255b96
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/ph5_hyperslab_by_chunk.c
@@ -0,0 +1,157 @@
+/*
+ * This example writes a dataset using chunking. Each process writes
+ * exactly one chunk.
+ * - |
+ * * V
+ * Number of processes is assumed to be 4.
+ */
+
+#include "hdf5.h"
+#include "stdlib.h"
+
+#define H5FILE_NAME "SDS_chnk.h5"
+#define DATASETNAME "IntArray"
+#define NX 8 /* dataset dimensions */
+#define NY 4
+#define CH_NX 4 /* chunk dimensions */
+#define CH_NY 2
+#define RANK 2
+
+int
+main(int argc, char **argv)
+{
+ /*
+ * HDF5 APIs definitions
+ */
+ hid_t file_id, dset_id; /* file and dataset identifiers */
+ hid_t filespace, memspace; /* file and memory dataspace identifiers */
+ hsize_t dimsf[2]; /* dataset dimensions */
+ hsize_t chunk_dims[2]; /* chunk dimensions */
+ int *data; /* pointer to data buffer to write */
+ hsize_t count[2]; /* hyperslab selection parameters */
+ hsize_t stride[2];
+ hsize_t block[2];
+ hsize_t offset[2];
+ hid_t plist_id; /* property list identifier */
+ int i;
+ herr_t status;
+
+ /*
+ * MPI variables
+ */
+ int mpi_size, mpi_rank;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ /*
+ * Initialize MPI
+ */
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+ /*
+ * Exit if number of processes is not 4.
+ */
+ if (mpi_size != 4) {
+ printf("This example to set up to use only 4 processes \n");
+ printf("Quitting...\n");
+ return 0;
+ }
+
+ /*
+ * Set up file access property list with parallel I/O access
+ */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(plist_id, comm, info);
+
+ /*
+ * Create a new file collectively and release property list identifier.
+ */
+ file_id = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
+ H5Pclose(plist_id);
+
+ /*
+ * Create the dataspace for the dataset.
+ */
+ dimsf[0] = NX;
+ dimsf[1] = NY;
+ chunk_dims[0] = CH_NX;
+ chunk_dims[1] = CH_NY;
+ filespace = H5Screate_simple(RANK, dimsf, NULL);
+ memspace = H5Screate_simple(RANK, chunk_dims, NULL);
+
+ /*
+ * Create chunked dataset.
+ */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ H5Pset_chunk(plist_id, RANK, chunk_dims);
+ dset_id = H5Dcreate(file_id, DATASETNAME, H5T_NATIVE_INT, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ H5Pclose(plist_id);
+ H5Sclose(filespace);
+
+ /*
+ * Each process defines dataset in memory and writes it to the hyperslab
+ * in the file.
+ */
+ count[0] = 1;
+ count[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ block[0] = chunk_dims[0];
+ block[1] = chunk_dims[1];
+ if (mpi_rank == 0) {
+ offset[0] = 0;
+ offset[1] = 0;
+ }
+ if (mpi_rank == 1) {
+ offset[0] = 0;
+ offset[1] = chunk_dims[1];
+ }
+ if (mpi_rank == 2) {
+ offset[0] = chunk_dims[0];
+ offset[1] = 0;
+ }
+ if (mpi_rank == 3) {
+ offset[0] = chunk_dims[0];
+ offset[1] = chunk_dims[1];
+ }
+
+ /*
+ * Select hyperslab in the file.
+ */
+ filespace = H5Dget_space(dset_id);
+ status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block);
+
+ /*
+ * Initialize data buffer
+ */
+ data = (int *)malloc(sizeof(int) * chunk_dims[0] * chunk_dims[1]);
+ for (i = 0; i < (int)chunk_dims[0] * chunk_dims[1]; i++) {
+ data[i] = mpi_rank + 1;
+ }
+
+ /*
+ * Create property list for collective dataset write.
+ */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+
+ status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace, plist_id, data);
+ free(data);
+
+ /*
+ * Close/release resources.
+ */
+ H5Dclose(dset_id);
+ H5Sclose(filespace);
+ H5Sclose(memspace);
+ H5Pclose(plist_id);
+ H5Fclose(file_id);
+
+ if (mpi_rank == 0)
+ printf("PHDF5 example finished with no errors\n");
+
+ MPI_Finalize();
+
+ return 0;
+}
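
The four if-blocks above spell out each rank's chunk offset in the 2 x 2 grid of chunks. An equivalent arithmetic form (a sketch for illustration, not part of the commit) would be:

    /*
     * Ranks map row-major onto the 2 x 2 chunk grid:
     * rank 0 -> (0, 0),      rank 1 -> (0, CH_NY),
     * rank 2 -> (CH_NX, 0),  rank 3 -> (CH_NX, CH_NY).
     */
    offset[0] = (hsize_t)(mpi_rank / 2) * CH_NX;
    offset[1] = (hsize_t)(mpi_rank % 2) * CH_NY;

The explicit branches arguably read more clearly for exactly four processes; the arithmetic form generalizes more easily.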
diff --git a/HDF5Examples/C/H5PAR/ph5_hyperslab_by_col.c b/HDF5Examples/C/H5PAR/ph5_hyperslab_by_col.c
new file mode 100644
index 0000000..b397fcf
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/ph5_hyperslab_by_col.c
@@ -0,0 +1,140 @@
+/*
+ * This example writes data to the HDF5 file by columns.
+ * Number of processes is assumed to be 2.
+ */
+
+#include "hdf5.h"
+#include "stdlib.h"
+
+#define H5FILE_NAME "SDS_col.h5"
+#define DATASETNAME "IntArray"
+#define NX 8 /* dataset dimensions */
+#define NY 6
+#define RANK 2
+
+int
+main(int argc, char **argv)
+{
+ /*
+ * HDF5 APIs definitions
+ */
+ hid_t file_id, dset_id; /* file and dataset identifiers */
+ hid_t filespace, memspace; /* file and memory dataspace identifiers */
+ hsize_t dimsf[2]; /* dataset dimensions */
+ hsize_t dimsm[2]; /* dataset dimensions */
+ int *data; /* pointer to data buffer to write */
+ hsize_t count[2]; /* hyperslab selection parameters */
+ hsize_t stride[2];
+ hsize_t block[2];
+ hsize_t offset[2];
+ hid_t plist_id; /* property list identifier */
+ int i, j, k;
+ herr_t status;
+
+ /*
+ * MPI variables
+ */
+ int mpi_size, mpi_rank;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ /*
+ * Initialize MPI
+ */
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+ /*
+ * Exit if number of processes is not 2
+ */
+ if (mpi_size != 2) {
+ printf("This example to set up to use only 2 processes \n");
+ printf("Quitting...\n");
+ return 0;
+ }
+
+ /*
+ * Set up file access property list with parallel I/O access
+ */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(plist_id, comm, info);
+
+ /*
+ * Create a new file collectively and release property list identifier.
+ */
+ file_id = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
+ H5Pclose(plist_id);
+
+ /*
+ * Create the dataspace for the dataset.
+ */
+ dimsf[0] = NX;
+ dimsf[1] = NY;
+ dimsm[0] = NX;
+ dimsm[1] = NY / 2;
+ filespace = H5Screate_simple(RANK, dimsf, NULL);
+ memspace = H5Screate_simple(RANK, dimsm, NULL);
+
+ /*
+ * Create the dataset with default properties and close filespace.
+ */
+ dset_id =
+ H5Dcreate(file_id, DATASETNAME, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ H5Sclose(filespace);
+
+ /*
+ * Each process defines dataset in memory and writes it to the hyperslab
+ * in the file.
+ */
+ count[0] = 1;
+ count[1] = dimsm[1];
+ offset[0] = 0;
+ offset[1] = mpi_rank;
+ stride[0] = 1;
+ stride[1] = 2;
+ block[0] = dimsf[0];
+ block[1] = 1;
+
+ /*
+ * Select hyperslab in the file.
+ */
+ filespace = H5Dget_space(dset_id);
+ H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block);
+
+ /*
+ * Initialize data buffer
+ */
+ data = (int *)malloc(sizeof(int) * (size_t)dimsm[0] * (size_t)dimsm[1]);
+ for (i = 0; i < dimsm[0] * dimsm[1]; i = i + dimsm[1]) {
+ k = 1;
+ for (j = 0; j < dimsm[1]; j++) {
+ data[i + j] = (mpi_rank + 1) * k;
+ k = k * 10;
+ }
+ }
+
+ /*
+ * Create property list for collective dataset write.
+ */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+
+ status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace, plist_id, data);
+ free(data);
+
+ /*
+ * Close/release resources.
+ */
+ H5Dclose(dset_id);
+ H5Sclose(filespace);
+ H5Sclose(memspace);
+ H5Pclose(plist_id);
+ H5Fclose(file_id);
+
+ if (mpi_rank == 0)
+ printf("PHDF5 example finished with no errors\n");
+
+ MPI_Finalize();
+
+ return 0;
+}
diff --git a/HDF5Examples/C/H5PAR/ph5_hyperslab_by_pattern.c b/HDF5Examples/C/H5PAR/ph5_hyperslab_by_pattern.c
new file mode 100644
index 0000000..77f3bef
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/ph5_hyperslab_by_pattern.c
@@ -0,0 +1,152 @@
+/*
+ * This example writes data to the HDF5 file following some pattern
+ * - | - | ......
+ * * V * V ......
+ * - | - | ......
+ * * V * V ......
+ * ..............
+ * Number of processes is assumed to be 4.
+ */
+
+#include "hdf5.h"
+#include "stdlib.h"
+
+#define H5FILE_NAME "SDS_pat.h5"
+#define DATASETNAME "IntArray"
+#define NX 8 /* dataset dimensions */
+#define NY 4
+#define RANK 2
+#define RANK1 1
+
+int
+main(int argc, char **argv)
+{
+ /*
+ * HDF5 APIs definitions
+ */
+ hid_t file_id, dset_id; /* file and dataset identifiers */
+ hid_t filespace, memspace; /* file and memory dataspace identifiers */
+ hsize_t dimsf[2]; /* dataset dimensions */
+ hsize_t dimsm[1]; /* dataset dimensions */
+ int *data; /* pointer to data buffer to write */
+ hsize_t count[2]; /* hyperslab selection parameters */
+ hsize_t stride[2];
+ hsize_t offset[2];
+ hid_t plist_id; /* property list identifier */
+ int i;
+ herr_t status;
+
+ /*
+ * MPI variables
+ */
+ int mpi_size, mpi_rank;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ /*
+ * Initialize MPI
+ */
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+ /*
+ * Exit if number of processes is not 4.
+ */
+ if (mpi_size != 4) {
+ printf("This example to set up to use only 4 processes \n");
+ printf("Quitting...\n");
+ return 0;
+ }
+
+ /*
+ * Set up file access property list with parallel I/O access
+ */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(plist_id, comm, info);
+
+ /*
+ * Create a new file collectively and release property list identifier.
+ */
+ file_id = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
+ H5Pclose(plist_id);
+
+ /*
+ * Create the dataspace for the dataset.
+ */
+ dimsf[0] = NX;
+ dimsf[1] = NY;
+ dimsm[0] = NX;
+ filespace = H5Screate_simple(RANK, dimsf, NULL);
+ memspace = H5Screate_simple(RANK1, dimsm, NULL);
+
+ /*
+ * Create the dataset with default properties and close filespace.
+ */
+ dset_id =
+ H5Dcreate(file_id, DATASETNAME, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ H5Sclose(filespace);
+
+ /*
+ * Each process defines dataset in memory and writes it to the hyperslab
+ * in the file.
+ */
+ count[0] = 4;
+ count[1] = 2;
+ stride[0] = 2;
+ stride[1] = 2;
+ if (mpi_rank == 0) {
+ offset[0] = 0;
+ offset[1] = 0;
+ }
+ if (mpi_rank == 1) {
+ offset[0] = 1;
+ offset[1] = 0;
+ }
+ if (mpi_rank == 2) {
+ offset[0] = 0;
+ offset[1] = 1;
+ }
+ if (mpi_rank == 3) {
+ offset[0] = 1;
+ offset[1] = 1;
+ }
+
+ /*
+ * Select hyperslab in the file.
+ */
+ filespace = H5Dget_space(dset_id);
+ status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, NULL);
+
+ /*
+ * Initialize data buffer
+ */
+ data = (int *)malloc(sizeof(int) * dimsm[0]);
+ for (i = 0; i < (int)dimsm[0]; i++) {
+ data[i] = mpi_rank + 1;
+ }
+
+ /*
+ * Create property list for collective dataset write.
+ */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+
+ status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace, plist_id, data);
+ free(data);
+
+ /*
+ * Close/release resources.
+ */
+ H5Dclose(dset_id);
+ H5Sclose(filespace);
+ H5Sclose(memspace);
+ H5Pclose(plist_id);
+ H5Fclose(file_id);
+
+ if (mpi_rank == 0)
+ printf("PHDF5 example finished with no errors\n");
+
+ MPI_Finalize();
+
+ return 0;
+}
diff --git a/HDF5Examples/C/H5PAR/ph5_hyperslab_by_row.c b/HDF5Examples/C/H5PAR/ph5_hyperslab_by_row.c
new file mode 100644
index 0000000..5035786
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/ph5_hyperslab_by_row.c
@@ -0,0 +1,119 @@
+/*
+ * This example writes data to the HDF5 file by rows.
+ * Number of processes is assumed to be 1 or a multiple of 2 (up to 8)
+ */
+
+#include "hdf5.h"
+#include "stdlib.h"
+
+#define H5FILE_NAME "SDS_row.h5"
+#define DATASETNAME "IntArray"
+#define NX 8 /* dataset dimensions */
+#define NY 5
+#define RANK 2
+
+int
+main(int argc, char **argv)
+{
+ /*
+ * HDF5 APIs definitions
+ */
+ hid_t file_id, dset_id; /* file and dataset identifiers */
+ hid_t filespace, memspace; /* file and memory dataspace identifiers */
+ hsize_t dimsf[2]; /* dataset dimensions */
+ int *data; /* pointer to data buffer to write */
+ hsize_t count[2]; /* hyperslab selection parameters */
+ hsize_t offset[2];
+ hid_t plist_id; /* property list identifier */
+ int i;
+ herr_t status;
+
+ /*
+ * MPI variables
+ */
+ int mpi_size, mpi_rank;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ /*
+ * Initialize MPI
+ */
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /*
+ * Set up file access property list with parallel I/O access
+ */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(plist_id, comm, info);
+
+ /*
+ * Create a new file collectively and release property list identifier.
+ */
+ file_id = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
+ H5Pclose(plist_id);
+
+ /*
+ * Create the dataspace for the dataset.
+ */
+ dimsf[0] = NX;
+ dimsf[1] = NY;
+ filespace = H5Screate_simple(RANK, dimsf, NULL);
+
+ /*
+ * Create the dataset with default properties and close filespace.
+ */
+ dset_id =
+ H5Dcreate(file_id, DATASETNAME, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ H5Sclose(filespace);
+
+ /*
+ * Each process defines dataset in memory and writes it to the hyperslab
+ * in the file.
+ */
+ count[0] = dimsf[0] / mpi_size;
+ count[1] = dimsf[1];
+ offset[0] = mpi_rank * count[0];
+ offset[1] = 0;
+ memspace = H5Screate_simple(RANK, count, NULL);
+
+ /*
+ * Select hyperslab in the file.
+ */
+ filespace = H5Dget_space(dset_id);
+ H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, count, NULL);
+
+ /*
+ * Initialize data buffer
+ */
+ data = (int *)malloc(sizeof(int) * count[0] * count[1]);
+ for (i = 0; i < count[0] * count[1]; i++) {
+ data[i] = mpi_rank + 10;
+ }
+
+ /*
+ * Create property list for collective dataset write.
+ */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+
+ status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace, plist_id, data);
+ free(data);
+
+ /*
+ * Close/release resources.
+ */
+ H5Dclose(dset_id);
+ H5Sclose(filespace);
+ H5Sclose(memspace);
+ H5Pclose(plist_id);
+ H5Fclose(file_id);
+
+ if (mpi_rank == 0)
+ printf("PHDF5 example finished with no errors\n");
+
+ MPI_Finalize();
+
+ return 0;
+}
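
To make the row decomposition above concrete: assuming 4 processes (an illustrative value that satisfies the "1 or a multiple of 2, up to 8" note), count[0] = 8 / 4 = 2 and count[1] = 5, so each rank writes a contiguous 2 x 5 block of rows:

    rank 0: offset = {0, 0}  -> rows 0-1
    rank 1: offset = {2, 0}  -> rows 2-3
    rank 2: offset = {4, 0}  -> rows 4-5
    rank 3: offset = {6, 0}  -> rows 6-7

Any divisor of NX = 8 decomposes the dataset the same way.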
diff --git a/HDF5Examples/C/H5PAR/ph5_subfiling.c b/HDF5Examples/C/H5PAR/ph5_subfiling.c
new file mode 100644
index 0000000..7d72448
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/ph5_subfiling.c
@@ -0,0 +1,551 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Example of using HDF5's Subfiling VFD to write to an
+ * HDF5 file that is striped across multiple subfiles
+ *
+ * If the HDF5_NOCLEANUP environment variable is set, the
+ * files that this example creates will not be removed as
+ * the example finishes.
+ *
+ * In general, the current working directory in which compilation
+ * is done is not suitable for parallel I/O, and there is no
+ * standard pathname for parallel file systems. In some cases,
+ * the parallel file name may even need some parallel file type
+ * prefix such as: "pfs:/GF/...". Therefore, this example parses
+ * the HDF5_PARAPREFIX environment variable for a prefix, if one
+ * is needed.
+ */
+
+#include <stdlib.h>
+
+#include "hdf5.h"
+
+#if defined(H5_HAVE_PARALLEL) && defined(H5_HAVE_SUBFILING_VFD)
+
+#define EXAMPLE_FILE "h5_subfiling_default_example.h5"
+#define EXAMPLE_FILE2 "h5_subfiling_custom_example.h5"
+#define EXAMPLE_FILE3 "h5_subfiling_precreate_example.h5"
+
+#define EXAMPLE_DSET_NAME "DSET"
+#define EXAMPLE_DSET_DIMS 2
+
+/* Have each MPI rank write 16MiB of data */
+#define EXAMPLE_DSET_NY 4194304
+
+/* Dataset datatype */
+#define EXAMPLE_DSET_DATATYPE H5T_NATIVE_INT
+typedef int EXAMPLE_DSET_C_DATATYPE;
+
+/* Cleanup created files */
+static void
+cleanup(char *filename, hid_t fapl_id)
+{
+ hbool_t do_cleanup = getenv("HDF5_NOCLEANUP") ? 0 : 1;
+
+ if (do_cleanup)
+ H5Fdelete(filename, fapl_id);
+}
+
+/*
+ * An example of using the HDF5 Subfiling VFD with
+ * its default settings of 1 subfile per node, with
+ * a stripe size of 32MiB
+ */
+static void
+subfiling_write_default(hid_t fapl_id, int mpi_size, int mpi_rank)
+{
+ EXAMPLE_DSET_C_DATATYPE *data;
+ hsize_t dset_dims[EXAMPLE_DSET_DIMS];
+ hsize_t start[EXAMPLE_DSET_DIMS];
+ hsize_t count[EXAMPLE_DSET_DIMS];
+ hid_t file_id;
+ hid_t subfiling_fapl;
+ hid_t dset_id;
+ hid_t filespace;
+ char filename[512];
+ char *par_prefix;
+
+ /*
+ * Make a copy of the FAPL so we don't disturb
+ * it for the other examples
+ */
+ subfiling_fapl = H5Pcopy(fapl_id);
+
+ /*
+ * Set Subfiling VFD on FAPL using default settings
+ * (use IOC VFD, 1 IOC per node, 32MiB stripe size)
+ *
+ * Note that all of Subfiling's configuration settings
+ * can be adjusted with environment variables as well
+ * in this case.
+ */
+ H5Pset_fapl_subfiling(subfiling_fapl, NULL);
+
+ /*
+ * OPTIONAL: Set alignment of objects in HDF5 file to
+ * be equal to the Subfiling stripe size.
+ * Choosing a Subfiling stripe size and HDF5
+ * object alignment value that are some
+ * multiple of the disk block size can
+ * generally help performance by ensuring
+ * that I/O is well-aligned and doesn't
+ * excessively cross stripe boundaries.
+ *
+ * Note that this option can substantially
+ * increase the size of the resulting HDF5
+ * files, so it is a good idea to keep an eye
+ * on this.
+ */
+ H5Pset_alignment(subfiling_fapl, 0, 33554432); /* Align to default 32MiB stripe size */
+
+ /* Parse any parallel prefix and create filename */
+ par_prefix = getenv("HDF5_PARAPREFIX");
+
+ snprintf(filename, sizeof(filename), "%s%s%s", par_prefix ? par_prefix : "", par_prefix ? "/" : "",
+ EXAMPLE_FILE);
+
+ /*
+ * Create a new file collectively
+ */
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, subfiling_fapl);
+
+ /*
+ * Create the dataspace for the dataset. The first
+ * dimension varies with the number of MPI ranks
+ * while the second dimension is fixed.
+ */
+ dset_dims[0] = mpi_size;
+ dset_dims[1] = EXAMPLE_DSET_NY;
+ filespace = H5Screate_simple(EXAMPLE_DSET_DIMS, dset_dims, NULL);
+
+ /*
+ * Create the dataset with default properties
+ */
+ dset_id = H5Dcreate2(file_id, EXAMPLE_DSET_NAME, EXAMPLE_DSET_DATATYPE, filespace, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
+
+ /*
+ * Each MPI rank writes from a contiguous memory
+ * region to the hyperslab in the file
+ */
+ start[0] = mpi_rank;
+ start[1] = 0;
+ count[0] = 1;
+ count[1] = dset_dims[1];
+ H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, NULL, count, NULL);
+
+ /*
+ * Initialize data buffer
+ */
+ data = malloc(count[0] * count[1] * sizeof(EXAMPLE_DSET_C_DATATYPE));
+ for (size_t i = 0; i < count[0] * count[1]; i++) {
+ data[i] = mpi_rank + i;
+ }
+
+ /*
+ * Write to dataset
+ */
+ H5Dwrite(dset_id, EXAMPLE_DSET_DATATYPE, H5S_BLOCK, filespace, H5P_DEFAULT, data);
+
+ /*
+ * Close/release resources.
+ */
+
+ free(data);
+
+ H5Dclose(dset_id);
+ H5Sclose(filespace);
+ H5Fclose(file_id);
+
+ cleanup(EXAMPLE_FILE, subfiling_fapl);
+
+ H5Pclose(subfiling_fapl);
+}
+
+/*
+ * An example of using the HDF5 Subfiling VFD with
+ * custom settings
+ */
+static void
+subfiling_write_custom(hid_t fapl_id, int mpi_size, int mpi_rank)
+{
+ EXAMPLE_DSET_C_DATATYPE *data;
+ H5FD_subfiling_config_t subf_config;
+ H5FD_ioc_config_t ioc_config;
+ hsize_t dset_dims[EXAMPLE_DSET_DIMS];
+ hsize_t start[EXAMPLE_DSET_DIMS];
+ hsize_t count[EXAMPLE_DSET_DIMS];
+ hid_t file_id;
+ hid_t subfiling_fapl;
+ hid_t dset_id;
+ hid_t filespace;
+ char filename[512];
+ char *par_prefix;
+
+ /*
+ * Make a copy of the FAPL so we don't disturb
+ * it for the other examples
+ */
+ subfiling_fapl = H5Pcopy(fapl_id);
+
+ /*
+ * Get a default Subfiling and IOC configuration
+ */
+ H5Pget_fapl_subfiling(subfiling_fapl, &subf_config);
+ H5Pget_fapl_ioc(subfiling_fapl, &ioc_config);
+
+ /*
+ * Set Subfiling configuration to use a 1MiB
+ * stripe size and the SELECT_IOC_EVERY_NTH_RANK
+ * selection method. By default, without a setting
+ * in the H5FD_SUBFILING_IOC_SELECTION_CRITERIA
+ * environment variable, this will use every MPI
+ * rank as an I/O concentrator.
+ */
+ subf_config.shared_cfg.stripe_size = 1048576;
+ subf_config.shared_cfg.ioc_selection = SELECT_IOC_EVERY_NTH_RANK;
+
+ /*
+ * Set IOC configuration to use 2 worker threads
+ * per IOC instead of the default setting and
+ * update IOC configuration with new subfiling
+ * configuration.
+ */
+ ioc_config.thread_pool_size = 2;
+
+ /*
+ * Set our new configuration on the IOC
+ * FAPL used for Subfiling
+ */
+ H5Pset_fapl_ioc(subf_config.ioc_fapl_id, &ioc_config);
+
+ /*
+ * Finally, set our new Subfiling configuration
+ * on the original FAPL
+ */
+ H5Pset_fapl_subfiling(subfiling_fapl, &subf_config);
+
+ /*
+ * OPTIONAL: Set alignment of objects in HDF5 file to
+ * be equal to the Subfiling stripe size.
+ * Choosing a Subfiling stripe size and HDF5
+ * object alignment value that are some
+ * multiple of the disk block size can
+ * generally help performance by ensuring
+ * that I/O is well-aligned and doesn't
+ * excessively cross stripe boundaries.
+ *
+ * Note that this option can substantially
+ * increase the size of the resulting HDF5
+ * files, so it is a good idea to keep an eye
+ * on this.
+ */
+ H5Pset_alignment(subfiling_fapl, 0, 1048576); /* Align to custom 1MiB stripe size */
+
+ /* Parse any parallel prefix and create filename */
+ par_prefix = getenv("HDF5_PARAPREFIX");
+
+ snprintf(filename, sizeof(filename), "%s%s%s", par_prefix ? par_prefix : "", par_prefix ? "/" : "",
+ EXAMPLE_FILE2);
+
+ /*
+ * Create a new file collectively
+ */
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, subfiling_fapl);
+
+ /*
+ * Create the dataspace for the dataset. The first
+ * dimension varies with the number of MPI ranks
+ * while the second dimension is fixed.
+ */
+ dset_dims[0] = mpi_size;
+ dset_dims[1] = EXAMPLE_DSET_NY;
+ filespace = H5Screate_simple(EXAMPLE_DSET_DIMS, dset_dims, NULL);
+
+ /*
+ * Create the dataset with default properties
+ */
+ dset_id = H5Dcreate2(file_id, EXAMPLE_DSET_NAME, EXAMPLE_DSET_DATATYPE, filespace, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
+
+ /*
+ * Each MPI rank writes from a contiguous memory
+ * region to the hyperslab in the file
+ */
+ start[0] = mpi_rank;
+ start[1] = 0;
+ count[0] = 1;
+ count[1] = dset_dims[1];
+ H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, NULL, count, NULL);
+
+ /*
+ * Initialize data buffer
+ */
+ data = malloc(count[0] * count[1] * sizeof(EXAMPLE_DSET_C_DATATYPE));
+ for (size_t i = 0; i < count[0] * count[1]; i++) {
+ data[i] = mpi_rank + i;
+ }
+
+ /*
+ * Write to dataset
+ */
+ H5Dwrite(dset_id, EXAMPLE_DSET_DATATYPE, H5S_BLOCK, filespace, H5P_DEFAULT, data);
+
+ /*
+ * Close/release resources.
+ */
+
+ free(data);
+
+ H5Dclose(dset_id);
+ H5Sclose(filespace);
+ H5Fclose(file_id);
+
+ cleanup(EXAMPLE_FILE2, subfiling_fapl);
+
+ H5Pclose(subfiling_fapl);
+}
+
+/*
+ * An example of pre-creating an HDF5 file on MPI rank
+ * 0 when using the HDF5 Subfiling VFD. In this case,
+ * the subfiling stripe count must be set so that rank
+ * 0 knows how many subfiles to pre-create.
+ */
+static void
+subfiling_write_precreate(hid_t fapl_id, int mpi_size, int mpi_rank)
+{
+ EXAMPLE_DSET_C_DATATYPE *data;
+ H5FD_subfiling_config_t subf_config;
+ hsize_t dset_dims[EXAMPLE_DSET_DIMS];
+ hsize_t start[EXAMPLE_DSET_DIMS];
+ hsize_t count[EXAMPLE_DSET_DIMS];
+ hid_t file_id;
+ hid_t subfiling_fapl;
+ hid_t dset_id;
+ hid_t filespace;
+ char filename[512];
+ char *par_prefix;
+
+ /*
+ * Make a copy of the FAPL so we don't disturb
+ * it for the other examples
+ */
+ subfiling_fapl = H5Pcopy(fapl_id);
+
+ /*
+ * Get a default Subfiling and IOC configuration
+ */
+ H5Pget_fapl_subfiling(subfiling_fapl, &subf_config);
+
+ /*
+ * Set the Subfiling stripe count so that rank
+ * 0 knows how many subfiles the logical HDF5
+ * file should consist of. In this case, use
+ * 5 subfiles with a default stripe size of
+ * 32MiB.
+ */
+ subf_config.shared_cfg.stripe_count = 5;
+
+ /*
+ * OPTIONAL: Set alignment of objects in HDF5 file to
+ * be equal to the Subfiling stripe size.
+ * Choosing a Subfiling stripe size and HDF5
+ * object alignment value that are some
+ * multiple of the disk block size can
+ * generally help performance by ensuring
+ * that I/O is well-aligned and doesn't
+ * excessively cross stripe boundaries.
+ *
+ * Note that this option can substantially
+ * increase the size of the resulting HDF5
+ * files, so it is a good idea to keep an eye
+ * on this.
+ */
+ H5Pset_alignment(subfiling_fapl, 0, 1048576); /* Align to custom 1MiB stripe size */
+
+ /* Parse any parallel prefix and create filename */
+ par_prefix = getenv("HDF5_PARAPREFIX");
+
+ snprintf(filename, sizeof(filename), "%s%s%s", par_prefix ? par_prefix : "", par_prefix ? "/" : "",
+ EXAMPLE_FILE3);
+
+ /* Set dataset dimensionality */
+ dset_dims[0] = mpi_size;
+ dset_dims[1] = EXAMPLE_DSET_NY;
+
+ if (mpi_rank == 0) {
+ /*
+ * Make sure only this rank opens the file
+ */
+ H5Pset_mpi_params(subfiling_fapl, MPI_COMM_SELF, MPI_INFO_NULL);
+
+ /*
+ * Set the Subfiling VFD on our FAPL using
+ * our custom configuration
+ */
+ H5Pset_fapl_subfiling(subfiling_fapl, &subf_config);
+
+ /*
+ * Create a new file on rank 0
+ */
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, subfiling_fapl);
+
+ /*
+ * Create the dataspace for the dataset. The first
+ * dimension varies with the number of MPI ranks
+ * while the second dimension is fixed.
+ */
+ filespace = H5Screate_simple(EXAMPLE_DSET_DIMS, dset_dims, NULL);
+
+ /*
+ * Create the dataset with default properties
+ */
+ dset_id = H5Dcreate2(file_id, EXAMPLE_DSET_NAME, EXAMPLE_DSET_DATATYPE, filespace, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
+
+ /*
+ * Initialize data buffer
+ */
+ data = malloc(dset_dims[0] * dset_dims[1] * sizeof(EXAMPLE_DSET_C_DATATYPE));
+ for (size_t i = 0; i < dset_dims[0] * dset_dims[1]; i++) {
+ data[i] = i;
+ }
+
+ /*
+ * Rank 0 writes to the whole dataset
+ */
+ H5Dwrite(dset_id, EXAMPLE_DSET_DATATYPE, H5S_BLOCK, filespace, H5P_DEFAULT, data);
+
+ /*
+ * Close/release resources.
+ */
+
+ free(data);
+
+ H5Dclose(dset_id);
+ H5Sclose(filespace);
+ H5Fclose(file_id);
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /*
+ * Use all MPI ranks to re-open the file and
+ * read back the dataset that was created
+ */
+ H5Pset_mpi_params(subfiling_fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
+
+ /*
+ * Use the same subfiling configuration as rank 0
+ * used to create the file
+ */
+ H5Pset_fapl_subfiling(subfiling_fapl, &subf_config);
+
+ /*
+ * Re-open the file on all ranks
+ */
+ file_id = H5Fopen(filename, H5F_ACC_RDONLY, subfiling_fapl);
+
+ /*
+ * Open the dataset that was created
+ */
+ dset_id = H5Dopen2(file_id, EXAMPLE_DSET_NAME, H5P_DEFAULT);
+
+ /*
+ * Initialize data buffer
+ */
+ data = malloc(dset_dims[0] * dset_dims[1] * sizeof(EXAMPLE_DSET_C_DATATYPE));
+
+ /*
+ * Read the dataset on all ranks
+ */
+ H5Dread(dset_id, EXAMPLE_DSET_DATATYPE, H5S_BLOCK, H5S_ALL, H5P_DEFAULT, data);
+
+ /*
+ * Close/release resources.
+ */
+
+ free(data);
+
+ H5Dclose(dset_id);
+ H5Fclose(file_id);
+
+ cleanup(EXAMPLE_FILE3, subfiling_fapl);
+
+ H5Pclose(subfiling_fapl);
+}
+
+int
+main(int argc, char **argv)
+{
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ hid_t fapl_id;
+ int mpi_size;
+ int mpi_rank;
+ int mpi_thread_required = MPI_THREAD_MULTIPLE;
+ int mpi_thread_provided = 0;
+
+ /* HDF5 Subfiling VFD requires MPI_Init_thread with MPI_THREAD_MULTIPLE */
+ MPI_Init_thread(&argc, &argv, mpi_thread_required, &mpi_thread_provided);
+ if (mpi_thread_provided < mpi_thread_required) {
+ printf("MPI_THREAD_MULTIPLE not supported\n");
+ MPI_Abort(comm, -1);
+ }
+
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /*
+ * Set up File Access Property List with MPI
+ * parameters for the Subfiling VFD to use
+ */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_mpi_params(fapl_id, comm, info);
+
+ /* Use Subfiling VFD with default settings */
+ subfiling_write_default(fapl_id, mpi_size, mpi_rank);
+
+ /* Use Subfiling VFD with custom settings */
+ subfiling_write_custom(fapl_id, mpi_size, mpi_rank);
+
+ /*
+ * Use Subfiling VFD to precreate the HDF5
+ * file on MPI rank 0
+ */
+ subfiling_write_precreate(fapl_id, mpi_size, mpi_rank);
+
+ H5Pclose(fapl_id);
+
+ if (mpi_rank == 0)
+ printf("PHDF5 example finished with no errors\n");
+
+ MPI_Finalize();
+
+ return 0;
+}
+
+#else
+
+/* dummy program since HDF5 lacks parallel and/or Subfiling VFD support */
+int
+main(void)
+{
+ printf(
+ "Example program cannot run - HDF5 must be built with parallel support and Subfiling VFD support\n");
+ return 0;
+}
+
+#endif /* H5_HAVE_PARALLEL && H5_HAVE_SUBFILING_VFD */