author     Allen Byrne <50328838+byrnHDF@users.noreply.github.com>  2024-01-03 17:23:42 (GMT)
committer  GitHub <noreply@github.com>  2024-01-03 17:23:42 (GMT)
commit     72e33ad7727765fd162a1f70ca502cc2437aabde (patch)
tree       84b4cff56687fa7a8d443a529984932dd30a7fa4 /HDF5Examples/C/H5PAR
parent     95827bc79d592ad5aa71ec3199a83ede9b324c20 (diff)
download   hdf5-72e33ad7727765fd162a1f70ca502cc2437aabde.zip
           hdf5-72e33ad7727765fd162a1f70ca502cc2437aabde.tar.gz
           hdf5-72e33ad7727765fd162a1f70ca502cc2437aabde.tar.bz2
Merge examples and workflows from develop (#3918)
Diffstat (limited to 'HDF5Examples/C/H5PAR')
-rw-r--r--  HDF5Examples/C/H5PAR/CMakeLists.txt                 64
-rw-r--r--  HDF5Examples/C/H5PAR/C_sourcefiles.cmake            16
-rw-r--r--  HDF5Examples/C/H5PAR/ph5_dataset.c                 101
-rw-r--r--  HDF5Examples/C/H5PAR/ph5_file_create.c              60
-rw-r--r--  HDF5Examples/C/H5PAR/ph5_filtered_writes.c         488
-rw-r--r--  HDF5Examples/C/H5PAR/ph5_filtered_writes_no_sel.c  369
-rw-r--r--  HDF5Examples/C/H5PAR/ph5_hyperslab_by_chunk.c      157
-rw-r--r--  HDF5Examples/C/H5PAR/ph5_hyperslab_by_col.c        140
-rw-r--r--  HDF5Examples/C/H5PAR/ph5_hyperslab_by_pattern.c    152
-rw-r--r--  HDF5Examples/C/H5PAR/ph5_hyperslab_by_row.c        119
-rw-r--r--  HDF5Examples/C/H5PAR/ph5_subfiling.c               551
-rw-r--r--  HDF5Examples/C/H5PAR/ph5example.c                 1100
12 files changed, 3317 insertions, 0 deletions
diff --git a/HDF5Examples/C/H5PAR/CMakeLists.txt b/HDF5Examples/C/H5PAR/CMakeLists.txt
new file mode 100644
index 0000000..6e569b4
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/CMakeLists.txt
@@ -0,0 +1,64 @@
+cmake_minimum_required (VERSION 3.12)
+PROJECT (H5PAR_C)
+
+#-----------------------------------------------------------------------------
+# Define Sources
+#-----------------------------------------------------------------------------
+include (C_sourcefiles.cmake)
+
+foreach (example_name ${examples})
+ add_executable (${EXAMPLE_VARNAME}_${example_name} ${PROJECT_SOURCE_DIR}/${example_name}.c)
+ target_compile_options(${EXAMPLE_VARNAME}_${example_name}
+ PRIVATE
+ "$<$<BOOL:${${EXAMPLE_VARNAME}_USE_16_API}>:-DH5_USE_16_API>"
+ "$<$<BOOL:${${EXAMPLE_VARNAME}_USE_18_API}>:-DH5_USE_18_API>"
+ "$<$<BOOL:${${EXAMPLE_VARNAME}_USE_110_API}>:-DH5_USE_110_API>"
+ "$<$<BOOL:${${EXAMPLE_VARNAME}_USE_112_API}>:-DH5_USE_112_API>"
+ "$<$<BOOL:${${EXAMPLE_VARNAME}_USE_114_API}>:-DH5_USE_114_API>"
+ "$<$<BOOL:${${EXAMPLE_VARNAME}_USE_116_API}>:-DH5_USE_116_API>"
+ )
+ target_include_directories (${EXAMPLE_VARNAME}_${example_name} PUBLIC ${MPI_C_INCLUDE_DIRS})
+ target_link_libraries (${EXAMPLE_VARNAME}_${example_name} ${H5EX_HDF5_LINK_LIBS})
+endforeach ()
+
+if (H5EX_BUILD_TESTING)
+  macro (ADD_GREP_TEST testname numprocs)
+ add_test (
+ NAME ${EXAMPLE_VARNAME}_${testname}-clearall
+ COMMAND ${CMAKE_COMMAND}
+ -E remove
+ ${testname}.h5
+ )
+ if (last_test)
+ set_tests_properties (${EXAMPLE_VARNAME}_${testname}-clearall PROPERTIES DEPENDS ${last_test})
+ endif ()
+ add_test (NAME ${EXAMPLE_VARNAME}_${testname} COMMAND "${CMAKE_COMMAND}"
+      -D "TEST_PROGRAM=${MPIEXEC_EXECUTABLE};${MPIEXEC_NUMPROC_FLAG};${numprocs};${MPIEXEC_PREFLAGS};$<TARGET_FILE:${EXAMPLE_VARNAME}_${testname}>;${MPIEXEC_POSTFLAGS}"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=${testname}.out"
+ -D "TEST_REFERENCE:STRING=PHDF5 example finished with no errors"
+ -D "TEST_LIBRARY_DIRECTORY=${CMAKE_TEST_LIB_DIRECTORY}"
+ -P "${H5EX_RESOURCES_DIR}/grepTest.cmake"
+ )
+ set_tests_properties (${EXAMPLE_VARNAME}_${testname} PROPERTIES DEPENDS ${EXAMPLE_VARNAME}_${testname}-clearall)
+ set (last_test "${EXAMPLE_VARNAME}_${testname}")
+ endmacro ()
+
+ # Ensure that 24 is a multiple of the number of processes.
+ # The number 24 corresponds to SPACE1_DIM1 and SPACE1_DIM2 defined in ph5example.c
+ math(EXPR NUMPROCS "24 / ((24 + ${MPIEXEC_MAX_NUMPROCS} - 1) / ${MPIEXEC_MAX_NUMPROCS})")
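+  # For example, with MPIEXEC_MAX_NUMPROCS=10 this evaluates to
+  # 24 / ((24 + 9) / 10) = 24 / 3 = 8 processes, and with
+  # MPIEXEC_MAX_NUMPROCS=4 it evaluates to 24 / ((24 + 3) / 4) = 24 / 6 = 4,
+  # so the chosen process count always divides 24 evenly.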
+
+ foreach (example_name ${examples})
+ if (${example_name} STREQUAL "ph5_hyperslab_by_col")
+ ADD_GREP_TEST (${example_name} 2)
+ elseif (${example_name} STREQUAL "ph5_hyperslab_by_chunk" OR ${example_name} STREQUAL "ph5_hyperslab_by_pattern")
+ ADD_GREP_TEST (${example_name} 4)
+ else ()
+ ADD_GREP_TEST (${example_name} ${NUMPROCS})
+ endif ()
+ endforeach ()
+
+endif ()
diff --git a/HDF5Examples/C/H5PAR/C_sourcefiles.cmake b/HDF5Examples/C/H5PAR/C_sourcefiles.cmake
new file mode 100644
index 0000000..2e1ede2
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/C_sourcefiles.cmake
@@ -0,0 +1,16 @@
+#-----------------------------------------------------------------------------
+# Define Sources, one file per application
+#-----------------------------------------------------------------------------
+set (examples
+ ph5_filtered_writes
+ ph5_filtered_writes_no_sel
+ ph5_dataset
+ ph5_file_create
+ ph5_hyperslab_by_row
+ ph5_hyperslab_by_col
+ ph5_hyperslab_by_pattern
+ ph5_hyperslab_by_chunk
+)
+if (${HDF5_ENABLE_SUBFILING_VFD})
+ list (APPEND examples ph5_subfiling)
+endif ()
diff --git a/HDF5Examples/C/H5PAR/ph5_dataset.c b/HDF5Examples/C/H5PAR/ph5_dataset.c
new file mode 100644
index 0000000..9b8e8a8
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/ph5_dataset.c
@@ -0,0 +1,101 @@
+/*
+ * This example writes data to the HDF5 file.
+ * Number of processes is assumed to be 1 or a multiple of 2 (up to 8)
+ */
+
+#include "hdf5.h"
+#include "stdlib.h"
+
+#define H5FILE_NAME "SDS.h5"
+#define DATASETNAME "IntArray"
+#define NX 8 /* dataset dimensions */
+#define NY 5
+#define RANK 2
+
+int
+main(int argc, char **argv)
+{
+ /*
+ * HDF5 APIs definitions
+ */
+ hid_t file_id, dset_id; /* file and dataset identifiers */
+    hid_t   filespace;          /* file dataspace identifier */
+ hsize_t dimsf[] = {NX, NY}; /* dataset dimensions */
+ int *data; /* pointer to data buffer to write */
+ hid_t plist_id; /* property list identifier */
+ int i;
+ herr_t status;
+
+ /*
+ * MPI variables
+ */
+ int mpi_size, mpi_rank;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ /*
+ * Initialize MPI
+ */
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /*
+ * Initialize data buffer
+ */
+ data = (int *)malloc(sizeof(int) * dimsf[0] * dimsf[1]);
+ for (i = 0; i < dimsf[0] * dimsf[1]; i++) {
+ data[i] = i;
+ }
+ /*
+ * Set up file access property list with parallel I/O access
+ */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(plist_id, comm, info);
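+    /*
+     * Note: instead of MPI_INFO_NULL, file system hints may be passed
+     * through an MPI_Info object. A minimal sketch (hint names are
+     * file-system dependent; "striping_factor" is a common ROMIO hint):
+     *
+     *   MPI_Info_create(&info);
+     *   MPI_Info_set(info, "striping_factor", "4");
+     *   H5Pset_fapl_mpio(plist_id, comm, info);
+     */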
+
+ /*
+ * Create a new file collectively and release property list identifier.
+ */
+ file_id = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
+ H5Pclose(plist_id);
+
+ /*
+ * Create the dataspace for the dataset.
+ */
+ filespace = H5Screate_simple(RANK, dimsf, NULL);
+
+ /*
+ * Create the dataset with default properties and close filespace.
+ */
+ dset_id =
+ H5Dcreate(file_id, DATASETNAME, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ /*
+ * Create property list for collective dataset write.
+ */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+
+ /*
+ * To write dataset independently use
+ *
+ * H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_INDEPENDENT);
+ */
+
+ status = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, plist_id, data);
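+    /*
+     * With H5S_ALL for both the memory and file dataspaces, every rank
+     * collectively writes the same full NX x NY buffer to the dataset,
+     * so the file contents do not depend on the number of processes.
+     */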
+ free(data);
+
+ /*
+ * Close/release resources.
+ */
+ H5Dclose(dset_id);
+ H5Sclose(filespace);
+ H5Pclose(plist_id);
+ H5Fclose(file_id);
+
+ if (mpi_rank == 0)
+ printf("PHDF5 example finished with no errors\n");
+
+ MPI_Finalize();
+
+ return 0;
+}
diff --git a/HDF5Examples/C/H5PAR/ph5_file_create.c b/HDF5Examples/C/H5PAR/ph5_file_create.c
new file mode 100644
index 0000000..a3bd0a8
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/ph5_file_create.c
@@ -0,0 +1,60 @@
+/*
+ * This example creates an HDF5 file.
+ */
+
+#include "hdf5.h"
+
+#define H5FILE_NAME "SDS_row.h5"
+
+int
+main(int argc, char **argv)
+{
+ /*
+ * HDF5 APIs definitions
+ */
+    hid_t  file_id;  /* file identifier */
+    hid_t  plist_id; /* property list identifier (access template) */
+ herr_t status;
+
+ /*
+ * MPI variables
+ */
+ int mpi_size, mpi_rank;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ /*
+ * Initialize MPI
+ */
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /*
+ * Set up file access property list with parallel I/O access
+ */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(plist_id, comm, info);
+
+ /*
+ * Create a new file collectively.
+ */
+ file_id = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
+
+ /*
+ * Close property list.
+ */
+ H5Pclose(plist_id);
+
+ /*
+ * Close the file.
+ */
+ H5Fclose(file_id);
+
+ if (mpi_rank == 0)
+ printf("PHDF5 example finished with no errors\n");
+
+ MPI_Finalize();
+
+ return 0;
+}
diff --git a/HDF5Examples/C/H5PAR/ph5_filtered_writes.c b/HDF5Examples/C/H5PAR/ph5_filtered_writes.c
new file mode 100644
index 0000000..104704a
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/ph5_filtered_writes.c
@@ -0,0 +1,488 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Example of using the parallel HDF5 library to write to datasets
+ * with filters applied to them.
+ *
+ * If the HDF5_NOCLEANUP environment variable is set, the file that
+ * this example creates will not be removed as the example finishes.
+ *
+ * A parallel file prefix is needed because, in general, the current
+ * working directory in which compiling is done is not suitable for
+ * parallel I/O and there is no standard pathname for parallel file
+ * systems. In some cases, the parallel file name may even need some
+ * parallel file type prefix such as: "pfs:/GF/...". Therefore, this
+ * example parses the HDF5_PARAPREFIX environment variable for a prefix,
+ * if one is needed.
+ */
+
+#include <stdlib.h>
+
+#include "hdf5.h"
+
+#if defined(H5_HAVE_PARALLEL) && defined(H5_HAVE_PARALLEL_FILTERED_WRITES)
+
+#define EXAMPLE_FILE "ph5_filtered_writes.h5"
+#define EXAMPLE_DSET1_NAME "DSET1"
+#define EXAMPLE_DSET2_NAME "DSET2"
+
+#define EXAMPLE_DSET_DIMS 2
+#define EXAMPLE_DSET_CHUNK_DIM_SIZE 10
+
+/* Dataset datatype */
+#define HDF5_DATATYPE H5T_NATIVE_INT
+typedef int C_DATATYPE;
+
+#ifndef PATH_MAX
+#define PATH_MAX 512
+#endif
+
+/* Global variables */
+int mpi_rank, mpi_size;
+
+/*
+ * Routine to set an HDF5 filter on the given DCPL
+ */
+static void
+set_filter(hid_t dcpl_id)
+{
+ htri_t filter_avail;
+
+ /*
+ * Check if 'deflate' filter is available
+ */
+ filter_avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE);
+ if (filter_avail < 0)
+ return;
+ else if (filter_avail) {
+ /*
+ * Set 'deflate' filter with reasonable
+ * compression level on DCPL
+ */
+ H5Pset_deflate(dcpl_id, 6);
+ }
+ else {
+ /*
+ * Set Fletcher32 checksum filter on DCPL
+ * since it is always available in HDF5
+ */
+ H5Pset_fletcher32(dcpl_id);
+ }
+}
+
+/*
+ * Routine to fill a data buffer with data. Assumes
+ * dimension rank is 2 and data is stored contiguously.
+ */
+void
+fill_databuf(hsize_t start[], hsize_t count[], hsize_t stride[], C_DATATYPE *data)
+{
+ C_DATATYPE *dataptr = data;
+ hsize_t i, j;
+
+ /* Use MPI rank value for data */
+ for (i = 0; i < count[0]; i++) {
+ for (j = 0; j < count[1]; j++) {
+ *dataptr++ = mpi_rank;
+ }
+ }
+}
+
+/* Cleanup created file */
+static void
+cleanup(char *filename)
+{
+    hbool_t do_cleanup = getenv("HDF5_NOCLEANUP") ? 0 : 1;
+
+ if (do_cleanup)
+ MPI_File_delete(filename, MPI_INFO_NULL);
+}
+
+/*
+ * Routine to write to a dataset in a fashion
+ * where no chunks in the dataset are written
+ * to by more than 1 MPI rank. This will
+ * generally give the best performance as the
+ * MPI ranks will need the least amount of
+ * inter-process communication.
+ */
+static void
+write_dataset_no_overlap(hid_t file_id, hid_t dxpl_id)
+{
+ C_DATATYPE data[EXAMPLE_DSET_CHUNK_DIM_SIZE][4 * EXAMPLE_DSET_CHUNK_DIM_SIZE];
+ hsize_t dataset_dims[EXAMPLE_DSET_DIMS];
+ hsize_t chunk_dims[EXAMPLE_DSET_DIMS];
+ hsize_t start[EXAMPLE_DSET_DIMS];
+ hsize_t stride[EXAMPLE_DSET_DIMS];
+ hsize_t count[EXAMPLE_DSET_DIMS];
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t file_dataspace = H5I_INVALID_HID;
+
+ /*
+ * ------------------------------------
+ * Setup Dataset Creation Property List
+ * ------------------------------------
+ */
+
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+
+ /*
+ * REQUIRED: Dataset chunking must be enabled to
+ * apply a data filter to the dataset.
+ * Chunks in the dataset are of size
+ * EXAMPLE_DSET_CHUNK_DIM_SIZE x EXAMPLE_DSET_CHUNK_DIM_SIZE.
+ */
+ chunk_dims[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ chunk_dims[1] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ H5Pset_chunk(dcpl_id, EXAMPLE_DSET_DIMS, chunk_dims);
+
+ /* Set filter to be applied to created datasets */
+ set_filter(dcpl_id);
+
+ /*
+ * ------------------------------------
+ * Define the dimensions of the dataset
+ * and create it
+ * ------------------------------------
+ */
+
+ /*
+ * Create a dataset composed of 4 chunks
+ * per MPI rank. The first dataset dimension
+ * scales according to the number of MPI ranks.
+ * The second dataset dimension stays fixed
+ * according to the chunk size.
+ */
+ dataset_dims[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE * mpi_size;
+ dataset_dims[1] = 4 * EXAMPLE_DSET_CHUNK_DIM_SIZE;
+
+ file_dataspace = H5Screate_simple(EXAMPLE_DSET_DIMS, dataset_dims, NULL);
+
+ /* Create the dataset */
+ dset_id = H5Dcreate2(file_id, EXAMPLE_DSET1_NAME, HDF5_DATATYPE, file_dataspace, H5P_DEFAULT, dcpl_id,
+ H5P_DEFAULT);
+
+ /*
+ * ------------------------------------
+ * Setup selection in the dataset for
+ * each MPI rank
+ * ------------------------------------
+ */
+
+ /*
+ * Each MPI rank's selection covers a
+ * single chunk in the first dataset
+ * dimension. Each MPI rank's selection
+ * covers 4 chunks in the second dataset
+ * dimension. This leads to each MPI rank
+ * writing to 4 chunks of the dataset.
+ */
+ start[0] = mpi_rank * EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ count[1] = 4 * EXAMPLE_DSET_CHUNK_DIM_SIZE;
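+    /*
+     * For example, rank r selects rows r*10 through r*10+9 and all 40
+     * columns, i.e. exactly the 4 chunks in row-block r of the dataset.
+     */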
+
+ H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
+
+ /*
+ * --------------------------------------
+ * Fill data buffer with MPI rank's rank
+ * value to make it easy to see which
+ * part of the dataset each rank wrote to
+ * --------------------------------------
+ */
+
+ fill_databuf(start, count, stride, &data[0][0]);
+
+ /*
+ * ---------------------------------
+ * Write to the dataset collectively
+ * ---------------------------------
+ */
+
+ H5Dwrite(dset_id, HDF5_DATATYPE, H5S_BLOCK, file_dataspace, dxpl_id, data);
+
+ /*
+ * --------------
+ * Close HDF5 IDs
+ * --------------
+ */
+
+ H5Sclose(file_dataspace);
+ H5Pclose(dcpl_id);
+ H5Dclose(dset_id);
+}
+
+/*
+ * Routine to write to a dataset in a fashion
+ * where every chunk in the dataset is written
+ * to by every MPI rank. This will generally
+ * give the worst performance as the MPI ranks
+ * will need the most amount of inter-process
+ * communication.
+ */
+static void
+write_dataset_overlap(hid_t file_id, hid_t dxpl_id)
+{
+ C_DATATYPE *data = NULL;
+ hsize_t dataset_dims[EXAMPLE_DSET_DIMS];
+ hsize_t chunk_dims[EXAMPLE_DSET_DIMS];
+ hsize_t start[EXAMPLE_DSET_DIMS];
+ hsize_t stride[EXAMPLE_DSET_DIMS];
+ hsize_t count[EXAMPLE_DSET_DIMS];
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t file_dataspace = H5I_INVALID_HID;
+
+ /*
+ * ------------------------------------
+ * Setup Dataset Creation Property List
+ * ------------------------------------
+ */
+
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+
+ /*
+ * REQUIRED: Dataset chunking must be enabled to
+ * apply a data filter to the dataset.
+ * Chunks in the dataset are of size
+ * mpi_size x EXAMPLE_DSET_CHUNK_DIM_SIZE.
+ */
+ chunk_dims[0] = mpi_size;
+ chunk_dims[1] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ H5Pset_chunk(dcpl_id, EXAMPLE_DSET_DIMS, chunk_dims);
+
+ /* Set filter to be applied to created datasets */
+ set_filter(dcpl_id);
+
+ /*
+ * ------------------------------------
+ * Define the dimensions of the dataset
+ * and create it
+ * ------------------------------------
+ */
+
+ /*
+ * Create a dataset composed of N chunks,
+ * where N is the number of MPI ranks. The
+ * first dataset dimension scales according
+ * to the number of MPI ranks. The second
+ * dataset dimension stays fixed according
+ * to the chunk size.
+ */
+ dataset_dims[0] = mpi_size * chunk_dims[0];
+ dataset_dims[1] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+
+ file_dataspace = H5Screate_simple(EXAMPLE_DSET_DIMS, dataset_dims, NULL);
+
+ /* Create the dataset */
+ dset_id = H5Dcreate2(file_id, EXAMPLE_DSET2_NAME, HDF5_DATATYPE, file_dataspace, H5P_DEFAULT, dcpl_id,
+ H5P_DEFAULT);
+
+ /*
+ * ------------------------------------
+ * Setup selection in the dataset for
+ * each MPI rank
+ * ------------------------------------
+ */
+
+ /*
+ * Each MPI rank's selection covers
+ * part of every chunk in the first
+ * dimension. Each MPI rank's selection
+ * covers all of every chunk in the
+ * second dimension. This leads to
+ * each MPI rank writing an equal
+ * amount of data to every chunk
+ * in the dataset.
+ */
+ start[0] = mpi_rank;
+ start[1] = 0;
+ stride[0] = chunk_dims[0];
+ stride[1] = 1;
+ count[0] = mpi_size;
+ count[1] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
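+    /*
+     * For example, with 4 ranks the dataset is 16 x 10 with 4 x 10
+     * chunks; rank r selects rows r, r+4, r+8 and r+12, touching one
+     * row of every chunk.
+     */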
+
+ H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
+
+ /*
+ * --------------------------------------
+ * Fill data buffer with MPI rank's rank
+ * value to make it easy to see which
+ * part of the dataset each rank wrote to
+ * --------------------------------------
+ */
+
+ data = malloc(mpi_size * EXAMPLE_DSET_CHUNK_DIM_SIZE * sizeof(C_DATATYPE));
+
+ fill_databuf(start, count, stride, data);
+
+ /*
+ * ---------------------------------
+ * Write to the dataset collectively
+ * ---------------------------------
+ */
+
+ H5Dwrite(dset_id, HDF5_DATATYPE, H5S_BLOCK, file_dataspace, dxpl_id, data);
+
+ free(data);
+
+ /*
+ * --------------
+ * Close HDF5 IDs
+ * --------------
+ */
+
+ H5Sclose(file_dataspace);
+ H5Pclose(dcpl_id);
+ H5Dclose(dset_id);
+}
+
+int
+main(int argc, char **argv)
+{
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ char *par_prefix = NULL;
+ char filename[PATH_MAX];
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /*
+ * ----------------------------------
+ * Start parallel access to HDF5 file
+ * ----------------------------------
+ */
+
+ /* Setup File Access Property List with parallel I/O access */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(fapl_id, comm, info);
+
+ /*
+ * OPTIONAL: Set collective metadata reads on FAPL to allow
+ * parallel writes to filtered datasets to perform
+ * better at scale. While not strictly necessary,
+ * this is generally recommended.
+ */
+ H5Pset_all_coll_metadata_ops(fapl_id, true);
+
+ /*
+ * OPTIONAL: Set the latest file format version for HDF5 in
+ * order to gain access to different dataset chunk
+ * index types and better data encoding methods.
+ * While not strictly necessary, this is generally
+ * recommended.
+ */
+ H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+
+ /* Parse any parallel prefix and create filename */
+ par_prefix = getenv("HDF5_PARAPREFIX");
+
+ snprintf(filename, PATH_MAX, "%s%s%s", par_prefix ? par_prefix : "", par_prefix ? "/" : "", EXAMPLE_FILE);
+
+ /* Create HDF5 file */
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+
+ /*
+ * --------------------------------------
+ * Setup Dataset Transfer Property List
+ * with collective I/O
+ * --------------------------------------
+ */
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+
+ /*
+ * REQUIRED: Setup collective I/O for the dataset
+ * write operations. Parallel writes to
+ * filtered datasets MUST be collective,
+ * even if some ranks have no data to
+ * contribute to the write operation.
+ *
+ * Refer to the 'ph5_filtered_writes_no_sel'
+ * example to see how to setup a dataset
+ * write when one or more MPI ranks have
+ * no data to contribute to the write
+ * operation.
+ */
+ H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
+
+ /*
+ * --------------------------------
+ * Create and write to each dataset
+ * --------------------------------
+ */
+
+ /*
+ * Write to a dataset in a fashion where no
+ * chunks in the dataset are written to by
+ * more than 1 MPI rank. This will generally
+ * give the best performance as the MPI ranks
+ * will need the least amount of inter-process
+ * communication.
+ */
+ write_dataset_no_overlap(file_id, dxpl_id);
+
+ /*
+ * Write to a dataset in a fashion where
+ * every chunk in the dataset is written
+ * to by every MPI rank. This will generally
+ * give the worst performance as the MPI ranks
+ * will need the most amount of inter-process
+ * communication.
+ */
+ write_dataset_overlap(file_id, dxpl_id);
+
+ /*
+ * ------------------
+ * Close all HDF5 IDs
+ * ------------------
+ */
+
+ H5Pclose(dxpl_id);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+
+ printf("PHDF5 example finished with no errors\n");
+
+ /*
+ * ------------------------------------
+ * Cleanup created HDF5 file and finish
+ * ------------------------------------
+ */
+
+ cleanup(filename);
+
+ MPI_Finalize();
+
+ return 0;
+}
+
+#else
+
+int
+main(void)
+{
+ printf("HDF5 not configured with parallel support or parallel filtered writes are disabled!\n");
+ return 0;
+}
+
+#endif
diff --git a/HDF5Examples/C/H5PAR/ph5_filtered_writes_no_sel.c b/HDF5Examples/C/H5PAR/ph5_filtered_writes_no_sel.c
new file mode 100644
index 0000000..a4d9e16
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/ph5_filtered_writes_no_sel.c
@@ -0,0 +1,369 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Example of using the parallel HDF5 library to collectively write to
+ * datasets with filters applied to them when one or more MPI ranks do not
+ * have data to contribute to the dataset.
+ *
+ * If the HDF5_NOCLEANUP environment variable is set, the file that
+ * this example creates will not be removed as the example finishes.
+ *
+ * A parallel file prefix is needed because, in general, the current
+ * working directory in which compiling is done is not suitable for
+ * parallel I/O and there is no standard pathname for parallel file
+ * systems. In some cases, the parallel file name may even need some
+ * parallel file type prefix such as: "pfs:/GF/...". Therefore, this
+ * example parses the HDF5_PARAPREFIX environment variable for a prefix,
+ * if one is needed.
+ */
+
+#include <stdlib.h>
+
+#include "hdf5.h"
+
+#if defined(H5_HAVE_PARALLEL) && defined(H5_HAVE_PARALLEL_FILTERED_WRITES)
+
+#define EXAMPLE_FILE "ph5_filtered_writes_no_sel.h5"
+#define EXAMPLE_DSET_NAME "DSET"
+
+#define EXAMPLE_DSET_DIMS 2
+#define EXAMPLE_DSET_CHUNK_DIM_SIZE 10
+
+/* Dataset datatype */
+#define HDF5_DATATYPE H5T_NATIVE_INT
+typedef int C_DATATYPE;
+
+#ifndef PATH_MAX
+#define PATH_MAX 512
+#endif
+
+/* Global variables */
+int mpi_rank, mpi_size;
+
+/*
+ * Routine to set an HDF5 filter on the given DCPL
+ */
+static void
+set_filter(hid_t dcpl_id)
+{
+ htri_t filter_avail;
+
+ /*
+ * Check if 'deflate' filter is available
+ */
+ filter_avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE);
+ if (filter_avail < 0)
+ return;
+ else if (filter_avail) {
+ /*
+ * Set 'deflate' filter with reasonable
+ * compression level on DCPL
+ */
+ H5Pset_deflate(dcpl_id, 6);
+ }
+ else {
+ /*
+ * Set Fletcher32 checksum filter on DCPL
+ * since it is always available in HDF5
+ */
+ H5Pset_fletcher32(dcpl_id);
+ }
+}
+
+/*
+ * Routine to fill a data buffer with data. Assumes
+ * dimension rank is 2 and data is stored contiguously.
+ */
+void
+fill_databuf(hsize_t start[], hsize_t count[], hsize_t stride[], C_DATATYPE *data)
+{
+ C_DATATYPE *dataptr = data;
+ hsize_t i, j;
+
+ /* Use MPI rank value for data */
+ for (i = 0; i < count[0]; i++) {
+ for (j = 0; j < count[1]; j++) {
+ *dataptr++ = mpi_rank;
+ }
+ }
+}
+
+/* Cleanup created file */
+static void
+cleanup(char *filename)
+{
+    hbool_t do_cleanup = getenv("HDF5_NOCLEANUP") ? 0 : 1;
+
+ if (do_cleanup)
+ MPI_File_delete(filename, MPI_INFO_NULL);
+}
+
+/*
+ * Routine to write to a dataset in a fashion
+ * where no chunks in the dataset are written
+ * to by more than 1 MPI rank and where some
+ * MPI ranks make no selection (contribute no
+ * data). Ranks with no selection must still
+ * participate in the collective write call.
+ */
+static void
+write_dataset_some_no_sel(hid_t file_id, hid_t dxpl_id)
+{
+ C_DATATYPE data[EXAMPLE_DSET_CHUNK_DIM_SIZE][4 * EXAMPLE_DSET_CHUNK_DIM_SIZE];
+ hsize_t dataset_dims[EXAMPLE_DSET_DIMS];
+ hsize_t chunk_dims[EXAMPLE_DSET_DIMS];
+ hsize_t start[EXAMPLE_DSET_DIMS];
+ hsize_t stride[EXAMPLE_DSET_DIMS];
+ hsize_t count[EXAMPLE_DSET_DIMS];
+ hbool_t no_selection;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t file_dataspace = H5I_INVALID_HID;
+
+ /*
+ * ------------------------------------
+ * Setup Dataset Creation Property List
+ * ------------------------------------
+ */
+
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+
+ /*
+ * REQUIRED: Dataset chunking must be enabled to
+ * apply a data filter to the dataset.
+ * Chunks in the dataset are of size
+ * EXAMPLE_DSET_CHUNK_DIM_SIZE x EXAMPLE_DSET_CHUNK_DIM_SIZE.
+ */
+ chunk_dims[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ chunk_dims[1] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ H5Pset_chunk(dcpl_id, EXAMPLE_DSET_DIMS, chunk_dims);
+
+ /* Set filter to be applied to created datasets */
+ set_filter(dcpl_id);
+
+ /*
+ * ------------------------------------
+ * Define the dimensions of the dataset
+ * and create it
+ * ------------------------------------
+ */
+
+ /*
+ * Create a dataset composed of 4 chunks
+ * per MPI rank. The first dataset dimension
+ * scales according to the number of MPI ranks.
+ * The second dataset dimension stays fixed
+ * according to the chunk size.
+ */
+ dataset_dims[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE * mpi_size;
+ dataset_dims[1] = 4 * EXAMPLE_DSET_CHUNK_DIM_SIZE;
+
+ file_dataspace = H5Screate_simple(EXAMPLE_DSET_DIMS, dataset_dims, NULL);
+
+ /* Create the dataset */
+ dset_id = H5Dcreate2(file_id, EXAMPLE_DSET_NAME, HDF5_DATATYPE, file_dataspace, H5P_DEFAULT, dcpl_id,
+ H5P_DEFAULT);
+
+ /*
+ * ------------------------------------
+ * Setup selection in the dataset for
+ * each MPI rank
+ * ------------------------------------
+ */
+
+ /*
+ * Odd rank value MPI ranks do not
+ * contribute any data to the dataset.
+ */
+ no_selection = (mpi_rank % 2) == 1;
+
+ if (no_selection) {
+ /*
+ * MPI ranks not contributing data to
+ * the dataset should call H5Sselect_none
+ * on the file dataspace that will be
+ * passed to H5Dwrite.
+ */
+ H5Sselect_none(file_dataspace);
+ }
+ else {
+ /*
+ * Even MPI ranks contribute data to
+ * the dataset. Each MPI rank's selection
+ * covers a single chunk in the first dataset
+ * dimension. Each MPI rank's selection
+ * covers 4 chunks in the second dataset
+ * dimension. This leads to each contributing
+ * MPI rank writing to 4 chunks of the dataset.
+ */
+ start[0] = mpi_rank * EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ count[1] = 4 * EXAMPLE_DSET_CHUNK_DIM_SIZE;
+
+ H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
+
+ /*
+ * --------------------------------------
+ * Fill data buffer with MPI rank's rank
+ * value to make it easy to see which
+ * part of the dataset each rank wrote to
+ * --------------------------------------
+ */
+
+ fill_databuf(start, count, stride, &data[0][0]);
+ }
+
+ /*
+ * ---------------------------------
+ * Write to the dataset collectively
+ * ---------------------------------
+ */
+
+ H5Dwrite(dset_id, HDF5_DATATYPE, no_selection ? H5S_ALL : H5S_BLOCK, file_dataspace, dxpl_id, data);
+
+ /*
+ * --------------
+ * Close HDF5 IDs
+ * --------------
+ */
+
+ H5Sclose(file_dataspace);
+ H5Pclose(dcpl_id);
+ H5Dclose(dset_id);
+}
+
+int
+main(int argc, char **argv)
+{
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ char *par_prefix = NULL;
+ char filename[PATH_MAX];
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /*
+ * ----------------------------------
+ * Start parallel access to HDF5 file
+ * ----------------------------------
+ */
+
+ /* Setup File Access Property List with parallel I/O access */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(fapl_id, comm, info);
+
+ /*
+ * OPTIONAL: Set collective metadata reads on FAPL to allow
+ * parallel writes to filtered datasets to perform
+ * better at scale. While not strictly necessary,
+ * this is generally recommended.
+ */
+ H5Pset_all_coll_metadata_ops(fapl_id, true);
+
+ /*
+ * OPTIONAL: Set the latest file format version for HDF5 in
+ * order to gain access to different dataset chunk
+ * index types and better data encoding methods.
+ * While not strictly necessary, this is generally
+ * recommended.
+ */
+ H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+
+ /* Parse any parallel prefix and create filename */
+ par_prefix = getenv("HDF5_PARAPREFIX");
+
+ snprintf(filename, PATH_MAX, "%s%s%s", par_prefix ? par_prefix : "", par_prefix ? "/" : "", EXAMPLE_FILE);
+
+ /* Create HDF5 file */
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+
+ /*
+ * --------------------------------------
+ * Setup Dataset Transfer Property List
+ * with collective I/O
+ * --------------------------------------
+ */
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+
+ /*
+ * REQUIRED: Setup collective I/O for the dataset
+ * write operations. Parallel writes to
+ * filtered datasets MUST be collective,
+ * even if some ranks have no data to
+ * contribute to the write operation.
+ */
+ H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
+
+ /*
+ * --------------------------------
+ * Create and write to the dataset
+ * --------------------------------
+ */
+
+ /*
+ * Write to a dataset in a fashion where no
+ * chunks in the dataset are written to by
+ * more than 1 MPI rank and some MPI ranks
+ * have nothing to contribute to the dataset.
+ * In this case, the MPI ranks that have no
+ * data to contribute must still participate
+ * in the collective H5Dwrite call, but should
+ * call H5Sselect_none on the file dataspace
+ * passed to the H5Dwrite call.
+ */
+ write_dataset_some_no_sel(file_id, dxpl_id);
+
+ /*
+ * ------------------
+ * Close all HDF5 IDs
+ * ------------------
+ */
+
+ H5Pclose(dxpl_id);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+
+ printf("PHDF5 example finished with no errors\n");
+
+ /*
+ * ------------------------------------
+ * Cleanup created HDF5 file and finish
+ * ------------------------------------
+ */
+
+ cleanup(filename);
+
+ MPI_Finalize();
+
+ return 0;
+}
+
+#else
+
+int
+main(void)
+{
+ printf("HDF5 not configured with parallel support or parallel filtered writes are disabled!\n");
+ return 0;
+}
+
+#endif
diff --git a/HDF5Examples/C/H5PAR/ph5_hyperslab_by_chunk.c b/HDF5Examples/C/H5PAR/ph5_hyperslab_by_chunk.c
new file mode 100644
index 0000000..a255b96
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/ph5_hyperslab_by_chunk.c
@@ -0,0 +1,157 @@
+/*
+ * This example writes a dataset using chunking. Each process writes
+ * exactly one chunk.
+ * - |
+ * * V
+ * Number of processes is assumed to be 4.
+ */
+
+#include "hdf5.h"
+#include "stdlib.h"
+
+#define H5FILE_NAME "SDS_chnk.h5"
+#define DATASETNAME "IntArray"
+#define NX 8 /* dataset dimensions */
+#define NY 4
+#define CH_NX 4 /* chunk dimensions */
+#define CH_NY 2
+#define RANK 2
+
+int
+main(int argc, char **argv)
+{
+ /*
+ * HDF5 APIs definitions
+ */
+ hid_t file_id, dset_id; /* file and dataset identifiers */
+ hid_t filespace, memspace; /* file and memory dataspace identifiers */
+ hsize_t dimsf[2]; /* dataset dimensions */
+ hsize_t chunk_dims[2]; /* chunk dimensions */
+ int *data; /* pointer to data buffer to write */
+ hsize_t count[2]; /* hyperslab selection parameters */
+ hsize_t stride[2];
+ hsize_t block[2];
+ hsize_t offset[2];
+ hid_t plist_id; /* property list identifier */
+ int i;
+ herr_t status;
+
+ /*
+ * MPI variables
+ */
+ int mpi_size, mpi_rank;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ /*
+ * Initialize MPI
+ */
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+ /*
+ * Exit if number of processes is not 4.
+ */
+ if (mpi_size != 4) {
+        printf("This example is set up to use only 4 processes\n");
+ printf("Quitting...\n");
+ return 0;
+ }
+
+ /*
+ * Set up file access property list with parallel I/O access
+ */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(plist_id, comm, info);
+
+ /*
+ * Create a new file collectively and release property list identifier.
+ */
+ file_id = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
+ H5Pclose(plist_id);
+
+ /*
+ * Create the dataspace for the dataset.
+ */
+ dimsf[0] = NX;
+ dimsf[1] = NY;
+ chunk_dims[0] = CH_NX;
+ chunk_dims[1] = CH_NY;
+ filespace = H5Screate_simple(RANK, dimsf, NULL);
+ memspace = H5Screate_simple(RANK, chunk_dims, NULL);
+
+ /*
+ * Create chunked dataset.
+ */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ H5Pset_chunk(plist_id, RANK, chunk_dims);
+ dset_id = H5Dcreate(file_id, DATASETNAME, H5T_NATIVE_INT, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ H5Pclose(plist_id);
+ H5Sclose(filespace);
+
+ /*
+ * Each process defines dataset in memory and writes it to the hyperslab
+ * in the file.
+ */
+ count[0] = 1;
+ count[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ block[0] = chunk_dims[0];
+ block[1] = chunk_dims[1];
+ if (mpi_rank == 0) {
+ offset[0] = 0;
+ offset[1] = 0;
+ }
+ if (mpi_rank == 1) {
+ offset[0] = 0;
+ offset[1] = chunk_dims[1];
+ }
+ if (mpi_rank == 2) {
+ offset[0] = chunk_dims[0];
+ offset[1] = 0;
+ }
+ if (mpi_rank == 3) {
+ offset[0] = chunk_dims[0];
+ offset[1] = chunk_dims[1];
+ }
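+    /*
+     * With the 8 x 4 dataset and 4 x 2 chunks this assigns one chunk
+     * per rank: rank 0 gets the top-left quarter, rank 1 the top-right,
+     * rank 2 the bottom-left and rank 3 the bottom-right.
+     */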
+
+ /*
+ * Select hyperslab in the file.
+ */
+ filespace = H5Dget_space(dset_id);
+ status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block);
+
+ /*
+ * Initialize data buffer
+ */
+ data = (int *)malloc(sizeof(int) * chunk_dims[0] * chunk_dims[1]);
+ for (i = 0; i < (int)chunk_dims[0] * chunk_dims[1]; i++) {
+ data[i] = mpi_rank + 1;
+ }
+
+ /*
+ * Create property list for collective dataset write.
+ */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+
+ status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace, plist_id, data);
+ free(data);
+
+ /*
+ * Close/release resources.
+ */
+ H5Dclose(dset_id);
+ H5Sclose(filespace);
+ H5Sclose(memspace);
+ H5Pclose(plist_id);
+ H5Fclose(file_id);
+
+ if (mpi_rank == 0)
+ printf("PHDF5 example finished with no errors\n");
+
+ MPI_Finalize();
+
+ return 0;
+}
diff --git a/HDF5Examples/C/H5PAR/ph5_hyperslab_by_col.c b/HDF5Examples/C/H5PAR/ph5_hyperslab_by_col.c
new file mode 100644
index 0000000..b397fcf
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/ph5_hyperslab_by_col.c
@@ -0,0 +1,140 @@
+/*
+ * This example writes data to the HDF5 file by columns.
+ * Number of processes is assumed to be 2.
+ */
+
+#include "hdf5.h"
+#include "stdlib.h"
+
+#define H5FILE_NAME "SDS_col.h5"
+#define DATASETNAME "IntArray"
+#define NX 8 /* dataset dimensions */
+#define NY 6
+#define RANK 2
+
+int
+main(int argc, char **argv)
+{
+ /*
+ * HDF5 APIs definitions
+ */
+ hid_t file_id, dset_id; /* file and dataset identifiers */
+ hid_t filespace, memspace; /* file and memory dataspace identifiers */
+ hsize_t dimsf[2]; /* dataset dimensions */
+    hsize_t dimsm[2];  /* memory dataspace dimensions */
+ int *data; /* pointer to data buffer to write */
+ hsize_t count[2]; /* hyperslab selection parameters */
+ hsize_t stride[2];
+ hsize_t block[2];
+ hsize_t offset[2];
+ hid_t plist_id; /* property list identifier */
+ int i, j, k;
+ herr_t status;
+
+ /*
+ * MPI variables
+ */
+ int mpi_size, mpi_rank;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ /*
+ * Initialize MPI
+ */
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+ /*
+ * Exit if number of processes is not 2
+ */
+ if (mpi_size != 2) {
+        printf("This example is set up to use only 2 processes\n");
+ printf("Quitting...\n");
+ return 0;
+ }
+
+ /*
+ * Set up file access property list with parallel I/O access
+ */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(plist_id, comm, info);
+
+ /*
+ * Create a new file collectively and release property list identifier.
+ */
+ file_id = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
+ H5Pclose(plist_id);
+
+ /*
+ * Create the dataspace for the dataset.
+ */
+ dimsf[0] = NX;
+ dimsf[1] = NY;
+ dimsm[0] = NX;
+ dimsm[1] = NY / 2;
+ filespace = H5Screate_simple(RANK, dimsf, NULL);
+ memspace = H5Screate_simple(RANK, dimsm, NULL);
+
+ /*
+ * Create the dataset with default properties and close filespace.
+ */
+ dset_id =
+ H5Dcreate(file_id, DATASETNAME, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ H5Sclose(filespace);
+
+ /*
+ * Each process defines dataset in memory and writes it to the hyperslab
+ * in the file.
+ */
+ count[0] = 1;
+ count[1] = dimsm[1];
+ offset[0] = 0;
+ offset[1] = mpi_rank;
+ stride[0] = 1;
+ stride[1] = 2;
+ block[0] = dimsf[0];
+ block[1] = 1;
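+    /*
+     * With NY = 6 and 2 processes this interleaves whole columns:
+     * rank 0 writes columns 0, 2, 4 and rank 1 writes columns 1, 3, 5,
+     * so every row of the file reads 1 2 10 20 100 200 after the write.
+     */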
+
+ /*
+ * Select hyperslab in the file.
+ */
+ filespace = H5Dget_space(dset_id);
+ H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block);
+
+ /*
+ * Initialize data buffer
+ */
+ data = (int *)malloc(sizeof(int) * (size_t)dimsm[0] * (size_t)dimsm[1]);
+ for (i = 0; i < dimsm[0] * dimsm[1]; i = i + dimsm[1]) {
+ k = 1;
+ for (j = 0; j < dimsm[1]; j++) {
+ data[i + j] = (mpi_rank + 1) * k;
+ k = k * 10;
+ }
+ }
+
+ /*
+ * Create property list for collective dataset write.
+ */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+
+ status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace, plist_id, data);
+ free(data);
+
+ /*
+ * Close/release resources.
+ */
+ H5Dclose(dset_id);
+ H5Sclose(filespace);
+ H5Sclose(memspace);
+ H5Pclose(plist_id);
+ H5Fclose(file_id);
+
+ if (mpi_rank == 0)
+ printf("PHDF5 example finished with no errors\n");
+
+ MPI_Finalize();
+
+ return 0;
+}
diff --git a/HDF5Examples/C/H5PAR/ph5_hyperslab_by_pattern.c b/HDF5Examples/C/H5PAR/ph5_hyperslab_by_pattern.c
new file mode 100644
index 0000000..77f3bef
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/ph5_hyperslab_by_pattern.c
@@ -0,0 +1,152 @@
+/*
+ * This example writes data to the HDF5 file following some pattern
+ * - | - | ......
+ * * V * V ......
+ * - | - | ......
+ * * V * V ......
+ * ..............
+ * Number of processes is assumed to be 4.
+ */
+
+#include "hdf5.h"
+#include "stdlib.h"
+
+#define H5FILE_NAME "SDS_pat.h5"
+#define DATASETNAME "IntArray"
+#define NX 8 /* dataset dimensions */
+#define NY 4
+#define RANK 2
+#define RANK1 1
+
+int
+main(int argc, char **argv)
+{
+ /*
+ * HDF5 APIs definitions
+ */
+ hid_t file_id, dset_id; /* file and dataset identifiers */
+ hid_t filespace, memspace; /* file and memory dataspace identifiers */
+ hsize_t dimsf[2]; /* dataset dimensions */
+    hsize_t dimsm[1];  /* memory dataspace dimension */
+ int *data; /* pointer to data buffer to write */
+ hsize_t count[2]; /* hyperslab selection parameters */
+ hsize_t stride[2];
+ hsize_t offset[2];
+ hid_t plist_id; /* property list identifier */
+ int i;
+ herr_t status;
+
+ /*
+ * MPI variables
+ */
+ int mpi_size, mpi_rank;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ /*
+ * Initialize MPI
+ */
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+ /*
+ * Exit if number of processes is not 4.
+ */
+ if (mpi_size != 4) {
+        printf("This example is set up to use only 4 processes\n");
+ printf("Quitting...\n");
+ return 0;
+ }
+
+ /*
+ * Set up file access property list with parallel I/O access
+ */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(plist_id, comm, info);
+
+ /*
+ * Create a new file collectively and release property list identifier.
+ */
+ file_id = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
+ H5Pclose(plist_id);
+
+ /*
+ * Create the dataspace for the dataset.
+ */
+ dimsf[0] = NX;
+ dimsf[1] = NY;
+ dimsm[0] = NX;
+ filespace = H5Screate_simple(RANK, dimsf, NULL);
+ memspace = H5Screate_simple(RANK1, dimsm, NULL);
+
+ /*
+ * Create the dataset with default properties and close filespace.
+ */
+ dset_id =
+ H5Dcreate(file_id, DATASETNAME, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ H5Sclose(filespace);
+
+ /*
+ * Each process defines dataset in memory and writes it to the hyperslab
+ * in the file.
+ */
+ count[0] = 4;
+ count[1] = 2;
+ stride[0] = 2;
+ stride[1] = 2;
+ if (mpi_rank == 0) {
+ offset[0] = 0;
+ offset[1] = 0;
+ }
+ if (mpi_rank == 1) {
+ offset[0] = 1;
+ offset[1] = 0;
+ }
+ if (mpi_rank == 2) {
+ offset[0] = 0;
+ offset[1] = 1;
+ }
+ if (mpi_rank == 3) {
+ offset[0] = 1;
+ offset[1] = 1;
+ }
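+    /*
+     * Each rank selects a 4 x 2 set of elements spaced 2 apart in both
+     * dimensions: rank 0 (value 1) starts at (0,0), rank 1 (value 2)
+     * one row below, rank 2 (value 3) one column to the right and
+     * rank 3 (value 4) one row and one column away, producing the
+     * pattern sketched at the top of this file.
+     */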
+
+ /*
+ * Select hyperslab in the file.
+ */
+ filespace = H5Dget_space(dset_id);
+ status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, NULL);
+
+ /*
+ * Initialize data buffer
+ */
+ data = (int *)malloc(sizeof(int) * dimsm[0]);
+ for (i = 0; i < (int)dimsm[0]; i++) {
+ data[i] = mpi_rank + 1;
+ }
+
+ /*
+ * Create property list for collective dataset write.
+ */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+
+ status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace, plist_id, data);
+ free(data);
+
+ /*
+ * Close/release resources.
+ */
+ H5Dclose(dset_id);
+ H5Sclose(filespace);
+ H5Sclose(memspace);
+ H5Pclose(plist_id);
+ H5Fclose(file_id);
+
+ if (mpi_rank == 0)
+ printf("PHDF5 example finished with no errors\n");
+
+ MPI_Finalize();
+
+ return 0;
+}
diff --git a/HDF5Examples/C/H5PAR/ph5_hyperslab_by_row.c b/HDF5Examples/C/H5PAR/ph5_hyperslab_by_row.c
new file mode 100644
index 0000000..5035786
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/ph5_hyperslab_by_row.c
@@ -0,0 +1,119 @@
+/*
+ * This example writes data to the HDF5 file by rows.
+ * Number of processes is assumed to be 1 or a multiple of 2 (up to 8)
+ */
+
+#include "hdf5.h"
+#include "stdlib.h"
+
+#define H5FILE_NAME "SDS_row.h5"
+#define DATASETNAME "IntArray"
+#define NX 8 /* dataset dimensions */
+#define NY 5
+#define RANK 2
+
+int
+main(int argc, char **argv)
+{
+ /*
+ * HDF5 APIs definitions
+ */
+ hid_t file_id, dset_id; /* file and dataset identifiers */
+ hid_t filespace, memspace; /* file and memory dataspace identifiers */
+ hsize_t dimsf[2]; /* dataset dimensions */
+ int *data; /* pointer to data buffer to write */
+ hsize_t count[2]; /* hyperslab selection parameters */
+ hsize_t offset[2];
+ hid_t plist_id; /* property list identifier */
+ int i;
+ herr_t status;
+
+ /*
+ * MPI variables
+ */
+ int mpi_size, mpi_rank;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ /*
+ * Initialize MPI
+ */
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /*
+ * Set up file access property list with parallel I/O access
+ */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(plist_id, comm, info);
+
+ /*
+ * Create a new file collectively and release property list identifier.
+ */
+ file_id = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
+ H5Pclose(plist_id);
+
+ /*
+ * Create the dataspace for the dataset.
+ */
+ dimsf[0] = NX;
+ dimsf[1] = NY;
+ filespace = H5Screate_simple(RANK, dimsf, NULL);
+
+ /*
+ * Create the dataset with default properties and close filespace.
+ */
+ dset_id =
+ H5Dcreate(file_id, DATASETNAME, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ H5Sclose(filespace);
+
+ /*
+ * Each process defines dataset in memory and writes it to the hyperslab
+ * in the file.
+ */
+ count[0] = dimsf[0] / mpi_size;
+ count[1] = dimsf[1];
+ offset[0] = mpi_rank * count[0];
+ offset[1] = 0;
+ memspace = H5Screate_simple(RANK, count, NULL);
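+    /*
+     * For example, with 4 processes each rank owns NX/4 = 2 rows:
+     * rank 0 writes rows 0-1, rank 1 rows 2-3, rank 2 rows 4-5 and
+     * rank 3 rows 6-7, each filled with the value (rank + 10).
+     */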
+
+ /*
+ * Select hyperslab in the file.
+ */
+ filespace = H5Dget_space(dset_id);
+ H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, count, NULL);
+
+ /*
+ * Initialize data buffer
+ */
+ data = (int *)malloc(sizeof(int) * count[0] * count[1]);
+ for (i = 0; i < count[0] * count[1]; i++) {
+ data[i] = mpi_rank + 10;
+ }
+
+ /*
+ * Create property list for collective dataset write.
+ */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+
+ status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace, plist_id, data);
+ free(data);
+
+ /*
+ * Close/release resources.
+ */
+ H5Dclose(dset_id);
+ H5Sclose(filespace);
+ H5Sclose(memspace);
+ H5Pclose(plist_id);
+ H5Fclose(file_id);
+
+ if (mpi_rank == 0)
+ printf("PHDF5 example finished with no errors\n");
+
+ MPI_Finalize();
+
+ return 0;
+}
diff --git a/HDF5Examples/C/H5PAR/ph5_subfiling.c b/HDF5Examples/C/H5PAR/ph5_subfiling.c
new file mode 100644
index 0000000..7d72448
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/ph5_subfiling.c
@@ -0,0 +1,551 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Example of using HDF5's Subfiling VFD to write to an
+ * HDF5 file that is striped across multiple subfiles
+ *
+ * If the HDF5_NOCLEANUP environment variable is set, the
+ * files that this example creates will not be removed as
+ * the example finishes.
+ *
+ * In general, the current working directory in which compiling
+ * is done, is not suitable for parallel I/O and there is no
+ * standard pathname for parallel file systems. In some cases,
+ * the parallel file name may even need some parallel file type
+ * prefix such as: "pfs:/GF/...". Therefore, this example parses
+ * the HDF5_PARAPREFIX environment variable for a prefix, if one
+ * is needed.
+ */
+
+#include <stdlib.h>
+
+#include "hdf5.h"
+
+#if defined(H5_HAVE_PARALLEL) && defined(H5_HAVE_SUBFILING_VFD)
+
+#define EXAMPLE_FILE "h5_subfiling_default_example.h5"
+#define EXAMPLE_FILE2 "h5_subfiling_custom_example.h5"
+#define EXAMPLE_FILE3 "h5_subfiling_precreate_example.h5"
+
+#define EXAMPLE_DSET_NAME "DSET"
+#define EXAMPLE_DSET_DIMS 2
+
+/* Have each MPI rank write 16MiB of data */
+#define EXAMPLE_DSET_NY 4194304
+
+/* Dataset datatype */
+#define EXAMPLE_DSET_DATATYPE H5T_NATIVE_INT
+typedef int EXAMPLE_DSET_C_DATATYPE;
+
+/* Cleanup created files */
+static void
+cleanup(char *filename, hid_t fapl_id)
+{
+    hbool_t do_cleanup = getenv("HDF5_NOCLEANUP") ? 0 : 1;
+
+ if (do_cleanup)
+ H5Fdelete(filename, fapl_id);
+}
+
+/*
+ * An example of using the HDF5 Subfiling VFD with
+ * its default settings of 1 subfile per node, with
+ * a stripe size of 32MiB
+ */
+static void
+subfiling_write_default(hid_t fapl_id, int mpi_size, int mpi_rank)
+{
+ EXAMPLE_DSET_C_DATATYPE *data;
+ hsize_t dset_dims[EXAMPLE_DSET_DIMS];
+ hsize_t start[EXAMPLE_DSET_DIMS];
+ hsize_t count[EXAMPLE_DSET_DIMS];
+ hid_t file_id;
+ hid_t subfiling_fapl;
+ hid_t dset_id;
+ hid_t filespace;
+ char filename[512];
+ char *par_prefix;
+
+ /*
+ * Make a copy of the FAPL so we don't disturb
+ * it for the other examples
+ */
+ subfiling_fapl = H5Pcopy(fapl_id);
+
+ /*
+ * Set Subfiling VFD on FAPL using default settings
+ * (use IOC VFD, 1 IOC per node, 32MiB stripe size)
+ *
+ * Note that all of Subfiling's configuration settings
+ * can be adjusted with environment variables as well
+ * in this case.
+ */
+ H5Pset_fapl_subfiling(subfiling_fapl, NULL);
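+    /*
+     * For instance, the stripe size and I/O concentrator layout can
+     * typically be overridden at run time through environment variables
+     * such as H5FD_SUBFILING_STRIPE_SIZE and H5FD_SUBFILING_IOC_PER_NODE
+     * (see the Subfiling VFD documentation for the authoritative list).
+     */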
+
+ /*
+ * OPTIONAL: Set alignment of objects in HDF5 file to
+ * be equal to the Subfiling stripe size.
+ * Choosing a Subfiling stripe size and HDF5
+ * object alignment value that are some
+ * multiple of the disk block size can
+ * generally help performance by ensuring
+ * that I/O is well-aligned and doesn't
+ * excessively cross stripe boundaries.
+ *
+ * Note that this option can substantially
+ * increase the size of the resulting HDF5
+ * files, so it is a good idea to keep an eye
+ * on this.
+ */
+ H5Pset_alignment(subfiling_fapl, 0, 33554432); /* Align to default 32MiB stripe size */
+
+ /* Parse any parallel prefix and create filename */
+ par_prefix = getenv("HDF5_PARAPREFIX");
+
+ snprintf(filename, sizeof(filename), "%s%s%s", par_prefix ? par_prefix : "", par_prefix ? "/" : "",
+ EXAMPLE_FILE);
+
+ /*
+ * Create a new file collectively
+ */
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, subfiling_fapl);
+
+ /*
+ * Create the dataspace for the dataset. The first
+ * dimension varies with the number of MPI ranks
+ * while the second dimension is fixed.
+ */
+ dset_dims[0] = mpi_size;
+ dset_dims[1] = EXAMPLE_DSET_NY;
+ filespace = H5Screate_simple(EXAMPLE_DSET_DIMS, dset_dims, NULL);
+
+ /*
+ * Create the dataset with default properties
+ */
+ dset_id = H5Dcreate2(file_id, EXAMPLE_DSET_NAME, EXAMPLE_DSET_DATATYPE, filespace, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
+
+ /*
+ * Each MPI rank writes from a contiguous memory
+ * region to the hyperslab in the file
+ */
+ start[0] = mpi_rank;
+ start[1] = 0;
+ count[0] = 1;
+ count[1] = dset_dims[1];
+ H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, NULL, count, NULL);
+
+ /*
+ * Initialize data buffer
+ */
+ data = malloc(count[0] * count[1] * sizeof(EXAMPLE_DSET_C_DATATYPE));
+ for (size_t i = 0; i < count[0] * count[1]; i++) {
+ data[i] = mpi_rank + i;
+ }
+
+ /*
+ * Write to dataset
+ */
+ H5Dwrite(dset_id, EXAMPLE_DSET_DATATYPE, H5S_BLOCK, filespace, H5P_DEFAULT, data);
+
+ /*
+ * Close/release resources.
+ */
+
+ free(data);
+
+ H5Dclose(dset_id);
+ H5Sclose(filespace);
+ H5Fclose(file_id);
+
+ cleanup(EXAMPLE_FILE, subfiling_fapl);
+
+ H5Pclose(subfiling_fapl);
+}
+
+/*
+ * An example of using the HDF5 Subfiling VFD with
+ * custom settings
+ */
+static void
+subfiling_write_custom(hid_t fapl_id, int mpi_size, int mpi_rank)
+{
+ EXAMPLE_DSET_C_DATATYPE *data;
+ H5FD_subfiling_config_t subf_config;
+ H5FD_ioc_config_t ioc_config;
+ hsize_t dset_dims[EXAMPLE_DSET_DIMS];
+ hsize_t start[EXAMPLE_DSET_DIMS];
+ hsize_t count[EXAMPLE_DSET_DIMS];
+ hid_t file_id;
+ hid_t subfiling_fapl;
+ hid_t dset_id;
+ hid_t filespace;
+ char filename[512];
+ char *par_prefix;
+
+ /*
+ * Make a copy of the FAPL so we don't disturb
+ * it for the other examples
+ */
+ subfiling_fapl = H5Pcopy(fapl_id);
+
+ /*
+ * Get a default Subfiling and IOC configuration
+ */
+ H5Pget_fapl_subfiling(subfiling_fapl, &subf_config);
+ H5Pget_fapl_ioc(subfiling_fapl, &ioc_config);
+
+ /*
+ * Set Subfiling configuration to use a 1MiB
+ * stripe size and the SELECT_IOC_EVERY_NTH_RANK
+ * selection method. By default, without a setting
+ * in the H5FD_SUBFILING_IOC_SELECTION_CRITERIA
+ * environment variable, this will use every MPI
+ * rank as an I/O concentrator.
+ */
+ subf_config.shared_cfg.stripe_size = 1048576;
+ subf_config.shared_cfg.ioc_selection = SELECT_IOC_EVERY_NTH_RANK;
+
+ /*
+ * Set IOC configuration to use 2 worker threads
+ * per IOC instead of the default setting and
+ * update IOC configuration with new subfiling
+ * configuration.
+ */
+ ioc_config.thread_pool_size = 2;
+
+ /*
+ * Set our new configuration on the IOC
+ * FAPL used for Subfiling
+ */
+ H5Pset_fapl_ioc(subf_config.ioc_fapl_id, &ioc_config);
+
+ /*
+ * Finally, set our new Subfiling configuration
+ * on the original FAPL
+ */
+ H5Pset_fapl_subfiling(subfiling_fapl, &subf_config);
+
+ /*
+ * OPTIONAL: Set alignment of objects in HDF5 file to
+ * be equal to the Subfiling stripe size.
+ * Choosing a Subfiling stripe size and HDF5
+ * object alignment value that are some
+ * multiple of the disk block size can
+ * generally help performance by ensuring
+ * that I/O is well-aligned and doesn't
+ * excessively cross stripe boundaries.
+ *
+ * Note that this option can substantially
+ * increase the size of the resulting HDF5
+ * files, so it is a good idea to keep an eye
+ * on this.
+ */
+ H5Pset_alignment(subfiling_fapl, 0, 1048576); /* Align to custom 1MiB stripe size */
+
+ /* Parse any parallel prefix and create filename */
+ par_prefix = getenv("HDF5_PARAPREFIX");
+
+ snprintf(filename, sizeof(filename), "%s%s%s", par_prefix ? par_prefix : "", par_prefix ? "/" : "",
+ EXAMPLE_FILE2);
+
+ /*
+ * Create a new file collectively
+ */
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, subfiling_fapl);
+
+ /*
+ * Create the dataspace for the dataset. The first
+ * dimension varies with the number of MPI ranks
+ * while the second dimension is fixed.
+ */
+ dset_dims[0] = mpi_size;
+ dset_dims[1] = EXAMPLE_DSET_NY;
+ filespace = H5Screate_simple(EXAMPLE_DSET_DIMS, dset_dims, NULL);
+
+ /*
+ * Create the dataset with default properties
+ */
+ dset_id = H5Dcreate2(file_id, EXAMPLE_DSET_NAME, EXAMPLE_DSET_DATATYPE, filespace, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
+
+ /*
+ * Each MPI rank writes from a contiguous memory
+ * region to the hyperslab in the file
+ */
+ start[0] = mpi_rank;
+ start[1] = 0;
+ count[0] = 1;
+ count[1] = dset_dims[1];
+ H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, NULL, count, NULL);
+
+ /*
+ * Initialize data buffer
+ */
+ data = malloc(count[0] * count[1] * sizeof(EXAMPLE_DSET_C_DATATYPE));
+ for (size_t i = 0; i < count[0] * count[1]; i++) {
+ data[i] = mpi_rank + i;
+ }
+
+ /*
+ * Write to dataset
+ */
+ H5Dwrite(dset_id, EXAMPLE_DSET_DATATYPE, H5S_BLOCK, filespace, H5P_DEFAULT, data);
+
+ /*
+ * Close/release resources.
+ */
+
+ free(data);
+
+ H5Dclose(dset_id);
+ H5Sclose(filespace);
+ H5Fclose(file_id);
+
+ cleanup(EXAMPLE_FILE2, subfiling_fapl);
+
+ H5Pclose(subfiling_fapl);
+}
+
+/*
+ * An example of pre-creating an HDF5 file on MPI rank
+ * 0 when using the HDF5 Subfiling VFD. In this case,
+ * the subfiling stripe count must be set so that rank
+ * 0 knows how many subfiles to pre-create.
+ */
+static void
+subfiling_write_precreate(hid_t fapl_id, int mpi_size, int mpi_rank)
+{
+ EXAMPLE_DSET_C_DATATYPE *data;
+ H5FD_subfiling_config_t subf_config;
+ hsize_t dset_dims[EXAMPLE_DSET_DIMS];
+ hsize_t start[EXAMPLE_DSET_DIMS];
+ hsize_t count[EXAMPLE_DSET_DIMS];
+ hid_t file_id;
+ hid_t subfiling_fapl;
+ hid_t dset_id;
+ hid_t filespace;
+ char filename[512];
+ char *par_prefix;
+
+ /*
+ * Make a copy of the FAPL so we don't disturb
+ * it for the other examples
+ */
+ subfiling_fapl = H5Pcopy(fapl_id);
+
+ /*
+ * Get a default Subfiling and IOC configuration
+ */
+ H5Pget_fapl_subfiling(subfiling_fapl, &subf_config);
+
+ /*
+ * Set the Subfiling stripe count so that rank
+ * 0 knows how many subfiles the logical HDF5
+ * file should consist of. In this case, use
+ * 5 subfiles with a default stripe size of
+ * 32MiB.
+ */
+ subf_config.shared_cfg.stripe_count = 5;
+
+ /*
+ * OPTIONAL: Set alignment of objects in HDF5 file to
+ * be equal to the Subfiling stripe size.
+ * Choosing a Subfiling stripe size and HDF5
+ * object alignment value that are some
+ * multiple of the disk block size can
+ * generally help performance by ensuring
+ * that I/O is well-aligned and doesn't
+ * excessively cross stripe boundaries.
+ *
+ * Note that this option can substantially
+ * increase the size of the resulting HDF5
+ * files, so it is a good idea to keep an eye
+ * on this.
+ */
+ H5Pset_alignment(subfiling_fapl, 0, 1048576); /* Align to custom 1MiB stripe size */
+
+ /* Parse any parallel prefix and create filename */
+ par_prefix = getenv("HDF5_PARAPREFIX");
+
+ snprintf(filename, sizeof(filename), "%s%s%s", par_prefix ? par_prefix : "", par_prefix ? "/" : "",
+ EXAMPLE_FILE3);
+
+ /* Set dataset dimensionality */
+ dset_dims[0] = mpi_size;
+ dset_dims[1] = EXAMPLE_DSET_NY;
+
+ if (mpi_rank == 0) {
+ /*
+ * Make sure only this rank opens the file
+ */
+ H5Pset_mpi_params(subfiling_fapl, MPI_COMM_SELF, MPI_INFO_NULL);
+
+ /*
+ * Set the Subfiling VFD on our FAPL using
+ * our custom configuration
+ */
+ H5Pset_fapl_subfiling(subfiling_fapl, &subf_config);
+
+ /*
+ * Create a new file on rank 0
+ */
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, subfiling_fapl);
+
+ /*
+ * Create the dataspace for the dataset. The first
+ * dimension varies with the number of MPI ranks
+ * while the second dimension is fixed.
+ */
+ filespace = H5Screate_simple(EXAMPLE_DSET_DIMS, dset_dims, NULL);
+
+ /*
+ * Create the dataset with default properties
+ */
+ dset_id = H5Dcreate2(file_id, EXAMPLE_DSET_NAME, EXAMPLE_DSET_DATATYPE, filespace, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
+
+ /*
+ * Initialize data buffer
+ */
+ data = malloc(dset_dims[0] * dset_dims[1] * sizeof(EXAMPLE_DSET_C_DATATYPE));
+ for (size_t i = 0; i < dset_dims[0] * dset_dims[1]; i++) {
+ data[i] = i;
+ }
+
+ /*
+ * Rank 0 writes to the whole dataset
+ */
+ H5Dwrite(dset_id, EXAMPLE_DSET_DATATYPE, H5S_BLOCK, filespace, H5P_DEFAULT, data);
+
+ /*
+ * Close/release resources.
+ */
+
+ free(data);
+
+ H5Dclose(dset_id);
+ H5Sclose(filespace);
+ H5Fclose(file_id);
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /*
+ * Use all MPI ranks to re-open the file and
+ * read back the dataset that was created
+ */
+ H5Pset_mpi_params(subfiling_fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
+
+ /*
+ * Use the same subfiling configuration as rank 0
+ * used to create the file
+ */
+ H5Pset_fapl_subfiling(subfiling_fapl, &subf_config);
+
+ /*
+ * Re-open the file on all ranks
+ */
+ file_id = H5Fopen(filename, H5F_ACC_RDONLY, subfiling_fapl);
+
+ /*
+ * Open the dataset that was created
+ */
+ dset_id = H5Dopen2(file_id, EXAMPLE_DSET_NAME, H5P_DEFAULT);
+
+ /*
+ * Initialize data buffer
+ */
+ data = malloc(dset_dims[0] * dset_dims[1] * sizeof(EXAMPLE_DSET_C_DATATYPE));
+
+ /*
+ * Read the dataset on all ranks
+ */
+ H5Dread(dset_id, EXAMPLE_DSET_DATATYPE, H5S_BLOCK, H5S_ALL, H5P_DEFAULT, data);
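+
+ /*
+ * Optional sanity check (a sketch, not part of the original example):
+ * rank 0 filled the dataset with the values 0, 1, 2, ..., so every
+ * rank can verify the buffer it just read directly:
+ *
+ *   for (size_t i = 0; i < dset_dims[0] * dset_dims[1]; i++)
+ *       if (data[i] != (EXAMPLE_DSET_C_DATATYPE)i)
+ *           printf("rank %d: mismatch at element %zu\n", mpi_rank, i);
+ */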
+
+ /*
+ * Close/release resources.
+ */
+
+ free(data);
+
+ H5Dclose(dset_id);
+ H5Fclose(file_id);
+
+ cleanup(EXAMPLE_FILE3, subfiling_fapl);
+
+ H5Pclose(subfiling_fapl);
+}
+
+int
+main(int argc, char **argv)
+{
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ hid_t fapl_id;
+ int mpi_size;
+ int mpi_rank;
+ int mpi_thread_required = MPI_THREAD_MULTIPLE;
+ int mpi_thread_provided = 0;
+
+ /* HDF5 Subfiling VFD requires MPI_Init_thread with MPI_THREAD_MULTIPLE */
+ MPI_Init_thread(&argc, &argv, mpi_thread_required, &mpi_thread_provided);
+ if (mpi_thread_provided < mpi_thread_required) {
+ printf("MPI_THREAD_MULTIPLE not supported\n");
+ MPI_Abort(comm, -1);
+ }
+
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /*
+ * Set up File Access Property List with MPI
+ * parameters for the Subfiling VFD to use
+ */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_mpi_params(fapl_id, comm, info);
+
+ /* Use Subfiling VFD with default settings */
+ subfiling_write_default(fapl_id, mpi_size, mpi_rank);
+
+ /* Use Subfiling VFD with custom settings */
+ subfiling_write_custom(fapl_id, mpi_size, mpi_rank);
+
+ /*
+ * Use Subfiling VFD to precreate the HDF5
+ * file on MPI rank 0
+ */
+ subfiling_write_precreate(fapl_id, mpi_size, mpi_rank);
+
+ H5Pclose(fapl_id);
+
+ if (mpi_rank == 0)
+ printf("PHDF5 example finished with no errors\n");
+
+ MPI_Finalize();
+
+ return 0;
+}
+
+#else
+
+/* dummy program since HDF5 is not parallel-enabled */
+int
+main(void)
+{
+ printf(
+ "Example program cannot run - HDF5 must be built with parallel support and Subfiling VFD support\n");
+ return 0;
+}
+
+#endif /* H5_HAVE_PARALLEL && H5_HAVE_SUBFILING_VFD */
diff --git a/HDF5Examples/C/H5PAR/ph5example.c b/HDF5Examples/C/H5PAR/ph5example.c
new file mode 100644
index 0000000..5ec2cdc
--- /dev/null
+++ b/HDF5Examples/C/H5PAR/ph5example.c
@@ -0,0 +1,1100 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Example of using the parallel HDF5 library to access datasets.
+ * Last revised: April 24, 2001.
+ *
+ * This program contains two parts. In the first part, the mpi processes
+ * collectively create a new parallel HDF5 file and create two fixed
+ * dimension datasets in it. Then each process writes a hyperslab into
+ * each dataset in an independent mode. All processes collectively
+ * close the datasets and the file.
+ * In the second part, the processes collectively open the created file
+ * and the two datasets in it. Then each process reads a hyperslab from
+ * each dataset in an independent mode and prints them out.
+ * All processes collectively close the datasets and the file.
+ *
+ * A parallel file prefix is required because, in general, the current
+ * working directory in which compilation is done is not suitable for
+ * parallel I/O, and there is no standard pathname for parallel file
+ * systems. In some cases the parallel file name may even need a
+ * parallel file type prefix such as "pfs:/GF/...". Therefore, this
+ * example requires an explicit parallel file prefix. See the usage
+ * for more detail.
+ */
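+
+/*
+ * A typical invocation (illustrative only; the executable name, number
+ * of ranks, and prefix depend on how the example is built and run):
+ *
+ *   HDF5_PARAPREFIX=/scratch/$USER mpiexec -n 4 ./ph5example -v
+ */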
+
+#include <assert.h>
+#include "hdf5.h"
+#include <string.h>
+#include <stdlib.h>
+
+#ifdef H5_HAVE_PARALLEL
+/* Temporary source code */
+#define FAIL -1
+/* temporary code end */
+
+/* Define some handy debugging shorthands, routines, ... */
+/* debugging tools */
+#define MESG(x) \
+ do { \
+ if (verbose) \
+ printf("%s\n", x); \
+ } while (0)
+
+#define MPI_BANNER(mesg) \
+ do { \
+ printf("--------------------------------\n"); \
+ printf("Proc %d: ", mpi_rank); \
+ printf("*** %s\n", mesg); \
+ printf("--------------------------------\n"); \
+ } while (0)
+
+#define SYNC(comm) \
+ do { \
+ MPI_BANNER("doing a SYNC"); \
+ MPI_Barrier(comm); \
+ MPI_BANNER("SYNC DONE"); \
+ } while (0)
+/* End of Define some handy debugging shorthands, routines, ... */
+
+/* Constants definitions */
+/* 24 is a multiple of 2, 3, 4, 6, 8, 12. Neat for parallel tests. */
+#define SPACE1_DIM1 24
+#define SPACE1_DIM2 24
+#define SPACE1_RANK 2
+#define DATASETNAME1 "Data1"
+#define DATASETNAME2 "Data2"
+#define DATASETNAME3 "Data3"
+/* hyperslab layout styles */
+#define BYROW 1 /* divide into slabs of rows */
+#define BYCOL 2 /* divide into blocks of columns */
+
+#define PARAPREFIX "HDF5_PARAPREFIX" /* file prefix environment variable name */
+
+/* Dataset datatype. Ints can be easily dumped with od (octal dump). */
+typedef int DATATYPE;
+
+/* global variables */
+int nerrors = 0; /* errors count */
+#ifndef PATH_MAX
+#define PATH_MAX 512
+#endif /* !PATH_MAX */
+char testfiles[2][PATH_MAX];
+
+int mpi_size, mpi_rank; /* mpi variables */
+
+/* option flags */
+int verbose = 0; /* verbose output, off by default */
+int doread = 1; /* read test */
+int dowrite = 1; /* write test */
+int docleanup = 1; /* cleanup */
+
+/* Prototypes */
+void slab_set(hsize_t start[], hsize_t count[], hsize_t stride[], int mode);
+void dataset_fill(hsize_t start[], hsize_t count[], hsize_t stride[], DATATYPE *dataset);
+void dataset_print(hsize_t start[], hsize_t count[], hsize_t stride[], DATATYPE *dataset);
+int dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], DATATYPE *dataset, DATATYPE *original);
+void phdf5writeInd(char *filename);
+void phdf5readInd(char *filename);
+void phdf5writeAll(char *filename);
+void phdf5readAll(char *filename);
+void test_split_comm_access(char filenames[][PATH_MAX]);
+int parse_options(int argc, char **argv);
+void usage(void);
+int mkfilenames(char *prefix);
+void cleanup(void);
+
+/*
+ * Setup the dimensions of the hyperslab.
+ * Two modes--by rows or by columns.
+ * Assume dimension rank is 2.
+ */
+void
+slab_set(hsize_t start[], hsize_t count[], hsize_t stride[], int mode)
+{
+ switch (mode) {
+ case BYROW:
+ /* Each process takes a slab of rows. */
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = SPACE1_DIM1 / mpi_size;
+ count[1] = SPACE1_DIM2;
+ start[0] = mpi_rank * count[0];
+ start[1] = 0;
+ break;
+ case BYCOL:
+ /* Each process takes a block of columns. */
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = SPACE1_DIM1;
+ count[1] = SPACE1_DIM2 / mpi_size;
+ start[0] = 0;
+ start[1] = mpi_rank * count[1];
+ break;
+ default:
+ /* Unknown mode. Set it to cover the whole dataset. */
+ printf("unknown slab_set mode (%d)\n", mode);
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = SPACE1_DIM1;
+ count[1] = SPACE1_DIM2;
+ start[0] = 0;
+ start[1] = 0;
+ break;
+ }
+}
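+
+/*
+ * Worked example (illustrative): with mpi_size = 4 and the 24 x 24
+ * dataspace used below, BYROW gives each process a 6 x 24 slab starting
+ * at row 6 * mpi_rank, while BYCOL gives each process a 24 x 6 slab
+ * starting at column 6 * mpi_rank.
+ */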
+
+/*
+ * Fill the dataset with trivial data for testing.
+ * Assume dimension rank is 2 and data is stored contiguously.
+ */
+void
+dataset_fill(hsize_t start[], hsize_t count[], hsize_t stride[], DATATYPE *dataset)
+{
+ DATATYPE *dataptr = dataset;
+ hsize_t i, j;
+
+ /* put some trivial data in the data_array */
+ for (i = 0; i < count[0]; i++) {
+ for (j = 0; j < count[1]; j++) {
+ *dataptr++ = (i * stride[0] + start[0]) * 100 + (j * stride[1] + start[1] + 1);
+ }
+ }
+}
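+
+/*
+ * Worked example (illustrative): for a process whose slab starts at
+ * start = (6, 0) with stride = (1, 1), the element at local index
+ * (0, 0) becomes (0 + 6) * 100 + (0 + 0 + 1) = 601 and (0, 1) becomes
+ * 602, so each value encodes its global (row, column) position.
+ */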
+
+/*
+ * Print the content of the dataset.
+ */
+void
+dataset_print(hsize_t start[], hsize_t count[], hsize_t stride[], DATATYPE *dataset)
+{
+ DATATYPE *dataptr = dataset;
+ hsize_t i, j;
+
+ /* print the slab read */
+ for (i = 0; i < count[0]; i++) {
+ printf("Row %lu: ", (unsigned long)(i * stride[0] + start[0]));
+ for (j = 0; j < count[1]; j++) {
+ printf("%03d ", *dataptr++);
+ }
+ printf("\n");
+ }
+}
+
+/*
+ * Verify the dataset read against the original (expected) data.
+ */
+int
+dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], DATATYPE *dataset, DATATYPE *original)
+{
+#define MAX_ERR_REPORT 10 /* Maximum number of errors reported */
+
+ hsize_t i, j;
+ int nerr;
+
+ /* print it if verbose */
+ if (verbose)
+ dataset_print(start, count, stride, dataset);
+
+ nerr = 0;
+ for (i = 0; i < count[0]; i++) {
+ for (j = 0; j < count[1]; j++) {
+ if (*dataset++ != *original++) {
+ nerr++;
+ if (nerr <= MAX_ERR_REPORT) {
+ printf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
+ (unsigned long)i, (unsigned long)j, (unsigned long)(i * stride[0] + start[0]),
+ (unsigned long)(j * stride[1] + start[1]), *(dataset - 1), *(original - 1));
+ }
+ }
+ }
+ }
+ if (nerr > MAX_ERR_REPORT)
+ printf("[more errors ...]\n");
+ if (nerr)
+ printf("%d errors found in dataset_vrfy\n", nerr);
+ return (nerr);
+}
+
+/*
+ * Example of using the parallel HDF5 library to create two datasets
+ * in one HDF5 file with parallel MPI-IO access support.
+ * The datasets are of size DIM1 x DIM2. Each process independently
+ * writes only a slab of (DIM1 / number-of-processes) rows by DIM2
+ * columns within each dataset.
+ */
+
+void
+phdf5writeInd(char *filename)
+{
+ hid_t fid1; /* HDF5 file IDs */
+ hid_t acc_tpl1; /* File access templates */
+ hid_t sid1; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ hsize_t dims1[SPACE1_RANK] = {SPACE1_DIM1, SPACE1_DIM2}; /* dataspace dim sizes */
+ DATATYPE data_array1[SPACE1_DIM1][SPACE1_DIM2]; /* data buffer */
+
+ hsize_t start[SPACE1_RANK]; /* for hyperslab setting */
+ hsize_t count[SPACE1_RANK], stride[SPACE1_RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ if (verbose)
+ printf("Independent write test on file %s\n", filename);
+
+ /* -------------------
+ * START AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template with parallel IO access. */
+ acc_tpl1 = H5Pcreate(H5P_FILE_ACCESS);
+ assert(acc_tpl1 != FAIL);
+ MESG("H5Pcreate access succeed");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(acc_tpl1, comm, info);
+ assert(ret != FAIL);
+ MESG("H5Pset_fapl_mpio succeed");
+
+ /* create the file collectively */
+ fid1 = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl1);
+ assert(fid1 != FAIL);
+ MESG("H5Fcreate succeed");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl1);
+ assert(ret != FAIL);
+
+ /* --------------------------
+ * Define the dimensions of the overall datasets
+ * and the slabs local to the MPI process.
+ * ------------------------- */
+ /* setup dimensionality object */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ assert(sid1 != FAIL);
+ MESG("H5Screate_simple succeed");
+
+ /* create a dataset collectively */
+ dataset1 = H5Dcreate2(fid1, DATASETNAME1, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ assert(dataset1 != FAIL);
+ MESG("H5Dcreate2 succeed");
+
+ /* create another dataset collectively */
+ dataset2 = H5Dcreate2(fid1, DATASETNAME2, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ assert(dataset2 != FAIL);
+ MESG("H5Dcreate2 succeed");
+
+ /* set up dimensions of the slab this process accesses */
+ start[0] = mpi_rank * SPACE1_DIM1 / mpi_size;
+ start[1] = 0;
+ count[0] = SPACE1_DIM1 / mpi_size;
+ count[1] = SPACE1_DIM2;
+ stride[0] = 1;
+ stride[1] = 1;
+ if (verbose)
+ printf("start[]=(%lu,%lu), count[]=(%lu,%lu), total datapoints=%lu\n", (unsigned long)start[0],
+ (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
+ (unsigned long)(count[0] * count[1]));
+
+ /* put some trivial data in the data_array */
+ dataset_fill(start, count, stride, &data_array1[0][0]);
+ MESG("data_array initialized");
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ assert(file_dataspace != FAIL);
+ MESG("H5Dget_space succeed");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
+ assert(ret != FAIL);
+ MESG("H5Sselect_hyperslab succeed");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(SPACE1_RANK, count, NULL);
+ assert(mem_dataspace != FAIL);
+
+ /* write data independently */
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ assert(ret != FAIL);
+ MESG("H5Dwrite succeed");
+
+ /* write data independently */
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ assert(ret != FAIL);
+ MESG("H5Dwrite succeed");
+
+ /* release dataspace IDs */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ assert(ret != FAIL);
+ MESG("H5Dclose1 succeed");
+ ret = H5Dclose(dataset2);
+ assert(ret != FAIL);
+ MESG("H5Dclose2 succeed");
+
+ /* release all IDs created */
+ H5Sclose(sid1);
+
+ /* close the file collectively */
+ H5Fclose(fid1);
+}
+
+/* Example of using the parallel HDF5 library to read datasets independently */
+void
+phdf5readInd(char *filename)
+{
+ hid_t fid1; /* HDF5 file IDs */
+ hid_t acc_tpl1; /* File access templates */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ DATATYPE data_array1[SPACE1_DIM1][SPACE1_DIM2]; /* data buffer */
+ DATATYPE data_origin1[SPACE1_DIM1][SPACE1_DIM2]; /* expected data buffer */
+
+ hsize_t start[SPACE1_RANK]; /* for hyperslab setting */
+ hsize_t count[SPACE1_RANK], stride[SPACE1_RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ if (verbose)
+ printf("Independent read test on file %s\n", filename);
+
+ /* setup file access template */
+ acc_tpl1 = H5Pcreate(H5P_FILE_ACCESS);
+ assert(acc_tpl1 != FAIL);
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(acc_tpl1, comm, info);
+ assert(ret != FAIL);
+
+ /* open the file collectively */
+ fid1 = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl1);
+ assert(fid1 != FAIL);
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl1);
+ assert(ret != FAIL);
+
+ /* open the dataset1 collectively */
+ dataset1 = H5Dopen2(fid1, DATASETNAME1, H5P_DEFAULT);
+ assert(dataset1 != FAIL);
+
+ /* open another dataset collectively */
+ dataset2 = H5Dopen2(fid1, DATASETNAME2, H5P_DEFAULT);
+ assert(dataset2 != FAIL);
+
+ /* set up dimensions of the slab this process accesses */
+ start[0] = mpi_rank * SPACE1_DIM1 / mpi_size;
+ start[1] = 0;
+ count[0] = SPACE1_DIM1 / mpi_size;
+ count[1] = SPACE1_DIM2;
+ stride[0] = 1;
+ stride[1] = 1;
+ if (verbose)
+ printf("start[]=(%lu,%lu), count[]=(%lu,%lu), total datapoints=%lu\n", (unsigned long)start[0],
+ (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
+ (unsigned long)(count[0] * count[1]));
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ assert(file_dataspace != FAIL);
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
+ assert(ret != FAIL);
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(SPACE1_RANK, count, NULL);
+ assert(mem_dataspace != FAIL);
+
+ /* fill dataset with test data */
+ dataset_fill(start, count, stride, &data_origin1[0][0]);
+
+ /* read data independently */
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ assert(ret != FAIL);
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, &data_array1[0][0], &data_origin1[0][0]);
+ assert(ret == 0);
+
+ /* read data independently */
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ assert(ret != FAIL);
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, &data_array1[0][0], &data_origin1[0][0]);
+ assert(ret == 0);
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ assert(ret != FAIL);
+ ret = H5Dclose(dataset2);
+ assert(ret != FAIL);
+
+ /* release all IDs created */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+
+ /* close the file collectively */
+ H5Fclose(fid1);
+}
+
+/*
+ * Example of using the parallel HDF5 library to create two datasets
+ * in one HDF5 file with collective parallel access support.
+ * The datasets are of size DIM1 x DIM2, and each process controls a
+ * hyperslab (a block of rows for one dataset and a block of columns
+ * for the other) within each dataset.
+ */
+
+void
+phdf5writeAll(char *filename)
+{
+ hid_t fid1; /* HDF5 file IDs */
+ hid_t acc_tpl1; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid1; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ hsize_t dims1[SPACE1_RANK] = {SPACE1_DIM1, SPACE1_DIM2}; /* dataspace dim sizes */
+ DATATYPE data_array1[SPACE1_DIM1][SPACE1_DIM2]; /* data buffer */
+
+ hsize_t start[SPACE1_RANK]; /* for hyperslab setting */
+ hsize_t count[SPACE1_RANK], stride[SPACE1_RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ if (verbose)
+ printf("Collective write test on file %s\n", filename);
+
+ /* -------------------
+ * START AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template with parallel IO access. */
+ acc_tpl1 = H5Pcreate(H5P_FILE_ACCESS);
+ assert(acc_tpl1 != FAIL);
+ MESG("H5Pcreate access succeed");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(acc_tpl1, comm, info);
+ assert(ret != FAIL);
+ MESG("H5Pset_fapl_mpio succeed");
+
+ /* create the file collectively */
+ fid1 = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl1);
+ assert(fid1 != FAIL);
+ MESG("H5Fcreate succeed");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl1);
+ assert(ret != FAIL);
+
+ /* --------------------------
+ * Define the dimensions of the overall datasets
+ * and create the dataset
+ * ------------------------- */
+ /* setup dimensionality object */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ assert(sid1 != FAIL);
+ MESG("H5Screate_simple succeed");
+
+ /* create a dataset collectively */
+ dataset1 = H5Dcreate2(fid1, DATASETNAME1, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ assert(dataset1 != FAIL);
+ MESG("H5Dcreate2 succeed");
+
+ /* create another dataset collectively */
+ dataset2 = H5Dcreate2(fid1, DATASETNAME2, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ assert(dataset2 != FAIL);
+ MESG("H5Dcreate2 2 succeed");
+
+ /*
+ * Set up dimensions of the slab this process accesses.
+ */
+
+ /* Dataset1: each process takes a block of rows. */
+ slab_set(start, count, stride, BYROW);
+ if (verbose)
+ printf("start[]=(%lu,%lu), count[]=(%lu,%lu), total datapoints=%lu\n", (unsigned long)start[0],
+ (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
+ (unsigned long)(count[0] * count[1]));
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ assert(file_dataspace != FAIL);
+ MESG("H5Dget_space succeed");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
+ assert(ret != FAIL);
+ MESG("H5Sselect_hyperslab succeed");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(SPACE1_RANK, count, NULL);
+ assert(mem_dataspace != FAIL);
+
+ /* fill the local slab with some trivial data */
+ dataset_fill(start, count, stride, &data_array1[0][0]);
+ MESG("data_array initialized");
+ if (verbose) {
+ MESG("data_array created");
+ dataset_print(start, count, stride, &data_array1[0][0]);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ assert(xfer_plist != FAIL);
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ assert(ret != FAIL);
+ MESG("H5Pcreate xfer succeed");
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ assert(ret != FAIL);
+ MESG("H5Dwrite succeed");
+
+ /* release all temporary handles. */
+ /* Could have used them for dataset2 but it is cleaner */
+ /* to create them again.*/
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* Dataset2: each process takes a block of columns. */
+ slab_set(start, count, stride, BYCOL);
+ if (verbose)
+ printf("start[]=(%lu,%lu), count[]=(%lu,%lu), total datapoints=%lu\n", (unsigned long)start[0],
+ (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
+ (unsigned long)(count[0] * count[1]));
+
+ /* put some trivial data in the data_array */
+ dataset_fill(start, count, stride, &data_array1[0][0]);
+ MESG("data_array initialized");
+ if (verbose) {
+ MESG("data_array created");
+ dataset_print(start, count, stride, &data_array1[0][0]);
+ }
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ assert(file_dataspace != FAIL);
+ MESG("H5Dget_space succeed");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
+ assert(ret != FAIL);
+ MESG("H5Sselect_hyperslab succeed");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(SPACE1_RANK, count, NULL);
+ assert(mem_dataspace != FAIL);
+
+ /* fill the local slab with some trivial data */
+ dataset_fill(start, count, stride, &data_array1[0][0]);
+ MESG("data_array initialized");
+ if (verbose) {
+ MESG("data_array created");
+ dataset_print(start, count, stride, &data_array1[0][0]);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ assert(xfer_plist != FAIL);
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ assert(ret != FAIL);
+ MESG("H5Pcreate xfer succeed");
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ assert(ret != FAIL);
+ MESG("H5Dwrite succeed");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /*
+ * All writes completed. Close datasets collectively
+ */
+ ret = H5Dclose(dataset1);
+ assert(ret != FAIL);
+ MESG("H5Dclose1 succeed");
+ ret = H5Dclose(dataset2);
+ assert(ret != FAIL);
+ MESG("H5Dclose2 succeed");
+
+ /* release all IDs created */
+ H5Sclose(sid1);
+
+ /* close the file collectively */
+ H5Fclose(fid1);
+}
+
+/*
+ * Example of using the parallel HDF5 library to read two datasets
+ * in one HDF5 file with collective parallel access support.
+ * The datasets are of size DIM1 x DIM2, and each process reads a
+ * hyperslab (a block of columns for one dataset and a block of rows
+ * for the other) from each dataset.
+ */
+
+void
+phdf5readAll(char *filename)
+{
+ hid_t fid1; /* HDF5 file IDs */
+ hid_t acc_tpl1; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ DATATYPE data_array1[SPACE1_DIM1][SPACE1_DIM2]; /* data buffer */
+ DATATYPE data_origin1[SPACE1_DIM1][SPACE1_DIM2]; /* expected data buffer */
+
+ hsize_t start[SPACE1_RANK]; /* for hyperslab setting */
+ hsize_t count[SPACE1_RANK], stride[SPACE1_RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ if (verbose)
+ printf("Collective read test on file %s\n", filename);
+
+ /* -------------------
+ * OPEN AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template with parallel IO access. */
+ acc_tpl1 = H5Pcreate(H5P_FILE_ACCESS);
+ assert(acc_tpl1 != FAIL);
+ MESG("H5Pcreate access succeed");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(acc_tpl1, comm, info);
+ assert(ret != FAIL);
+ MESG("H5Pset_fapl_mpio succeed");
+
+ /* open the file collectively */
+ fid1 = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl1);
+ assert(fid1 != FAIL);
+ MESG("H5Fopen succeed");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl1);
+ assert(ret != FAIL);
+
+ /* --------------------------
+ * Open the datasets in it
+ * ------------------------- */
+ /* open the dataset1 collectively */
+ dataset1 = H5Dopen2(fid1, DATASETNAME1, H5P_DEFAULT);
+ assert(dataset1 != FAIL);
+ MESG("H5Dopen2 succeed");
+
+ /* open another dataset collectively */
+ dataset2 = H5Dopen2(fid1, DATASETNAME2, H5P_DEFAULT);
+ assert(dataset2 != FAIL);
+ MESG("H5Dopen2 2 succeed");
+
+ /*
+ * Set up dimensions of the slab this process accesses.
+ */
+
+ /* Dataset1: each process takes a block of columns. */
+ slab_set(start, count, stride, BYCOL);
+ if (verbose)
+ printf("start[]=(%lu,%lu), count[]=(%lu,%lu), total datapoints=%lu\n", (unsigned long)start[0],
+ (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
+ (unsigned long)(count[0] * count[1]));
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ assert(file_dataspace != FAIL);
+ MESG("H5Dget_space succeed");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
+ assert(ret != FAIL);
+ MESG("H5Sselect_hyperslab succeed");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(SPACE1_RANK, count, NULL);
+ assert(mem_dataspace != FAIL);
+
+ /* fill dataset with test data */
+ dataset_fill(start, count, stride, &data_origin1[0][0]);
+ MESG("data_array initialized");
+ if (verbose) {
+ MESG("data_array created");
+ dataset_print(start, count, stride, &data_array1[0][0]);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ assert(xfer_plist != FAIL);
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ assert(ret != FAIL);
+ MESG("H5Pcreate xfer succeed");
+
+ /* read data collectively */
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ assert(ret != FAIL);
+ MESG("H5Dread succeed");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, &data_array1[0][0], &data_origin1[0][0]);
+ assert(ret == 0);
+
+ /* release all temporary handles. */
+ /* Could have used them for dataset2 but it is cleaner */
+ /* to create them again.*/
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* Dataset2: each process takes a block of rows. */
+ slab_set(start, count, stride, BYROW);
+ if (verbose)
+ printf("start[]=(%lu,%lu), count[]=(%lu,%lu), total datapoints=%lu\n", (unsigned long)start[0],
+ (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
+ (unsigned long)(count[0] * count[1]));
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ assert(file_dataspace != FAIL);
+ MESG("H5Dget_space succeed");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
+ assert(ret != FAIL);
+ MESG("H5Sselect_hyperslab succeed");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(SPACE1_RANK, count, NULL);
+ assert(mem_dataspace != FAIL);
+
+ /* fill dataset with test data */
+ dataset_fill(start, count, stride, &data_origin1[0][0]);
+ MESG("data_array initialized");
+ if (verbose) {
+ MESG("data_array created");
+ dataset_print(start, count, stride, &data_array1[0][0]);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ assert(xfer_plist != FAIL);
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ assert(ret != FAIL);
+ MESG("H5Pcreate xfer succeed");
+
+ /* read data collectively */
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ assert(ret != FAIL);
+ MESG("H5Dread succeed");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, &data_array1[0][0], &data_origin1[0][0]);
+ assert(ret == 0);
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /*
+ * All reads completed. Close datasets collectively
+ */
+ ret = H5Dclose(dataset1);
+ assert(ret != FAIL);
+ MESG("H5Dclose1 succeed");
+ ret = H5Dclose(dataset2);
+ assert(ret != FAIL);
+ MESG("H5Dclose2 succeed");
+
+ /* close the file collectively */
+ H5Fclose(fid1);
+}
+
+/*
+ * Test file access with a communicator other than MPI_COMM_WORLD.
+ * Split MPI_COMM_WORLD into two communicators: one (even_comm) contains
+ * the original processes of even rank, and the other (odd_comm)
+ * contains the original processes of odd rank. Processes in even_comm
+ * create a file and then close it, using even_comm. Processes in
+ * odd_comm just do a barrier using odd_comm. Then they all do a barrier
+ * using MPI_COMM_WORLD.
+ * If the file creation and close do not perform the correct collective
+ * actions according to the communicator argument, the processes will
+ * sooner or later freeze up due to mismatched barriers.
+ */
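+
+/*
+ * Illustrative note: with 4 MPI ranks, even_comm contains world ranks
+ * {0, 2} and odd_comm contains world ranks {1, 3}, so only ranks 0 and
+ * 2 take part in the H5Fcreate/H5Fclose calls below.
+ */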
+void
+test_split_comm_access(char filenames[][PATH_MAX])
+{
+ MPI_Comm comm;
+ MPI_Info info = MPI_INFO_NULL;
+ int color, mrc;
+ int newrank, newprocs;
+ hid_t fid; /* file IDs */
+ hid_t acc_tpl; /* File access properties */
+ herr_t ret; /* generic return value */
+
+ if (verbose)
+ printf("Split communicator access test on files %s and %s\n", filenames[0], filenames[1]);
+
+ color = mpi_rank % 2;
+ mrc = MPI_Comm_split(MPI_COMM_WORLD, color, mpi_rank, &comm);
+ assert(mrc == MPI_SUCCESS);
+ MPI_Comm_size(comm, &newprocs);
+ MPI_Comm_rank(comm, &newrank);
+
+ if (color) {
+ /* odd-rank processes */
+ mrc = MPI_Barrier(comm);
+ assert(mrc == MPI_SUCCESS);
+ }
+ else {
+ /* even-rank processes */
+ /* setup file access template */
+ acc_tpl = H5Pcreate(H5P_FILE_ACCESS);
+ assert(acc_tpl != FAIL);
+
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(acc_tpl, comm, info);
+ assert(ret != FAIL);
+
+ /* create the file collectively */
+ fid = H5Fcreate(filenames[color], H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ assert(fid != FAIL);
+ MESG("H5Fcreate succeed");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ assert(ret != FAIL);
+
+ ret = H5Fclose(fid);
+ assert(ret != FAIL);
+ }
+ if (mpi_rank == 0) {
+ mrc = MPI_File_delete(filenames[color], info);
+ assert(mrc == MPI_SUCCESS);
+ }
+ MPI_Comm_free(&comm);
+}
+
+/*
+ * Show command usage
+ */
+void
+usage(void)
+{
+ printf("Usage: ph5example [-f <prefix>] [-c] [-r] [-w] [-v]\n");
+ printf("\t-f\tfile prefix for parallel test files.\n");
+ printf("\t \t e.g. pfs:/PFS/myname\n");
+ printf("\t \tcan be set via $" PARAPREFIX ".\n");
+ printf("\t \tDefault is current directory.\n");
+ printf("\t-c\tno cleanup\n");
+ printf("\t-r\tno read\n");
+ printf("\t-w\tno write\n");
+ printf("\t-v\tverbose on\n");
+ printf("\tdefault do write then read\n");
+ printf("\n");
+}
+
+/*
+ * compose the test filename with the prefix supplied.
+ * return code: 0 if no error
+ * 1 otherwise.
+ */
+int
+mkfilenames(char *prefix)
+{
+ int i, n;
+ size_t strsize;
+
+ /* filename will be prefix/ParaEgN.h5 where N is 0 to 9. */
+ /* So, string must be big enough to hold the prefix, / and 10 more chars */
+ /* and the terminating null. */
+ strsize = strlen(prefix) + 12;
+ if (strsize > PATH_MAX) {
+ printf("File prefix too long; Use a short path name.\n");
+ return (1);
+ }
+ n = sizeof(testfiles) / sizeof(testfiles[0]);
+ if (n > 9) {
+ printf("Warning: Too many entries in testfiles. "
+ "Need to adjust the code to accommodate the large size.\n");
+ }
+ for (i = 0; i < n; i++) {
+ snprintf(testfiles[i], PATH_MAX, "%s/ParaEg%d.h5", prefix, i);
+ }
+ return (0);
+}
+
+/*
+ * parse the command line options
+ */
+int
+parse_options(int argc, char **argv)
+{
+ int i, n;
+
+ /* initialize testfiles to nulls */
+ n = sizeof(testfiles) / sizeof(testfiles[0]);
+ for (i = 0; i < n; i++) {
+ testfiles[i][0] = '\0';
+ }
+
+ while (--argc) {
+ if (**(++argv) != '-') {
+ break;
+ }
+ else {
+ switch (*(*argv + 1)) {
+ case 'f':
+ ++argv;
+ if (--argc < 1) {
+ usage();
+ nerrors++;
+ return (1);
+ }
+ if (mkfilenames(*argv)) {
+ nerrors++;
+ return (1);
+ }
+ break;
+ case 'c':
+ docleanup = 0; /* no cleanup */
+ break;
+ case 'r':
+ doread = 0;
+ break;
+ case 'w':
+ dowrite = 0;
+ break;
+ case 'v':
+ verbose = 1;
+ break;
+ default:
+ usage();
+ nerrors++;
+ return (1);
+ }
+ }
+ }
+
+ /* check the file prefix */
+ if (testfiles[0][0] == '\0') {
+ /* try to get it from the environment variable HDF5_PARAPREFIX */
+ char *env;
+ char *env_default = "."; /* default to current directory */
+ if ((env = getenv(PARAPREFIX)) == NULL) {
+ env = env_default;
+ }
+ mkfilenames(env);
+ }
+ return (0);
+}
+
+/*
+ * cleanup test files created
+ */
+void
+cleanup(void)
+{
+ int i, n;
+
+ n = sizeof(testfiles) / sizeof(testfiles[0]);
+ for (i = 0; i < n; i++) {
+ MPI_File_delete(testfiles[i], MPI_INFO_NULL);
+ }
+}
+
+/* Main Program */
+int
+main(int argc, char **argv)
+{
+ int mpi_namelen;
+ char mpi_name[MPI_MAX_PROCESSOR_NAME];
+ int i, n;
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Get_processor_name(mpi_name, &mpi_namelen);
+ /* Make sure datasets can be divided into equal chunks by the processes */
+ if ((SPACE1_DIM1 % mpi_size) || (SPACE1_DIM2 % mpi_size)) {
+ printf("DIM1(%d) and DIM2(%d) must be multiples of the number of processes (%d)\n", SPACE1_DIM1, SPACE1_DIM2,
+ mpi_size);
+ nerrors++;
+ goto finish;
+ }
+
+ if (parse_options(argc, argv) != 0)
+ goto finish;
+
+ /* show test file names */
+ if (mpi_rank == 0) {
+ n = sizeof(testfiles) / sizeof(testfiles[0]);
+ printf("Parallel test files are:\n");
+ for (i = 0; i < n; i++) {
+ printf(" %s\n", testfiles[i]);
+ }
+ }
+
+ if (dowrite) {
+ MPI_BANNER("testing PHDF5 dataset using split communicators...");
+ test_split_comm_access(testfiles);
+ MPI_BANNER("testing PHDF5 dataset independent write...");
+ phdf5writeInd(testfiles[0]);
+ MPI_BANNER("testing PHDF5 dataset collective write...");
+ phdf5writeAll(testfiles[1]);
+ }
+ if (doread) {
+ MPI_BANNER("testing PHDF5 dataset independent read...");
+ phdf5readInd(testfiles[0]);
+ MPI_BANNER("testing PHDF5 dataset collective read...");
+ phdf5readAll(testfiles[1]);
+ }
+
+ if (!(dowrite || doread)) {
+ usage();
+ nerrors++;
+ }
+
+finish:
+ if (mpi_rank == 0) { /* only process 0 reports */
+ if (nerrors)
+ printf("***PHDF5 example detected %d errors***\n", nerrors);
+ else {
+ printf("=====================================\n");
+ printf("PHDF5 example finished with no errors\n");
+ printf("=====================================\n");
+ }
+ }
+ if (docleanup)
+ cleanup();
+ MPI_Finalize();
+
+ return (nerrors);
+}
+
+#else /* H5_HAVE_PARALLEL */
+/* dummy program since H5_HAVE_PARALLEL is not configured in */
+int
+main(void)
+{
+ printf("No PHDF5 example because parallel is not configured in\n");
+ return (0);
+}
+#endif /* H5_HAVE_PARALLEL */