Diffstat (limited to 'examples')
-rw-r--r--  examples/CMakeLists.txt                      42
-rw-r--r--  examples/CMakeTests.cmake                    43
-rw-r--r--  examples/Makefile.am                         33
-rw-r--r--  examples/h5_vds-eiger.c                     179
-rw-r--r--  examples/h5_vds-exc.c                       217
-rw-r--r--  examples/h5_vds-exclim.c                    214
-rw-r--r--  examples/h5_vds-percival-unlim-maxmin.c     304
-rw-r--r--  examples/h5_vds-percival-unlim.c            346
-rw-r--r--  examples/h5_vds-percival.c                  241
-rw-r--r--  examples/h5_vds-simpleIO.c                  190
-rw-r--r--  examples/h5_vds.c                           252
-rw-r--r--  examples/ph5_filtered_writes.c              488
-rw-r--r--  examples/ph5_filtered_writes_no_sel.c       369
-rw-r--r--  examples/ph5_subfiling.c                    551
-rw-r--r--  examples/ph5example.c                      1100
-rw-r--r--  examples/run-c-ex.sh.in                      18
16 files changed, 3 insertions, 4584 deletions
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index 10c6ede..43d7af2 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -32,28 +32,8 @@ set (examples
h5_elink_unix2win
h5_shared_mesg
h5_debug_trace
- h5_vds
- h5_vds-exc
- h5_vds-exclim
- h5_vds-eiger
- h5_vds-simpleIO
- h5_vds-percival
- h5_vds-percival-unlim
- h5_vds-percival-unlim-maxmin
)
-if (H5_HAVE_PARALLEL)
- set (parallel_examples
- ph5example
- ph5_filtered_writes
- ph5_filtered_writes_no_sel
- )
-
- if (HDF5_ENABLE_SUBFILING_VFD)
- list (APPEND parallel_examples ph5_subfiling)
- endif ()
-endif ()
-
foreach (example ${examples})
add_executable (${example} ${HDF5_EXAMPLES_SOURCE_DIR}/${example}.c)
target_include_directories (${example} PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>")
@@ -74,28 +54,6 @@ foreach (example ${examples})
endif ()
endforeach ()
-if (H5_HAVE_PARALLEL)
- foreach (parallel_example ${parallel_examples})
- add_executable (${parallel_example} ${HDF5_EXAMPLES_SOURCE_DIR}/${parallel_example}.c)
- target_include_directories (${parallel_example} PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>")
- if (NOT BUILD_SHARED_LIBS)
- TARGET_C_PROPERTIES (${parallel_example} STATIC)
- target_link_libraries (${parallel_example} PRIVATE ${HDF5_LIB_TARGET} MPI::MPI_C)
- else ()
- TARGET_C_PROPERTIES (${parallel_example} SHARED)
- target_link_libraries (${parallel_example} PRIVATE ${HDF5_LIBSH_TARGET} MPI::MPI_C)
- endif ()
- set_target_properties (${parallel_example} PROPERTIES FOLDER examples)
-
- #-----------------------------------------------------------------------------
- # Add Target to clang-format
- #-----------------------------------------------------------------------------
- if (HDF5_ENABLE_FORMATTERS)
- clang_format (HDF5_EXAMPLES_${parallel_example}_FORMAT ${parallel_example})
- endif ()
- endforeach ()
-endif ()
-
if (BUILD_TESTING AND HDF5_TEST_EXAMPLES)
include (CMakeTests.cmake)
endif ()
diff --git a/examples/CMakeTests.cmake b/examples/CMakeTests.cmake
index 449bc44..09f9060 100644
--- a/examples/CMakeTests.cmake
+++ b/examples/CMakeTests.cmake
@@ -30,8 +30,6 @@ set (test_ex_CLEANFILES
group.h5
groups.h5
hard_link.h5
- h5_subfiling_default_example.h5
- h5_subfiling_custom_example.h5
mount1.h5
mount2.h5
one_index_file.h5
@@ -54,19 +52,6 @@ set (test_ex_CLEANFILES
blue/prefix_target.h5
red/prefix_target.h5
u2w/u2w_target.h5
- vds.h5
- vds-exc.h5
- vds-excalibur.h5
- vds-exclim.h5
- vds-percival.h5
- vds-percival-unlim.h5
- vds-percival-unlim-maxmin.h5
- a.h5
- b.h5
- c.h5
- d.h5
- vds-simpleIO.h5
- vds-eiger.h5
)
if (HDF5_TEST_SERIAL)
@@ -110,31 +95,3 @@ if (HDF5_TEST_SERIAL)
set (last_test "EXAMPLES-${example}")
endforeach ()
endif ()
-
-### Windows pops up a modal permission dialog on this test
-if (H5_HAVE_PARALLEL AND HDF5_TEST_PARALLEL AND NOT WIN32)
- # Ensure that 24 is a multiple of the number of processes.
- # The number 24 corresponds to SPACE1_DIM1 and SPACE1_DIM2 defined in ph5example.c
- math(EXPR NUMPROCS "24 / ((24 + ${MPIEXEC_MAX_NUMPROCS} - 1) / ${MPIEXEC_MAX_NUMPROCS})")
-
- foreach (parallel_example ${parallel_examples})
- if (HDF5_ENABLE_USING_MEMCHECKER)
- add_test (NAME MPI_TEST_EXAMPLES-${parallel_example} COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${NUMPROCS} ${MPIEXEC_PREFLAGS} $<TARGET_FILE:${parallel_example}> ${MPIEXEC_POSTFLAGS})
- else ()
- add_test (NAME MPI_TEST_EXAMPLES-${parallel_example} COMMAND "${CMAKE_COMMAND}"
- -D "TEST_PROGRAM=${MPIEXEC_EXECUTABLE}"
- -D "TEST_ARGS:STRING=${MPIEXEC_NUMPROC_FLAG};${NUMPROCS};${MPIEXEC_PREFLAGS};$<TARGET_FILE:${parallel_example}>;${MPIEXEC_POSTFLAGS}"
- -D "TEST_EXPECT=0"
- -D "TEST_SKIP_COMPARE=TRUE"
- -D "TEST_OUTPUT=${parallel_example}.out"
- -D "TEST_REFERENCE:STRING=PHDF5 example finished with no errors"
- -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
- -P "${HDF_RESOURCES_DIR}/grepTest.cmake"
- )
- endif ()
- if (last_test)
- set_tests_properties (MPI_TEST_EXAMPLES-${parallel_example} PROPERTIES DEPENDS ${last_test})
- endif ()
- set (last_test "MPI_TEST_EXAMPLES-${parallel_example}")
- endforeach ()
-endif ()
diff --git a/examples/Makefile.am b/examples/Makefile.am
index a09042c..508664b 100644
--- a/examples/Makefile.am
+++ b/examples/Makefile.am
@@ -18,15 +18,6 @@
include $(top_srcdir)/config/commence.am
-if BUILD_PARALLEL_CONDITIONAL
- EXAMPLE_PROG_PARA = ph5example ph5_filtered_writes ph5_filtered_writes_no_sel
-
-if SUBFILING_VFD_CONDITIONAL
- EXAMPLE_PROG_PARA += ph5_subfiling
-endif
-
-endif
-
INSTALL_SCRIPT_FILES = run-c-ex.sh
INSTALL_TOP_SCRIPT_FILES = run-all-ex.sh
INSTALL_TOP_FILES = README
@@ -40,9 +31,7 @@ EXAMPLE_PROG = h5_write h5_read h5_extend_write h5_chunk_read h5_compound \
h5_crtatt h5_crtgrp h5_crtdat \
h5_group h5_select h5_attribute h5_mount h5_drivers \
h5_reference_deprec h5_ref_extern h5_ref_compat h5_ref2reg_deprec \
- h5_extlink h5_elink_unix2win h5_shared_mesg h5_debug_trace \
- h5_vds h5_vds-exc h5_vds-exclim h5_vds-eiger h5_vds-simpleIO \
- h5_vds-percival h5_vds-percival-unlim h5_vds-percival-unlim-maxmin
+ h5_extlink h5_elink_unix2win h5_shared_mesg h5_debug_trace
TEST_SCRIPT=testh5cc.sh
TEST_EXAMPLES_SCRIPT=$(INSTALL_SCRIPT_FILES)
@@ -53,13 +42,7 @@ INSTALL_FILES = h5_write.c h5_read.c h5_extend_write.c h5_chunk_read.c h5_compou
h5_crtatt.c h5_crtgrp.c h5_crtdat.c \
h5_group.c h5_select.c h5_attribute.c h5_mount.c h5_drivers.c \
h5_reference_deprec.c h5_ref_extern.c h5_ref_compat.c h5_ref2reg_deprec.c \
- h5_extlink.c h5_elink_unix2win.c h5_shared_mesg.c h5_debug_trace.c \
- ph5example.c ph5_filtered_writes.c ph5_filtered_writes_no_sel.c \
- ph5_subfiling.c h5_vds.c h5_vds-exc.c h5_vds-exclim.c h5_vds-eiger.c \
- h5_vds-simpleIO.c h5_vds-percival.c h5_vds-percival-unlim.c \
- h5_vds-percival-unlim-maxmin.c
-
-
+ h5_extlink.c h5_elink_unix2win.c h5_shared_mesg.c h5_debug_trace.c
# How to build examples, using installed version of h5cc
if BUILD_PARALLEL_CONDITIONAL
@@ -123,22 +106,10 @@ h5_ref_extern: $(srcdir)/h5_ref_extern.c
h5_reference_deprec: $(srcdir)/h5_reference_deprec.c
h5_ref2reg_deprec: $(srcdir)/h5_ref2reg_deprec.c
h5_drivers: $(srcdir)/h5_drivers.c
-ph5example: $(srcdir)/ph5example.c
-ph5_filtered_writes: $(srcdir)/ph5_filtered_writes.c
-ph5_filtered_writes_no_sel: $(srcdir)/ph5_filtered_writes_no_sel.c
-ph5_subfiling: $(srcdir)/ph5_subfiling.c
h5_dtransform: $(srcdir)/h5_dtransform.c
h5_extlink: $(srcdir)/h5_extlink.c $(EXTLINK_DIRS)
h5_elink_unix2win: $(srcdir)/h5_elink_unix2win.c $(EXTLINK_DIRS)
h5_shared_mesg: $(srcdir)/h5_shared_mesg.c
-h5_vds: $(srcdir)/h5_vds.c
-h5_vds-exc: $(srcdir)/h5_vds-exc.c
-h5_vds-exclim: $(srcdir)/h5_vds-exclim.c
-h5_vds-eiger: $(srcdir)/h5_vds-eiger.c
-h5_vds-simpleIO: $(srcdir)/h5_vds-simpleIO.c
-h5_vds-percival: $(srcdir)/h5_vds-percival.c
-h5_vds-percival-unlim: $(srcdir)/h5_vds-percival-unlim.c
-h5_vds-percival-unlim-maxmin: $(srcdir)/h5_vds-percival-unlim-maxmin.c
if BUILD_SHARED_SZIP_CONDITIONAL
LD_LIBRARY_PATH=$(LL_PATH)
diff --git a/examples/h5_vds-eiger.c b/examples/h5_vds-eiger.c
deleted file mode 100644
index fcde490..0000000
--- a/examples/h5_vds-eiger.c
+++ /dev/null
@@ -1,179 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-/************************************************************
-
- This example illustrates the concept of the virtual dataset.
- Eiger use case. Every 5 frames 10x10 are in the source
- dataset "/A" in file with the name f-<#>.h5
- This file is intended for use with HDF5 Library version 1.10
-
- ************************************************************/
-/* EIP Add link to the picture */
-
-#include "hdf5.h"
-#include <stdio.h>
-#include <stdlib.h>
-
-#define FILE "vds-eiger.h5"
-#define DATASET "VDS-Eiger"
-#define VDSDIM0 5
-#define VDSDIM1 10
-#define VDSDIM2 10
-#define DIM0 5
-#define DIM1 10
-#define DIM2 10
-#define RANK 3
-
-int
-main(void)
-{
- hid_t file, src_space, vspace, dset; /* Handles */
- hid_t dcpl;
- herr_t status;
- hsize_t vdsdims[3] = {VDSDIM0, VDSDIM1, VDSDIM2}, vdsdims_max[3] = {H5S_UNLIMITED, VDSDIM1, VDSDIM1},
- dims[3] = {DIM0, DIM1, DIM2}, start[3], /* Hyperslab parameters */
- stride[3], count[3], block[3];
- hsize_t start_out[3], /* Hyperslab parameter out */
- stride_out[3], count_out[3], block_out[3];
- int i;
- H5D_layout_t layout; /* Storage layout */
- size_t num_map; /* Number of mappings */
- ssize_t len; /* Length of the string; also a return value */
- char *filename;
- char *dsetname;
-
- file = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-
- /* Create VDS dataspace. */
- vspace = H5Screate_simple(RANK, vdsdims, vdsdims_max);
-
- /* Create dataspaces for the source dataset. */
- src_space = H5Screate_simple(RANK, dims, NULL);
-
- /* Create VDS creation property */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
-
- /* Initialize hyperslab values */
-
- start[0] = 0;
- start[1] = 0;
- start[2] = 0;
- stride[0] = DIM0;
- stride[1] = 1;
- stride[2] = 1;
- count[0] = H5S_UNLIMITED;
- count[1] = 1;
- count[2] = 1;
- block[0] = DIM0;
- block[1] = DIM1;
- block[2] = DIM2;
-
- /*
- * Build the mappings
- *
- */
- status = H5Sselect_hyperslab(vspace, H5S_SELECT_SET, start, stride, count, block);
- status = H5Pset_virtual(dcpl, vspace, "f-%b.h5", "/A", src_space);
-
- /* Create a virtual dataset */
- dset = H5Dcreate2(file, DATASET, H5T_NATIVE_INT, vspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- status = H5Sclose(vspace);
- status = H5Sclose(src_space);
- status = H5Dclose(dset);
- status = H5Fclose(file);
-
- /*
- * Now we begin the read section of this example.
- */
-
- /*
- * Open file and dataset using the default properties.
- */
- file = H5Fopen(FILE, H5F_ACC_RDONLY, H5P_DEFAULT);
- dset = H5Dopen2(file, DATASET, H5P_DEFAULT);
-
- /*
- * Get creation property list and mapping properties.
- */
- dcpl = H5Dget_create_plist(dset);
-
- /*
- * Get storage layout.
- */
- layout = H5Pget_layout(dcpl);
-
- if (H5D_VIRTUAL == layout)
- printf(" Dataset has a virtual layout \n");
- else
- printf(" Wrong layout found \n");
-
- /*
- * Find number of mappings.
- */
- status = H5Pget_virtual_count(dcpl, &num_map);
- printf(" Number of mappings is %d\n", (int)num_map);
-
- /*
- * Get mapping parameters for each mapping.
- */
- for (i = 0; i < (int)num_map; i++) {
- printf(" Mapping %d \n", i);
- printf(" Selection in the virtual dataset \n");
- /* Get selection in the virtual dataset */
- vspace = H5Pget_virtual_vspace(dcpl, (size_t)i);
- if (H5Sget_select_type(vspace) == H5S_SEL_HYPERSLABS) {
- if (H5Sis_regular_hyperslab(vspace)) {
- status = H5Sget_regular_hyperslab(vspace, start_out, stride_out, count_out, block_out);
- printf(" start = [%llu, %llu, %llu] \n", (unsigned long long)start_out[0],
- (unsigned long long)start_out[1], (unsigned long long)start_out[2]);
- printf(" stride = [%llu, %llu, %llu] \n", (unsigned long long)stride_out[0],
- (unsigned long long)stride_out[1], (unsigned long long)stride_out[2]);
- printf(" count = [%llu, %llu, %llu] \n", (unsigned long long)count_out[0],
- (unsigned long long)count_out[1], (unsigned long long)count_out[2]);
- printf(" block = [%llu, %llu, %llu] \n", (unsigned long long)block_out[0],
- (unsigned long long)block_out[1], (unsigned long long)block_out[2]);
- }
- }
- /* Get source file name */
- len = H5Pget_virtual_filename(dcpl, (size_t)i, NULL, 0);
- filename = (char *)malloc((size_t)len * sizeof(char) + 1);
- H5Pget_virtual_filename(dcpl, (size_t)i, filename, len + 1);
- printf(" Source filename %s\n", filename);
-
- /* Get source dataset name */
- len = H5Pget_virtual_dsetname(dcpl, (size_t)i, NULL, 0);
- dsetname = (char *)malloc((size_t)len * sizeof(char) + 1);
- H5Pget_virtual_dsetname(dcpl, (size_t)i, dsetname, len + 1);
- printf(" Source dataset name %s\n", dsetname);
-
- /* Get selection in the source dataset */
- printf(" Selection in the source dataset ");
- src_space = H5Pget_virtual_srcspace(dcpl, (size_t)i);
- if (H5Sget_select_type(src_space) == H5S_SEL_ALL) {
- printf("H5S_ALL \n");
- }
- /* EIP read data back */
- H5Sclose(vspace);
- H5Sclose(src_space);
- free(filename);
- free(dsetname);
- }
-
- /*
- * Close and release resources.
- */
- status = H5Pclose(dcpl);
- status = H5Dclose(dset);
- status = H5Fclose(file);
-
- return 0;
-}
diff --git a/examples/h5_vds-exc.c b/examples/h5_vds-exc.c
deleted file mode 100644
index 01597cc..0000000
--- a/examples/h5_vds-exc.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-/************************************************************
-
- This example illustrates the concept of the virtual dataset.
- Excalibur use case with k=2 and m=3.
- This file is intended for use with HDF5 Library version 1.10
-
- ************************************************************/
-/* EIP Add link to the picture */
-
-#include "hdf5.h"
-#include <stdio.h>
-#include <stdlib.h>
-
-#define FILE "vds-exc.h5"
-#define DATASET "VDS-Excalibur"
-#define VDSDIM0 0
-#define VDSDIM1 15
-#define VDSDIM2 6
-#define KDIM0 0
-#define KDIM1 2
-#define KDIM2 6
-#define NDIM0 0
-#define NDIM1 3
-#define NDIM2 6
-#define RANK 3
-
-const char *SRC_FILE[] = {"a.h5", "b.h5", "c.h5", "d.h5", "e.h5", "f.h5"};
-
-const char *SRC_DATASET[] = {"A", "B", "C", "D", "E", "F"};
-
-int
-main(void)
-{
- hid_t file, space, ksrc_space, nsrc_space, vspace, src_space, dset; /* Handles */
- hid_t dcpl;
- herr_t status;
- hsize_t vdsdims[3] = {VDSDIM0, VDSDIM1, VDSDIM2}, vdsdims_max[3] = {H5S_UNLIMITED, VDSDIM1, VDSDIM2},
- kdims[3] = {KDIM0, KDIM1, KDIM2}, kdims_max[3] = {H5S_UNLIMITED, KDIM1, KDIM2},
- ndims[3] = {NDIM0, NDIM1, NDIM2}, ndims_max[3] = {H5S_UNLIMITED, NDIM1, NDIM2},
- start[3], /* Hyperslab parameters */
- count[3], block[3];
- hsize_t start_out[3], stride_out[3], count_out[3], block_out[3];
- int k = 2;
- int n = 3;
- int i;
- H5D_layout_t layout; /* Storage layout */
- size_t num_map; /* Number of mappings */
- ssize_t len; /* Length of the string; also a return value */
- char *filename;
- char *dsetname;
-
- file = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-
- /* Create VDS dataspace. */
- space = H5Screate_simple(RANK, vdsdims, vdsdims_max);
- /* Create dataspaces for A, C, and E datasets. */
- ksrc_space = H5Screate_simple(RANK, kdims, kdims_max);
- /* Create dataspaces for B, D, and F datasets. */
- nsrc_space = H5Screate_simple(RANK, ndims, ndims_max);
-
- /* Create VDS creation property */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
-
- /* Initialize hyperslab values */
-
- start[0] = 0;
- start[1] = 0;
- start[2] = 0;
- count[0] = H5S_UNLIMITED;
- count[1] = 1;
- count[2] = 1;
- block[0] = 1;
- block[1] = k;
- block[2] = VDSDIM2;
-
- /*
- * Build the mappings for A, C and E source datasets.
- * Unlimited hyperslab selection is the same in the source datasets.
- * Unlimited hyperslab selections in the virtual dataset have different offsets.
- */
- status = H5Sselect_hyperslab(ksrc_space, H5S_SELECT_SET, start, NULL, count, block);
- for (i = 0; i < 3; i++) {
- start[1] = (hsize_t)((k + n) * i);
- status = H5Sselect_hyperslab(space, H5S_SELECT_SET, start, NULL, count, block);
- status = H5Pset_virtual(dcpl, space, SRC_FILE[2 * i], SRC_DATASET[2 * i], ksrc_space);
- }
-
- /* Reinitialize start[1] and block[1] to build the second set of mappings. */
- start[1] = 0;
- block[1] = n;
- /*
- * Build the mappings for B, D and F source datasets.
- * Unlimited hyperslab selection is the same in the source datasets.
- * Unlimited hyperslab selections in the virtual dataset have different offsets.
- */
- status = H5Sselect_hyperslab(nsrc_space, H5S_SELECT_SET, start, NULL, count, block);
- for (i = 0; i < 3; i++) {
- start[1] = (hsize_t)(k + (k + n) * i);
- status = H5Sselect_hyperslab(space, H5S_SELECT_SET, start, NULL, count, block);
- status = H5Pset_virtual(dcpl, space, SRC_FILE[2 * i + 1], SRC_DATASET[2 * i + 1], nsrc_space);
- }
-
- /* Create a virtual dataset */
- dset = H5Dcreate2(file, DATASET, H5T_NATIVE_INT, space, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- status = H5Sclose(space);
- status = H5Sclose(nsrc_space);
- status = H5Sclose(ksrc_space);
- status = H5Dclose(dset);
- status = H5Fclose(file);
-
- /*
- * Now we begin the read section of this example.
- */
-
- /*
- * Open file and dataset using the default properties.
- */
- file = H5Fopen(FILE, H5F_ACC_RDONLY, H5P_DEFAULT);
- dset = H5Dopen2(file, DATASET, H5P_DEFAULT);
-
- /*
- * Get creation property list and mapping properties.
- */
- dcpl = H5Dget_create_plist(dset);
-
- /*
- * Get storage layout.
- */
- layout = H5Pget_layout(dcpl);
-
- if (H5D_VIRTUAL == layout)
- printf(" Dataset has a virtual layout \n");
- else
- printf("Wrong layout found \n");
-
- /*
- * Find number of mappings.
- */
- status = H5Pget_virtual_count(dcpl, &num_map);
- printf(" Number of mappings is %lu\n", (unsigned long)num_map);
-
- /*
- * Get mapping parameters for each mapping.
- */
- for (i = 0; i < (int)num_map; i++) {
- printf(" Mapping %d \n", i);
- printf(" Selection in the virtual dataset \n");
- /* Get selection in the virtual dataset */
- vspace = H5Pget_virtual_vspace(dcpl, (size_t)i);
- if (H5Sget_select_type(vspace) == H5S_SEL_HYPERSLABS) {
- if (H5Sis_regular_hyperslab(vspace)) {
- status = H5Sget_regular_hyperslab(vspace, start_out, stride_out, count_out, block_out);
- printf(" start = [%llu, %llu, %llu] \n", (unsigned long long)start_out[0],
- (unsigned long long)start_out[1], (unsigned long long)start_out[2]);
- printf(" stride = [%llu, %llu, %llu] \n", (unsigned long long)stride_out[0],
- (unsigned long long)stride_out[1], (unsigned long long)stride_out[2]);
- printf(" count = [%llu, %llu, %llu] \n", (unsigned long long)count_out[0],
- (unsigned long long)count_out[1], (unsigned long long)count_out[2]);
- printf(" block = [%llu, %llu, %llu] \n", (unsigned long long)block_out[0],
- (unsigned long long)block_out[1], (unsigned long long)block_out[2]);
- }
- }
- /* Get source file name */
- len = H5Pget_virtual_filename(dcpl, (size_t)i, NULL, 0);
- filename = (char *)malloc((size_t)len * sizeof(char) + 1);
- H5Pget_virtual_filename(dcpl, (size_t)i, filename, len + 1);
- printf(" Source filename %s\n", filename);
-
- /* Get source dataset name */
- len = H5Pget_virtual_dsetname(dcpl, (size_t)i, NULL, 0);
- dsetname = (char *)malloc((size_t)len * sizeof(char) + 1);
- H5Pget_virtual_dsetname(dcpl, (size_t)i, dsetname, len + 1);
- printf(" Source dataset name %s\n", dsetname);
-
- /* Get selection in the source dataset */
- printf(" Selection in the source dataset \n");
- src_space = H5Pget_virtual_srcspace(dcpl, (size_t)i);
- if (H5Sget_select_type(src_space) == H5S_SEL_HYPERSLABS) {
- if (H5Sis_regular_hyperslab(vspace)) {
- status = H5Sget_regular_hyperslab(src_space, start_out, stride_out, count_out, block_out);
- printf(" start = [%llu, %llu, %llu] \n", (unsigned long long)start_out[0],
- (unsigned long long)start_out[1], (unsigned long long)start_out[2]);
- printf(" stride = [%llu, %llu, %llu] \n", (unsigned long long)stride_out[0],
- (unsigned long long)stride_out[1], (unsigned long long)stride_out[2]);
- printf(" count = [%llu, %llu, %llu] \n", (unsigned long long)count_out[0],
- (unsigned long long)count_out[1], (unsigned long long)count_out[2]);
- printf(" block = [%llu, %llu, %llu] \n", (unsigned long long)block_out[0],
- (unsigned long long)block_out[1], (unsigned long long)block_out[2]);
- }
- }
- H5Sclose(vspace);
- H5Sclose(src_space);
- free(filename);
- free(dsetname);
- }
- /* EIP read data back */
-
- /*
- * Close and release resources.
- */
- status = H5Pclose(dcpl);
- status = H5Dclose(dset);
- status = H5Fclose(file);
-
- return 0;
-}
diff --git a/examples/h5_vds-exclim.c b/examples/h5_vds-exclim.c
deleted file mode 100644
index 4fb5536..0000000
--- a/examples/h5_vds-exclim.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-/************************************************************
-
- This example illustrates the concept of the virtual dataset.
- Excalibur use case with k=2 and m=3 and only 3 planes in
- Z-direction (i.e., not unlimited).
- This file is intended for use with HDF5 Library version 1.10
-
- ************************************************************/
-/* EIP Add link to the picture */
-
-#include "hdf5.h"
-#include <stdio.h>
-#include <stdlib.h>
-
-#define FILE "vds-exclim.h5"
-#define DATASET "VDS-Excaliburlim"
-#define VDSDIM0 3
-#define VDSDIM1 15
-#define VDSDIM2 6
-#define KDIM0 3
-#define KDIM1 2
-#define KDIM2 6
-#define NDIM0 3
-#define NDIM1 3
-#define NDIM2 6
-#define RANK 3
-
-const char *SRC_FILE[] = {"a.h5", "b.h5", "c.h5", "d.h5", "e.h5", "f.h5"};
-
-const char *SRC_DATASET[] = {"A", "B", "C", "D", "E", "F"};
-
-int
-main(void)
-{
- hid_t file, space, ksrc_space, nsrc_space, vspace, src_space, dset; /* Handles */
- hid_t dcpl;
- herr_t status;
- hsize_t vdsdims[3] = {VDSDIM0, VDSDIM1, VDSDIM2}, kdims[3] = {KDIM0, KDIM1, KDIM2},
- ndims[3] = {NDIM0, NDIM1, NDIM2}, start[3], /* Hyperslab parameters */
- count[3], block[3];
- hsize_t start_out[3], stride_out[3], count_out[3], block_out[3];
- int k = 2;
- int n = 3;
- int i;
- H5D_layout_t layout; /* Storage layout */
- size_t num_map; /* Number of mappings */
- ssize_t len; /* Length of the string; also a return value */
- char *filename;
- char *dsetname;
-
- file = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-
- /* Create VDS dataspace. */
- space = H5Screate_simple(RANK, vdsdims, NULL);
- /* Create dataspaces for A, C, and E datasets. */
- ksrc_space = H5Screate_simple(RANK, kdims, NULL);
- /* Create dataspaces for B, D, and F datasets. */
- nsrc_space = H5Screate_simple(RANK, ndims, NULL);
-
- /* Create VDS creation property */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
-
- /* Initialize hyperslab values */
-
- start[0] = 0;
- start[1] = 0;
- start[2] = 0;
- count[0] = VDSDIM0;
- count[1] = 1;
- count[2] = 1;
- block[0] = 1;
- block[1] = k;
- block[2] = VDSDIM2;
-
- /*
- * Build the mappings for A, C and E source datasets.
- *
- */
- status = H5Sselect_hyperslab(ksrc_space, H5S_SELECT_SET, start, NULL, count, block);
- for (i = 0; i < 3; i++) {
- start[1] = (hsize_t)((k + n) * i);
- status = H5Sselect_hyperslab(space, H5S_SELECT_SET, start, NULL, count, block);
- status = H5Pset_virtual(dcpl, space, SRC_FILE[2 * i], SRC_DATASET[2 * i], ksrc_space);
- }
-
- /* Reinitialize start[0] and block[1] */
- start[0] = 0;
- block[1] = n;
- /*
- * Build the mappings for B, D and F source datasets.
- *
- */
- status = H5Sselect_hyperslab(nsrc_space, H5S_SELECT_SET, start, NULL, count, block);
- for (i = 0; i < 3; i++) {
- start[1] = (hsize_t)(k + (k + n) * i);
- status = H5Sselect_hyperslab(space, H5S_SELECT_SET, start, NULL, count, block);
- status = H5Pset_virtual(dcpl, space, SRC_FILE[2 * i + 1], SRC_DATASET[2 * i + 1], nsrc_space);
- }
-
- /* Create a virtual dataset */
- dset = H5Dcreate2(file, DATASET, H5T_NATIVE_INT, space, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- status = H5Sclose(space);
- status = H5Sclose(nsrc_space);
- status = H5Sclose(ksrc_space);
- status = H5Dclose(dset);
- status = H5Fclose(file);
-
- /*
- * Now we begin the read section of this example.
- */
-
- /*
- * Open file and dataset using the default properties.
- */
- file = H5Fopen(FILE, H5F_ACC_RDONLY, H5P_DEFAULT);
- dset = H5Dopen2(file, DATASET, H5P_DEFAULT);
-
- /*
- * Get creation property list and mapping properties.
- */
- dcpl = H5Dget_create_plist(dset);
-
- /*
- * Get storage layout.
- */
- layout = H5Pget_layout(dcpl);
-
- if (H5D_VIRTUAL == layout)
- printf(" Dataset has a virtual layout \n");
- else
- printf("Wrong layout found \n");
-
- /*
- * Find number of mappings.
- */
- status = H5Pget_virtual_count(dcpl, &num_map);
- printf(" Number of mappings is %lu\n", (unsigned long)num_map);
-
- /*
- * Get mapping parameters for each mapping.
- */
- for (i = 0; i < (int)num_map; i++) {
- printf(" Mapping %d \n", i);
- printf(" Selection in the virtual dataset \n");
- /* Get selection in the virtual dataset */
- vspace = H5Pget_virtual_vspace(dcpl, (size_t)i);
- if (H5Sget_select_type(vspace) == H5S_SEL_HYPERSLABS) {
- if (H5Sis_regular_hyperslab(vspace)) {
- status = H5Sget_regular_hyperslab(vspace, start_out, stride_out, count_out, block_out);
- printf(" start = [%llu, %llu, %llu] \n", (unsigned long long)start_out[0],
- (unsigned long long)start_out[1], (unsigned long long)start_out[2]);
- printf(" stride = [%llu, %llu, %llu] \n", (unsigned long long)stride_out[0],
- (unsigned long long)stride_out[1], (unsigned long long)stride_out[2]);
- printf(" count = [%llu, %llu, %llu] \n", (unsigned long long)count_out[0],
- (unsigned long long)count_out[1], (unsigned long long)count_out[2]);
- printf(" block = [%llu, %llu, %llu] \n", (unsigned long long)block_out[0],
- (unsigned long long)block_out[1], (unsigned long long)block_out[2]);
- }
- }
- /* Get source file name */
- len = H5Pget_virtual_filename(dcpl, (size_t)i, NULL, 0);
- filename = (char *)malloc((size_t)len * sizeof(char) + 1);
- H5Pget_virtual_filename(dcpl, (size_t)i, filename, len + 1);
- printf(" Source filename %s\n", filename);
-
- /* Get source dataset name */
- len = H5Pget_virtual_dsetname(dcpl, (size_t)i, NULL, 0);
- dsetname = (char *)malloc((size_t)len * sizeof(char) + 1);
- H5Pget_virtual_dsetname(dcpl, (size_t)i, dsetname, len + 1);
- printf(" Source dataset name %s\n", dsetname);
-
- /* Get selection in the source dataset */
- printf(" Selection in the source dataset \n");
- src_space = H5Pget_virtual_srcspace(dcpl, (size_t)i);
- if (H5Sget_select_type(src_space) == H5S_SEL_HYPERSLABS) {
- if (H5Sis_regular_hyperslab(vspace)) {
- status = H5Sget_regular_hyperslab(vspace, start_out, stride_out, count_out, block_out);
- printf(" start = [%d, %d, %d] \n", (int)start_out[0], (int)start_out[1],
- (int)start_out[2]);
- printf(" stride = [%d, %d, %d] \n", (int)stride_out[0], (int)stride_out[1],
- (int)stride_out[2]);
- printf(" count = [%d, %d, %d] \n", (int)count_out[0], (int)count_out[1],
- (int)count_out[2]);
- printf(" block = [%d, %d, %d] \n", (int)block_out[0], (int)block_out[1],
- (int)block_out[2]);
- }
- }
- H5Sclose(vspace);
- H5Sclose(src_space);
- free(filename);
- free(dsetname);
- }
- /* EIP read data back */
-
- /*
- * Close and release resources.
- */
- status = H5Pclose(dcpl);
- status = H5Dclose(dset);
- status = H5Fclose(file);
-
- return 0;
-}
diff --git a/examples/h5_vds-percival-unlim-maxmin.c b/examples/h5_vds-percival-unlim-maxmin.c
deleted file mode 100644
index 9ef514d..0000000
--- a/examples/h5_vds-percival-unlim-maxmin.c
+++ /dev/null
@@ -1,304 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-/************************************************************
-
- This example illustrates the concept of the virtual dataset.
- Percival use case. Every fifth 10x10 plane in VDS is stored in
- the corresponding 3D unlimited dataset.
- There are 4 source datasets total.
- Each of the source datasets is extended to different sizes.
- VDS access property can be used to get max and min extent.
- This file is intended for use with HDF5 Library version 1.10
-
- ************************************************************/
-/* EIP Add link to the picture */
-
-#include "hdf5.h"
-#include <stdio.h>
-#include <stdlib.h>
-
-#define VFILE "vds-percival-unlim-maxmin.h5"
-#define DATASET "VDS-Percival-unlim-maxmin"
-#define VDSDIM0 H5S_UNLIMITED
-#define VDSDIM1 10
-#define VDSDIM2 10
-
-#define DIM0 H5S_UNLIMITED
-#define DIM0_1 4 /* Initial size of the source datasets */
-#define DIM1 10
-#define DIM2 10
-#define RANK 3
-#define PLANE_STRIDE 4
-
-const char *SRC_FILE[] = {"a.h5", "b.h5", "c.h5", "d.h5"};
-
-const char *SRC_DATASET[] = {"A", "B", "C", "D"};
-
-int
-main(void)
-{
- hid_t vfile, file, src_space, mem_space, vspace, vdset, dset; /* Handles */
- hid_t dcpl, dapl;
- herr_t status;
- hsize_t vdsdims[3] = {4 * DIM0_1, VDSDIM1, VDSDIM2}, vdsdims_max[3] = {VDSDIM0, VDSDIM1, VDSDIM2},
- dims[3] = {DIM0_1, DIM1, DIM2}, memdims[3] = {DIM0_1, DIM1, DIM2},
- extdims[3] = {0, DIM1, DIM2}, /* Dimensions of the extended source datasets */
- chunk_dims[3] = {DIM0_1, DIM1, DIM2}, dims_max[3] = {DIM0, DIM1, DIM2}, vdsdims_out[3],
- vdsdims_max_out[3], start[3], /* Hyperslab parameters */
- stride[3], count[3], src_count[3], block[3];
- hsize_t start_out[3], /* Hyperslab parameter out */
- stride_out[3], count_out[3], block_out[3];
- int i, j;
- H5D_layout_t layout; /* Storage layout */
- size_t num_map; /* Number of mappings */
- ssize_t len; /* Length of the string; also a return value */
- char *filename;
- char *dsetname;
- int wdata[DIM0_1 * DIM1 * DIM2];
-
- /*
- * Create source files and datasets. This step is optional.
- */
- for (i = 0; i < PLANE_STRIDE; i++) {
- /*
- * Initialize data for i-th source dataset.
- */
- for (j = 0; j < DIM0_1 * DIM1 * DIM2; j++)
- wdata[j] = i + 1;
-
- /*
- * Create the source files and datasets. Write data to each dataset and
- * close all resources.
- */
-
- file = H5Fcreate(SRC_FILE[i], H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
- src_space = H5Screate_simple(RANK, dims, dims_max);
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
- status = H5Pset_chunk(dcpl, RANK, chunk_dims);
- dset = H5Dcreate2(file, SRC_DATASET[i], H5T_NATIVE_INT, src_space, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- status = H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
- status = H5Sclose(src_space);
- status = H5Pclose(dcpl);
- status = H5Dclose(dset);
- status = H5Fclose(file);
- }
-
- vfile = H5Fcreate(VFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-
- /* Create VDS dataspace. */
- vspace = H5Screate_simple(RANK, vdsdims, vdsdims_max);
-
- /* Create dataspaces for the source dataset. */
- src_space = H5Screate_simple(RANK, dims, dims_max);
-
- /* Create VDS creation property */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
-
- /* Initialize hyperslab values */
-
- start[0] = 0;
- start[1] = 0;
- start[2] = 0;
- stride[0] = PLANE_STRIDE; /* we will select every fifth plane in VDS */
- stride[1] = 1;
- stride[2] = 1;
- count[0] = H5S_UNLIMITED;
- count[1] = 1;
- count[2] = 1;
- src_count[0] = H5S_UNLIMITED;
- src_count[1] = 1;
- src_count[2] = 1;
- block[0] = 1;
- block[1] = DIM1;
- block[2] = DIM2;
-
- /*
- * Build the mappings
- *
- */
- status = H5Sselect_hyperslab(src_space, H5S_SELECT_SET, start, NULL, src_count, block);
- for (i = 0; i < PLANE_STRIDE; i++) {
- status = H5Sselect_hyperslab(vspace, H5S_SELECT_SET, start, stride, count, block);
- status = H5Pset_virtual(dcpl, vspace, SRC_FILE[i], SRC_DATASET[i], src_space);
- start[0]++;
- }
-
- H5Sselect_none(vspace);
-
- /* Create a virtual dataset */
- vdset = H5Dcreate2(vfile, DATASET, H5T_NATIVE_INT, vspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- status = H5Sclose(vspace);
- status = H5Sclose(src_space);
- status = H5Pclose(dcpl);
-
- /* Let's add data to the source datasets and check new dimensions for VDS */
- /* We will add only one plane to the first source dataset, two planes to the
- second one, three to the third, and four to the forth. */
-
- for (i = 0; i < PLANE_STRIDE; i++) {
- /*
- * Initialize data for i-th source dataset.
- */
- for (j = 0; j < (i + 1) * DIM1 * DIM2; j++)
- wdata[j] = 10 * (i + 1);
-
- /*
- * Open the source files and datasets. Append data to each dataset and
- * close all resources.
- */
-
- file = H5Fopen(SRC_FILE[i], H5F_ACC_RDWR, H5P_DEFAULT);
- dset = H5Dopen2(file, SRC_DATASET[i], H5P_DEFAULT);
- extdims[0] = DIM0_1 + i + 1;
- status = H5Dset_extent(dset, extdims);
- src_space = H5Dget_space(dset);
- start[0] = DIM0_1;
- start[1] = 0;
- start[2] = 0;
- count[0] = 1;
- count[1] = 1;
- count[2] = 1;
- block[0] = i + 1;
- block[1] = DIM1;
- block[2] = DIM2;
-
- memdims[0] = i + 1;
- mem_space = H5Screate_simple(RANK, memdims, NULL);
- status = H5Sselect_hyperslab(src_space, H5S_SELECT_SET, start, NULL, count, block);
- status = H5Dwrite(dset, H5T_NATIVE_INT, mem_space, src_space, H5P_DEFAULT, wdata);
- status = H5Sclose(src_space);
- status = H5Dclose(dset);
- status = H5Fclose(file);
- }
-
- status = H5Dclose(vdset);
- status = H5Fclose(vfile);
-
- /*
- * Now we begin the read section of this example.
- */
-
- /*
- * Open file and dataset using the default properties.
- */
- vfile = H5Fopen(VFILE, H5F_ACC_RDONLY, H5P_DEFAULT);
-
- /*
- * Open VDS using different access properties to use max or
- * min extents depending on the sizes of the underlying datasets
- */
- dapl = H5Pcreate(H5P_DATASET_ACCESS);
-
- for (i = 0; i < 2; i++) {
- status = H5Pset_virtual_view(dapl, i ? H5D_VDS_LAST_AVAILABLE : H5D_VDS_FIRST_MISSING);
- vdset = H5Dopen2(vfile, DATASET, dapl);
-
- /* Let's get space of the VDS and its dimension; we should get 32(or 20)x10x10 */
- vspace = H5Dget_space(vdset);
- H5Sget_simple_extent_dims(vspace, vdsdims_out, vdsdims_max_out);
- printf("VDS dimensions, bounds = H5D_VDS_%s: ", i ? "LAST_AVAILABLE" : "FIRST_MISSING");
- for (j = 0; j < RANK; j++)
- printf(" %d ", (int)vdsdims_out[j]);
- printf("\n");
-
- /* Close */
- status = H5Dclose(vdset);
- status = H5Sclose(vspace);
- }
-
- status = H5Pclose(dapl);
-
- vdset = H5Dopen2(vfile, DATASET, H5P_DEFAULT);
-
- /*
- * Get creation property list and mapping properties.
- */
- dcpl = H5Dget_create_plist(vdset);
-
- /*
- * Get storage layout.
- */
- layout = H5Pget_layout(dcpl);
-
- if (H5D_VIRTUAL == layout)
- printf(" Dataset has a virtual layout \n");
- else
- printf(" Wrong layout found \n");
-
- /*
- * Find number of mappings.
- */
- status = H5Pget_virtual_count(dcpl, &num_map);
- printf(" Number of mappings is %lu\n", (unsigned long)num_map);
-
- /*
- * Get mapping parameters for each mapping.
- */
- for (i = 0; i < (int)num_map; i++) {
- printf(" Mapping %d \n", i);
- printf(" Selection in the virtual dataset \n");
- /* Get selection in the virtual dataset */
- vspace = H5Pget_virtual_vspace(dcpl, (size_t)i);
- if (H5Sget_select_type(vspace) == H5S_SEL_HYPERSLABS) {
- if (H5Sis_regular_hyperslab(vspace)) {
- status = H5Sget_regular_hyperslab(vspace, start_out, stride_out, count_out, block_out);
- printf(" start = [%llu, %llu, %llu] \n", (unsigned long long)start_out[0],
- (unsigned long long)start_out[1], (unsigned long long)start_out[2]);
- printf(" stride = [%llu, %llu, %llu] \n", (unsigned long long)stride_out[0],
- (unsigned long long)stride_out[1], (unsigned long long)stride_out[2]);
- printf(" count = [%llu, %llu, %llu] \n", (unsigned long long)count_out[0],
- (unsigned long long)count_out[1], (unsigned long long)count_out[2]);
- printf(" block = [%llu, %llu, %llu] \n", (unsigned long long)block_out[0],
- (unsigned long long)block_out[1], (unsigned long long)block_out[2]);
- }
- }
- /* Get source file name */
- len = H5Pget_virtual_filename(dcpl, (size_t)i, NULL, 0);
- filename = (char *)malloc((size_t)len * sizeof(char) + 1);
- H5Pget_virtual_filename(dcpl, (size_t)i, filename, len + 1);
- printf(" Source filename %s\n", filename);
-
- /* Get source dataset name */
- len = H5Pget_virtual_dsetname(dcpl, (size_t)i, NULL, 0);
- dsetname = (char *)malloc((size_t)len * sizeof(char) + 1);
- H5Pget_virtual_dsetname(dcpl, (size_t)i, dsetname, len + 1);
- printf(" Source dataset name %s\n", dsetname);
-
- /* Get selection in the source dataset */
- printf(" Selection in the source dataset \n");
- src_space = H5Pget_virtual_srcspace(dcpl, (size_t)i);
- if (H5Sget_select_type(src_space) == H5S_SEL_HYPERSLABS) {
- if (H5Sis_regular_hyperslab(src_space)) {
- status = H5Sget_regular_hyperslab(src_space, start_out, stride_out, count_out, block_out);
- printf(" start = [%llu, %llu, %llu] \n", (unsigned long long)start_out[0],
- (unsigned long long)start_out[1], (unsigned long long)start_out[2]);
- printf(" stride = [%llu, %llu, %llu] \n", (unsigned long long)stride_out[0],
- (unsigned long long)stride_out[1], (unsigned long long)stride_out[2]);
- printf(" count = [%llu, %llu, %llu] \n", (unsigned long long)count_out[0],
- (unsigned long long)count_out[1], (unsigned long long)count_out[2]);
- printf(" block = [%llu, %llu, %llu] \n", (unsigned long long)block_out[0],
- (unsigned long long)block_out[1], (unsigned long long)block_out[2]);
- }
- }
- H5Sclose(vspace);
- H5Sclose(src_space);
- free(filename);
- free(dsetname);
- }
- /*
- * Close and release resources.
- */
- status = H5Pclose(dcpl);
- status = H5Dclose(vdset);
- status = H5Fclose(vfile);
- return 0;
-}
diff --git a/examples/h5_vds-percival-unlim.c b/examples/h5_vds-percival-unlim.c
deleted file mode 100644
index ddbcdec..0000000
--- a/examples/h5_vds-percival-unlim.c
+++ /dev/null
@@ -1,346 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-/************************************************************
-
- This example illustrates the concept of the virtual dataset.
- Percival use case. Every fifth 10x10 plane in VDS is stored in
- the corresponding 3D unlimited dataset.
- There are 4 source datasets total.
- This file is intended for use with HDF5 Library version 1.10
-
- ************************************************************/
-/* EIP Add link to the picture */
-
-#include "hdf5.h"
-#include <stdio.h>
-#include <stdlib.h>
-
-#define VFILE "vds-percival-unlim.h5"
-#define DATASET "VDS-Percival-unlim"
-#define VDSDIM0 H5S_UNLIMITED
-#define VDSDIM1 10
-#define VDSDIM2 10
-
-#define DIM0 H5S_UNLIMITED
-#define DIM0_1 10 /* Initial size of the datasets */
-#define DIM1 10
-#define DIM2 10
-#define RANK 3
-#define PLANE_STRIDE 4
-
-const char *SRC_FILE[] = {"a.h5", "b.h5", "c.h5", "d.h5"};
-
-const char *SRC_DATASET[] = {"A", "B", "C", "D"};
-
-int
-main(void)
-{
- hid_t vfile, file, src_space, mem_space, vspace, vdset, dset; /* Handles */
- hid_t dcpl;
- herr_t status;
- hsize_t vdsdims[3] = {4 * DIM0_1, VDSDIM1, VDSDIM2}, vdsdims_max[3] = {VDSDIM0, VDSDIM1, VDSDIM2},
- dims[3] = {DIM0_1, DIM1, DIM2}, extdims[3] = {2 * DIM0_1, DIM1, DIM2},
- chunk_dims[3] = {DIM0_1, DIM1, DIM2}, dims_max[3] = {DIM0, DIM1, DIM2}, vdsdims_out[3],
- vdsdims_max_out[3], start[3], /* Hyperslab parameters */
- stride[3], count[3], src_count[3], block[3];
- hsize_t start_out[3], /* Hyperslab parameter out */
- stride_out[3], count_out[3], block_out[3];
- int i, j, k;
- H5D_layout_t layout; /* Storage layout */
- size_t num_map; /* Number of mappings */
- ssize_t len; /* Length of the string; also a return value */
- char *filename;
- char *dsetname;
- int wdata[DIM0_1 * DIM1 * DIM2];
- int rdata[80][10][10];
- int a_rdata[20][10][10];
-
- /*
- * Create source files and datasets. This step is optional.
- */
- for (i = 0; i < PLANE_STRIDE; i++) {
- /*
- * Initialize data for i-th source dataset.
- */
- for (j = 0; j < DIM0_1 * DIM1 * DIM2; j++)
- wdata[j] = i + 1;
-
- /*
- * Create the source files and datasets. Write data to each dataset and
- * close all resources.
- */
-
- file = H5Fcreate(SRC_FILE[i], H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
- src_space = H5Screate_simple(RANK, dims, dims_max);
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
- status = H5Pset_chunk(dcpl, RANK, chunk_dims);
- dset = H5Dcreate2(file, SRC_DATASET[i], H5T_NATIVE_INT, src_space, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- status = H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
- status = H5Sclose(src_space);
- status = H5Pclose(dcpl);
- status = H5Dclose(dset);
- status = H5Fclose(file);
- }
-
- vfile = H5Fcreate(VFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-
- /* Create VDS dataspace. */
- vspace = H5Screate_simple(RANK, vdsdims, vdsdims_max);
-
- /* Create dataspaces for the source dataset. */
- src_space = H5Screate_simple(RANK, dims, dims_max);
-
- /* Create VDS creation property */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
-
- /* Initialize hyperslab values */
-
- start[0] = 0;
- start[1] = 0;
- start[2] = 0;
- stride[0] = PLANE_STRIDE; /* we will select every fifth plane in VDS */
- stride[1] = 1;
- stride[2] = 1;
- count[0] = H5S_UNLIMITED;
- count[1] = 1;
- count[2] = 1;
- src_count[0] = H5S_UNLIMITED;
- src_count[1] = 1;
- src_count[2] = 1;
- block[0] = 1;
- block[1] = DIM1;
- block[2] = DIM2;
-
- /*
- * Build the mappings
- *
- */
- status = H5Sselect_hyperslab(src_space, H5S_SELECT_SET, start, NULL, src_count, block);
- for (i = 0; i < PLANE_STRIDE; i++) {
- status = H5Sselect_hyperslab(vspace, H5S_SELECT_SET, start, stride, count, block);
- status = H5Pset_virtual(dcpl, vspace, SRC_FILE[i], SRC_DATASET[i], src_space);
- start[0]++;
- }
-
- H5Sselect_none(vspace);
-
- /* Create a virtual dataset */
- vdset = H5Dcreate2(vfile, DATASET, H5T_NATIVE_INT, vspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- status = H5Sclose(vspace);
- status = H5Sclose(src_space);
- status = H5Pclose(dcpl);
- /* Let's get space of the VDS and its dimension; we should get 40x10x10 */
- vspace = H5Dget_space(vdset);
- H5Sget_simple_extent_dims(vspace, vdsdims_out, vdsdims_max_out);
- printf("VDS dimensions first time \n");
- printf(" Current: ");
- for (i = 0; i < RANK; i++)
- printf(" %d ", (int)vdsdims_out[i]);
- printf("\n");
-
- /* Let's add data to the source datasets and check new dimensions for VDS */
-
- for (i = 0; i < PLANE_STRIDE; i++) {
- /*
- * Initialize data for i-th source dataset.
- */
- for (j = 0; j < DIM0_1 * DIM1 * DIM2; j++)
- wdata[j] = 10 * (i + 1);
-
- /*
- * Create the source files and datasets. Write data to each dataset and
- * close all resources.
- */
-
- file = H5Fopen(SRC_FILE[i], H5F_ACC_RDWR, H5P_DEFAULT);
- dset = H5Dopen2(file, SRC_DATASET[i], H5P_DEFAULT);
- status = H5Dset_extent(dset, extdims);
- src_space = H5Dget_space(dset);
- start[0] = DIM0_1;
- start[1] = 0;
- start[2] = 0;
- count[0] = 1;
- count[1] = 1;
- count[2] = 1;
- block[0] = DIM0_1;
- block[1] = DIM1;
- block[2] = DIM2;
-
- mem_space = H5Screate_simple(RANK, dims, NULL);
- status = H5Sselect_hyperslab(src_space, H5S_SELECT_SET, start, NULL, count, block);
- status = H5Dwrite(dset, H5T_NATIVE_INT, mem_space, src_space, H5P_DEFAULT, wdata);
- status = H5Sclose(src_space);
- status = H5Dclose(dset);
- status = H5Fclose(file);
- }
-
- status = H5Dclose(vdset);
- status = H5Fclose(vfile);
-
- /*
- * Now we begin the read section of this example.
- */
-
- /*
- * Open file and dataset using the default properties.
- */
- vfile = H5Fopen(VFILE, H5F_ACC_RDONLY, H5P_DEFAULT);
- vdset = H5Dopen2(vfile, DATASET, H5P_DEFAULT);
-
- /*
- * Get creation property list and mapping properties.
- */
- dcpl = H5Dget_create_plist(vdset);
-
- /*
- * Get storage layout.
- */
- layout = H5Pget_layout(dcpl);
-
- if (H5D_VIRTUAL == layout)
- printf(" Dataset has a virtual layout \n");
- else
- printf(" Wrong layout found \n");
-
- /*
- * Find number of mappings.
- */
- status = H5Pget_virtual_count(dcpl, &num_map);
- printf(" Number of mappings is %lu\n", (unsigned long)num_map);
-
- /*
- * Get mapping parameters for each mapping.
- */
- for (i = 0; i < (int)num_map; i++) {
- printf(" Mapping %d \n", i);
- printf(" Selection in the virtual dataset \n");
- /* Get selection in the virtual dataset */
- vspace = H5Pget_virtual_vspace(dcpl, (size_t)i);
- if (H5Sget_select_type(vspace) == H5S_SEL_HYPERSLABS) {
- if (H5Sis_regular_hyperslab(vspace)) {
- status = H5Sget_regular_hyperslab(vspace, start_out, stride_out, count_out, block_out);
- printf(" start = [%llu, %llu, %llu] \n", (unsigned long long)start_out[0],
- (unsigned long long)start_out[1], (unsigned long long)start_out[2]);
- printf(" stride = [%llu, %llu, %llu] \n", (unsigned long long)stride_out[0],
- (unsigned long long)stride_out[1], (unsigned long long)stride_out[2]);
- printf(" count = [%llu, %llu, %llu] \n", (unsigned long long)count_out[0],
- (unsigned long long)count_out[1], (unsigned long long)count_out[2]);
- printf(" block = [%llu, %llu, %llu] \n", (unsigned long long)block_out[0],
- (unsigned long long)block_out[1], (unsigned long long)block_out[2]);
- }
- }
- /* Get source file name */
- len = H5Pget_virtual_filename(dcpl, (size_t)i, NULL, 0);
- filename = (char *)malloc((size_t)len * sizeof(char) + 1);
- H5Pget_virtual_filename(dcpl, (size_t)i, filename, len + 1);
- printf(" Source filename %s\n", filename);
-
- /* Get source dataset name */
- len = H5Pget_virtual_dsetname(dcpl, (size_t)i, NULL, 0);
- dsetname = (char *)malloc((size_t)len * sizeof(char) + 1);
- H5Pget_virtual_dsetname(dcpl, (size_t)i, dsetname, len + 1);
- printf(" Source dataset name %s\n", dsetname);
-
- /* Get selection in the source dataset */
- printf(" Selection in the source dataset \n");
- src_space = H5Pget_virtual_srcspace(dcpl, (size_t)i);
- if (H5Sget_select_type(src_space) == H5S_SEL_HYPERSLABS) {
- if (H5Sis_regular_hyperslab(src_space)) {
- status = H5Sget_regular_hyperslab(src_space, start_out, stride_out, count_out, block_out);
- printf(" start = [%llu, %llu, %llu] \n", (unsigned long long)start_out[0],
- (unsigned long long)start_out[1], (unsigned long long)start_out[2]);
- printf(" stride = [%llu, %llu, %llu] \n", (unsigned long long)stride_out[0],
- (unsigned long long)stride_out[1], (unsigned long long)stride_out[2]);
- printf(" count = [%llu, %llu, %llu] \n", (unsigned long long)count_out[0],
- (unsigned long long)count_out[1], (unsigned long long)count_out[2]);
- printf(" block = [%llu, %llu, %llu] \n", (unsigned long long)block_out[0],
- (unsigned long long)block_out[1], (unsigned long long)block_out[2]);
- }
- }
- H5Sclose(vspace);
- H5Sclose(src_space);
- free(filename);
- free(dsetname);
- }
- /*
- * Read data from VDS.
- */
- vspace = H5Dget_space(vdset);
- H5Sget_simple_extent_dims(vspace, vdsdims_out, vdsdims_max_out);
- printf("VDS dimensions second time \n");
- printf(" Current: ");
- for (i = 0; i < RANK; i++)
- printf(" %d ", (int)vdsdims_out[i]);
- printf("\n");
-
- /* Read all VDS data */
-
- /* EIP We should be able to do it by using H5S_ALL instead of making selection
- * or using H5Sselect_all from vspace.
- */
- start[0] = 0;
- start[1] = 0;
- start[2] = 0;
- count[0] = 1;
- count[1] = 1;
- count[2] = 1;
- block[0] = vdsdims_out[0];
- block[1] = vdsdims_out[1];
- block[2] = vdsdims_out[2];
-
- status = H5Sselect_hyperslab(vspace, H5S_SELECT_SET, start, NULL, count, block);
- mem_space = H5Screate_simple(RANK, vdsdims_out, NULL);
- status = H5Dread(vdset, H5T_NATIVE_INT, mem_space, vspace, H5P_DEFAULT, rdata);
- printf(" All data: \n");
- for (i = 0; i < (int)vdsdims_out[0]; i++) {
- for (j = 0; j < (int)vdsdims_out[1]; j++) {
- printf("(%d, %d, 0)", i, j);
- for (k = 0; k < (int)vdsdims_out[2]; k++)
- printf(" %d ", rdata[i][j][k]);
- printf("\n");
- }
- }
- /* Read VDS, but only data mapeed to dataset a.h5 */
- start[0] = 0;
- start[1] = 0;
- start[2] = 0;
- stride[0] = PLANE_STRIDE;
- stride[1] = 1;
- stride[2] = 1;
- count[0] = 2 * DIM0_1;
- count[1] = 1;
- count[2] = 1;
- block[0] = 1;
- block[1] = vdsdims_out[1];
- block[2] = vdsdims_out[2];
- dims[0] = 2 * DIM0_1;
- status = H5Sselect_hyperslab(vspace, H5S_SELECT_SET, start, stride, count, block);
- mem_space = H5Screate_simple(RANK, dims, NULL);
- status = H5Dread(vdset, H5T_NATIVE_INT, mem_space, vspace, H5P_DEFAULT, a_rdata);
- printf(" All data: \n");
- for (i = 0; i < 2 * DIM0_1; i++) {
- for (j = 0; j < (int)vdsdims_out[1]; j++) {
- printf("(%d, %d, 0)", i, j);
- for (k = 0; k < (int)vdsdims_out[2]; k++)
- printf(" %d ", a_rdata[i][j][k]);
- printf("\n");
- }
- }
- /*
- * Close and release resources.
- */
- status = H5Sclose(mem_space);
- status = H5Pclose(dcpl);
- status = H5Dclose(vdset);
- status = H5Fclose(vfile);
- return 0;
-}
diff --git a/examples/h5_vds-percival.c b/examples/h5_vds-percival.c
deleted file mode 100644
index 82c8ef4..0000000
--- a/examples/h5_vds-percival.c
+++ /dev/null
@@ -1,241 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-/************************************************************
-
- This example illustrates the concept of the virtual dataset.
- Percival use case. Every fifth 10x10 plane in VDS is stored in
- the corresponding 3D unlimited dataset.
- EIP: For now we will use finite dimension.
- There are 4 source datasets total.
- This file is intended for use with HDF5 Library version 1.10
-
- ************************************************************/
-/* EIP Add link to the picture */
-
-#include "hdf5.h"
-#include <stdio.h>
-#include <stdlib.h>
-
-#define FILE "vds-percival.h5"
-#define DATASET "VDS-Percival"
-/* later
-#define VDSDIM0 H5S_UNLIMITED
-*/
-#define VDSDIM0 40
-#define VDSDIM1 10
-#define VDSDIM2 10
-/* later
-#define DIM0 H5S_UNLIMITED
-*/
-#define DIM0 10
-#define DIM1 10
-#define DIM2 10
-#define RANK 3
-#define PLANE_STRIDE 4
-
-const char *SRC_FILE[] = {"a.h5", "b.h5", "c.h5", "d.h5"};
-
-const char *SRC_DATASET[] = {"A", "B", "C", "D"};
-
-int
-main(void)
-{
- hid_t file, src_space, vspace, dset; /* Handles */
- hid_t dcpl;
- herr_t status;
- hsize_t vdsdims[3] = {VDSDIM0, VDSDIM1, VDSDIM2}, vdsdims_max[3] = {VDSDIM0, VDSDIM1, VDSDIM2},
- dims[3] = {DIM0, DIM1, DIM2}, dims_max[3] = {DIM0, DIM1, DIM2},
- start[3], /* Hyperslab start parameter for VDS */
- stride[3], count[3], src_count[3], block[3];
- hsize_t start_out[3], /* Hyperslab parameter out */
- stride_out[3], count_out[3], block_out[3];
- int i, j;
- H5D_layout_t layout; /* Storage layout */
- size_t num_map; /* Number of mappings */
- ssize_t len; /* Length of the string; also a return value */
- char *filename;
- char *dsetname;
- int wdata[DIM0 * DIM1 * DIM2];
-
- /*
- * Create source files and datasets. This step is optional.
- */
- for (i = 0; i < PLANE_STRIDE; i++) {
- /*
- * Initialize data for i-th source dataset.
- */
- for (j = 0; j < DIM0 * DIM1 * DIM2; j++)
- wdata[j] = i + 1;
-
- /*
- * Create the source files and datasets. Write data to each dataset and
- * close all resources.
- */
-
- file = H5Fcreate(SRC_FILE[i], H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
- src_space = H5Screate_simple(RANK, dims, NULL);
- dset = H5Dcreate2(file, SRC_DATASET[i], H5T_NATIVE_INT, src_space, H5P_DEFAULT, H5P_DEFAULT,
- H5P_DEFAULT);
- status = H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
- status = H5Sclose(src_space);
- status = H5Dclose(dset);
- status = H5Fclose(file);
- }
-
- file = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-
- /* Create VDS dataspace. */
- vspace = H5Screate_simple(RANK, vdsdims, vdsdims_max);
-
- /* Create dataspaces for the source dataset. */
- src_space = H5Screate_simple(RANK, dims, dims_max);
-
- /* Create VDS creation property */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
-
- /* Initialize hyperslab values */
-
- start[0] = 0;
- start[1] = 0;
- start[2] = 0;
- stride[0] = PLANE_STRIDE; /* we will select every fifth plane in VDS */
- stride[1] = 1;
- stride[2] = 1;
- /* later
- count[0] = H5S_UNLIMITED;
- */
- count[0] = VDSDIM0 / 4;
- count[1] = 1;
- count[2] = 1;
- /* later
- src_count[0] = H5S_UNLIMITED;
- */
- src_count[0] = DIM0;
- src_count[1] = 1;
- src_count[2] = 1;
- block[0] = 1;
- block[1] = DIM1;
- block[2] = DIM2;
-
- /*
- * Build the mappings
- *
- */
- status = H5Sselect_hyperslab(src_space, H5S_SELECT_SET, start, NULL, src_count, block);
- for (i = 0; i < PLANE_STRIDE; i++) {
- status = H5Sselect_hyperslab(vspace, H5S_SELECT_SET, start, stride, count, block);
- status = H5Pset_virtual(dcpl, vspace, SRC_FILE[i], SRC_DATASET[i], src_space);
- start[0]++;
- }
-
- H5Sselect_none(vspace);
-
- /* Create a virtual dataset */
- dset = H5Dcreate2(file, DATASET, H5T_NATIVE_INT, vspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- status = H5Sclose(vspace);
- status = H5Sclose(src_space);
- status = H5Dclose(dset);
- status = H5Fclose(file);
-
- /*
- * Now we begin the read section of this example.
- */
-
- /*
- * Open file and dataset using the default properties.
- */
- file = H5Fopen(FILE, H5F_ACC_RDONLY, H5P_DEFAULT);
- dset = H5Dopen2(file, DATASET, H5P_DEFAULT);
-
- /*
- * Get creation property list and mapping properties.
- */
- dcpl = H5Dget_create_plist(dset);
-
- /*
- * Get storage layout.
- */
- layout = H5Pget_layout(dcpl);
-
- if (H5D_VIRTUAL == layout)
- printf(" Dataset has a virtual layout \n");
- else
- printf(" Wrong layout found \n");
-
- /*
- * Find number of mappings.
- */
- status = H5Pget_virtual_count(dcpl, &num_map);
- printf(" Number of mappings is %lu\n", (unsigned long)num_map);
-
- /*
- * Get mapping parameters for each mapping.
- */
- for (i = 0; i < (int)num_map; i++) {
- printf(" Mapping %d \n", i);
- printf(" Selection in the virtual dataset \n");
- /* Get selection in the virtual dataset */
- vspace = H5Pget_virtual_vspace(dcpl, (size_t)i);
- if (H5Sget_select_type(vspace) == H5S_SEL_HYPERSLABS) {
- if (H5Sis_regular_hyperslab(vspace)) {
- status = H5Sget_regular_hyperslab(vspace, start_out, stride_out, count_out, block_out);
- printf(" start = [%llu, %llu, %llu] \n", (unsigned long long)start_out[0],
- (unsigned long long)start_out[1], (unsigned long long)start_out[2]);
- printf(" stride = [%llu, %llu, %llu] \n", (unsigned long long)stride_out[0],
- (unsigned long long)stride_out[1], (unsigned long long)stride_out[2]);
- printf(" count = [%llu, %llu, %llu] \n", (unsigned long long)count_out[0],
- (unsigned long long)count_out[1], (unsigned long long)count_out[2]);
- printf(" block = [%llu, %llu, %llu] \n", (unsigned long long)block_out[0],
- (unsigned long long)block_out[1], (unsigned long long)block_out[2]);
- }
- }
- /* Get source file name */
- len = H5Pget_virtual_filename(dcpl, (size_t)i, NULL, 0);
- filename = (char *)malloc((size_t)len * sizeof(char) + 1);
- H5Pget_virtual_filename(dcpl, (size_t)i, filename, len + 1);
- printf(" Source filename %s\n", filename);
-
- /* Get source dataset name */
- len = H5Pget_virtual_dsetname(dcpl, (size_t)i, NULL, 0);
- dsetname = (char *)malloc((size_t)len * sizeof(char) + 1);
- H5Pget_virtual_dsetname(dcpl, (size_t)i, dsetname, len + 1);
- printf(" Source dataset name %s\n", dsetname);
-
- /* Get selection in the source dataset */
- printf(" Selection in the source dataset \n");
- src_space = H5Pget_virtual_srcspace(dcpl, (size_t)i);
- if (H5Sget_select_type(src_space) == H5S_SEL_HYPERSLABS) {
- if (H5Sis_regular_hyperslab(src_space)) {
- status = H5Sget_regular_hyperslab(src_space, start_out, stride_out, count_out, block_out);
- printf(" start = [%llu, %llu, %llu] \n", (unsigned long long)start_out[0],
- (unsigned long long)start_out[1], (unsigned long long)start_out[2]);
- printf(" stride = [%llu, %llu, %llu] \n", (unsigned long long)stride_out[0],
- (unsigned long long)stride_out[1], (unsigned long long)stride_out[2]);
- printf(" count = [%llu, %llu, %llu] \n", (unsigned long long)count_out[0],
- (unsigned long long)count_out[1], (unsigned long long)count_out[2]);
- printf(" block = [%llu, %llu, %llu] \n", (unsigned long long)block_out[0],
- (unsigned long long)block_out[1], (unsigned long long)block_out[2]);
- }
- }
- H5Sclose(vspace);
- H5Sclose(src_space);
- free(filename);
- free(dsetname);
- }
- /*
- * Close and release resources.
- */
- status = H5Pclose(dcpl);
- status = H5Dclose(dset);
- status = H5Fclose(file);
- return 0;
-}
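For reference, the interleaved-plane mapping built in the example above reduces to a handful of calls. The following is a condensed, stand-alone sketch only: the file names, dataset names, and dimension values are placeholders rather than the SRC_FILE/SRC_DATASET macros of the original example, and error checking is omitted.

    #include "hdf5.h"

    #define N_SRC 4 /* stands in for PLANE_STRIDE */
    #define RANK  3

    static const char *src_file[N_SRC] = {"a.h5", "b.h5", "c.h5", "d.h5"};
    static const char *src_dset[N_SRC] = {"A", "B", "C", "D"};

    int
    main(void)
    {
        hsize_t dims[RANK]    = {4, 10, 10};         /* one source dataset */
        hsize_t vdsdims[RANK] = {4 * N_SRC, 10, 10}; /* interleaved VDS */
        hsize_t start[RANK]   = {0, 0, 0};
        hsize_t stride[RANK]  = {N_SRC, 1, 1};       /* every N_SRC-th plane */
        hsize_t count[RANK]   = {4, 1, 1};
        hsize_t block[RANK]   = {1, 10, 10};
        hid_t   file, vspace, src_space, dcpl, dset;
        int     i;

        file      = H5Fcreate("vds-sketch.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        vspace    = H5Screate_simple(RANK, vdsdims, NULL);
        src_space = H5Screate_simple(RANK, dims, NULL);
        dcpl      = H5Pcreate(H5P_DATASET_CREATE);

        /* Each source dataset supplies every N_SRC-th plane of the VDS,
         * offset by its index. The source selection (all planes of one
         * source dataset) is made once and reused for every mapping. */
        H5Sselect_hyperslab(src_space, H5S_SELECT_SET, start, NULL, count, block);
        for (i = 0; i < N_SRC; i++) {
            start[0] = (hsize_t)i;
            H5Sselect_hyperslab(vspace, H5S_SELECT_SET, start, stride, count, block);
            H5Pset_virtual(dcpl, vspace, src_file[i], src_dset[i], src_space);
        }

        dset = H5Dcreate2(file, "VDS", H5T_NATIVE_INT, vspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);

        H5Dclose(dset);
        H5Pclose(dcpl);
        H5Sclose(src_space);
        H5Sclose(vspace);
        H5Fclose(file);
        return 0;
    }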
diff --git a/examples/h5_vds-simpleIO.c b/examples/h5_vds-simpleIO.c
deleted file mode 100644
index f516af9..0000000
--- a/examples/h5_vds-simpleIO.c
+++ /dev/null
@@ -1,190 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-/************************************************************
-
- This example illustrates the concept of virtual dataset I/O.
- The program creates a 2-dim source dataset and writes
- data to it. Then it creates a 2-dim virtual dataset with
- the same dimension sizes and maps all elements of the
- virtual dataset to all elements of the source dataset.
- The VDS is then read back.
-
- This file is intended for use with HDF5 Library version 1.10
-
- ************************************************************/
-/* EIP Add link to the picture */
-
-#include "hdf5.h"
-#include <stdio.h>
-#include <stdlib.h>
-
-#define FILE "vds-simpleIO.h5"
-#define DATASET "VDS"
-#define DIM1 6
-#define DIM0 4
-#define RANK 2
-
-#define SRC_FILE "a.h5"
-#define SRC_DATASET "/A"
-
-int
-main(void)
-{
- hid_t file, space, src_space, vspace, dset; /* Handles */
- hid_t dcpl;
- herr_t status;
- hsize_t vdsdims[2] = {DIM0, DIM1}, /* Virtual dataset dimension */
- dims[2] = {DIM0, DIM1}; /* Source dataset dimensions */
- int wdata[DIM0][DIM1], /* Write buffer for source dataset */
- rdata[DIM0][DIM1], /* Read buffer for virtual dataset */
- i, j;
- H5D_layout_t layout; /* Storage layout */
- size_t num_map; /* Number of mappings */
- ssize_t len; /* Length of the string; also a return value */
- char *filename;
- char *dsetname;
- /*
- * Initialize data.
- */
- for (i = 0; i < DIM0; i++)
- for (j = 0; j < DIM1; j++)
- wdata[i][j] = i + 1;
-
- /*
- * Create the source file and the dataset. Write data to the source dataset
- * and close all resources.
- */
-
- file = H5Fcreate(SRC_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
- space = H5Screate_simple(RANK, dims, NULL);
- dset = H5Dcreate2(file, SRC_DATASET, H5T_NATIVE_INT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- status = H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata[0]);
- status = H5Sclose(space);
- status = H5Dclose(dset);
- status = H5Fclose(file);
-
- /* Create file in which virtual dataset will be stored. */
- file = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-
- /* Create VDS dataspace. */
- vspace = H5Screate_simple(RANK, vdsdims, NULL);
-
- /* Set VDS creation property. */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
-
- /*
- * Build the mapping.
- * Both the virtual and the source dataspace selections are H5S_ALL
- * (the default selection for a newly created dataspace), so every
- * element of the virtual dataset maps to the corresponding element
- * of the single source dataset.
- */
- src_space = H5Screate_simple(RANK, dims, NULL);
- status = H5Pset_virtual(dcpl, vspace, SRC_FILE, SRC_DATASET, src_space);
-
- /* Create a virtual dataset. */
- dset = H5Dcreate2(file, DATASET, H5T_NATIVE_INT, vspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- status = H5Sclose(vspace);
- status = H5Sclose(src_space);
- status = H5Dclose(dset);
- status = H5Fclose(file);
-
- /*
- * Now we begin the read section of this example.
- */
-
- /*
- * Open the file and virtual dataset.
- */
- file = H5Fopen(FILE, H5F_ACC_RDONLY, H5P_DEFAULT);
- dset = H5Dopen2(file, DATASET, H5P_DEFAULT);
- /*
- * Get creation property list and mapping properties.
- */
- dcpl = H5Dget_create_plist(dset);
-
- /*
- * Get storage layout.
- */
- layout = H5Pget_layout(dcpl);
- if (H5D_VIRTUAL == layout)
- printf(" Dataset has a virtual layout \n");
- else
- printf(" Wrong layout found \n");
-
- /*
- * Find the number of mappings.
- */
- status = H5Pget_virtual_count(dcpl, &num_map);
- printf(" Number of mappings is %lu\n", (unsigned long)num_map);
-
- /*
- * Get mapping parameters for each mapping.
- */
- for (i = 0; i < (int)num_map; i++) {
- printf(" Mapping %d \n", i);
- printf(" Selection in the virtual dataset ");
- /* Get selection in the virtual dataset */
- vspace = H5Pget_virtual_vspace(dcpl, (size_t)i);
-
- /* Make sure it is ALL selection and then print selection. */
- if (H5Sget_select_type(vspace) == H5S_SEL_ALL) {
- printf("Selection is H5S_ALL \n");
- }
- /* Get source file name. */
- len = H5Pget_virtual_filename(dcpl, (size_t)i, NULL, 0);
- filename = (char *)malloc((size_t)len * sizeof(char) + 1);
- H5Pget_virtual_filename(dcpl, (size_t)i, filename, len + 1);
- printf(" Source filename %s\n", filename);
-
- /* Get source dataset name. */
- len = H5Pget_virtual_dsetname(dcpl, (size_t)i, NULL, 0);
- dsetname = (char *)malloc((size_t)len * sizeof(char) + 1);
- H5Pget_virtual_dsetname(dcpl, (size_t)i, dsetname, len + 1);
- printf(" Source dataset name %s\n", dsetname);
-
- /* Get selection in the source dataset. */
- printf(" Selection in the source dataset ");
- src_space = H5Pget_virtual_srcspace(dcpl, (size_t)i);
-
- /* Make sure it is ALL selection and then print selection. */
- if (H5Sget_select_type(src_space) == H5S_SEL_ALL) {
- printf("Selection is H5S_ALL \n");
- }
- H5Sclose(vspace);
- H5Sclose(src_space);
- free(filename);
- free(dsetname);
- }
- /*
- * Read the data using the default properties.
- */
- status = H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata[0]);
-
- /*
- * Output the data to the screen.
- */
- printf(" VDS Data:\n");
- for (i = 0; i < DIM0; i++) {
- printf(" [");
- for (j = 0; j < DIM1; j++)
- printf(" %3d", rdata[i][j]);
- printf("]\n");
- }
- /*
- * Close and release resources.
- */
- status = H5Pclose(dcpl);
- status = H5Dclose(dset);
- status = H5Fclose(file);
-
- return 0;
-}
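For reference, the ALL-to-ALL mapping at the heart of the example above needs only a single H5Pset_virtual call. The sketch below is a minimal illustration with placeholder names; it assumes a source file a.h5 containing a 4x6 integer dataset /A already exists, and omits error checking.

    #include "hdf5.h"

    int
    main(void)
    {
        hsize_t dims[2] = {4, 6};
        hid_t   file, vspace, src_space, dcpl, dset;

        file      = H5Fcreate("vds-simple-sketch.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        vspace    = H5Screate_simple(2, dims, NULL);
        src_space = H5Screate_simple(2, dims, NULL);
        dcpl      = H5Pcreate(H5P_DATASET_CREATE);

        /* Both dataspaces keep their default "all" selections, so the VDS
         * mirrors the source dataset element for element. */
        H5Pset_virtual(dcpl, vspace, "a.h5", "/A", src_space);

        dset = H5Dcreate2(file, "VDS", H5T_NATIVE_INT, vspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);

        H5Dclose(dset);
        H5Pclose(dcpl);
        H5Sclose(src_space);
        H5Sclose(vspace);
        H5Fclose(file);
        return 0;
    }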
diff --git a/examples/h5_vds.c b/examples/h5_vds.c
deleted file mode 100644
index 96bd8a2..0000000
--- a/examples/h5_vds.c
+++ /dev/null
@@ -1,252 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/************************************************************
-
- This example illustrates the concept of a virtual dataset.
- The program creates three 1-dim source datasets and writes
- data to them. Then it creates a 2-dim virtual dataset and
- maps the first three rows of the virtual dataset to the data
- in the source datasets. Elements of a row are mapped to all
- elements of the corresponding source dataset.
- The fourth row is not mapped and will be filled with the fill
- value when the virtual dataset is read back.
-
- The program closes all datasets, then reopens the virtual
- dataset, finds and prints its creation properties, and
- reads the values back.
-
- This file is intended for use with HDF5 Library version 1.10
-
- ************************************************************/
-/* EIP Add link to the picture */
-
-#include "hdf5.h"
-#include <stdio.h>
-#include <stdlib.h>
-
-#define FILE "vds.h5"
-#define DATASET "VDS"
-#define VDSDIM1 6
-#define VDSDIM0 4
-#define DIM0 6
-#define RANK1 1
-#define RANK2 2
-
-const char *SRC_FILE[] = {"a.h5", "b.h5", "c.h5"};
-
-const char *SRC_DATASET[] = {"A", "B", "C"};
-
-int
-main(void)
-{
- hid_t file, space, src_space, vspace, dset; /* Handles */
- hid_t dcpl;
- herr_t status;
- hsize_t vdsdims[2] = {VDSDIM0, VDSDIM1}, /* Virtual datasets dimension */
- dims[1] = {DIM0}, /* Source datasets dimensions */
- start[2], /* Hyperslab parameters */
- count[2], block[2];
- hsize_t start_out[2], stride_out[2], count_out[2], block_out[2];
- int wdata[DIM0], /* Write buffer for source dataset */
- rdata[VDSDIM0][VDSDIM1], /* Read buffer for virtual dataset */
- i, j, k, l, block_inc;
- int fill_value = -1; /* Fill value for VDS */
- H5D_layout_t layout; /* Storage layout */
- size_t num_map; /* Number of mappings */
- ssize_t len; /* Length of the string; also a return value */
- char *filename;
- char *dsetname;
- hsize_t nblocks;
- hsize_t *buf = NULL; /* Buffer to hold hyperslab coordinates */
-
- /*
- * Create source files and datasets. This step is optional.
- */
- for (i = 0; i < 3; i++) {
- /*
- * Initialize data for i-th source dataset.
- */
- for (j = 0; j < DIM0; j++)
- wdata[j] = i + 1;
-
- /*
- * Create the source files and datasets. Write data to each dataset and
- * close all resources.
- */
-
- file = H5Fcreate(SRC_FILE[i], H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
- space = H5Screate_simple(RANK1, dims, NULL);
- dset = H5Dcreate2(file, SRC_DATASET[i], H5T_NATIVE_INT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- status = H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
- status = H5Sclose(space);
- status = H5Dclose(dset);
- status = H5Fclose(file);
- }
-
- /* Create file in which virtual dataset will be stored. */
- file = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-
- /* Create VDS dataspace. */
- space = H5Screate_simple(RANK2, vdsdims, NULL);
-
- /* Set VDS creation property. */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
- status = H5Pset_fill_value(dcpl, H5T_NATIVE_INT, &fill_value);
-
- /* Initialize hyperslab values. */
- start[0] = 0;
- start[1] = 0;
- count[0] = 1;
- count[1] = 1;
- block[0] = 1;
- block[1] = VDSDIM1;
-
- /*
- * Build the mappings.
- * Selections in the source datasets are H5S_ALL.
- * In the virtual dataset we select the first, the second and the third rows
- * and map each row to the data in the corresponding source dataset.
- */
- src_space = H5Screate_simple(RANK1, dims, NULL);
- for (i = 0; i < 3; i++) {
- start[0] = (hsize_t)i;
- /* Select i-th row in the virtual dataset; selection in the source datasets is the same. */
- status = H5Sselect_hyperslab(space, H5S_SELECT_SET, start, NULL, count, block);
- status = H5Pset_virtual(dcpl, space, SRC_FILE[i], SRC_DATASET[i], src_space);
- }
-
- /* Create a virtual dataset. */
- dset = H5Dcreate2(file, DATASET, H5T_NATIVE_INT, space, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- status = H5Sclose(space);
- status = H5Sclose(src_space);
- status = H5Dclose(dset);
- status = H5Fclose(file);
-
- /*
- * Now we begin the read section of this example.
- */
-
- /*
- * Open the file and virtual dataset.
- */
- file = H5Fopen(FILE, H5F_ACC_RDONLY, H5P_DEFAULT);
- dset = H5Dopen2(file, DATASET, H5P_DEFAULT);
-
- /*
- * Get creation property list and mapping properties.
- */
- dcpl = H5Dget_create_plist(dset);
-
- /*
- * Get storage layout.
- */
- layout = H5Pget_layout(dcpl);
- if (H5D_VIRTUAL == layout)
- printf(" Dataset has a virtual layout \n");
- else
- printf(" Wrong layout found \n");
-
- /*
- * Find the number of mappings.
- */
- status = H5Pget_virtual_count(dcpl, &num_map);
- printf(" Number of mappings is %lu\n", (unsigned long)num_map);
-
- /*
- * Get mapping parameters for each mapping.
- */
- for (i = 0; i < (int)num_map; i++) {
- printf(" Mapping %d \n", i);
- printf(" Selection in the virtual dataset ");
- /* Get selection in the virtual dataset */
- vspace = H5Pget_virtual_vspace(dcpl, (size_t)i);
-
- /* Make sure that this is a hyperslab selection and then print information. */
- if (H5Sget_select_type(vspace) == H5S_SEL_HYPERSLABS) {
- nblocks = H5Sget_select_hyper_nblocks(vspace);
- buf = (hsize_t *)malloc(sizeof(hsize_t) * 2 * RANK2 * nblocks);
- status = H5Sget_select_hyper_blocklist(vspace, (hsize_t)0, nblocks, buf);
- for (l = 0; l < (int)nblocks; l++) {
- block_inc = 2 * RANK2 * l;
- printf("(");
- for (k = 0; k < RANK2 - 1; k++)
- printf("%d,", (int)buf[block_inc + k]);
- printf("%d) - (", (int)buf[block_inc + k]);
- for (k = 0; k < RANK2 - 1; k++)
- printf("%d,", (int)buf[block_inc + RANK2 + k]);
- printf("%d)\n", (int)buf[block_inc + RANK2 + k]);
- }
- /* We also can use new APIs to get start, stride, count and block */
- if (H5Sis_regular_hyperslab(vspace)) {
- status = H5Sget_regular_hyperslab(vspace, start_out, stride_out, count_out, block_out);
- printf(" start = [%llu, %llu] \n", (unsigned long long)start_out[0],
- (unsigned long long)start_out[1]);
- printf(" stride = [%llu, %llu] \n", (unsigned long long)stride_out[0],
- (unsigned long long)stride_out[1]);
- printf(" count = [%llu, %llu] \n", (unsigned long long)count_out[0],
- (unsigned long long)count_out[1]);
- printf(" block = [%llu, %llu] \n", (unsigned long long)block_out[0],
- (unsigned long long)block_out[1]);
- }
- }
- /* Get source file name. */
- len = H5Pget_virtual_filename(dcpl, (size_t)i, NULL, 0);
- filename = (char *)malloc((size_t)len * sizeof(char) + 1);
- H5Pget_virtual_filename(dcpl, (size_t)i, filename, len + 1);
- printf(" Source filename %s\n", filename);
-
- /* Get source dataset name. */
- len = H5Pget_virtual_dsetname(dcpl, (size_t)i, NULL, 0);
- dsetname = (char *)malloc((size_t)len * sizeof(char) + 1);
- H5Pget_virtual_dsetname(dcpl, (size_t)i, dsetname, len + 1);
- printf(" Source dataset name %s\n", dsetname);
-
- /* Get selection in the source dataset. */
- printf(" Selection in the source dataset ");
- src_space = H5Pget_virtual_srcspace(dcpl, (size_t)i);
-
- /* Make sure it is ALL selection and then print the coordinates. */
- if (H5Sget_select_type(src_space) == H5S_SEL_ALL) {
- printf("(0) - (%d) \n", DIM0 - 1);
- }
- H5Sclose(vspace);
- H5Sclose(src_space);
- free(filename);
- free(dsetname);
- free(buf);
- buf = NULL;
- }
-
- /*
- * Read the data using the default properties.
- */
- status = H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata[0]);
-
- /*
- * Output the data to the screen.
- */
- printf(" VDS Data:\n");
- for (i = 0; i < VDSDIM0; i++) {
- printf(" [");
- for (j = 0; j < VDSDIM1; j++)
- printf(" %3d", rdata[i][j]);
- printf("]\n");
- }
- /*
- * Close and release resources.
- */
- status = H5Pclose(dcpl);
- status = H5Dclose(dset);
- status = H5Fclose(file);
-
- return 0;
-}
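For reference, the length-then-fill pattern used above to retrieve mapping names from a VDS creation property list can be isolated as a small helper. This is a minimal sketch only; it assumes a valid dataset creation property list obtained from H5Dget_create_plist, and omits error checking.

    #include <stdio.h>
    #include <stdlib.h>
    #include "hdf5.h"

    /* Print the source file and dataset name of every mapping recorded in a
     * virtual dataset's creation property list. */
    static void
    print_vds_mappings(hid_t dcpl)
    {
        size_t num_map = 0;

        H5Pget_virtual_count(dcpl, &num_map);

        for (size_t i = 0; i < num_map; i++) {
            /* First call with a NULL buffer returns the string length;
             * second call fills a buffer of length + 1 bytes. */
            ssize_t len   = H5Pget_virtual_filename(dcpl, i, NULL, 0);
            char   *fname = malloc((size_t)len + 1);
            H5Pget_virtual_filename(dcpl, i, fname, (size_t)len + 1);

            len = H5Pget_virtual_dsetname(dcpl, i, NULL, 0);
            char *dname = malloc((size_t)len + 1);
            H5Pget_virtual_dsetname(dcpl, i, dname, (size_t)len + 1);

            printf("mapping %zu: %s:%s\n", i, fname, dname);
            free(fname);
            free(dname);
        }
    }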
diff --git a/examples/ph5_filtered_writes.c b/examples/ph5_filtered_writes.c
deleted file mode 100644
index f4ddae9..0000000
--- a/examples/ph5_filtered_writes.c
+++ /dev/null
@@ -1,488 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
- * Example of using the parallel HDF5 library to write to datasets
- * with filters applied to them.
- *
- * If the HDF5_NOCLEANUP environment variable is set, the file that
- * this example creates will not be removed as the example finishes.
- *
- * A parallel file prefix is needed because, in general, the current
- * working directory in which compilation is done is not suitable for
- * parallel I/O, and there is no standard pathname for parallel file
- * systems. In some cases, the parallel file name may even need a
- * parallel file type prefix such as: "pfs:/GF/...". Therefore, this
- * example parses the HDF5_PARAPREFIX environment variable for a prefix,
- * if one is needed.
- */
-
-#include <stdlib.h>
-
-#include "hdf5.h"
-
-#if defined(H5_HAVE_PARALLEL) && defined(H5_HAVE_PARALLEL_FILTERED_WRITES)
-
-#define EXAMPLE_FILE "ph5_filtered_writes.h5"
-#define EXAMPLE_DSET1_NAME "DSET1"
-#define EXAMPLE_DSET2_NAME "DSET2"
-
-#define EXAMPLE_DSET_DIMS 2
-#define EXAMPLE_DSET_CHUNK_DIM_SIZE 10
-
-/* Dataset datatype */
-#define HDF5_DATATYPE H5T_NATIVE_INT
-typedef int C_DATATYPE;
-
-#ifndef PATH_MAX
-#define PATH_MAX 512
-#endif
-
-/* Global variables */
-int mpi_rank, mpi_size;
-
-/*
- * Routine to set an HDF5 filter on the given DCPL
- */
-static void
-set_filter(hid_t dcpl_id)
-{
- htri_t filter_avail;
-
- /*
- * Check if 'deflate' filter is available
- */
- filter_avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE);
- if (filter_avail < 0)
- return;
- else if (filter_avail) {
- /*
- * Set 'deflate' filter with reasonable
- * compression level on DCPL
- */
- H5Pset_deflate(dcpl_id, 6);
- }
- else {
- /*
- * Set Fletcher32 checksum filter on DCPL
- * since it is always available in HDF5
- */
- H5Pset_fletcher32(dcpl_id);
- }
-}
-
-/*
- * Routine to fill a data buffer with data. Assumes
- * dimension rank is 2 and data is stored contiguously.
- */
-void
-fill_databuf(hsize_t start[], hsize_t count[], hsize_t stride[], C_DATATYPE *data)
-{
- C_DATATYPE *dataptr = data;
- hsize_t i, j;
-
- /* Use MPI rank value for data */
- for (i = 0; i < count[0]; i++) {
- for (j = 0; j < count[1]; j++) {
- *dataptr++ = mpi_rank;
- }
- }
-}
-
-/* Cleanup created file */
-static void
-cleanup(char *filename)
-{
- bool do_cleanup = getenv(HDF5_NOCLEANUP) ? false : true;
-
- if (do_cleanup)
- MPI_File_delete(filename, MPI_INFO_NULL);
-}
-
-/*
- * Routine to write to a dataset in a fashion
- * where no chunks in the dataset are written
- * to by more than 1 MPI rank. This will
- * generally give the best performance as the
- * MPI ranks will need the least amount of
- * inter-process communication.
- */
-static void
-write_dataset_no_overlap(hid_t file_id, hid_t dxpl_id)
-{
- C_DATATYPE data[EXAMPLE_DSET_CHUNK_DIM_SIZE][4 * EXAMPLE_DSET_CHUNK_DIM_SIZE];
- hsize_t dataset_dims[EXAMPLE_DSET_DIMS];
- hsize_t chunk_dims[EXAMPLE_DSET_DIMS];
- hsize_t start[EXAMPLE_DSET_DIMS];
- hsize_t stride[EXAMPLE_DSET_DIMS];
- hsize_t count[EXAMPLE_DSET_DIMS];
- hid_t dset_id = H5I_INVALID_HID;
- hid_t dcpl_id = H5I_INVALID_HID;
- hid_t file_dataspace = H5I_INVALID_HID;
-
- /*
- * ------------------------------------
- * Setup Dataset Creation Property List
- * ------------------------------------
- */
-
- dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
-
- /*
- * REQUIRED: Dataset chunking must be enabled to
- * apply a data filter to the dataset.
- * Chunks in the dataset are of size
- * EXAMPLE_DSET_CHUNK_DIM_SIZE x EXAMPLE_DSET_CHUNK_DIM_SIZE.
- */
- chunk_dims[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
- chunk_dims[1] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
- H5Pset_chunk(dcpl_id, EXAMPLE_DSET_DIMS, chunk_dims);
-
- /* Set filter to be applied to created datasets */
- set_filter(dcpl_id);
-
- /*
- * ------------------------------------
- * Define the dimensions of the dataset
- * and create it
- * ------------------------------------
- */
-
- /*
- * Create a dataset composed of 4 chunks
- * per MPI rank. The first dataset dimension
- * scales according to the number of MPI ranks.
- * The second dataset dimension stays fixed
- * according to the chunk size.
- */
- dataset_dims[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE * mpi_size;
- dataset_dims[1] = 4 * EXAMPLE_DSET_CHUNK_DIM_SIZE;
-
- file_dataspace = H5Screate_simple(EXAMPLE_DSET_DIMS, dataset_dims, NULL);
-
- /* Create the dataset */
- dset_id = H5Dcreate2(file_id, EXAMPLE_DSET1_NAME, HDF5_DATATYPE, file_dataspace, H5P_DEFAULT, dcpl_id,
- H5P_DEFAULT);
-
- /*
- * ------------------------------------
- * Setup selection in the dataset for
- * each MPI rank
- * ------------------------------------
- */
-
- /*
- * Each MPI rank's selection covers a
- * single chunk in the first dataset
- * dimension. Each MPI rank's selection
- * covers 4 chunks in the second dataset
- * dimension. This leads to each MPI rank
- * writing to 4 chunks of the dataset.
- */
- start[0] = mpi_rank * EXAMPLE_DSET_CHUNK_DIM_SIZE;
- start[1] = 0;
- stride[0] = 1;
- stride[1] = 1;
- count[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
- count[1] = 4 * EXAMPLE_DSET_CHUNK_DIM_SIZE;
-
- H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
-
- /*
- * --------------------------------------
- * Fill data buffer with MPI rank's rank
- * value to make it easy to see which
- * part of the dataset each rank wrote to
- * --------------------------------------
- */
-
- fill_databuf(start, count, stride, &data[0][0]);
-
- /*
- * ---------------------------------
- * Write to the dataset collectively
- * ---------------------------------
- */
-
- H5Dwrite(dset_id, HDF5_DATATYPE, H5S_BLOCK, file_dataspace, dxpl_id, data);
-
- /*
- * --------------
- * Close HDF5 IDs
- * --------------
- */
-
- H5Sclose(file_dataspace);
- H5Pclose(dcpl_id);
- H5Dclose(dset_id);
-}
-
-/*
- * Routine to write to a dataset in a fashion
- * where every chunk in the dataset is written
- * to by every MPI rank. This will generally
- * give the worst performance as the MPI ranks
- * will need the greatest amount of inter-process
- * communication.
- */
-static void
-write_dataset_overlap(hid_t file_id, hid_t dxpl_id)
-{
- C_DATATYPE *data = NULL;
- hsize_t dataset_dims[EXAMPLE_DSET_DIMS];
- hsize_t chunk_dims[EXAMPLE_DSET_DIMS];
- hsize_t start[EXAMPLE_DSET_DIMS];
- hsize_t stride[EXAMPLE_DSET_DIMS];
- hsize_t count[EXAMPLE_DSET_DIMS];
- hid_t dset_id = H5I_INVALID_HID;
- hid_t dcpl_id = H5I_INVALID_HID;
- hid_t file_dataspace = H5I_INVALID_HID;
-
- /*
- * ------------------------------------
- * Setup Dataset Creation Property List
- * ------------------------------------
- */
-
- dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
-
- /*
- * REQUIRED: Dataset chunking must be enabled to
- * apply a data filter to the dataset.
- * Chunks in the dataset are of size
- * mpi_size x EXAMPLE_DSET_CHUNK_DIM_SIZE.
- */
- chunk_dims[0] = mpi_size;
- chunk_dims[1] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
- H5Pset_chunk(dcpl_id, EXAMPLE_DSET_DIMS, chunk_dims);
-
- /* Set filter to be applied to created datasets */
- set_filter(dcpl_id);
-
- /*
- * ------------------------------------
- * Define the dimensions of the dataset
- * and create it
- * ------------------------------------
- */
-
- /*
- * Create a dataset composed of N chunks,
- * where N is the number of MPI ranks. The
- * first dataset dimension scales according
- * to the number of MPI ranks. The second
- * dataset dimension stays fixed according
- * to the chunk size.
- */
- dataset_dims[0] = mpi_size * chunk_dims[0];
- dataset_dims[1] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
-
- file_dataspace = H5Screate_simple(EXAMPLE_DSET_DIMS, dataset_dims, NULL);
-
- /* Create the dataset */
- dset_id = H5Dcreate2(file_id, EXAMPLE_DSET2_NAME, HDF5_DATATYPE, file_dataspace, H5P_DEFAULT, dcpl_id,
- H5P_DEFAULT);
-
- /*
- * ------------------------------------
- * Setup selection in the dataset for
- * each MPI rank
- * ------------------------------------
- */
-
- /*
- * Each MPI rank's selection covers
- * part of every chunk in the first
- * dimension. Each MPI rank's selection
- * covers all of every chunk in the
- * second dimension. This leads to
- * each MPI rank writing an equal
- * amount of data to every chunk
- * in the dataset.
- */
- start[0] = mpi_rank;
- start[1] = 0;
- stride[0] = chunk_dims[0];
- stride[1] = 1;
- count[0] = mpi_size;
- count[1] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
-
- H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
-
- /*
- * --------------------------------------
- * Fill data buffer with MPI rank's rank
- * value to make it easy to see which
- * part of the dataset each rank wrote to
- * --------------------------------------
- */
-
- data = malloc(mpi_size * EXAMPLE_DSET_CHUNK_DIM_SIZE * sizeof(C_DATATYPE));
-
- fill_databuf(start, count, stride, data);
-
- /*
- * ---------------------------------
- * Write to the dataset collectively
- * ---------------------------------
- */
-
- H5Dwrite(dset_id, HDF5_DATATYPE, H5S_BLOCK, file_dataspace, dxpl_id, data);
-
- free(data);
-
- /*
- * --------------
- * Close HDF5 IDs
- * --------------
- */
-
- H5Sclose(file_dataspace);
- H5Pclose(dcpl_id);
- H5Dclose(dset_id);
-}
-
-int
-main(int argc, char **argv)
-{
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
- hid_t file_id = H5I_INVALID_HID;
- hid_t fapl_id = H5I_INVALID_HID;
- hid_t dxpl_id = H5I_INVALID_HID;
- char *par_prefix = NULL;
- char filename[PATH_MAX];
-
- MPI_Init(&argc, &argv);
- MPI_Comm_size(comm, &mpi_size);
- MPI_Comm_rank(comm, &mpi_rank);
-
- /*
- * ----------------------------------
- * Start parallel access to HDF5 file
- * ----------------------------------
- */
-
- /* Setup File Access Property List with parallel I/O access */
- fapl_id = H5Pcreate(H5P_FILE_ACCESS);
- H5Pset_fapl_mpio(fapl_id, comm, info);
-
- /*
- * OPTIONAL: Set collective metadata reads on FAPL to allow
- * parallel writes to filtered datasets to perform
- * better at scale. While not strictly necessary,
- * this is generally recommended.
- */
- H5Pset_all_coll_metadata_ops(fapl_id, true);
-
- /*
- * OPTIONAL: Set the latest file format version for HDF5 in
- * order to gain access to different dataset chunk
- * index types and better data encoding methods.
- * While not strictly necessary, this is generally
- * recommended.
- */
- H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
-
- /* Parse any parallel prefix and create filename */
- par_prefix = getenv("HDF5_PARAPREFIX");
-
- snprintf(filename, PATH_MAX, "%s%s%s", par_prefix ? par_prefix : "", par_prefix ? "/" : "", EXAMPLE_FILE);
-
- /* Create HDF5 file */
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
-
- /*
- * --------------------------------------
- * Setup Dataset Transfer Property List
- * with collective I/O
- * --------------------------------------
- */
-
- dxpl_id = H5Pcreate(H5P_DATASET_XFER);
-
- /*
- * REQUIRED: Setup collective I/O for the dataset
- * write operations. Parallel writes to
- * filtered datasets MUST be collective,
- * even if some ranks have no data to
- * contribute to the write operation.
- *
- * Refer to the 'ph5_filtered_writes_no_sel'
- * example to see how to setup a dataset
- * write when one or more MPI ranks have
- * no data to contribute to the write
- * operation.
- */
- H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
-
- /*
- * --------------------------------
- * Create and write to each dataset
- * --------------------------------
- */
-
- /*
- * Write to a dataset in a fashion where no
- * chunks in the dataset are written to by
- * more than 1 MPI rank. This will generally
- * give the best performance as the MPI ranks
- * will need the least amount of inter-process
- * communication.
- */
- write_dataset_no_overlap(file_id, dxpl_id);
-
- /*
- * Write to a dataset in a fashion where
- * every chunk in the dataset is written
- * to by every MPI rank. This will generally
- * give the worst performance as the MPI ranks
- * will need the greatest amount of inter-process
- * communication.
- */
- write_dataset_overlap(file_id, dxpl_id);
-
- /*
- * ------------------
- * Close all HDF5 IDs
- * ------------------
- */
-
- H5Pclose(dxpl_id);
- H5Pclose(fapl_id);
- H5Fclose(file_id);
-
- printf("PHDF5 example finished with no errors\n");
-
- /*
- * ------------------------------------
- * Cleanup created HDF5 file and finish
- * ------------------------------------
- */
-
- cleanup(filename);
-
- MPI_Finalize();
-
- return 0;
-}
-
-#else
-
-int
-main(void)
-{
- printf("HDF5 not configured with parallel support or parallel filtered writes are disabled!\n");
- return 0;
-}
-
-#endif
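For reference, the property-list setup that makes the parallel filtered writes in the example above work can be reduced to a few calls. The sketch below is a minimal illustration under stated assumptions: a parallel HDF5 build, MPI already initialized, and placeholder function and parameter names of my choosing; error checking is omitted.

    #include <stdbool.h>
    #include <mpi.h>
    #include "hdf5.h"

    /* Create a file access property list and a dataset transfer property
     * list configured for collective, filtered, parallel writes. */
    static void
    make_parallel_filtered_plists(MPI_Comm comm, hid_t *fapl_out, hid_t *dxpl_out)
    {
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
        hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

        /* MPI-IO driver plus collective metadata reads and the latest file
         * format, as recommended for filtered datasets at scale. */
        H5Pset_fapl_mpio(fapl, comm, MPI_INFO_NULL);
        H5Pset_all_coll_metadata_ops(fapl, true);
        H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);

        /* Writes to filtered datasets must be collective. */
        H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

        *fapl_out = fapl;
        *dxpl_out = dxpl;
    }

A FAPL and DXPL built this way can then be passed to H5Fcreate and H5Dwrite exactly as in the example above.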
diff --git a/examples/ph5_filtered_writes_no_sel.c b/examples/ph5_filtered_writes_no_sel.c
deleted file mode 100644
index e3ec103..0000000
--- a/examples/ph5_filtered_writes_no_sel.c
+++ /dev/null
@@ -1,369 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
- * Example of using the parallel HDF5 library to collectively write to
- * datasets with filters applied to them when one or more MPI ranks do not
- * have data to contribute to the dataset.
- *
- * If the HDF5_NOCLEANUP environment variable is set, the file that
- * this example creates will not be removed as the example finishes.
- *
- * A parallel file prefix is needed because, in general, the current
- * working directory in which compilation is done is not suitable for
- * parallel I/O, and there is no standard pathname for parallel file
- * systems. In some cases, the parallel file name may even need a
- * parallel file type prefix such as: "pfs:/GF/...". Therefore, this
- * example parses the HDF5_PARAPREFIX environment variable for a prefix,
- * if one is needed.
- */
-
-#include <stdlib.h>
-
-#include "hdf5.h"
-
-#if defined(H5_HAVE_PARALLEL) && defined(H5_HAVE_PARALLEL_FILTERED_WRITES)
-
-#define EXAMPLE_FILE "ph5_filtered_writes_no_sel.h5"
-#define EXAMPLE_DSET_NAME "DSET"
-
-#define EXAMPLE_DSET_DIMS 2
-#define EXAMPLE_DSET_CHUNK_DIM_SIZE 10
-
-/* Dataset datatype */
-#define HDF5_DATATYPE H5T_NATIVE_INT
-typedef int C_DATATYPE;
-
-#ifndef PATH_MAX
-#define PATH_MAX 512
-#endif
-
-/* Global variables */
-int mpi_rank, mpi_size;
-
-/*
- * Routine to set an HDF5 filter on the given DCPL
- */
-static void
-set_filter(hid_t dcpl_id)
-{
- htri_t filter_avail;
-
- /*
- * Check if 'deflate' filter is available
- */
- filter_avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE);
- if (filter_avail < 0)
- return;
- else if (filter_avail) {
- /*
- * Set 'deflate' filter with reasonable
- * compression level on DCPL
- */
- H5Pset_deflate(dcpl_id, 6);
- }
- else {
- /*
- * Set Fletcher32 checksum filter on DCPL
- * since it is always available in HDF5
- */
- H5Pset_fletcher32(dcpl_id);
- }
-}
-
-/*
- * Routine to fill a data buffer with data. Assumes
- * dimension rank is 2 and data is stored contiguously.
- */
-void
-fill_databuf(hsize_t start[], hsize_t count[], hsize_t stride[], C_DATATYPE *data)
-{
- C_DATATYPE *dataptr = data;
- hsize_t i, j;
-
- /* Use MPI rank value for data */
- for (i = 0; i < count[0]; i++) {
- for (j = 0; j < count[1]; j++) {
- *dataptr++ = mpi_rank;
- }
- }
-}
-
-/* Cleanup created file */
-static void
-cleanup(char *filename)
-{
- bool do_cleanup = getenv(HDF5_NOCLEANUP) ? false : true;
-
- if (do_cleanup)
- MPI_File_delete(filename, MPI_INFO_NULL);
-}
-
-/*
- * Routine to write to a dataset in a fashion
- * where no chunks in the dataset are written
- * to by more than 1 MPI rank and where some
- * MPI ranks make no selection (contribute no
- * data). This will generally give the best
- * performance as the MPI ranks will need the
- * least amount of inter-process communication.
- */
-static void
-write_dataset_some_no_sel(hid_t file_id, hid_t dxpl_id)
-{
- C_DATATYPE data[EXAMPLE_DSET_CHUNK_DIM_SIZE][4 * EXAMPLE_DSET_CHUNK_DIM_SIZE];
- hsize_t dataset_dims[EXAMPLE_DSET_DIMS];
- hsize_t chunk_dims[EXAMPLE_DSET_DIMS];
- hsize_t start[EXAMPLE_DSET_DIMS];
- hsize_t stride[EXAMPLE_DSET_DIMS];
- hsize_t count[EXAMPLE_DSET_DIMS];
- bool no_selection;
- hid_t dset_id = H5I_INVALID_HID;
- hid_t dcpl_id = H5I_INVALID_HID;
- hid_t file_dataspace = H5I_INVALID_HID;
-
- /*
- * ------------------------------------
- * Setup Dataset Creation Property List
- * ------------------------------------
- */
-
- dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
-
- /*
- * REQUIRED: Dataset chunking must be enabled to
- * apply a data filter to the dataset.
- * Chunks in the dataset are of size
- * EXAMPLE_DSET_CHUNK_DIM_SIZE x EXAMPLE_DSET_CHUNK_DIM_SIZE.
- */
- chunk_dims[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
- chunk_dims[1] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
- H5Pset_chunk(dcpl_id, EXAMPLE_DSET_DIMS, chunk_dims);
-
- /* Set filter to be applied to created datasets */
- set_filter(dcpl_id);
-
- /*
- * ------------------------------------
- * Define the dimensions of the dataset
- * and create it
- * ------------------------------------
- */
-
- /*
- * Create a dataset composed of 4 chunks
- * per MPI rank. The first dataset dimension
- * scales according to the number of MPI ranks.
- * The second dataset dimension stays fixed
- * according to the chunk size.
- */
- dataset_dims[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE * mpi_size;
- dataset_dims[1] = 4 * EXAMPLE_DSET_CHUNK_DIM_SIZE;
-
- file_dataspace = H5Screate_simple(EXAMPLE_DSET_DIMS, dataset_dims, NULL);
-
- /* Create the dataset */
- dset_id = H5Dcreate2(file_id, EXAMPLE_DSET_NAME, HDF5_DATATYPE, file_dataspace, H5P_DEFAULT, dcpl_id,
- H5P_DEFAULT);
-
- /*
- * ------------------------------------
- * Setup selection in the dataset for
- * each MPI rank
- * ------------------------------------
- */
-
- /*
- * Odd rank value MPI ranks do not
- * contribute any data to the dataset.
- */
- no_selection = (mpi_rank % 2) == 1;
-
- if (no_selection) {
- /*
- * MPI ranks not contributing data to
- * the dataset should call H5Sselect_none
- * on the file dataspace that will be
- * passed to H5Dwrite.
- */
- H5Sselect_none(file_dataspace);
- }
- else {
- /*
- * Even MPI ranks contribute data to
- * the dataset. Each MPI rank's selection
- * covers a single chunk in the first dataset
- * dimension. Each MPI rank's selection
- * covers 4 chunks in the second dataset
- * dimension. This leads to each contributing
- * MPI rank writing to 4 chunks of the dataset.
- */
- start[0] = mpi_rank * EXAMPLE_DSET_CHUNK_DIM_SIZE;
- start[1] = 0;
- stride[0] = 1;
- stride[1] = 1;
- count[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
- count[1] = 4 * EXAMPLE_DSET_CHUNK_DIM_SIZE;
-
- H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
-
- /*
- * --------------------------------------
- * Fill data buffer with MPI rank's rank
- * value to make it easy to see which
- * part of the dataset each rank wrote to
- * --------------------------------------
- */
-
- fill_databuf(start, count, stride, &data[0][0]);
- }
-
- /*
- * ---------------------------------
- * Write to the dataset collectively
- * ---------------------------------
- */
-
- H5Dwrite(dset_id, HDF5_DATATYPE, no_selection ? H5S_ALL : H5S_BLOCK, file_dataspace, dxpl_id, data);
-
- /*
- * --------------
- * Close HDF5 IDs
- * --------------
- */
-
- H5Sclose(file_dataspace);
- H5Pclose(dcpl_id);
- H5Dclose(dset_id);
-}
-
-int
-main(int argc, char **argv)
-{
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
- hid_t file_id = H5I_INVALID_HID;
- hid_t fapl_id = H5I_INVALID_HID;
- hid_t dxpl_id = H5I_INVALID_HID;
- char *par_prefix = NULL;
- char filename[PATH_MAX];
-
- MPI_Init(&argc, &argv);
- MPI_Comm_size(comm, &mpi_size);
- MPI_Comm_rank(comm, &mpi_rank);
-
- /*
- * ----------------------------------
- * Start parallel access to HDF5 file
- * ----------------------------------
- */
-
- /* Setup File Access Property List with parallel I/O access */
- fapl_id = H5Pcreate(H5P_FILE_ACCESS);
- H5Pset_fapl_mpio(fapl_id, comm, info);
-
- /*
- * OPTIONAL: Set collective metadata reads on FAPL to allow
- * parallel writes to filtered datasets to perform
- * better at scale. While not strictly necessary,
- * this is generally recommended.
- */
- H5Pset_all_coll_metadata_ops(fapl_id, true);
-
- /*
- * OPTIONAL: Set the latest file format version for HDF5 in
- * order to gain access to different dataset chunk
- * index types and better data encoding methods.
- * While not strictly necessary, this is generally
- * recommended.
- */
- H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
-
- /* Parse any parallel prefix and create filename */
- par_prefix = getenv("HDF5_PARAPREFIX");
-
- snprintf(filename, PATH_MAX, "%s%s%s", par_prefix ? par_prefix : "", par_prefix ? "/" : "", EXAMPLE_FILE);
-
- /* Create HDF5 file */
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
-
- /*
- * --------------------------------------
- * Setup Dataset Transfer Property List
- * with collective I/O
- * --------------------------------------
- */
-
- dxpl_id = H5Pcreate(H5P_DATASET_XFER);
-
- /*
- * REQUIRED: Setup collective I/O for the dataset
- * write operations. Parallel writes to
- * filtered datasets MUST be collective,
- * even if some ranks have no data to
- * contribute to the write operation.
- */
- H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
-
- /*
- * --------------------------------
- * Create and write to the dataset
- * --------------------------------
- */
-
- /*
- * Write to a dataset in a fashion where no
- * chunks in the dataset are written to by
- * more than 1 MPI rank and some MPI ranks
- * have nothing to contribute to the dataset.
- * In this case, the MPI ranks that have no
- * data to contribute must still participate
- * in the collective H5Dwrite call, but should
- * call H5Sselect_none on the file dataspace
- * passed to the H5Dwrite call.
- */
- write_dataset_some_no_sel(file_id, dxpl_id);
-
- /*
- * ------------------
- * Close all HDF5 IDs
- * ------------------
- */
-
- H5Pclose(dxpl_id);
- H5Pclose(fapl_id);
- H5Fclose(file_id);
-
- printf("PHDF5 example finished with no errors\n");
-
- /*
- * ------------------------------------
- * Cleanup created HDF5 file and finish
- * ------------------------------------
- */
-
- cleanup(filename);
-
- MPI_Finalize();
-
- return 0;
-}
-
-#else
-
-int
-main(void)
-{
- printf("HDF5 not configured with parallel support or parallel filtered writes are disabled!\n");
- return 0;
-}
-
-#endif
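For reference, the participate-without-data pattern used in the example above boils down to one branch around the collective write. The sketch below is a minimal illustration with placeholder names; it assumes a parallel HDF5 build and an already-created dataset, file dataspace, and collective transfer property list.

    #include <stdbool.h>
    #include "hdf5.h"

    /* Collectively write one rank's block (or nothing) to an already-created
     * integer dataset. Ranks with no data still participate in H5Dwrite, but
     * with a "none" selection in the file dataspace; 'data' should point at a
     * valid buffer either way (it is not read when nothing is selected). */
    static void
    write_block_or_none(hid_t dset_id, hid_t file_space, hid_t dxpl_id, bool has_data,
                        const hsize_t start[2], const hsize_t count[2], const int *data)
    {
        if (has_data)
            H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, NULL, count, NULL);
        else
            H5Sselect_none(file_space);

        /* H5S_BLOCK treats the memory buffer as a contiguous block matching
         * the file selection; H5S_ALL is used when the rank selected nothing. */
        H5Dwrite(dset_id, H5T_NATIVE_INT, has_data ? H5S_BLOCK : H5S_ALL, file_space, dxpl_id, data);
    }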
diff --git a/examples/ph5_subfiling.c b/examples/ph5_subfiling.c
deleted file mode 100644
index f9c3322..0000000
--- a/examples/ph5_subfiling.c
+++ /dev/null
@@ -1,551 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
- * Example of using HDF5's Subfiling VFD to write to an
- * HDF5 file that is striped across multiple subfiles
- *
- * If the HDF5_NOCLEANUP environment variable is set, the
- * files that this example creates will not be removed as
- * the example finishes.
- *
- * In general, the current working directory in which compilation
- * is done is not suitable for parallel I/O, and there is no
- * standard pathname for parallel file systems. In some cases,
- * the parallel file name may even need some parallel file type
- * prefix such as: "pfs:/GF/...". Therefore, this example parses
- * the HDF5_PARAPREFIX environment variable for a prefix, if one
- * is needed.
- */
-
-#include <stdlib.h>
-
-#include "hdf5.h"
-
-#if defined(H5_HAVE_PARALLEL) && defined(H5_HAVE_SUBFILING_VFD)
-
-#define EXAMPLE_FILE "h5_subfiling_default_example.h5"
-#define EXAMPLE_FILE2 "h5_subfiling_custom_example.h5"
-#define EXAMPLE_FILE3 "h5_subfiling_precreate_example.h5"
-
-#define EXAMPLE_DSET_NAME "DSET"
-#define EXAMPLE_DSET_DIMS 2
-
-/* Have each MPI rank write 16MiB of data */
-#define EXAMPLE_DSET_NY 4194304
-
-/* Dataset datatype */
-#define EXAMPLE_DSET_DATATYPE H5T_NATIVE_INT
-typedef int EXAMPLE_DSET_C_DATATYPE;
-
-/* Cleanup created files */
-static void
-cleanup(char *filename, hid_t fapl_id)
-{
- bool do_cleanup = getenv(HDF5_NOCLEANUP) ? false : true;
-
- if (do_cleanup)
- H5Fdelete(filename, fapl_id);
-}
-
-/*
- * An example of using the HDF5 Subfiling VFD with
- * its default settings of 1 subfile per node, with
- * a stripe size of 32MiB
- */
-static void
-subfiling_write_default(hid_t fapl_id, int mpi_size, int mpi_rank)
-{
- EXAMPLE_DSET_C_DATATYPE *data;
- hsize_t dset_dims[EXAMPLE_DSET_DIMS];
- hsize_t start[EXAMPLE_DSET_DIMS];
- hsize_t count[EXAMPLE_DSET_DIMS];
- hid_t file_id;
- hid_t subfiling_fapl;
- hid_t dset_id;
- hid_t filespace;
- char filename[512];
- char *par_prefix;
-
- /*
- * Make a copy of the FAPL so we don't disturb
- * it for the other examples
- */
- subfiling_fapl = H5Pcopy(fapl_id);
-
- /*
- * Set Subfiling VFD on FAPL using default settings
- * (use IOC VFD, 1 IOC per node, 32MiB stripe size)
- *
- * Note that all of Subfiling's configuration settings
- * can be adjusted with environment variables as well
- * in this case.
- */
- H5Pset_fapl_subfiling(subfiling_fapl, NULL);
-
- /*
- * OPTIONAL: Set alignment of objects in HDF5 file to
- * be equal to the Subfiling stripe size.
- * Choosing a Subfiling stripe size and HDF5
- * object alignment value that are some
- * multiple of the disk block size can
- * generally help performance by ensuring
- * that I/O is well-aligned and doesn't
- * excessively cross stripe boundaries.
- *
- * Note that this option can substantially
- * increase the size of the resulting HDF5
- * files, so it is a good idea to keep an eye
- * on this.
- */
- H5Pset_alignment(subfiling_fapl, 0, 33554432); /* Align to default 32MiB stripe size */
-
- /* Parse any parallel prefix and create filename */
- par_prefix = getenv("HDF5_PARAPREFIX");
-
- snprintf(filename, sizeof(filename), "%s%s%s", par_prefix ? par_prefix : "", par_prefix ? "/" : "",
- EXAMPLE_FILE);
-
- /*
- * Create a new file collectively
- */
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, subfiling_fapl);
-
- /*
- * Create the dataspace for the dataset. The first
- * dimension varies with the number of MPI ranks
- * while the second dimension is fixed.
- */
- dset_dims[0] = mpi_size;
- dset_dims[1] = EXAMPLE_DSET_NY;
- filespace = H5Screate_simple(EXAMPLE_DSET_DIMS, dset_dims, NULL);
-
- /*
- * Create the dataset with default properties
- */
- dset_id = H5Dcreate2(file_id, EXAMPLE_DSET_NAME, EXAMPLE_DSET_DATATYPE, filespace, H5P_DEFAULT,
- H5P_DEFAULT, H5P_DEFAULT);
-
- /*
- * Each MPI rank writes from a contiguous memory
- * region to the hyperslab in the file
- */
- start[0] = mpi_rank;
- start[1] = 0;
- count[0] = 1;
- count[1] = dset_dims[1];
- H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, NULL, count, NULL);
-
- /*
- * Initialize data buffer
- */
- data = malloc(count[0] * count[1] * sizeof(EXAMPLE_DSET_C_DATATYPE));
- for (size_t i = 0; i < count[0] * count[1]; i++) {
- data[i] = mpi_rank + i;
- }
-
- /*
- * Write to dataset
- */
- H5Dwrite(dset_id, EXAMPLE_DSET_DATATYPE, H5S_BLOCK, filespace, H5P_DEFAULT, data);
-
- /*
- * Close/release resources.
- */
-
- free(data);
-
- H5Dclose(dset_id);
- H5Sclose(filespace);
- H5Fclose(file_id);
-
- cleanup(EXAMPLE_FILE, subfiling_fapl);
-
- H5Pclose(subfiling_fapl);
-}
-
-/*
- * An example of using the HDF5 Subfiling VFD with
- * custom settings
- */
-static void
-subfiling_write_custom(hid_t fapl_id, int mpi_size, int mpi_rank)
-{
- EXAMPLE_DSET_C_DATATYPE *data;
- H5FD_subfiling_config_t subf_config;
- H5FD_ioc_config_t ioc_config;
- hsize_t dset_dims[EXAMPLE_DSET_DIMS];
- hsize_t start[EXAMPLE_DSET_DIMS];
- hsize_t count[EXAMPLE_DSET_DIMS];
- hid_t file_id;
- hid_t subfiling_fapl;
- hid_t dset_id;
- hid_t filespace;
- char filename[512];
- char *par_prefix;
-
- /*
- * Make a copy of the FAPL so we don't disturb
- * it for the other examples
- */
- subfiling_fapl = H5Pcopy(fapl_id);
-
- /*
- * Get a default Subfiling and IOC configuration
- */
- H5Pget_fapl_subfiling(subfiling_fapl, &subf_config);
- H5Pget_fapl_ioc(subfiling_fapl, &ioc_config);
-
- /*
- * Set Subfiling configuration to use a 1MiB
- * stripe size and the SELECT_IOC_EVERY_NTH_RANK
- * selection method. By default, without a setting
- * in the H5FD_SUBFILING_IOC_SELECTION_CRITERIA
- * environment variable, this will use every MPI
- * rank as an I/O concentrator.
- */
- subf_config.shared_cfg.stripe_size = 1048576;
- subf_config.shared_cfg.ioc_selection = SELECT_IOC_EVERY_NTH_RANK;
-
- /*
- * Set the IOC configuration to use 2 worker
- * threads per IOC instead of the default
- * setting; the updated configuration is set on
- * the IOC FAPL below.
- */
- ioc_config.thread_pool_size = 2;
-
- /*
- * Set our new configuration on the IOC
- * FAPL used for Subfiling
- */
- H5Pset_fapl_ioc(subf_config.ioc_fapl_id, &ioc_config);
-
- /*
- * Finally, set our new Subfiling configuration
- * on the original FAPL
- */
- H5Pset_fapl_subfiling(subfiling_fapl, &subf_config);
-
- /*
- * OPTIONAL: Set alignment of objects in HDF5 file to
- * be equal to the Subfiling stripe size.
- * Choosing a Subfiling stripe size and HDF5
- * object alignment value that are some
- * multiple of the disk block size can
- * generally help performance by ensuring
- * that I/O is well-aligned and doesn't
- * excessively cross stripe boundaries.
- *
- * Note that this option can substantially
- * increase the size of the resulting HDF5
- * files, so it is a good idea to keep an eye
- * on this.
- */
- H5Pset_alignment(subfiling_fapl, 0, 1048576); /* Align to custom 1MiB stripe size */
-
- /* Parse any parallel prefix and create filename */
- par_prefix = getenv("HDF5_PARAPREFIX");
-
- snprintf(filename, sizeof(filename), "%s%s%s", par_prefix ? par_prefix : "", par_prefix ? "/" : "",
- EXAMPLE_FILE2);
-
- /*
- * Create a new file collectively
- */
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, subfiling_fapl);
-
- /*
- * Create the dataspace for the dataset. The first
- * dimension varies with the number of MPI ranks
- * while the second dimension is fixed.
- */
- dset_dims[0] = mpi_size;
- dset_dims[1] = EXAMPLE_DSET_NY;
- filespace = H5Screate_simple(EXAMPLE_DSET_DIMS, dset_dims, NULL);
-
- /*
- * Create the dataset with default properties
- */
- dset_id = H5Dcreate2(file_id, EXAMPLE_DSET_NAME, EXAMPLE_DSET_DATATYPE, filespace, H5P_DEFAULT,
- H5P_DEFAULT, H5P_DEFAULT);
-
- /*
- * Each MPI rank writes from a contiguous memory
- * region to the hyperslab in the file
- */
- start[0] = mpi_rank;
- start[1] = 0;
- count[0] = 1;
- count[1] = dset_dims[1];
- H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, NULL, count, NULL);
-
- /*
- * Initialize data buffer
- */
- data = malloc(count[0] * count[1] * sizeof(EXAMPLE_DSET_C_DATATYPE));
- for (size_t i = 0; i < count[0] * count[1]; i++) {
- data[i] = mpi_rank + i;
- }
-
- /*
- * Write to dataset
- */
- H5Dwrite(dset_id, EXAMPLE_DSET_DATATYPE, H5S_BLOCK, filespace, H5P_DEFAULT, data);
-
- /*
- * Close/release resources.
- */
-
- free(data);
-
- H5Dclose(dset_id);
- H5Sclose(filespace);
- H5Fclose(file_id);
-
- cleanup(EXAMPLE_FILE2, subfiling_fapl);
-
- H5Pclose(subfiling_fapl);
-}
-
-/*
- * An example of pre-creating an HDF5 file on MPI rank
- * 0 when using the HDF5 Subfiling VFD. In this case,
- * the subfiling stripe count must be set so that rank
- * 0 knows how many subfiles to pre-create.
- */
-static void
-subfiling_write_precreate(hid_t fapl_id, int mpi_size, int mpi_rank)
-{
- EXAMPLE_DSET_C_DATATYPE *data;
- H5FD_subfiling_config_t subf_config;
- hsize_t dset_dims[EXAMPLE_DSET_DIMS];
- hsize_t start[EXAMPLE_DSET_DIMS];
- hsize_t count[EXAMPLE_DSET_DIMS];
- hid_t file_id;
- hid_t subfiling_fapl;
- hid_t dset_id;
- hid_t filespace;
- char filename[512];
- char *par_prefix;
-
- /*
- * Make a copy of the FAPL so we don't disturb
- * it for the other examples
- */
- subfiling_fapl = H5Pcopy(fapl_id);
-
- /*
- * Get a default Subfiling and IOC configuration
- */
- H5Pget_fapl_subfiling(subfiling_fapl, &subf_config);
-
- /*
- * Set the Subfiling stripe count so that rank
- * 0 knows how many subfiles the logical HDF5
- * file should consist of. In this case, use
- * 5 subfiles with a default stripe size of
- * 32MiB.
- */
- subf_config.shared_cfg.stripe_count = 5;
-
- /*
- * OPTIONAL: Set alignment of objects in HDF5 file to
- * be equal to the Subfiling stripe size.
- * Choosing a Subfiling stripe size and HDF5
- * object alignment value that are some
- * multiple of the disk block size can
- * generally help performance by ensuring
- * that I/O is well-aligned and doesn't
- * excessively cross stripe boundaries.
- *
- * Note that this option can substantially
- * increase the size of the resulting HDF5
- * files, so it is a good idea to keep an eye
- * on this.
- */
- H5Pset_alignment(subfiling_fapl, 0, 1048576); /* Align to 1MiB boundaries (the stripe size here is the 32MiB default) */
-
- /* Parse any parallel prefix and create filename */
- par_prefix = getenv("HDF5_PARAPREFIX");
-
- snprintf(filename, sizeof(filename), "%s%s%s", par_prefix ? par_prefix : "", par_prefix ? "/" : "",
- EXAMPLE_FILE3);
-
- /* Set dataset dimensionality */
- dset_dims[0] = mpi_size;
- dset_dims[1] = EXAMPLE_DSET_NY;
-
- if (mpi_rank == 0) {
- /*
- * Make sure only this rank opens the file
- */
- H5Pset_mpi_params(subfiling_fapl, MPI_COMM_SELF, MPI_INFO_NULL);
-
- /*
- * Set the Subfiling VFD on our FAPL using
- * our custom configuration
- */
- H5Pset_fapl_subfiling(subfiling_fapl, &subf_config);
-
- /*
- * Create a new file on rank 0
- */
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, subfiling_fapl);
-
- /*
- * Create the dataspace for the dataset. The first
- * dimension varies with the number of MPI ranks
- * while the second dimension is fixed.
- */
- filespace = H5Screate_simple(EXAMPLE_DSET_DIMS, dset_dims, NULL);
-
- /*
- * Create the dataset with default properties
- */
- dset_id = H5Dcreate2(file_id, EXAMPLE_DSET_NAME, EXAMPLE_DSET_DATATYPE, filespace, H5P_DEFAULT,
- H5P_DEFAULT, H5P_DEFAULT);
-
- /*
- * Initialize data buffer
- */
- data = malloc(dset_dims[0] * dset_dims[1] * sizeof(EXAMPLE_DSET_C_DATATYPE));
- for (size_t i = 0; i < dset_dims[0] * dset_dims[1]; i++) {
- data[i] = i;
- }
-
- /*
- * Rank 0 writes to the whole dataset
- */
- H5Dwrite(dset_id, EXAMPLE_DSET_DATATYPE, H5S_BLOCK, filespace, H5P_DEFAULT, data);
-
- /*
- * Close/release resources.
- */
-
- free(data);
-
- H5Dclose(dset_id);
- H5Sclose(filespace);
- H5Fclose(file_id);
- }
-
- MPI_Barrier(MPI_COMM_WORLD);
-
- /*
- * Use all MPI ranks to re-open the file and
- * read back the dataset that was created
- */
- H5Pset_mpi_params(subfiling_fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
-
- /*
- * Use the same subfiling configuration as rank 0
- * used to create the file
- */
- H5Pset_fapl_subfiling(subfiling_fapl, &subf_config);
-
- /*
- * Re-open the file on all ranks
- */
- file_id = H5Fopen(filename, H5F_ACC_RDONLY, subfiling_fapl);
-
- /*
- * Open the dataset that was created
- */
- dset_id = H5Dopen2(file_id, EXAMPLE_DSET_NAME, H5P_DEFAULT);
-
- /*
- * Initialize data buffer
- */
- data = malloc(dset_dims[0] * dset_dims[1] * sizeof(EXAMPLE_DSET_C_DATATYPE));
-
- /*
- * Read the dataset on all ranks
- */
- H5Dread(dset_id, EXAMPLE_DSET_DATATYPE, H5S_BLOCK, H5S_ALL, H5P_DEFAULT, data);
-
- /*
- * Close/release resources.
- */
-
- free(data);
-
- H5Dclose(dset_id);
- H5Fclose(file_id);
-
- cleanup(EXAMPLE_FILE3, subfiling_fapl);
-
- H5Pclose(subfiling_fapl);
-}
-
-int
-main(int argc, char **argv)
-{
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
- hid_t fapl_id;
- int mpi_size;
- int mpi_rank;
- int mpi_thread_required = MPI_THREAD_MULTIPLE;
- int mpi_thread_provided = 0;
-
- /* HDF5 Subfiling VFD requires MPI_Init_thread with MPI_THREAD_MULTIPLE */
- MPI_Init_thread(&argc, &argv, mpi_thread_required, &mpi_thread_provided);
- if (mpi_thread_provided < mpi_thread_required) {
- printf("MPI_THREAD_MULTIPLE not supported\n");
- MPI_Abort(comm, -1);
- }
-
- MPI_Comm_size(comm, &mpi_size);
- MPI_Comm_rank(comm, &mpi_rank);
-
- /*
- * Set up File Access Property List with MPI
- * parameters for the Subfiling VFD to use
- */
- fapl_id = H5Pcreate(H5P_FILE_ACCESS);
- H5Pset_mpi_params(fapl_id, comm, info);
-
- /* Use Subfiling VFD with default settings */
- subfiling_write_default(fapl_id, mpi_size, mpi_rank);
-
- /* Use Subfiling VFD with custom settings */
- subfiling_write_custom(fapl_id, mpi_size, mpi_rank);
-
- /*
- * Use Subfiling VFD to precreate the HDF5
- * file on MPI rank 0
- */
- subfiling_write_precreate(fapl_id, mpi_size, mpi_rank);
-
- H5Pclose(fapl_id);
-
- if (mpi_rank == 0)
- printf("PHDF5 example finished with no errors\n");
-
- MPI_Finalize();
-
- return 0;
-}
-
-#else
-
-/* dummy program for when HDF5 lacks parallel and/or Subfiling VFD support */
-int
-main(void)
-{
- printf(
- "Example program cannot run - HDF5 must be built with parallel support and Subfiling VFD support\n");
- return 0;
-}
-
-#endif /* H5_HAVE_PARALLEL && H5_HAVE_SUBFILING_VFD */
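For reference, enabling the Subfiling VFD on a file access property list takes only a few calls. The sketch below is a minimal illustration with a placeholder function name; it assumes HDF5 was built with the Subfiling VFD, MPI was initialized with MPI_THREAD_MULTIPLE, and no error checking is shown.

    #include <stdint.h>
    #include <mpi.h>
    #include "hdf5.h"

    /* Build a FAPL that routes I/O through the Subfiling VFD with a custom
     * stripe size; returns the FAPL id. */
    static hid_t
    make_subfiling_fapl(MPI_Comm comm, int64_t stripe_size)
    {
        H5FD_subfiling_config_t cfg;
        hid_t                   fapl = H5Pcreate(H5P_FILE_ACCESS);

        /* The Subfiling VFD picks up its MPI communicator from the FAPL. */
        H5Pset_mpi_params(fapl, comm, MPI_INFO_NULL);

        /* Start from the default Subfiling configuration, adjust only the
         * stripe size, then install the driver on the FAPL. */
        H5Pget_fapl_subfiling(fapl, &cfg);
        cfg.shared_cfg.stripe_size = stripe_size;
        H5Pset_fapl_subfiling(fapl, &cfg);

        /* Optional: align file objects with the stripe size. */
        H5Pset_alignment(fapl, 0, (hsize_t)stripe_size);

        return fapl;
    }

A FAPL built this way can be passed to H5Fcreate or H5Fopen just as in the example above.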
diff --git a/examples/ph5example.c b/examples/ph5example.c
deleted file mode 100644
index 5ec2cdc..0000000
--- a/examples/ph5example.c
+++ /dev/null
@@ -1,1100 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
- * Example of using the parallel HDF5 library to access datasets.
- * Last revised: April 24, 2001.
- *
- * This program contains two parts. In the first part, the mpi processes
- * collectively create a new parallel HDF5 file and create two fixed
- * dimension datasets in it. Then each process writes a hyperslab into
- * each dataset in an independent mode. All processes collectively
- * close the datasets and the file.
- * In the second part, the processes collectively open the created file
- * and the two datasets in it. Then each process reads a hyperslab from
- * each dataset in an independent mode and prints them out.
- * All processes collectively close the datasets and the file.
- *
- * A parallel file prefix is required because, in general, the current
- * working directory in which compilation is done is not suitable for
- * parallel I/O, and there is no standard pathname for parallel file
- * systems. In some cases, the parallel file name may even need a
- * parallel file type prefix such as "pfs:/GF/...". Therefore, this
- * example requires an explicit parallel file prefix. See the usage
- * for more detail.
- */
-
-#include <assert.h>
-#include "hdf5.h"
-#include <string.h>
-#include <stdlib.h>
-
-#ifdef H5_HAVE_PARALLEL
-/* Temporary source code */
-#define FAIL -1
-/* temporary code end */
-
-/* Define some handy debugging shorthands, routines, ... */
-/* debugging tools */
-#define MESG(x) \
- do { \
- if (verbose) \
- printf("%s\n", x); \
- } while (0)
-
-#define MPI_BANNER(mesg) \
- do { \
- printf("--------------------------------\n"); \
- printf("Proc %d: ", mpi_rank); \
- printf("*** %s\n", mesg); \
- printf("--------------------------------\n"); \
- } while (0)
-
-#define SYNC(comm) \
- do { \
- MPI_BANNER("doing a SYNC"); \
- MPI_Barrier(comm); \
- MPI_BANNER("SYNC DONE"); \
- } while (0)
-/* End of Define some handy debugging shorthands, routines, ... */
-
-/* Constants definitions */
-/* 24 is a multiple of 2, 3, 4, 6, 8, 12. Neat for parallel tests. */
-#define SPACE1_DIM1 24
-#define SPACE1_DIM2 24
-#define SPACE1_RANK 2
-#define DATASETNAME1 "Data1"
-#define DATASETNAME2 "Data2"
-#define DATASETNAME3 "Data3"
-/* hyperslab layout styles */
-#define BYROW 1 /* divide into slabs of rows */
-#define BYCOL 2 /* divide into blocks of columns */
-
-#define PARAPREFIX "HDF5_PARAPREFIX" /* file prefix environment variable name */
-
-/* dataset data type. Ints can be easily dumped in octal. */
-typedef int DATATYPE;
-
-/* global variables */
-int nerrors = 0; /* errors count */
-#ifndef PATH_MAX
-#define PATH_MAX 512
-#endif /* !PATH_MAX */
-char testfiles[2][PATH_MAX];
-
-int mpi_size, mpi_rank; /* mpi variables */
-
-/* option flags */
-int verbose = 0; /* verbose, default as no. */
-int doread = 1; /* read test */
-int dowrite = 1; /* write test */
-int docleanup = 1; /* cleanup */
-
-/* Prototypes */
-void slab_set(hsize_t start[], hsize_t count[], hsize_t stride[], int mode);
-void dataset_fill(hsize_t start[], hsize_t count[], hsize_t stride[], DATATYPE *dataset);
-void dataset_print(hsize_t start[], hsize_t count[], hsize_t stride[], DATATYPE *dataset);
-int dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], DATATYPE *dataset, DATATYPE *original);
-void phdf5writeInd(char *filename);
-void phdf5readInd(char *filename);
-void phdf5writeAll(char *filename);
-void phdf5readAll(char *filename);
-void test_split_comm_access(char filenames[][PATH_MAX]);
-int parse_options(int argc, char **argv);
-void usage(void);
-int mkfilenames(char *prefix);
-void cleanup(void);
-
-/*
- * Setup the dimensions of the hyperslab.
- * Two modes--by rows or by columns.
- * Assume dimension rank is 2.
- */
-void
-slab_set(hsize_t start[], hsize_t count[], hsize_t stride[], int mode)
-{
- switch (mode) {
- case BYROW:
- /* Each process takes a slab of rows. */
- stride[0] = 1;
- stride[1] = 1;
- count[0] = SPACE1_DIM1 / mpi_size;
- count[1] = SPACE1_DIM2;
- start[0] = mpi_rank * count[0];
- start[1] = 0;
- break;
- case BYCOL:
- /* Each process takes a block of columns. */
- stride[0] = 1;
- stride[1] = 1;
- count[0] = SPACE1_DIM1;
- count[1] = SPACE1_DIM2 / mpi_size;
- start[0] = 0;
- start[1] = mpi_rank * count[1];
- break;
- default:
- /* Unknown mode. Set it to cover the whole dataset. */
- printf("unknown slab_set mode (%d)\n", mode);
- stride[0] = 1;
- stride[1] = 1;
- count[0] = SPACE1_DIM1;
- count[1] = SPACE1_DIM2;
- start[0] = 0;
- start[1] = 0;
- break;
- }
-}
-
-/*
- * Fill the dataset with trivial data for testing.
- * Assume dimension rank is 2 and data is stored contiguously.
- */
-void
-dataset_fill(hsize_t start[], hsize_t count[], hsize_t stride[], DATATYPE *dataset)
-{
- DATATYPE *dataptr = dataset;
- hsize_t i, j;
-
- /* put some trivial data in the data_array */
- for (i = 0; i < count[0]; i++) {
- for (j = 0; j < count[1]; j++) {
- *dataptr++ = (i * stride[0] + start[0]) * 100 + (j * stride[1] + start[1] + 1);
- }
- }
-}
-
-/*
- * Print the content of the dataset.
- */
-void
-dataset_print(hsize_t start[], hsize_t count[], hsize_t stride[], DATATYPE *dataset)
-{
- DATATYPE *dataptr = dataset;
- hsize_t i, j;
-
- /* print the slab read */
- for (i = 0; i < count[0]; i++) {
- printf("Row %lu: ", (unsigned long)(i * stride[0] + start[0]));
- for (j = 0; j < count[1]; j++) {
- printf("%03d ", *dataptr++);
- }
- printf("\n");
- }
-}
-
-/*
- * Verify the content of the dataset against the expected original data.
- */
-int
-dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], DATATYPE *dataset, DATATYPE *original)
-{
-#define MAX_ERR_REPORT 10 /* Maximum number of errors reported */
-
- hsize_t i, j;
- int nerr;
-
- /* print it if verbose */
- if (verbose)
- dataset_print(start, count, stride, dataset);
-
- nerr = 0;
- for (i = 0; i < count[0]; i++) {
- for (j = 0; j < count[1]; j++) {
- if (*dataset++ != *original++) {
- nerr++;
- if (nerr <= MAX_ERR_REPORT) {
- printf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
- (unsigned long)i, (unsigned long)j, (unsigned long)(i * stride[0] + start[0]),
- (unsigned long)(j * stride[1] + start[1]), *(dataset - 1), *(original - 1));
- }
- }
- }
- }
- if (nerr > MAX_ERR_REPORT)
- printf("[more errors ...]\n");
- if (nerr)
- printf("%d errors found in dataset_vrfy\n", nerr);
- return (nerr);
-}
-
-/*
- * Example of using the parallel HDF5 library to create two datasets
- * in one HDF5 file with parallel MPIO access support.
- * The Datasets are of sizes (number-of-mpi-processes x DIM1) x DIM2.
- * Each process controls only a slab of size DIM1 x DIM2 within each
- * dataset.
- */
-
-void
-phdf5writeInd(char *filename)
-{
- hid_t fid1; /* HDF5 file IDs */
- hid_t acc_tpl1; /* File access templates */
- hid_t sid1; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- hsize_t dims1[SPACE1_RANK] = {SPACE1_DIM1, SPACE1_DIM2}; /* dataspace dim sizes */
- DATATYPE data_array1[SPACE1_DIM1][SPACE1_DIM2]; /* data buffer */
-
- hsize_t start[SPACE1_RANK]; /* for hyperslab setting */
- hsize_t count[SPACE1_RANK], stride[SPACE1_RANK]; /* for hyperslab setting */
-
- herr_t ret; /* Generic return value */
-
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- if (verbose)
- printf("Independent write test on file %s\n", filename);
-
- /* -------------------
- * START AN HDF5 FILE
- * -------------------*/
- /* setup file access template with parallel IO access. */
- acc_tpl1 = H5Pcreate(H5P_FILE_ACCESS);
- assert(acc_tpl1 != FAIL);
- MESG("H5Pcreate access succeed");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(acc_tpl1, comm, info);
- assert(ret != FAIL);
- MESG("H5Pset_fapl_mpio succeed");
-
- /* create the file collectively */
- fid1 = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl1);
- assert(fid1 != FAIL);
- MESG("H5Fcreate succeed");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl1);
- assert(ret != FAIL);
-
- /* --------------------------
- * Define the dimensions of the overall datasets
- * and the slabs local to the MPI process.
- * ------------------------- */
- /* setup dimensionality object */
- sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
- assert(sid1 != FAIL);
- MESG("H5Screate_simple succeed");
-
- /* create a dataset collectively */
- dataset1 = H5Dcreate2(fid1, DATASETNAME1, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- assert(dataset1 != FAIL);
- MESG("H5Dcreate2 succeed");
-
- /* create another dataset collectively */
- dataset2 = H5Dcreate2(fid1, DATASETNAME2, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- assert(dataset2 != FAIL);
- MESG("H5Dcreate2 succeed");
-
- /* set up dimensions of the slab this process accesses */
- start[0] = mpi_rank * SPACE1_DIM1 / mpi_size;
- start[1] = 0;
- count[0] = SPACE1_DIM1 / mpi_size;
- count[1] = SPACE1_DIM2;
- stride[0] = 1;
- stride[1] = 1;
- if (verbose)
- printf("start[]=(%lu,%lu), count[]=(%lu,%lu), total datapoints=%lu\n", (unsigned long)start[0],
- (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)(count[0] * count[1]));
-
- /* put some trivial data in the data_array */
- dataset_fill(start, count, stride, &data_array1[0][0]);
- MESG("data_array initialized");
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset1);
- assert(file_dataspace != FAIL);
- MESG("H5Dget_space succeed");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
- assert(ret != FAIL);
- MESG("H5Sset_hyperslab succeed");
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(SPACE1_RANK, count, NULL);
- assert(mem_dataspace != FAIL);
-
- /* write data independently */
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
- assert(ret != FAIL);
- MESG("H5Dwrite succeed");
-
- /* write data independently */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
- assert(ret != FAIL);
- MESG("H5Dwrite succeed");
-
- /* release dataspace ID */
- H5Sclose(file_dataspace);
-
- /* close dataset collectively */
- ret = H5Dclose(dataset1);
- assert(ret != FAIL);
- MESG("H5Dclose1 succeed");
- ret = H5Dclose(dataset2);
- assert(ret != FAIL);
- MESG("H5Dclose2 succeed");
-
- /* release all IDs created */
- H5Sclose(sid1);
-
- /* close the file collectively */
- H5Fclose(fid1);
-}
-
-/* Example of using the parallel HDF5 library to read a dataset */
-void
-phdf5readInd(char *filename)
-{
- hid_t fid1; /* HDF5 file IDs */
- hid_t acc_tpl1; /* File access templates */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- DATATYPE data_array1[SPACE1_DIM1][SPACE1_DIM2]; /* data buffer */
- DATATYPE data_origin1[SPACE1_DIM1][SPACE1_DIM2]; /* expected data buffer */
-
- hsize_t start[SPACE1_RANK]; /* for hyperslab setting */
- hsize_t count[SPACE1_RANK], stride[SPACE1_RANK]; /* for hyperslab setting */
-
- herr_t ret; /* Generic return value */
-
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- if (verbose)
- printf("Independent read test on file %s\n", filename);
-
- /* setup file access template */
- acc_tpl1 = H5Pcreate(H5P_FILE_ACCESS);
- assert(acc_tpl1 != FAIL);
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(acc_tpl1, comm, info);
- assert(ret != FAIL);
-
- /* open the file collectively */
- fid1 = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl1);
- assert(fid1 != FAIL);
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl1);
- assert(ret != FAIL);
-
- /* open the dataset1 collectively */
- dataset1 = H5Dopen2(fid1, DATASETNAME1, H5P_DEFAULT);
- assert(dataset1 != FAIL);
-
- /* open another dataset collectively */
- dataset2 = H5Dopen2(fid1, DATASETNAME1, H5P_DEFAULT);
- assert(dataset2 != FAIL);
-
- /* set up dimensions of the slab this process accesses */
- start[0] = mpi_rank * SPACE1_DIM1 / mpi_size;
- start[1] = 0;
- count[0] = SPACE1_DIM1 / mpi_size;
- count[1] = SPACE1_DIM2;
- stride[0] = 1;
- stride[1] = 1;
- if (verbose)
- printf("start[]=(%lu,%lu), count[]=(%lu,%lu), total datapoints=%lu\n", (unsigned long)start[0],
- (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)(count[0] * count[1]));
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset1);
- assert(file_dataspace != FAIL);
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
- assert(ret != FAIL);
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(SPACE1_RANK, count, NULL);
- assert(mem_dataspace != FAIL);
-
- /* fill dataset with test data */
- dataset_fill(start, count, stride, &data_origin1[0][0]);
-
- /* read data independently */
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
- assert(ret != FAIL);
-
- /* verify the read data with original expected data */
- ret = dataset_vrfy(start, count, stride, &data_array1[0][0], &data_origin1[0][0]);
- assert(ret != FAIL);
-
- /* read data independently */
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
- assert(ret != FAIL);
-
- /* verify the read data with original expected data */
- ret = dataset_vrfy(start, count, stride, &data_array1[0][0], &data_origin1[0][0]);
- assert(ret == 0);
-
- /* close dataset collectively */
- ret = H5Dclose(dataset1);
- assert(ret != FAIL);
- ret = H5Dclose(dataset2);
- assert(ret != FAIL);
-
- /* release all IDs created */
- H5Sclose(file_dataspace);
-
- /* close the file collectively */
- H5Fclose(fid1);
-}
-
-/*
- * Example of using the parallel HDF5 library to create two datasets
- * in one HDF5 file with collective parallel access support.
- * The Datasets are of sizes (number-of-mpi-processes x DIM1) x DIM2.
- * Each process controls only a slab of size DIM1 x DIM2 within each
- * dataset. [Note: not so yet; the datasets are currently of size
- * DIM1 x DIM2 and each process controls a hyperslab within.]
- */
-
-void
-phdf5writeAll(char *filename)
-{
- hid_t fid1; /* HDF5 file IDs */
- hid_t acc_tpl1; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid1; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- hsize_t dims1[SPACE1_RANK] = {SPACE1_DIM1, SPACE1_DIM2}; /* dataspace dim sizes */
- DATATYPE data_array1[SPACE1_DIM1][SPACE1_DIM2]; /* data buffer */
-
- hsize_t start[SPACE1_RANK]; /* for hyperslab setting */
- hsize_t count[SPACE1_RANK], stride[SPACE1_RANK]; /* for hyperslab setting */
-
- herr_t ret; /* Generic return value */
-
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- if (verbose)
- printf("Collective write test on file %s\n", filename);
-
- /* -------------------
- * START AN HDF5 FILE
- * -------------------*/
- /* setup file access template with parallel IO access. */
- acc_tpl1 = H5Pcreate(H5P_FILE_ACCESS);
- assert(acc_tpl1 != FAIL);
- MESG("H5Pcreate access succeed");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(acc_tpl1, comm, info);
- assert(ret != FAIL);
- MESG("H5Pset_fapl_mpio succeed");
-
- /* create the file collectively */
- fid1 = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl1);
- assert(fid1 != FAIL);
- MESG("H5Fcreate succeed");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl1);
- assert(ret != FAIL);
-
- /* --------------------------
- * Define the dimensions of the overall datasets
- * and create the dataset
- * ------------------------- */
- /* setup dimensionality object */
- sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
- assert(sid1 != FAIL);
- MESG("H5Screate_simple succeed");
-
- /* create a dataset collectively */
- dataset1 = H5Dcreate2(fid1, DATASETNAME1, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- assert(dataset1 != FAIL);
- MESG("H5Dcreate2 succeed");
-
- /* create another dataset collectively */
- dataset2 = H5Dcreate2(fid1, DATASETNAME2, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- assert(dataset2 != FAIL);
- MESG("H5Dcreate2 2 succeed");
-
- /*
- * Set up dimensions of the slab this process accesses.
- */
-
- /* Dataset1: each process takes a block of rows. */
- slab_set(start, count, stride, BYROW);
- if (verbose)
- printf("start[]=(%lu,%lu), count[]=(%lu,%lu), total datapoints=%lu\n", (unsigned long)start[0],
- (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)(count[0] * count[1]));
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset1);
- assert(file_dataspace != FAIL);
- MESG("H5Dget_space succeed");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
- assert(ret != FAIL);
- MESG("H5Sset_hyperslab succeed");
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(SPACE1_RANK, count, NULL);
- assert(mem_dataspace != FAIL);
-
- /* fill the local slab with some trivial data */
- dataset_fill(start, count, stride, &data_array1[0][0]);
- MESG("data_array initialized");
- if (verbose) {
- MESG("data_array created");
- dataset_print(start, count, stride, &data_array1[0][0]);
- }
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- assert(xfer_plist != FAIL);
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- assert(ret != FAIL);
- MESG("H5Pcreate xfer succeed");
-
- /* write data collectively */
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- assert(ret != FAIL);
- MESG("H5Dwrite succeed");
-
- /* release all temporary handles. */
- /* Could have used them for dataset2 but it is cleaner */
- /* to create them again.*/
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
-
- /* Dataset2: each process takes a block of columns. */
- slab_set(start, count, stride, BYCOL);
- if (verbose)
- printf("start[]=(%lu,%lu), count[]=(%lu,%lu), total datapoints=%lu\n", (unsigned long)start[0],
- (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)(count[0] * count[1]));
-
- /* put some trivial data in the data_array */
- dataset_fill(start, count, stride, &data_array1[0][0]);
- MESG("data_array initialized");
- if (verbose) {
- MESG("data_array created");
- dataset_print(start, count, stride, &data_array1[0][0]);
- }
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset1);
- assert(file_dataspace != FAIL);
- MESG("H5Dget_space succeed");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
- assert(ret != FAIL);
- MESG("H5Sset_hyperslab succeed");
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(SPACE1_RANK, count, NULL);
- assert(mem_dataspace != FAIL);
-
- /* fill the local slab with some trivial data */
- dataset_fill(start, count, stride, &data_array1[0][0]);
- MESG("data_array initialized");
- if (verbose) {
- MESG("data_array created");
- dataset_print(start, count, stride, &data_array1[0][0]);
- }
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- assert(xfer_plist != FAIL);
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- assert(ret != FAIL);
- MESG("H5Pcreate xfer succeed");
-
- /* write data collectively */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- assert(ret != FAIL);
- MESG("H5Dwrite succeed");
-
- /* release all temporary handles. */
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
-
- /*
- * All writes completed. Close datasets collectively
- */
- ret = H5Dclose(dataset1);
- assert(ret != FAIL);
- MESG("H5Dclose1 succeed");
- ret = H5Dclose(dataset2);
- assert(ret != FAIL);
- MESG("H5Dclose2 succeed");
-
- /* release all IDs created */
- H5Sclose(sid1);
-
- /* close the file collectively */
- H5Fclose(fid1);
-}
-
-/*
- * Example of using the parallel HDF5 library to read two datasets
- * in one HDF5 file with collective parallel access support.
- * The Datasets are of sizes (number-of-mpi-processes x DIM1) x DIM2.
- * Each process controls only a slab of size DIM1 x DIM2 within each
- * dataset. [Note: not so yet; the datasets are currently of size
- * DIM1 x DIM2 and each process controls a hyperslab within.]
- */
-
-void
-phdf5readAll(char *filename)
-{
- hid_t fid1; /* HDF5 file IDs */
- hid_t acc_tpl1; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- DATATYPE data_array1[SPACE1_DIM1][SPACE1_DIM2]; /* data buffer */
- DATATYPE data_origin1[SPACE1_DIM1][SPACE1_DIM2]; /* expected data buffer */
-
- hsize_t start[SPACE1_RANK]; /* for hyperslab setting */
- hsize_t count[SPACE1_RANK], stride[SPACE1_RANK]; /* for hyperslab setting */
-
- herr_t ret; /* Generic return value */
-
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- if (verbose)
- printf("Collective read test on file %s\n", filename);
-
- /* -------------------
- * OPEN AN HDF5 FILE
- * -------------------*/
- /* setup file access template with parallel IO access. */
- acc_tpl1 = H5Pcreate(H5P_FILE_ACCESS);
- assert(acc_tpl1 != FAIL);
- MESG("H5Pcreate access succeed");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(acc_tpl1, comm, info);
- assert(ret != FAIL);
- MESG("H5Pset_fapl_mpio succeed");
-
- /* open the file collectively */
- fid1 = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl1);
- assert(fid1 != FAIL);
- MESG("H5Fopen succeed");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl1);
- assert(ret != FAIL);
-
- /* --------------------------
- * Open the datasets in it
- * ------------------------- */
- /* open the dataset1 collectively */
- dataset1 = H5Dopen2(fid1, DATASETNAME1, H5P_DEFAULT);
- assert(dataset1 != FAIL);
- MESG("H5Dopen2 succeed");
-
- /* open another dataset collectively */
- dataset2 = H5Dopen2(fid1, DATASETNAME1, H5P_DEFAULT);
- assert(dataset2 != FAIL);
- MESG("H5Dopen2 2 succeed");
-
- /*
- * Set up dimensions of the slab this process accesses.
- */
-
- /* Dataset1: each process takes a block of columns. */
- slab_set(start, count, stride, BYCOL);
- if (verbose)
- printf("start[]=(%lu,%lu), count[]=(%lu,%lu), total datapoints=%lu\n", (unsigned long)start[0],
- (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)(count[0] * count[1]));
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset1);
- assert(file_dataspace != FAIL);
- MESG("H5Dget_space succeed");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
- assert(ret != FAIL);
- MESG("H5Sset_hyperslab succeed");
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(SPACE1_RANK, count, NULL);
- assert(mem_dataspace != FAIL);
-
- /* fill dataset with test data */
- dataset_fill(start, count, stride, &data_origin1[0][0]);
- MESG("data_array initialized");
- if (verbose) {
- MESG("data_array created");
- dataset_print(start, count, stride, &data_array1[0][0]);
- }
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- assert(xfer_plist != FAIL);
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- assert(ret != FAIL);
- MESG("H5Pcreate xfer succeed");
-
- /* read data collectively */
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- assert(ret != FAIL);
- MESG("H5Dread succeed");
-
- /* verify the read data with original expected data */
- ret = dataset_vrfy(start, count, stride, &data_array1[0][0], &data_origin1[0][0]);
- assert(ret != FAIL);
-
- /* release all temporary handles. */
- /* Could have used them for dataset2 but it is cleaner */
- /* to create them again.*/
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
-
- /* Dataset2: each process takes a block of rows. */
- slab_set(start, count, stride, BYROW);
- if (verbose)
- printf("start[]=(%lu,%lu), count[]=(%lu,%lu), total datapoints=%lu\n", (unsigned long)start[0],
- (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)(count[0] * count[1]));
-
- /* create a file dataspace independently */
- file_dataspace = H5Dget_space(dataset1);
- assert(file_dataspace != FAIL);
- MESG("H5Dget_space succeed");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
- assert(ret != FAIL);
- MESG("H5Sset_hyperslab succeed");
-
- /* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple(SPACE1_RANK, count, NULL);
- assert(mem_dataspace != FAIL);
-
- /* fill dataset with test data */
- dataset_fill(start, count, stride, &data_origin1[0][0]);
- MESG("data_array initialized");
- if (verbose) {
- MESG("data_array created");
- dataset_print(start, count, stride, &data_array1[0][0]);
- }
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- assert(xfer_plist != FAIL);
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- assert(ret != FAIL);
- MESG("H5Pcreate xfer succeed");
-
- /* read data collectively */
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
- assert(ret != FAIL);
- MESG("H5Dread succeed");
-
- /* verify the read data with original expected data */
- ret = dataset_vrfy(start, count, stride, &data_array1[0][0], &data_origin1[0][0]);
- assert(ret != FAIL);
-
- /* release all temporary handles. */
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
-
- /*
- * All reads completed. Close datasets collectively
- */
- ret = H5Dclose(dataset1);
- assert(ret != FAIL);
- MESG("H5Dclose1 succeed");
- ret = H5Dclose(dataset2);
- assert(ret != FAIL);
- MESG("H5Dclose2 succeed");
-
- /* close the file collectively */
- H5Fclose(fid1);
-}
-
-/*
- * Test file access by a communicator other than COMM_WORLD.
- * Split COMM_WORLD into two: one (even_comm) contains the original
- * processes of even rank, the other (odd_comm) contains the original
- * processes of odd rank. Processes in even_comm create a file, then
- * close it, using even_comm. Processes in odd_comm just do a barrier
- * using odd_comm. Then they all do a barrier using COMM_WORLD.
- * If the file creation and close do not perform the correct collective
- * action according to the communicator argument, the processes will
- * freeze up sooner or later due to mismatched barriers.
- */
-void
-test_split_comm_access(char filenames[][PATH_MAX])
-{
- MPI_Comm comm;
- MPI_Info info = MPI_INFO_NULL;
- int color, mrc;
- int newrank, newprocs;
- hid_t fid; /* file IDs */
- hid_t acc_tpl; /* File access properties */
- herr_t ret; /* generic return value */
-
- if (verbose)
- printf("Independent write test on file %s %s\n", filenames[0], filenames[1]);
-
- color = mpi_rank % 2;
- mrc = MPI_Comm_split(MPI_COMM_WORLD, color, mpi_rank, &comm);
- assert(mrc == MPI_SUCCESS);
- MPI_Comm_size(comm, &newprocs);
- MPI_Comm_rank(comm, &newrank);
-
- if (color) {
- /* odd-rank processes */
- mrc = MPI_Barrier(comm);
- assert(mrc == MPI_SUCCESS);
- }
- else {
- /* even-rank processes */
- /* setup file access template */
- acc_tpl = H5Pcreate(H5P_FILE_ACCESS);
- assert(acc_tpl != FAIL);
-
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(acc_tpl, comm, info);
- assert(ret != FAIL);
-
- /* create the file collectively */
- fid = H5Fcreate(filenames[color], H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
- assert(fid != FAIL);
- MESG("H5Fcreate succeed");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl);
- assert(ret != FAIL);
-
- ret = H5Fclose(fid);
- assert(ret != FAIL);
- }
- if (mpi_rank == 0) {
- mrc = MPI_File_delete(filenames[color], info);
- assert(mrc == MPI_SUCCESS);
- }
- MPI_Comm_free(&comm);
-}
-
-/*
- * Show command usage
- */
-void
-usage(void)
-{
- printf("Usage: testphdf5 [-f <prefix>] [-r] [-w] [-v]\n");
- printf("\t-f\tfile prefix for parallel test files.\n");
- printf("\t \t e.g. pfs:/PFS/myname\n");
- printf("\t \tcan be set via $" PARAPREFIX ".\n");
- printf("\t \tDefault is current directory.\n");
- printf("\t-c\tno cleanup\n");
- printf("\t-r\tno read\n");
- printf("\t-w\tno write\n");
- printf("\t-v\tverbose on\n");
- printf("\tdefault do write then read\n");
- printf("\n");
-}
-
-/*
- * compose the test filename with the prefix supplied.
- * return code: 0 if no error
- * 1 otherwise.
- */
-int
-mkfilenames(char *prefix)
-{
- int i, n;
- size_t strsize;
-
- /* filename will be prefix/ParaEgN.h5 where N is 0 to 9. */
- /* So, string must be big enough to hold the prefix, / and 10 more chars */
- /* and the terminating null. */
- strsize = strlen(prefix) + 12;
- if (strsize > PATH_MAX) {
- printf("File prefix too long; Use a short path name.\n");
- return (1);
- }
- n = sizeof(testfiles) / sizeof(testfiles[0]);
- if (n > 9) {
- printf("Warning: Too many entries in testfiles. "
- "Need to adjust the code to accommodate the large size.\n");
- }
- for (i = 0; i < n; i++) {
- snprintf(testfiles[i], PATH_MAX, "%s/ParaEg%d.h5", prefix, i);
- }
- return (0);
-}
-
-/*
- * parse the command line options
- */
-int
-parse_options(int argc, char **argv)
-{
- int i, n;
-
- /* initialize testfiles to nulls */
- n = sizeof(testfiles) / sizeof(testfiles[0]);
- for (i = 0; i < n; i++) {
- testfiles[i][0] = '\0';
- }
-
- while (--argc) {
- if (**(++argv) != '-') {
- break;
- }
- else {
- switch (*(*argv + 1)) {
- case 'f':
- ++argv;
- if (--argc < 1) {
- usage();
- nerrors++;
- return (1);
- }
- if (mkfilenames(*argv)) {
- nerrors++;
- return (1);
- }
- break;
- case 'c':
- docleanup = 0; /* no cleanup */
- break;
- case 'r':
- doread = 0;
- break;
- case 'w':
- dowrite = 0;
- break;
- case 'v':
- verbose = 1;
- break;
- default:
- usage();
- nerrors++;
- return (1);
- }
- }
- }
-
- /* check the file prefix */
- if (testfiles[0][0] == '\0') {
- /* try to get it from the environment variable HDF5_PARAPREFIX */
- char *env;
- char *env_default = "."; /* default to current directory */
- if ((env = getenv(PARAPREFIX)) == NULL) {
- env = env_default;
- }
- mkfilenames(env);
- }
- return (0);
-}
-
-/*
- * cleanup test files created
- */
-void
-cleanup(void)
-{
- int i, n;
-
- n = sizeof(testfiles) / sizeof(testfiles[0]);
- for (i = 0; i < n; i++) {
- MPI_File_delete(testfiles[i], MPI_INFO_NULL);
- }
-}
-
-/* Main Program */
-int
-main(int argc, char **argv)
-{
- int mpi_namelen;
- char mpi_name[MPI_MAX_PROCESSOR_NAME];
- int i, n;
-
- MPI_Init(&argc, &argv);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- MPI_Get_processor_name(mpi_name, &mpi_namelen);
- /* Make sure datasets can be divided into equal chunks by the processes */
- if ((SPACE1_DIM1 % mpi_size) || (SPACE1_DIM2 % mpi_size)) {
- printf("DIM1(%d) and DIM2(%d) must be multiples of processes (%d)\n", SPACE1_DIM1, SPACE1_DIM2,
- mpi_size);
- nerrors++;
- goto finish;
- }
-
- if (parse_options(argc, argv) != 0)
- goto finish;
-
- /* show test file names */
- if (mpi_rank == 0) {
- n = sizeof(testfiles) / sizeof(testfiles[0]);
- printf("Parallel test files are:\n");
- for (i = 0; i < n; i++) {
- printf(" %s\n", testfiles[i]);
- }
- }
-
- if (dowrite) {
- MPI_BANNER("testing PHDF5 dataset using split communicators...");
- test_split_comm_access(testfiles);
- MPI_BANNER("testing PHDF5 dataset independent write...");
- phdf5writeInd(testfiles[0]);
- MPI_BANNER("testing PHDF5 dataset collective write...");
- phdf5writeAll(testfiles[1]);
- }
- if (doread) {
- MPI_BANNER("testing PHDF5 dataset independent read...");
- phdf5readInd(testfiles[0]);
- MPI_BANNER("testing PHDF5 dataset collective read...");
- phdf5readAll(testfiles[1]);
- }
-
- if (!(dowrite || doread)) {
- usage();
- nerrors++;
- }
-
-finish:
- if (mpi_rank == 0) { /* only process 0 reports */
- if (nerrors)
- printf("***PHDF5 example detected %d errors***\n", nerrors);
- else {
- printf("=====================================\n");
- printf("PHDF5 example finished with no errors\n");
- printf("=====================================\n");
- }
- }
- if (docleanup)
- cleanup();
- MPI_Finalize();
-
- return (nerrors);
-}
-
-#else /* H5_HAVE_PARALLEL */
-/* dummy program since H5_HAVE_PARALLEL is not configured in */
-int
-main(void)
-{
- printf("No PHDF5 example because parallel is not configured in\n");
- return (0);
-}
-#endif /* H5_HAVE_PARALLEL */
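The core pattern of the deleted ph5example.c (each rank selects its block of rows in the file dataspace and writes through a collective transfer property list) is condensed in the sketch below. It is illustrative only and not part of the diff: the file and dataset names are made up, DIM1 is assumed to be divisible by the number of ranks, and error checking is omitted.

/* Condensed collective hyperslab write (illustrative sketch, not part of the diff). */
#include <mpi.h>
#include "hdf5.h"

#define DIM1 24
#define DIM2 24

int
main(int argc, char **argv)
{
    int     mpi_size, mpi_rank;
    int     data[DIM1][DIM2];
    hsize_t dims[2] = {DIM1, DIM2};
    hsize_t start[2], count[2];
    hsize_t i, j;
    hid_t   fapl, fid, sid, file_space, mem_space, dset, dxpl;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    /* File access through the MPI-IO driver */
    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    fid = H5Fcreate("ParaEg_sketch.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    H5Pclose(fapl);

    /* Create the dataset collectively */
    sid  = H5Screate_simple(2, dims, NULL);
    dset = H5Dcreate2(fid, "Data1", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

    /* BYROW slab: each rank takes DIM1/mpi_size contiguous rows */
    count[0] = DIM1 / (hsize_t)mpi_size;
    count[1] = DIM2;
    start[0] = (hsize_t)mpi_rank * count[0];
    start[1] = 0;

    /* Fill this rank's slab with trivial data */
    for (i = 0; i < count[0]; i++)
        for (j = 0; j < count[1]; j++)
            data[i][j] = (int)((start[0] + i) * 100 + j + 1);

    /* Select this rank's block of rows in the file, full block in memory */
    file_space = H5Dget_space(dset);
    H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, NULL, count, NULL);
    mem_space = H5Screate_simple(2, count, NULL);

    /* Write collectively */
    dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    H5Dwrite(dset, H5T_NATIVE_INT, mem_space, file_space, dxpl, &data[0][0]);

    H5Pclose(dxpl);
    H5Sclose(mem_space);
    H5Sclose(file_space);
    H5Sclose(sid);
    H5Dclose(dset);
    H5Fclose(fid);
    MPI_Finalize();
    return 0;
}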
diff --git a/examples/run-c-ex.sh.in b/examples/run-c-ex.sh.in
index aae1d34..b51c5d6 100644
--- a/examples/run-c-ex.sh.in
+++ b/examples/run-c-ex.sh.in
@@ -155,23 +155,7 @@ then
HDF5_DEBUG=$OLD_DEBUG_STRING &&\
rm h5_debug_trace &&\
RunTest h5_shared_mesg &&\
- rm h5_shared_mesg &&\
- RunTest h5_vds-eiger &&\
- rm h5_vds-eiger &&\
- RunTest h5_vds-exclim &&\
- rm h5_vds-exclim &&\
- RunTest h5_vds-exc &&\
- rm h5_vds-exc &&\
- RunTest h5_vds-simpleIO &&\
- rm h5_vds-simpleIO &&\
- RunTest h5_vds-percival &&\
- rm h5_vds-percival &&\
- RunTest h5_vds-percival-unlim &&\
- rm h5_vds-percival-unlim &&\
- RunTest h5_vds-percival-unlim-maxmin&&\
- rm h5_vds-percival-unlim-maxmin &&\
- RunTest h5_vds &&\
- rm h5_vds); then
+ rm h5_shared_mesg); then
EXIT_VALUE=${EXIT_SUCCESS}
else
EXIT_VALUE=${EXIT_FAILURE}