author    Richard Warren <Richard.Warren@hdfgroup.org>  2017-11-01 20:16:13 (GMT)
committer Richard Warren <Richard.Warren@hdfgroup.org>  2017-11-01 20:16:13 (GMT)
commit    86ef356e0955041768e6a78d8585b52239940820 (patch)
tree      8144f204cc656a1071a879f9c8a926b74fde9119
parent    a244cf2e9c7b586d94370c697014052a1865b0c5 (diff)
Add two (2) new parallel VDS tests, the VDS merge code for Neil, and a few edits to prevent errors (branch: inactive/parallel_vds_testing)
-rw-r--r--  src/H5Dvirtual.c         21
-rw-r--r--  testpar/CMakeLists.txt    2
-rw-r--r--  testpar/Makefile.am       2
-rw-r--r--  testpar/t_ph5_vds.c     504
-rw-r--r--  testpar/t_pvdsunl.c     387
5 files changed, 911 insertions, 5 deletions
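
In short, the commit compiles out the "parallel reads are not supported" check in H5Dvirtual.c and adds two parallel test programs that read a virtual dataset through the MPI-IO driver. A minimal sketch of that access pattern follows (illustrative only, not part of the commit; error checking omitted; the file and dataset names match the t_ph5_vds.c test below):

    #include <mpi.h>
    #include "hdf5.h"

    int main(int argc, char **argv)
    {
        hid_t fapl, file, dset;
        int   rdata[4][100];    /* matches the 4 x 100 VDS created by t_ph5_vds.c */

        MPI_Init(&argc, &argv);

        /* Open the VDS container file through the MPI-IO driver */
        fapl = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
        file = H5Fopen("vds.h5", H5F_ACC_RDONLY, fapl);

        /* With the read-time check compiled out, every rank can read the VDS */
        dset = H5Dopen2(file, "VDS", H5P_DEFAULT);
        H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata[0]);

        H5Dclose(dset);
        H5Fclose(file);
        H5Pclose(fapl);
        MPI_Finalize();
        return 0;
    }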
diff --git a/src/H5Dvirtual.c b/src/H5Dvirtual.c
index 3a7f6a1..98515a0 100644
--- a/src/H5Dvirtual.c
+++ b/src/H5Dvirtual.c
@@ -877,7 +877,9 @@ H5D__virtual_open_source_dset(const H5D_t *vdset,
H5O_storage_virtual_srcdset_t *source_dset, hid_t dxpl_id)
{
H5F_t *src_file = NULL; /* Source file */
+#ifdef NOT_USED
hbool_t src_file_open = FALSE; /* Whether we have opened and need to close src_file */
+#endif
H5G_loc_t src_root_loc; /* Object location of source file root group */
herr_t ret_value = SUCCEED; /* Return value */
@@ -893,10 +895,20 @@ H5D__virtual_open_source_dset(const H5D_t *vdset,
/* Check if we need to open the source file */
if(HDstrcmp(source_dset->file_name, ".")) {
/* Open the source file */
- if(NULL == (src_file = H5F_open(source_dset->file_name, H5F_INTENT(vdset->oloc.file) & (H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE | H5F_ACC_SWMR_READ), H5P_FILE_CREATE_DEFAULT, vdset->shared->layout.storage.u.virt.source_fapl, dxpl_id)))
- H5E_clear_stack(NULL); /* Quick hack until proper support for H5Fopen with missing file is implemented */
+ if(NULL == (src_file = H5F_open(source_dset->file_name,
+ H5F_INTENT(vdset->oloc.file) &
+ (H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE | H5F_ACC_SWMR_READ),
+ H5P_FILE_CREATE_DEFAULT,
+ vdset->shared->layout.storage.u.virt.source_fapl,
+ dxpl_id)))
+ /* Quick hack until proper support for H5Fopen
+ * with missing file is implemented
+ */
+ H5E_clear_stack(NULL);
+#ifdef NOT_USED
else
src_file_open = TRUE;
+#endif
} /* end if */
else
/* Source file is ".", use the virtual dataset's file */
@@ -930,11 +942,12 @@ H5D__virtual_open_source_dset(const H5D_t *vdset,
} /* end if */
done:
+#ifdef NOT_USED
/* Close source file */
if(src_file_open)
if(H5F_try_close(src_file, NULL) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEFILE, FAIL, "can't close source file")
-
+#endif
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__virtual_open_source_dset() */
@@ -2579,7 +2592,7 @@ H5D__virtual_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
storage = &io_info->dset->shared->layout.storage.u.virt;
HDassert((storage->view == H5D_VDS_FIRST_MISSING) || (storage->view == H5D_VDS_LAST_AVAILABLE));
-#ifdef H5_HAVE_PARALLEL
+#if defined(H5_HAVE_PARALLEL) && defined(H5_DISABLE_THIS_CHECK)
/* Parallel reads are not supported (yet) */
if(H5F_HAS_FEATURE(io_info->dset->oloc.file, H5FD_FEAT_HAS_MPI))
HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "parallel reads not supported on virtual datasets")
diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt
index 0c9f70e..e39f25a 100644
--- a/testpar/CMakeLists.txt
+++ b/testpar/CMakeLists.txt
@@ -53,6 +53,8 @@ set (H5P_TESTS
t_init_term
t_shapesame
t_filters_parallel
+ t_ph5_vds
+ t_pvdsunl
)
foreach (testp ${H5P_TESTS})
diff --git a/testpar/Makefile.am b/testpar/Makefile.am
index 1f15830..51fea81 100644
--- a/testpar/Makefile.am
+++ b/testpar/Makefile.am
@@ -23,7 +23,7 @@ AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/test
# Test programs. These are our main targets.
#
-TEST_PROG_PARA=t_mpi t_bigio testphdf5 t_cache t_cache_image t_pflush1 t_pflush2 t_pread t_pshutdown t_prestart t_init_term t_shapesame t_filters_parallel
+TEST_PROG_PARA=t_mpi t_bigio testphdf5 t_cache t_cache_image t_pflush1 t_pflush2 t_pread t_pshutdown t_prestart t_init_term t_shapesame t_filters_parallel t_ph5_vds t_pvdsunl
check_PROGRAMS = $(TEST_PROG_PARA)
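
Once registered in H5P_TESTS and TEST_PROG_PARA above, the two programs build with the rest of the parallel test suite. A manual run would look something like this (illustrative; assumes an MPI launcher on the PATH):

    mpiexec -n 4 ./t_ph5_vds
    mpiexec -n 4 ./t_pvdsunl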
diff --git a/testpar/t_ph5_vds.c b/testpar/t_ph5_vds.c
new file mode 100644
index 0000000..a56164c
--- /dev/null
+++ b/testpar/t_ph5_vds.c
@@ -0,0 +1,504 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/************************************************************
+
+ This example illustrates the concept of a virtual dataset.
+ The program creates three 1-dim source datasets and writes
+ data to them. Then it creates a 2-dim virtual dataset and
+ maps the first three rows of the virtual dataset to the data
+ in the source datasets. Elements of a row are mapped to all
+ elements of the corresponding source dataset.
+ The fourth row is not mapped and will be filled with the fill
+ value when the virtual dataset is read back.
+
+ The program closes all datasets, and then reopens the virtual
+ dataset, and finds and prints its creation properties.
+ Then it reads the values.
+
+ This file is intended for use with HDF5 Library version 1.10
+
+ ************************************************************
+ RAW - This example was modified for testing parallel VDS
+ functionality.
+ ************************************************************/
+/* EIP Add link to the picture */
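+/* In lieu of the picture, the mapping sketched (illustrative):
+ *
+ *   VDS "vds.h5:/VDS" (VDSDIM0 x VDSDIM1 = 4 x 100)
+ *     row 0  <--  all of a.h5:/A  (100 elements)
+ *     row 1  <--  all of b.h5:/B  (100 elements)
+ *     row 2  <--  all of c.h5:/C  (100 elements)
+ *     row 3  <--  unmapped; reads back as the fill value (-1)
+ */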
+#include "h5test.h"
+#include "testpar.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#define FILE "vds.h5"
+#define DATASET "VDS"
+#define VDSDIM1 100
+#define VDSDIM0 4
+#define DIM0 100
+#define RANK1 1
+#define RANK2 2
+
+#define NFILENAME 4
+const char *FILENAMES[NFILENAME + 1] = { "a.h5",
+ "b.h5",
+ "c.h5",
+ "vds.h5",
+ NULL };
+
+const char *SRC_DATASET[] = {
+ "A",
+ "B",
+ "C"
+};
+
+
+int
+main (int argc, char **argv)
+{
+ hid_t
+ memspace = -1,
+ dataspace = -1,
+ filespace = -1,
+ src_space = -1,
+ src_dset = -1,
+ vds_file = -1,
+ file_id = -1,
+ fapl_id = -1,
+ dxpl_id = -1,
+ vspace = -1,
+ vdsset = -1,
+ dcpl = -1;
+ herr_t status;
+ hsize_t vdsdims[2] = {VDSDIM0, VDSDIM1}, /* Virtual dataset dimensions */
+ dims[1] = {DIM0}, /* Source datasets dimensions */
+ start[2], /* Hyperslab parameters */
+ count[2],
+ block[2];
+ hsize_t start_out[2],
+ stride_out[2],
+ count_out[2],
+ block_out[2];
+ int wdata[DIM0], /* Write buffer for source dataset */
+ rdata[VDSDIM0][VDSDIM1], /* Read buffer for virtual dataset */
+ i, j, k, l;
+ int fill_value = -1; /* Fill value for VDS */
+ H5D_layout_t layout; /* Storage layout */
+ size_t num_map; /* Number of mappings */
+ ssize_t len; /* Length of the string; also a return value */
+ char *filename;
+ char *dsetname;
+ hssize_t nblocks; /* signed: H5Sget_select_hyper_nblocks returns a negative value on error */
+ hsize_t elements;
+ hsize_t offset;
+ hsize_t *buf; /* Buffer to hold hyperslab coordinates */
+
+ int group_status = 0, nerrs = 0;
+ int mpi_rank;
+ int mpi_size;
+ int workgroup;
+ MPI_Comm group_comm = MPI_COMM_NULL;
+
+ if ( (MPI_Init(&argc, &argv)) != MPI_SUCCESS) {
+ HDfprintf(stderr, "FATAL: Unable to initialize MPI\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ if ( (MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank)) != MPI_SUCCESS) {
+ HDfprintf(stderr, "FATAL: MPI_Comm_rank returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ if ( (MPI_Comm_size(MPI_COMM_WORLD, &mpi_size)) != MPI_SUCCESS) {
+ HDfprintf(stderr, "FATAL: MPI_Comm_size returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ workgroup = (mpi_rank < VDSDIM0 ? 0 : 1);
+ if ( (MPI_Comm_split(MPI_COMM_WORLD,
+ workgroup,
+ 0,
+ &group_comm)) != MPI_SUCCESS) {
+ HDfprintf(stderr, "FATAL: MPI_Comm_split returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ if (H5open() < 0) {
+ HDfprintf(stderr, "FATAL: H5open returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ if ( mpi_rank == 0 ) {
+ HDfprintf(stdout, "========================================\n");
+ HDfprintf(stdout, "Parallel VDS functionality tests\n");
+ HDfprintf(stdout, " mpi_size = %d\n", mpi_size);
+ HDfprintf(stdout, "========================================\n");
+ }
+
+ /*
+ * Create source files and datasets.
+ * we'll assign one MPI rank per file...
+ */
+ for (i=0; i < 3; i++) {
+
+ if (mpi_rank == i) {
+ /*
+ * Initialize data for i-th source dataset.
+ */
+ for (j = 0; j < DIM0; j++) wdata[j] = i+1;
+
+ /*
+ * Create the source files and datasets. Write data to each dataset and
+ * close all resources.
+ */
+
+ if ((file_id = H5Fcreate (FILENAMES[i], H5F_ACC_TRUNC,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+ if ((src_space = H5Screate_simple (RANK1, dims, NULL)) < 0) TEST_ERROR;
+ if ((src_dset = H5Dcreate2 (file_id, SRC_DATASET[i], H5T_NATIVE_INT,
+ src_space, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) TEST_ERROR;
+ if ((status = H5Dwrite (src_dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, wdata)) < 0) TEST_ERROR;
+
+ if ((status = H5Dclose (src_dset)) < 0) TEST_ERROR;
+ if ((status = H5Sclose (src_space)) < 0) TEST_ERROR;
+ if ((status = H5Fclose (file_id)) < 0) TEST_ERROR;
+ }
+ }
+
+ src_dset = -1;
+ src_space = -1;
+ file_id = -1;
+
+ /* We allow at most VDSDIM0 mpi ranks to participate
+ * in the actual VDS testing, i.e. in the best case we
+ * do 1 mpi rank per data slice. We accomplish this
+ * by creating an MPI 'group_comm' communicator which
+ * contains at most VDSDIM0 ranks. All other processes
+ * skip the testing part but must participate in the
+ * MPI_Allreduce used to exchange global status.
+ */
+ if ( mpi_rank < VDSDIM0) {
+
+ if ( (fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0 ) TEST_ERROR;
+ if ( (H5Pset_fapl_mpio(fapl_id, group_comm,
+ MPI_INFO_NULL)) < 0) TEST_ERROR;
+
+ /* Create file in which virtual dataset will be stored. */
+ if ( (vds_file = H5Fcreate (FILE, H5F_ACC_TRUNC, H5P_DEFAULT,
+ fapl_id)) < 0 ) TEST_ERROR;
+
+ if ( (dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0 ) TEST_ERROR;
+ if ( (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE)) < 0 ) TEST_ERROR;
+
+ /* Create VDS dataspace. */
+ if ( (dataspace = H5Screate_simple (RANK2, vdsdims, NULL)) < 0 ) TEST_ERROR;
+
+ /* Set VDS creation property. */
+ if ( (dcpl = H5Pcreate (H5P_DATASET_CREATE)) < 0 ) TEST_ERROR;
+
+ if ( (H5Pset_fill_value (dcpl, H5T_NATIVE_INT, &fill_value)) < 0 ) TEST_ERROR;
+
+ /* Initialize hyperslab values. */
+ start[0] = 0;
+ start[1] = 0;
+ count[0] = 1;
+ count[1] = 1;
+ block[0] = 1;
+ block[1] = VDSDIM1;
+
+ /*
+ * Build the mappings.
+ * Selections in the source datasets are H5S_ALL.
+ * In the virtual dataset we select the first, the second and the third rows
+ * and map each row to the data in the corresponding source dataset.
+ */
+ if ((src_space = H5Screate_simple (RANK1, dims, NULL)) < 0 ) TEST_ERROR;
+
+ for (i=0; i < 3; i++) {
+ start[0] = (hsize_t)i;
+ /* Select i-th row in the virtual dataset;
+ * selection in the source datasets is the same.
+ */
+ if ((status = H5Sselect_hyperslab (dataspace, H5S_SELECT_SET,
+ start, NULL, count, block)) < 0) TEST_ERROR;
+ if ((status = H5Pset_virtual (dcpl, dataspace, FILENAMES[i],
+ SRC_DATASET[i], src_space)) < 0) TEST_ERROR;
+ }
+
+ /* Create a virtual dataset. */
+ if ((vdsset = H5Dcreate2 (vds_file, DATASET, H5T_NATIVE_INT,
+ dataspace, H5P_DEFAULT,
+ dcpl, H5P_DEFAULT)) < 0) TEST_ERROR;
+ /* Now close up the file and close the various
+ * other HDF5 descriptors */
+ if ((status = H5Dclose (vdsset)) < 0) TEST_ERROR;
+ vdsset = -1;
+ if ((status = H5Sclose (dataspace)) < 0) TEST_ERROR;
+ dataspace = -1;
+ if ((status = H5Sclose (src_space)) < 0) TEST_ERROR;
+ src_space = -1;
+ if ((status = H5Fclose (vds_file)) < 0) TEST_ERROR;
+ vds_file = -1;
+
+ /*
+ * Now we begin the read section of this example.
+ */
+
+ /* Close the creation-phase fapl before creating a new one */
+ if ( (status = H5Pclose(fapl_id)) < 0 ) {
+ HDfprintf(stderr, "H5Pclose - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ if ( (fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0 ) {
+ HDfprintf(stderr, "H5Pcreate - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ if ( (status = H5Pset_fapl_mpio(fapl_id, group_comm,
+ MPI_INFO_NULL)) < 0 ) {
+ HDfprintf(stderr, "H5Pset_fapl_mpio - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ /*
+ * Open the file and virtual dataset.
+ */
+ if ((vds_file = H5Fopen (FILE, H5F_ACC_RDONLY, fapl_id))
+ < 0) {
+ HDfprintf(stderr, "H5Fopen - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ if ((vdsset = H5Dopen2 (vds_file, DATASET, H5P_DEFAULT))
+ < 0) {
+ HDfprintf(stderr, "H5Fopen2 - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ /*
+ * Get creation property list and mapping properties.
+ */
+ if ((dcpl = H5Dget_create_plist (vdsset)) < 0) {
+ HDfprintf(stderr, "H5Dget_create_plist - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ /*
+ * Get storage layout.
+ */
+ if ((layout = H5Pget_layout (dcpl)) < 0) {
+ HDfprintf(stderr, "H5Pget_layout - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ if (mpi_rank == 0) {
+ if (layout == H5D_VIRTUAL )
+ HDfprintf(stdout, " Dataset has a virtual layout\n");
+ else
+ HDfprintf(stdout, " Wrong layout found \n");
+
+ /*
+ * Find the number of mappings.
+ */
+ if ((status = H5Pget_virtual_count (dcpl, &num_map))
+ < 0) {
+ HDfprintf(stderr, "H5Pget_layout - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ HDfprintf(stdout, " Number of mappings is %lu\n",
+ (unsigned long)num_map);
+
+ /*
+ * Get mapping parameters for each mapping.
+ */
+ nerrs = 0;
+ for (i = 0; i < (int)num_map; i++) {
+ HDfprintf(stdout, " Mapping %d \n", i);
+ HDfprintf(stdout, " Selection in the virtual dataset ");
+ /* Get selection in the virtual dataset */
+ if ((vspace = H5Pget_virtual_vspace (dcpl, (size_t)i))
+ < 0) {
+ HDfprintf(stderr, "H5Pget_virtual_vspace - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ /* Make sure that this is a hyperslab selection
+ * and then print information.
+ */
+ if (H5Sget_select_type(vspace) == H5S_SEL_HYPERSLABS) {
+ if ((nblocks = H5Sget_select_hyper_nblocks (vspace)) < 0) {
+ HDfprintf(stderr, "H5Pget_virtual_vspace - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ if ((buf = (hsize_t *)HDmalloc(sizeof(hsize_t)*2*RANK2*nblocks))
+ == NULL) {
+ HDfprintf(stderr, "HDmalloc - returned a NULL\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ if ((H5Sget_select_hyper_blocklist (vspace, (hsize_t)0,
+ (hsize_t)nblocks, buf)) < 0) {
+ HDfprintf(stderr, "H5Sget_select_hyper_blocklist - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ /* Each block contributes 2*RANK2 coordinates:
+ * the start corner followed by the end corner. */
+ for (l=0; l<nblocks; l++) {
+ HDfprintf(stdout, "(");
+ for (k=0; k<RANK2-1; k++)
+ HDfprintf(stdout, "%d,", (int)buf[2*RANK2*l + k]);
+ HDfprintf(stdout, "%d ) - (", (int)buf[2*RANK2*l + k]);
+ for (k=0; k<RANK2-1; k++)
+ HDfprintf(stdout, "%d,", (int)buf[2*RANK2*l + RANK2 + k]);
+ HDfprintf(stdout, "%d)\n", (int)buf[2*RANK2*l + RANK2 + k]);
+ }
+ HDfree(buf);
+
+ /* We can also use the newer APIs to get the 'start',
+ * 'stride', 'count' and 'block' values */
+ if (H5Sis_regular_hyperslab(vspace)) {
+ if ((H5Sget_regular_hyperslab (vspace, start_out,
+ stride_out, count_out,
+ block_out)) < 0) {
+ HDfprintf(stderr,
+ "H5Sget_regular_hyperslab - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ printf(" start = [%llu, %llu] \n",
+ (unsigned long long)start_out[0],
+ (unsigned long long)start_out[1]);
+ printf(" stride = [%llu, %llu] \n",
+ (unsigned long long)stride_out[0],
+ (unsigned long long)stride_out[1]);
+ printf(" count = [%llu, %llu] \n",
+ (unsigned long long)count_out[0],
+ (unsigned long long)count_out[1]);
+ printf(" block = [%llu, %llu] \n",
+ (unsigned long long)block_out[0],
+ (unsigned long long)block_out[1]);
+ }
+ }
+ /* Get source file name. */
+ if ((len = H5Pget_virtual_filename (dcpl, (size_t)i, NULL, 0)) < 0) {
+ HDfprintf(stderr, "H5Pget_virtual_filename - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ if ((filename = (char *)HDmalloc((size_t)len*sizeof(char)+1)) == NULL) {
+ HDfprintf(stderr, "HDmalloc - returned NULL\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ if ((H5Pget_virtual_filename (dcpl, (size_t)i, filename, len+1)) < 0) {
+ HDfprintf(stderr, "H5Pget_virtual_filename - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ HDfprintf(stdout, " Source filename %s\n", filename);
+
+ /* Get source dataset name. */
+ if ((len = H5Pget_virtual_dsetname (dcpl, (size_t)i, NULL, 0)) < 0) {
+ HDfprintf(stderr, "H5Pget_virtual_dsetname - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ if ((dsetname = (char *)HDmalloc((size_t)len*sizeof(char)+1)) == NULL) {
+ HDfprintf(stderr, "HDmalloc - returned NULL\n");
+ HDexit(EXIT_FAILURE);
+ }
+ if ((H5Pget_virtual_dsetname (dcpl, (size_t)i, dsetname, len+1)) < 0) {
+ HDfprintf(stderr, "H5Pget_virtual_dsetname - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ HDfprintf(stdout," Source dataset name %s\n", dsetname);
+
+ /* Get selection in the source dataset. */
+ HDprintf(" Selection in the source dataset ");
+ if ((src_space = H5Pget_virtual_srcspace (dcpl, (size_t)i)) < 0) {
+ HDfprintf(stderr, "H5Pget_virtual_filename - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ /* Make sure it is an ALL selection and then print the coordinates. */
+ if(H5Sget_select_type(src_space) == H5S_SEL_ALL) {
+ HDfprintf(stdout, "(0) - (%d) \n", DIM0-1);
+ }
+ /* Release per-mapping resources before the next iteration */
+ H5Sclose(vspace); vspace = -1;
+ H5Sclose(src_space); src_space = -1;
+ HDfree(filename);
+ HDfree(dsetname);
+ }
+ }
+
+ HDmemset(rdata,0,sizeof(rdata));
+ elements = DIM0;
+ dims[0] = elements;
+ if ( (memspace = H5Screate_simple(1, dims, NULL)) < 0 ) {
+ HDfprintf(stderr, "H5Screate_simple - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ if ( (filespace = H5Dget_space(vdsset)) < 0 ) {
+ HDfprintf(stderr, "H5Dget_space - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ offset = (hsize_t)mpi_rank * elements;
+ if ( (H5Sselect_hyperslab(filespace, H5S_SELECT_SET,
+ &offset, NULL, &elements, NULL)) < 0 ) {
+ HDfprintf(stderr, "H5Sselect_hyperslab - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
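+ /* Note: the H5Dread below passes H5S_ALL for both the memory
+ * and file dataspaces, so every rank reads the entire VDS;
+ * the memspace/filespace hyperslab selection above is not
+ * applied to this particular read.
+ */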
+ if ((status = H5Dread (vdsset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ rdata[0])) < 0) {
+ HDfprintf(stderr, "H5Dread - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ if (mpi_rank == 0) {
+ for(i=0; i<VDSDIM0; i++) {
+ HDfprintf (stdout, " VDS Data: [%d][0-99]\n [", i);
+ for (j=0; j<VDSDIM1; j++) {
+ if ((j < 6) || (j > VDSDIM1 -3))
+ HDfprintf (stdout," %3d", rdata[i][j]);
+ else if (j == 6)
+ HDfprintf (stdout, " ... ");
+ }
+ HDfprintf (stdout, "]\n");
+ }
+ }
+ }
+
+ if (mpi_size > 1) {
+ if ((MPI_Allreduce(&nerrs, &group_status, 1,
+ MPI_INT, MPI_SUM, MPI_COMM_WORLD))
+ != MPI_SUCCESS)
+ group_status = 1;
+ }
+ if (mpi_rank == 0) {
+ if (group_status > 0)
+ HDfprintf(stderr, "There were internal errors encountered during this test\n");
+ else
+ HDfprintf(stdout, "No Errors!\n");
+
+ /* remove the files that are no longer needed */
+ for(i=0; i < NFILENAME; i++) {
+ HDremove(FILENAMES[i]);
+ }
+ }
+
+error:
+ /*
+ * Close and release resources.
+ */
+ if (group_comm != MPI_COMM_NULL) {
+ MPI_Comm_free(&group_comm);
+ }
+
+ if (dcpl != -1) H5Pclose (dcpl);
+ if (dxpl_id != -1) H5Pclose (dxpl_id);
+ if (vdsset != -1) H5Dclose (vdsset);
+ if (dataspace != -1) H5Sclose (dataspace);
+ if (memspace != -1) H5Sclose (memspace);
+ if (filespace != -1) H5Sclose (filespace);
+ if (src_space != -1) H5Sclose (src_space);
+ if (vspace != -1) H5Sclose (vspace);
+ if (fapl_id != -1) H5Pclose (fapl_id);
+ if (vds_file != -1) H5Fclose (vds_file);
+
+ /* close HDF5 library */
+ if (H5close() != SUCCEED) {
+ HDfprintf(stdout, "H5close() failed. (Ignoring)\n");
+ }
+
+ /* MPI_Finalize must be called AFTER H5close which may use MPI calls */
+
+ MPI_Finalize();
+
+ /* cannot just return (nerrs) because the exit code is limited to one byte */
+ return((nerrs > 0) ? EXIT_FAILURE : EXIT_SUCCESS );
+}
+
diff --git a/testpar/t_pvdsunl.c b/testpar/t_pvdsunl.c
new file mode 100644
index 0000000..fb68a70
--- /dev/null
+++ b/testpar/t_pvdsunl.c
@@ -0,0 +1,387 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/************************************************************
+
+ This test creates a virtual dataset whose first dimension is
+ unlimited and is mapped, via the "/A-%b" block pattern, onto
+ the source datasets A-0 through A-3 in srcfile.h5. The
+ source data are written in parallel, then the virtual
+ dataset is reopened read-only and its creation properties
+ and contents are checked.
+
+ ************************************************************/
+
+#include "h5test.h"
+#include "testpar.h"
+
+#define VFILE "vdsunl.h5"
+#define DATASET "VDSUNL"
+#define VDSDIM1 6
+#define VDSDIM0 4
+#define DIM0 6
+#define RANK1 1
+#define RANK2 2
+
+#define SRCFILE "srcfile.h5"
+const char *FILENAMES[] = { VFILE, SRCFILE, NULL };
+
+const char *SRC_DATASET[] = {
+ "A-0",
+ "A-1",
+ "A-2",
+ "A-3"
+};
+
+int
+main (int argc, char **argv)
+{
+ /* Handles */
+ hid_t vfile = -1,
+ space = -1,
+ vspace = -1,
+ dset = -1,
+ src_file = -1,
+ src_dset = -1,
+ src_space = -1,
+ fapl_id = -1,
+ dxpl_id = -1,
+ dcpl = -1;
+ herr_t status;
+ hsize_t vdsdims[2] = {VDSDIM0, VDSDIM1}, /* Virtual dataset dimensions */
+ vdsdims_max[2] = {H5S_UNLIMITED, VDSDIM1},
+ dims[1] = {DIM0}, /* Source datasets dimensions */
+ start[2], /* Hyperslab parameters */
+ stride[2],
+ count[2],
+ block[2];
+ hsize_t start_out[2],
+ stride_out[2],
+ count_out[2],
+ block_out[2];
+ int wdata[DIM0], /* Write buffer for source dataset */
+ rdata[VDSDIM0][VDSDIM1], /* Read buffer for virtual dataset */
+ i, j, k, l;
+ int fill_value = -1; /* Fill value for VDS */
+ H5D_layout_t layout; /* Storage layout */
+ size_t num_map; /* Number of mappings */
+ ssize_t len; /* Length of the string; also a return value */
+ char *filename;
+ char *dsetname;
+ hsize_t nblocks;
+ hsize_t *buf; /* Buffer to hold hyperslab coordinates */
+
+
+ int mpi_rank;
+ int mpi_size;
+ int workgroup;
+ MPI_Comm group_comm = MPI_COMM_WORLD;
+
+ if ( (MPI_Init(&argc, &argv)) != MPI_SUCCESS) {
+ HDfprintf(stderr, "FATAL: Unable to initialize MPI\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ if ( (MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank)) != MPI_SUCCESS) {
+ HDfprintf(stderr, "FATAL: MPI_Comm_rank returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ if ( (MPI_Comm_size(MPI_COMM_WORLD, &mpi_size)) != MPI_SUCCESS) {
+ HDfprintf(stderr, "FATAL: MPI_Comm_size returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ if (mpi_size > 4) {
+ workgroup = (mpi_rank < 4 ? 0 : 1);
+ if (MPI_Comm_split(MPI_COMM_WORLD, workgroup,
+ 0, &group_comm) != MPI_SUCCESS) {
+ HDfprintf(stderr, "FATAL: MPI_Comm_split returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ }
+
+ if (H5open() < 0) {
+ HDfprintf(stderr, "FATAL: H5open returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ if (mpi_rank > 3) goto error;
+
+ if ( mpi_rank == 0 ) {
+ HDfprintf(stdout, "========================================\n");
+ HDfprintf(stdout, "Parallel VDS functionality tests\n");
+ HDfprintf(stdout, " mpi_size = %d\n", mpi_size);
+ HDfprintf(stdout, "========================================\n");
+ }
+
+ if ( (fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0 ) {
+ HDfprintf(stderr, "H5Pcreate(1) - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ /* Please note that only the low order ranks (0-3)
+ * of MPI_COMM_WORLD are participating in the actual parallel
+ * testing. The rationale for this is that while we can
+ * arbitrarily scale the test, we need to pre-identify the
+ * set of files that will be used. This is required for
+ * automated testing since we need a mechanism to clean
+ * up the "debris" from failed tests, which currently
+ * gets accomplished by examining the executable to
+ * determine the *.h5 strings which are assumed to be
+ * generated test files and can thus be deleted.
+ */
+ if ( (status = H5Pset_fapl_mpio(fapl_id, group_comm,
+ MPI_INFO_NULL)) < 0 ) {
+ HDfprintf(stderr, "H5Pset_fapl_mpio - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ if ((src_file = H5Fcreate (SRCFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0) {
+ HDfprintf(stderr, "H5Fcreate(1) - failed to open the file\n");
+ HDexit(EXIT_FAILURE);
+ }
+ if ( (dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0 ) {
+ HDfprintf(stderr, "H5Pcreate(1) - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ if ( (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE)) < 0 ) {
+ HDfprintf(stderr, "H5Pset_dxpl_mpio(1) - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ if ( (src_space = H5Screate_simple (RANK1, dims, NULL)) < 0) {
+ HDfprintf(stderr, "H5Screate_simple(1) - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ for (i=0; i < VDSDIM0; i++) {
+ for (j = 0; j < DIM0; j++) wdata[j] = i;
+ if ( (src_dset = H5Dcreate2 (src_file, SRC_DATASET[i],
+ H5T_NATIVE_INT, src_space, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ HDfprintf(stderr, "H5Dcreate2 - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ if ( (status = H5Dwrite (src_dset,
+ H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, wdata)) < 0) {
+ HDfprintf(stderr, "H5Dwrite - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ if (H5Dclose (src_dset) < 0) {
+ HDfprintf(stderr, "H5Dclose - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ src_dset = -1;
+ }
+
+ if ( (vfile = H5Fcreate (VFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0) {
+ HDfprintf(stderr, "H5Fcreate(2) - failed to open the file\n");
+ HDexit(EXIT_FAILURE);
+ }
+ if ( (vspace = H5Screate_simple (RANK2, vdsdims, vdsdims_max)) < 0) {
+ HDfprintf(stderr, "H5Screate_simple(2) - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ if ( (dcpl = H5Pcreate (H5P_DATASET_CREATE)) < 0) {
+ HDfprintf(stderr, "H5Pcreate(2) - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ if ( (status = H5Pset_fill_value (dcpl, H5T_NATIVE_INT, &fill_value)) < 0) {
+ HDfprintf(stderr, "H5Pset_fill_value - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ start[0] = 0;
+ start[1] = 0;
+ count[0] = H5S_UNLIMITED;
+ count[1] = 1;
+ block[0] = 1;
+ block[1] = DIM0;
+
+ if ( (status = H5Sselect_hyperslab (vspace, H5S_SELECT_SET,
+ start, NULL, count, block)) < 0) {
+ HDfprintf(stderr, "H5Sselect_hyperslab - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ if ( (status = H5Pset_virtual (dcpl, vspace,
+ SRCFILE, "/A-%b", src_space)) < 0) {
+ HDfprintf(stderr, "H5Pset_virtual - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ if ( (dset = H5Dcreate2 (vfile, DATASET, H5T_NATIVE_INT,
+ vspace, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) {
+ HDfprintf(stderr, "H5Dcreate2 - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ if (vspace != -1)
+ status = H5Sclose (vspace);
+ vspace = -1;
+ if (dset != -1)
+ status = H5Dclose (dset);
+ dset = -1;
+ if (dcpl != -1)
+ status = H5Pclose (dcpl);
+ dcpl = -1;
+ if (vfile != -1)
+ status = H5Fclose (vfile);
+ vfile = -1;
+ if (src_space != -1)
+ status = H5Sclose (src_space);
+ src_space = -1;
+ if (src_file != -1)
+ status = H5Fclose (src_file);
+ src_file = -1;
+
+ /* We haven't closed the old fapl_id, so let's try reusing it here */
+ /* Re-open the virtual file and dataset. */
+
+ if ( (vfile = H5Fopen (VFILE, H5F_ACC_RDONLY, fapl_id)) < 0) {
+ HDfprintf(stderr, "H5Fopen - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ if ( (dset = H5Dopen2 (vfile, DATASET, H5P_DEFAULT)) < 0) {
+ HDfprintf(stderr, "H5Dopen2 - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ if ( (dcpl = H5Dget_create_plist (dset)) < 0) {
+ HDfprintf(stderr, "H5Dget_create_plist - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ if ( (layout = H5Pget_layout (dcpl)) < 0) {
+ HDfprintf(stderr, "H5Dget_layout - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ if ( (status = H5Pget_virtual_count (dcpl, &num_map)) < 0) {
+ HDfprintf(stderr, "H5Pget_virtual_count - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+
+ if (mpi_rank == 0) {
+ /* Print out some results... */
+ if (H5D_VIRTUAL == layout)
+ HDfprintf(stdout, " Dataset has a virtual layout \n");
+ else
+ HDfprintf(stderr, " Wrong layout found \n");
+
+ HDfprintf(stdout, " Number of mappings is %lu\n", (unsigned long)num_map);
+ for (i = 0; i < (int)num_map; i++) {
+ HDfprintf(stdout, " Mapping %d \n", i);
+ HDfprintf(stdout, " Selection in the virtual dataset ");
+ if ( (vspace = H5Pget_virtual_vspace (dcpl, (size_t)i)) < 0) {
+ HDfprintf(stderr, "H5Pget_virtual_vspace - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ if (H5Sget_select_type(vspace) == H5S_SEL_HYPERSLABS) {
+ if (H5Sis_regular_hyperslab(vspace)) {
+ status = H5Sget_regular_hyperslab (vspace, start_out, stride_out, count_out, block_out);
+ HDfprintf(stdout,"\n start = [%llu, %llu] \n", (unsigned long long)start_out[0], (unsigned long long)start_out[1]);
+ HDfprintf(stdout," stride = [%llu, %llu] \n", (unsigned long long)stride_out[0], (unsigned long long)stride_out[1]);
+ HDfprintf(stdout," count = [%lli, %llu] \n", (signed long long)count_out[0], (unsigned long long)count_out[1]);
+ HDfprintf(stdout," block = [%llu, %llu] \n", (unsigned long long)block_out[0], (unsigned long long)block_out[1]);
+ }
+
+ }
+ if ((len = H5Pget_virtual_filename (dcpl, (size_t)i, NULL, 0)) < 0) {
+ HDfprintf(stderr, "H5Pget_virtual_filename - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ filename = (char *)HDmalloc((size_t)len*sizeof(char)+1);
+ if (filename == NULL) {
+ HDfprintf(stderr, "HDmalloc(1) - returned NULL\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ H5Pget_virtual_filename (dcpl, (size_t)i, filename, len+1);
+ HDfprintf(stdout," Source filename %s\n", filename);
+ len = H5Pget_virtual_dsetname (dcpl, (size_t)i, NULL, 0);
+ dsetname = (char *)HDmalloc((size_t)len*sizeof(char)+1);
+ if (dsetname == NULL) {
+ HDfprintf(stderr, "HDmalloc(2) - returned NULL\n");
+ HDexit(EXIT_FAILURE);
+ }
+ H5Pget_virtual_dsetname (dcpl, (size_t)i, dsetname, len+1);
+ HDfprintf(stdout," Source dataset name %s\n", dsetname);
+
+ HDfprintf(stdout," Selection in the source dataset ");
+ if ((src_space = H5Pget_virtual_srcspace (dcpl, (size_t)i)) < 0) {
+ HDfprintf(stdout,"H5Pget_virtual_srcspace - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ if(H5Sget_select_type(src_space) == H5S_SEL_ALL) {
+ HDfprintf(stdout, "(0) - (%d) \n", DIM0-1);
+ }
+
+ if (vspace != -1)
+ status = H5Sclose(vspace);
+ vspace = -1;
+ if (src_space != -1)
+ status = H5Sclose(src_space);
+ src_space = -1;
+ if (filename != NULL)
+ HDfree(filename);
+ if (dsetname != NULL)
+ HDfree(dsetname);
+ }
+ }
+ if ( (status = H5Dread (dset, H5T_NATIVE_INT, H5S_ALL,
+ H5S_ALL, H5P_DEFAULT, rdata[0])) < 0) {
+ HDfprintf(stderr, "H5Dread - returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+ if (mpi_rank == 0) {
+ printf (" VDS Data:\n");
+ for (i=0; i<VDSDIM0; i++) {
+ printf (" [");
+ for (j=0; j<VDSDIM1; j++)
+ printf (" %3d", rdata[i][j]);
+ printf (" ]\n");
+ }
+ }
+ if (dcpl != -1) H5Pclose (dcpl);
+ dcpl = -1;
+ if (dset != -1) H5Dclose (dset);
+ dset = -1;
+ if (vfile != -1) H5Fclose (vfile);
+ vfile = -1;
+
+ error:
+ /*
+ * Close and release resources.
+ */
+ if (group_comm != MPI_COMM_WORLD) {
+ MPI_Comm_free(&group_comm);
+ }
+
+ if (dcpl != -1) H5Pclose (dcpl);
+ if (dset != -1) H5Dclose (dset);
+ if (src_dset != -1) H5Dclose (src_dset);
+ if (vspace != -1) H5Sclose (vspace);
+ if (src_space != -1) H5Sclose (src_space);
+ if (fapl_id != -1) H5Pclose (fapl_id);
+ if (dxpl_id != -1) H5Pclose (dxpl_id);
+ if (vfile != -1) H5Fclose (vfile);
+ if (src_file != -1) H5Fclose (src_file);
+
+ MPI_Barrier(MPI_COMM_WORLD); /* all ranks done with the files */
+ if (mpi_rank == 0)
+ for(i=0; FILENAMES[i] != NULL; i++) {
+ HDremove(FILENAMES[i]);
+ }
+
+ /* close HDF5 library */
+ if (H5close() != SUCCEED) {
+ HDfprintf(stdout, "H5close() failed. (Ignoring)\n");
+ }
+
+ MPI_Finalize();
+
+ return 0;
+}