author    Quincey Koziol <koziol@hdfgroup.org>  2012-04-06 02:57:06 (GMT)
committer Quincey Koziol <koziol@hdfgroup.org>  2012-04-06 02:57:06 (GMT)
commit    d5a62239587f7cc5de301fa5c6b0919807689818 (patch)
tree      40a36d60dd8ac2d10a45886869cc53d3fdc9181e /testpar
parent    ebf3d99b955c705217227fb7f8ae4405e941399a (diff)
[svn-r22254] Description:
Bring r22085:22251 from trunk to revise_chunks branch.  Also tackle some testing issues in the test/objcopy.c test and clean up some warnings.

Tested on:
    FreeBSD/32 8.2 (loyalty) w/gcc4.6, w/C++ & FORTRAN, in debug mode
    FreeBSD/64 8.2 (freedom) w/gcc4.6, w/C++ & FORTRAN, in debug mode
    Linux/32 2.6 (jam) w/PGI compilers, w/default API=1.8.x, w/C++ & FORTRAN, w/threadsafe, in debug mode
    Linux/64-amd64 2.6 (koala) w/Intel compilers, w/default API=1.6.x, w/C++ & FORTRAN, in production mode
    Solaris/32 2.10 (linew) w/deprecated symbols disabled, w/C++ & FORTRAN, w/szip filter, w/threadsafe, in production mode
    Linux/PPC 2.6 (ostrich) w/C++ & FORTRAN, w/threadsafe, in debug mode
    Linux/64-ia64 2.6 (ember) w/Intel compilers, w/parallel, C++ & FORTRAN, in production mode
    Mac OS X/32 10.7.3 (amazon) in debug mode
    Mac OS X/32 10.7.3 (amazon) w/C++ & FORTRAN, w/threadsafe, in production mode
    Mac OS X/32 10.7.3 (amazon) w/parallel, in debug mode
Diffstat (limited to 'testpar')
-rw-r--r--                testpar/CMakeLists.txt     1
-rw-r--r-- [-rwxr-xr-x]   testpar/COPYING            0
-rw-r--r--                testpar/Makefile.am        4
-rw-r--r--                testpar/Makefile.in       12
-rw-r--r--                testpar/t_dset.c         325
-rw-r--r--                testpar/t_file_image.c   395
-rw-r--r--                testpar/testphdf5.c       19
-rw-r--r--                testpar/testphdf5.h        4
8 files changed, 753 insertions, 7 deletions
diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt
index d60aea5..f42af9f 100644
--- a/testpar/CMakeLists.txt
+++ b/testpar/CMakeLists.txt
@@ -11,6 +11,7 @@ SET (testphdf5_SRCS
${HDF5_TEST_PAR_SOURCE_DIR}/testphdf5.c
${HDF5_TEST_PAR_SOURCE_DIR}/t_dset.c
${HDF5_TEST_PAR_SOURCE_DIR}/t_file.c
+ ${HDF5_TEST_PAR_SOURCE_DIR}/t_file_image.c
${HDF5_TEST_PAR_SOURCE_DIR}/t_mdset.c
${HDF5_TEST_PAR_SOURCE_DIR}/t_ph5basic.c
${HDF5_TEST_PAR_SOURCE_DIR}/t_coll_chunk.c
diff --git a/testpar/COPYING b/testpar/COPYING
index 6903daf..6903daf 100755..100644
--- a/testpar/COPYING
+++ b/testpar/COPYING
diff --git a/testpar/Makefile.am b/testpar/Makefile.am
index 5a7a3f3..b2fb97c 100644
--- a/testpar/Makefile.am
+++ b/testpar/Makefile.am
@@ -31,8 +31,8 @@ TEST_SCRIPT_PARA=testph5.sh
check_PROGRAMS = $(TEST_PROG_PARA)
check_SCRIPTS= $(TEST_SCRIPT)
-testphdf5_SOURCES=testphdf5.c t_dset.c t_file.c t_mdset.c t_ph5basic.c \
- t_coll_chunk.c t_span_tree.c t_chunk_alloc.c t_filter_read.c
+testphdf5_SOURCES=testphdf5.c t_dset.c t_file.c t_file_image.c t_mdset.c \
+ t_ph5basic.c t_coll_chunk.c t_span_tree.c t_chunk_alloc.c t_filter_read.c
# The tests all depend on the hdf5 library and the test library
LDADD = $(LIBH5TEST) $(LIBHDF5)
diff --git a/testpar/Makefile.in b/testpar/Makefile.in
index ae0c7d0..a9a4135 100644
--- a/testpar/Makefile.in
+++ b/testpar/Makefile.in
@@ -95,9 +95,10 @@ t_shapesame_OBJECTS = t_shapesame.$(OBJEXT)
t_shapesame_LDADD = $(LDADD)
t_shapesame_DEPENDENCIES = $(LIBH5TEST) $(LIBHDF5)
am_testphdf5_OBJECTS = testphdf5.$(OBJEXT) t_dset.$(OBJEXT) \
- t_file.$(OBJEXT) t_mdset.$(OBJEXT) t_ph5basic.$(OBJEXT) \
- t_coll_chunk.$(OBJEXT) t_span_tree.$(OBJEXT) \
- t_chunk_alloc.$(OBJEXT) t_filter_read.$(OBJEXT)
+ t_file.$(OBJEXT) t_file_image.$(OBJEXT) t_mdset.$(OBJEXT) \
+ t_ph5basic.$(OBJEXT) t_coll_chunk.$(OBJEXT) \
+ t_span_tree.$(OBJEXT) t_chunk_alloc.$(OBJEXT) \
+ t_filter_read.$(OBJEXT)
testphdf5_OBJECTS = $(am_testphdf5_OBJECTS)
testphdf5_LDADD = $(LDADD)
testphdf5_DEPENDENCIES = $(LIBH5TEST) $(LIBHDF5)
@@ -431,8 +432,8 @@ INCLUDES = -I$(top_srcdir)/src -I$(top_srcdir)/test
TEST_PROG_PARA = t_mpi t_posix_compliant testphdf5 t_cache t_pflush1 t_pflush2 t_shapesame
TEST_SCRIPT_PARA = testph5.sh
check_SCRIPTS = $(TEST_SCRIPT)
-testphdf5_SOURCES = testphdf5.c t_dset.c t_file.c t_mdset.c t_ph5basic.c \
- t_coll_chunk.c t_span_tree.c t_chunk_alloc.c t_filter_read.c
+testphdf5_SOURCES = testphdf5.c t_dset.c t_file.c t_file_image.c t_mdset.c \
+ t_ph5basic.c t_coll_chunk.c t_span_tree.c t_chunk_alloc.c t_filter_read.c
# The tests all depend on the hdf5 library and the test library
@@ -532,6 +533,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_coll_chunk.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_dset.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_file.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_file_image.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_filter_read.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_mdset.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_mpi.Po@am__quote@
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index 699f86d..1166ce1 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -3065,3 +3065,328 @@ actual_io_mode_tests(void) {
test_actual_io_mode(TEST_ACTUAL_IO_RESET);
return;
}
+
+/*
+ * Test consistency semantics of atomic mode
+ */
+
+/*
+ * Example of using the parallel HDF5 library to create a dataset,
+ * where process 0 writes and the other processes read at the same
+ * time. If atomic mode is set correctly, the other processes should
+ * read either the old values in the dataset or the new ones, never a mix.
+ */
+
+void
+dataset_atomicity(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t dataset1; /* Dataset IDs */
+ hbool_t use_gpfs = FALSE; /* Use GPFS hints */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ int *write_buf = NULL; /* data buffer */
+ int *read_buf = NULL; /* data buffer */
+ int buf_size;
+ hid_t dataset2;
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* Memory dataspace ID */
+ hsize_t start[RANK];
+ hsize_t stride[RANK];
+ hsize_t count[RANK];
+ hsize_t block[RANK];
+ const char *filename;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+ int i, j, k;
+ hbool_t atomicity = FALSE;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ dim0 = 64; dim1 = 32;
+ filename = GetTestParameters();
+ if(VERBOSE_MED)
+ printf("Independent write test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+
+ buf_size = dim0 * dim1;
+ /* allocate memory for data buffer */
+ write_buf = (int *)calloc(buf_size, sizeof(int));
+ VRFY((write_buf != NULL), "write_buf malloc succeeded");
+ /* allocate memory for data buffer */
+ read_buf = (int *)calloc(buf_size, sizeof(int));
+ VRFY((read_buf != NULL), "read_buf malloc succeeded");
+
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type, use_gpfs);
+ VRFY((acc_tpl >= 0), "");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* setup dimensionality object */
+ dims[0] = dim0;
+ dims[1] = dim1;
+ sid = H5Screate_simple (RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* create datasets */
+ dataset1 = H5Dcreate2(fid, DATASETNAME5, H5T_NATIVE_INT, sid,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
+
+ dataset2 = H5Dcreate2(fid, DATASETNAME6, H5T_NATIVE_INT, sid,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
+
+ /* initialize datasets to 0s */
+ if (0 == mpi_rank) {
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, write_buf);
+ VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, write_buf);
+ VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
+ }
+
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Sclose(sid);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+ MPI_Barrier (comm);
+
+ /* make sure setting atomicity fails on a serial file ID */
+ /* open the file collectively */
+ fid=H5Fopen(filename,H5F_ACC_RDWR,H5P_DEFAULT);
+ VRFY((fid >= 0), "H5Fopen succeeed");
+
+ /* should fail */
+ ret = H5Fset_mpi_atomicity (fid , TRUE);
+ VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed");
+
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+ MPI_Barrier (comm);
+
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type, use_gpfs);
+ VRFY((acc_tpl >= 0), "");
+
+ /* open the file collectively */
+ fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl);
+ VRFY((fid >= 0), "H5Fopen succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ ret = H5Fset_mpi_atomicity (fid , TRUE);
+ VRFY((ret >= 0), "H5Fset_mpi_atomicity succeeded");
+
+ /* open dataset1 (contiguous case) */
+ dataset1 = H5Dopen2(fid, DATASETNAME5, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dopen2 succeeded");
+
+ if (0 == mpi_rank) {
+ for (i=0 ; i<buf_size ; i++) {
+ write_buf[i] = 5;
+ }
+ }
+ else {
+ for (i=0 ; i<buf_size ; i++) {
+ read_buf[i] = 8;
+ }
+ }
+
+ /* check that the atomicity flag is set */
+ ret = H5Fget_mpi_atomicity (fid , &atomicity);
+ VRFY((ret >= 0), "atomcity get failed");
+ VRFY((atomicity == TRUE), "atomcity set failed");
+
+ MPI_Barrier (comm);
+
+ /* Process 0 writes contiguously to the entire dataset */
+ if (0 == mpi_rank) {
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf);
+ VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+ }
+ /* The other processes read the entire dataset */
+ else {
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf);
+ VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
+ }
+
+ if(VERBOSE_MED) {
+ i=0;j=0;k=0;
+ for (i=0 ; i<dim0 ; i++) {
+ printf ("\n");
+ for (j=0 ; j<dim1 ; j++)
+ printf ("%d ", read_buf[k++]);
+ }
+ }
+
+ /* The processes that read the dataset must read all values as either
+ 0 (the read happened before process 0 wrote to dataset1) or 5
+ (the read happened after process 0 wrote to dataset1) */
+ if (0 != mpi_rank) {
+ int compare = read_buf[0];
+
+ VRFY((compare == 0 || compare == 5),
+ "Atomicity Test Failed Process %d: Value read should be 0 or 5\n");
+ for (i=1; i<buf_size; i++) {
+ if (read_buf[i] != compare) {
+ printf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, i, read_buf[i], compare);
+ nerrors ++;
+ }
+ }
+ }
+
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5D close succeeded");
+
+ /* release data buffers */
+ if(write_buf) free(write_buf);
+ if(read_buf) free(read_buf);
+
+ /* open dataset2 (non-contiguous case) */
+ dataset2 = H5Dopen2(fid, DATASETNAME6, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dopen2 succeeded");
+
+ /* allocate memory for data buffer */
+ write_buf = (int *)calloc(buf_size, sizeof(int));
+ VRFY((write_buf != NULL), "write_buf malloc succeeded");
+ /* allocate memory for data buffer */
+ read_buf = (int *)calloc(buf_size, sizeof(int));
+ VRFY((read_buf != NULL), "read_buf malloc succeeded");
+
+ for (i=0 ; i<buf_size ; i++) {
+ write_buf[i] = 5;
+ }
+ for (i=0 ; i<buf_size ; i++) {
+ read_buf[i] = 8;
+ }
+
+ atomicity = FALSE;
+ /* check that the atomicity flag is set */
+ ret = H5Fget_mpi_atomicity (fid , &atomicity);
+ VRFY((ret >= 0), "atomcity get failed");
+ VRFY((atomicity == TRUE), "atomcity set failed");
+
+
+ block[0] = dim0/mpi_size - 1;
+ block[1] = dim1/mpi_size - 1;
+ stride[0] = block[0] + 1;
+ stride[1] = block[1] + 1;
+ count[0] = mpi_size;
+ count[1] = mpi_size;
+ start[0] = 0;
+ start[1] = 0;
+
+ /* create a file dataspace */
+ file_dataspace = H5Dget_space (dataset2);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace */
+ mem_dataspace = H5Screate_simple (RANK, dims, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ MPI_Barrier (comm);
+
+ /* Process 0 writes to the dataset */
+ if (0 == mpi_rank) {
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ H5P_DEFAULT, write_buf);
+ VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
+ }
+ /* All processes wait for the write to finish. This works because
+ atomicity is set to true */
+ MPI_Barrier (comm);
+ /* The other processes read the entire dataset */
+ if (0 != mpi_rank) {
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ H5P_DEFAULT, read_buf);
+ VRFY((ret >= 0), "H5Dread dataset2 succeeded");
+ }
+
+ if(VERBOSE_MED) {
+ if (mpi_rank == 1) {
+ i=0;j=0;k=0;
+ for (i=0 ; i<dim0 ; i++) {
+ printf ("\n");
+ for (j=0 ; j<dim1 ; j++)
+ printf ("%d ", read_buf[k++]);
+ }
+ printf ("\n");
+ }
+ }
+
+ /* The processes that read the dataset must read all selected values as 5,
+ since the barrier guarantees the read happened after process 0 wrote to dataset2 */
+ if (0 != mpi_rank) {
+ int compare;
+ i=0;j=0;k=0;
+
+ compare = 5;
+
+ for (i=0 ; i<dim0 ; i++) {
+ if (i >= mpi_rank*(block[0]+1)) {
+ break;
+ }
+ if ((i+1)%(block[0]+1)==0) {
+ k += dim1;
+ continue;
+ }
+ for (j=0 ; j<dim1 ; j++) {
+ if (j >= mpi_rank*(block[1]+1)) {
+ k += dim1 - mpi_rank*(block[1]+1);
+ break;
+ }
+ if ((j+1)%(block[1]+1)==0) {
+ k++;
+ continue;
+ }
+ else if (compare != read_buf[k]) {
+ printf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, k, read_buf[k], compare);
+ nerrors++;
+ }
+ k ++;
+ }
+ }
+ }
+
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Sclose(file_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Sclose(mem_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+
+ /* release data buffers */
+ if(write_buf) free(write_buf);
+ if(read_buf) free(read_buf);
+
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+}
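
For reference, the MPI atomicity calls exercised by dataset_atomicity() above can be used outside the test harness roughly as follows. This is a minimal sketch, assuming a parallel (MPI-IO enabled) HDF5 build; the file name, the printf reporting, and the bare error handling are illustrative only and are not taken from the test.

/* Minimal sketch: enable MPI atomic mode on a parallel HDF5 file.
 * Assumes HDF5 built with --enable-parallel; names below are illustrative. */
#include <stdio.h>
#include "hdf5.h"
#include "mpi.h"

int
main(int argc, char **argv)
{
    hid_t   fapl, fid;
    hbool_t flag = 0;
    int     mpi_rank;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    /* The file must be opened through the MPI-IO driver; on a serially
     * opened file H5Fset_mpi_atomicity() fails, as the test above verifies. */
    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);

    fid = H5Fcreate("atomicity_example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

    /* Turn atomic mode on; concurrent readers now see either the old
     * or the new dataset contents, never a mixture. */
    if(H5Fset_mpi_atomicity(fid, 1) < 0)
        fprintf(stderr, "rank %d: could not enable atomic mode\n", mpi_rank);

    /* Read the flag back to confirm the setting took effect. */
    H5Fget_mpi_atomicity(fid, &flag);
    if(mpi_rank == 0)
        printf("atomic mode is %s\n", flag ? "ON" : "OFF");

    /* ... collective dataset creation, writes, and reads go here ... */

    H5Pclose(fapl);
    H5Fclose(fid);
    MPI_Finalize();
    return 0;
}
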
diff --git a/testpar/t_file_image.c b/testpar/t_file_image.c
new file mode 100644
index 0000000..544ba32
--- /dev/null
+++ b/testpar/t_file_image.c
@@ -0,0 +1,395 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Parallel tests for file image operations
+ */
+
+#include "testphdf5.h"
+
+/* file_image_daisy_chain_test
+ *
+ * Process zero:
+ *
+ * 1) Creates a core file with an integer vector data set of
+ * length n (= mpi_size),
+ *
+ * 2) Initializes the vector to zero in location 0, and to -1
+ * everywhere else.
+ *
+ * 3) Flushes the core file, and gets an image of it. Closes
+ * the core file.
+ *
+ * 4) Sends the image to process 1.
+ *
+ * 5) Awaits receipt of a file image from process n-1.
+ *
+ * 6) opens the image received from process n-1, verifies that
+ * it contains a vector of length equal to mpi_size, and
+ * that the vector contains (0, 1, 2, ... n-1)
+ *
+ * 7) closes the core file and exits.
+ *
+ * Process i (0 < i < n)
+ *
+ * 1) Await receipt of file image from process (i - 1).
+ *
+ * 2) Open the image with the core file driver, verify that it
+ * contains a vector v of length n, and that v[j] == j for
+ * 0 <= j < i, and that v[j] == -1 for i <= j < n.
+ *
+ * 3) Set v[i] = i in the core file.
+ *
+ * 4) Flush the core file and send it to process (i + 1) % n.
+ *
+ * 5) close the core file and exit.
+ *
+ * Test fails on a hang (if an image is not received), or on invalid data.
+ *
+ * JRM -- 11/28/11
+ */
+void
+file_image_daisy_chain_test(void)
+{
+ char file_name[1024] = "\0";
+ int mpi_size, mpi_rank;
+ int mpi_result;
+ int i;
+ int space_ndims;
+ MPI_Status rcvstat;
+ int * vector_ptr = NULL;
+ hid_t fapl_id = -1;
+ hid_t file_id; /* file IDs */
+ hid_t dset_id = -1;
+ hid_t dset_type_id = -1;
+ hid_t space_id = -1;
+ herr_t err;
+ hsize_t dims[1];
+ void * image_ptr = NULL;
+ ssize_t bytes_read;
+ ssize_t image_len;
+ hbool_t vector_ok = TRUE;
+ htri_t tri_result;
+
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* setup file name */
+ HDsnprintf(file_name, 1024, "file_image_daisy_chain_test_%05d.h5",
+ (int)mpi_rank);
+
+ if(mpi_rank == 0) {
+
+ /* 1) Creates a core file with an integer vector data set
+ * of length mpi_size,
+ */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id >= 0), "creating fapl");
+
+ err = H5Pset_fapl_core(fapl_id, (size_t)(64 *1024), FALSE);
+ VRFY((err >= 0), "setting core file driver in fapl.");
+
+ file_id = H5Fcreate(file_name, 0, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "created core file");
+
+ dims[0] = (hsize_t)mpi_size;
+ space_id = H5Screate_simple(1, dims, dims);
+ VRFY((space_id >= 0), "created data space");
+
+ dset_id = H5Dcreate2(file_id, "v", H5T_NATIVE_INT, space_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "created data set");
+
+
+ /* 2) Initialize the vector to zero in location 0, and
+ * to -1 everywhere else.
+ */
+
+ vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
+ VRFY((vector_ptr != NULL), "allocated in memory representation of vector");
+
+ vector_ptr[0] = 0;
+ for(i = 1; i < mpi_size; i++)
+ vector_ptr[i] = -1;
+
+ err = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, (void *)vector_ptr);
+ VRFY((err >= 0), "wrote initial data to vector.");
+
+ HDfree(vector_ptr);
+ vector_ptr = NULL;
+
+
+ /* 3) Flush the core file, and get an image of it. Close
+ * the core file.
+ */
+ err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((err >= 0), "flushed core file.");
+
+ image_len = H5Fget_file_image(file_id, NULL, (size_t)0);
+ VRFY((image_len > 0), "got image file size");
+
+ image_ptr = (void *)HDmalloc((size_t)image_len);
+ VRFY(image_ptr != NULL, "allocated file image buffer.");
+
+ bytes_read = H5Fget_file_image(file_id, image_ptr, (size_t)image_len);
+ VRFY(bytes_read == image_len, "wrote file into image buffer");
+
+ err = H5Sclose(space_id);
+ VRFY((err >= 0), "closed data space.");
+
+ err = H5Dclose(dset_id);
+ VRFY((err >= 0), "closed data set.");
+
+ err = H5Fclose(file_id);
+ VRFY((err >= 0), "closed core file(1).");
+
+ err = H5Pclose(fapl_id);
+ VRFY((err >= 0), "closed fapl(1).");
+
+
+ /* 4) Send the image to process 1. */
+
+ mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t),
+ MPI_BYTE, 1, 0, MPI_COMM_WORLD);
+ VRFY((mpi_result == MPI_SUCCESS), "sent image size to process 1");
+
+ mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len,
+ MPI_BYTE, 1, 0, MPI_COMM_WORLD);
+ VRFY((mpi_result == MPI_SUCCESS), "sent image to process 1");
+
+ HDfree(image_ptr);
+ image_ptr = NULL;
+ image_len = 0;
+
+
+ /* 5) Await receipt of a file image from process n-1. */
+
+ mpi_result = MPI_Recv((void *)(&image_len), (int)sizeof(ssize_t),
+ MPI_BYTE, mpi_size - 1, 0, MPI_COMM_WORLD,
+ &rcvstat);
+ VRFY((mpi_result == MPI_SUCCESS), "received image len from process n-1");
+
+ image_ptr = (void *)HDmalloc((size_t)image_len);
+ VRFY(image_ptr != NULL, "allocated file image receive buffer.");
+
+ mpi_result = MPI_Recv((void *)image_ptr, (int)image_len,
+ MPI_BYTE, mpi_size - 1, 0, MPI_COMM_WORLD,
+ &rcvstat);
+ VRFY((mpi_result == MPI_SUCCESS), \
+ "received file image from process n-1");
+
+ /* 6) open the image received from process n-1, verify that
+ * it contains a vector of length equal to mpi_size, and
+ * that the vector contains (0, 1, 2, ... n-1).
+ */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id >= 0), "creating fapl");
+
+ err = H5Pset_fapl_core(fapl_id, (size_t)(64 *1024), FALSE);
+ VRFY((err >= 0), "setting core file driver in fapl.");
+
+ err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len);
+ VRFY((err >= 0), "set file image in fapl.");
+
+ file_id = H5Fopen(file_name, H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "opened received file image file");
+
+ dset_id = H5Dopen2(file_id, "v", H5P_DEFAULT);
+ VRFY((dset_id >= 0), "opened data set");
+
+ dset_type_id = H5Dget_type(dset_id);
+ VRFY((dset_type_id >= 0), "obtained data set type");
+
+ tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT);
+ VRFY((tri_result == TRUE), "verified data set type");
+
+ space_id = H5Dget_space(dset_id);
+ VRFY((space_id >= 0), "opened data space");
+
+ space_ndims = H5Sget_simple_extent_ndims(space_id);
+ VRFY((space_ndims == 1), "verified data space num dims(1)");
+
+ space_ndims = H5Sget_simple_extent_dims(space_id, dims, NULL);
+ VRFY((space_ndims == 1), "verified data space num dims(2)");
+ VRFY((dims[0] == (hsize_t)mpi_size), "verified data space dims");
+
+ vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
+ VRFY((vector_ptr != NULL), "allocated in memory rep of vector");
+
+ err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, (void *)vector_ptr);
+ VRFY((err >= 0), "read received vector.");
+
+ vector_ok = TRUE;
+ for(i = 0; i < mpi_size; i++)
+ if(vector_ptr[i] != i)
+ vector_ok = FALSE;
+ VRFY((vector_ok), "verified received vector.");
+
+ /* 7) closes the core file and exit. */
+
+ err = H5Sclose(space_id);
+ VRFY((err >= 0), "closed data space.");
+
+ err = H5Dclose(dset_id);
+ VRFY((err >= 0), "closed data set.");
+
+ err = H5Fclose(file_id);
+ VRFY((err >= 0), "closed core file(1).");
+
+ err = H5Pclose(fapl_id);
+ VRFY((err >= 0), "closed fapl(1).");
+
+ HDfree(image_ptr);
+ image_ptr = NULL;
+ image_len = 0;
+ } else {
+ /* 1) Await receipt of file image from process (i - 1). */
+
+ mpi_result = MPI_Recv((void *)(&image_len), (int)sizeof(ssize_t),
+ MPI_BYTE, mpi_rank - 1, 0, MPI_COMM_WORLD,
+ &rcvstat);
+ VRFY((mpi_result == MPI_SUCCESS), \
+ "received image size from process mpi_rank-1");
+
+ image_ptr = (void *)HDmalloc((size_t)image_len);
+ VRFY(image_ptr != NULL, "allocated file image receive buffer.");
+
+ mpi_result = MPI_Recv((void *)image_ptr, (int)image_len,
+ MPI_BYTE, mpi_rank - 1, 0, MPI_COMM_WORLD,
+ &rcvstat);
+ VRFY((mpi_result == MPI_SUCCESS), \
+ "received file image from process mpi_rank-1");
+
+ /* 2) Open the image with the core file driver, verify that it
+ * contains a vector v of length n, and that v[j] == j for
+ * 0 <= j < i, and that v[j] == -1 for i <= j < n
+ */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id >= 0), "creating fapl");
+
+ err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE);
+ VRFY((err >= 0), "setting core file driver in fapl.");
+
+ err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len);
+ VRFY((err >= 0), "set file image in fapl.");
+
+ file_id = H5Fopen(file_name, H5F_ACC_RDWR, fapl_id);
+ H5Eprint2(H5P_DEFAULT, stderr);
+ VRFY((file_id >= 0), "opened received file image file");
+
+ dset_id = H5Dopen2(file_id, "v", H5P_DEFAULT);
+ VRFY((dset_id >= 0), "opened data set");
+
+ dset_type_id = H5Dget_type(dset_id);
+ VRFY((dset_type_id >= 0), "obtained data set type");
+
+ tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT);
+ VRFY((tri_result == TRUE), "verified data set type");
+
+ space_id = H5Dget_space(dset_id);
+ VRFY((space_id >= 0), "opened data space");
+
+ space_ndims = H5Sget_simple_extent_ndims(space_id);
+ VRFY((space_ndims == 1), "verified data space num dims(1)");
+
+ space_ndims = H5Sget_simple_extent_dims(space_id, dims, NULL);
+ VRFY((space_ndims == 1), "verified data space num dims(2)");
+ VRFY((dims[0] == (hsize_t)mpi_size), "verified data space dims");
+
+ vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
+ VRFY((vector_ptr != NULL), "allocated in memory rep of vector");
+
+ err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, (void *)vector_ptr);
+ VRFY((err >= 0), "read received vector.");
+
+ vector_ok = TRUE;
+ for(i = 0; i < mpi_size; i++){
+ if(i < mpi_rank) {
+ if(vector_ptr[i] != i)
+ vector_ok = FALSE;
+ } else {
+ if(vector_ptr[i] != -1)
+ vector_ok = FALSE;
+ }
+ }
+ VRFY((vector_ok), "verified received vector.");
+
+
+ /* 3) Set v[i] = i in the core file. */
+
+ vector_ptr[mpi_rank] = mpi_rank;
+
+ err = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, (void *)vector_ptr);
+ VRFY((err >= 0), "wrote modified data to vector.");
+
+ HDfree(vector_ptr);
+ vector_ptr = NULL;
+
+
+ /* 4) Flush the core file and send it to process (mpi_rank + 1) % n. */
+
+ err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((err >= 0), "flushed core file.");
+
+ image_len = H5Fget_file_image(file_id, NULL, (size_t)0);
+ VRFY((image_len > 0), "got (possibly modified) image file len");
+
+ image_ptr = (void *)HDrealloc((void *)image_ptr, (size_t)image_len);
+ VRFY(image_ptr != NULL, "re-allocated file image buffer.");
+
+ bytes_read = H5Fget_file_image(file_id, image_ptr, (size_t)image_len);
+ VRFY(bytes_read == image_len, "wrote file into image buffer");
+
+ mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t),
+ MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
+ MPI_COMM_WORLD);
+ VRFY((mpi_result == MPI_SUCCESS), \
+ "sent image size to process (mpi_rank + 1) % mpi_size");
+
+ mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len,
+ MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
+ MPI_COMM_WORLD);
+ VRFY((mpi_result == MPI_SUCCESS), \
+ "sent image to process (mpi_rank + 1) % mpi_size");
+
+ HDfree(image_ptr);
+ image_ptr = NULL;
+ image_len = 0;
+
+ /* 5) close the core file and exit. */
+
+ err = H5Sclose(space_id);
+ VRFY((err >= 0), "closed data space.");
+
+ err = H5Dclose(dset_id);
+ VRFY((err >= 0), "closed data set.");
+
+ err = H5Fclose(file_id);
+ VRFY((err >= 0), "closed core file(1).");
+
+ err = H5Pclose(fapl_id);
+ VRFY((err >= 0), "closed fapl(1).");
+ }
+
+ return;
+
+} /* file_image_daisy_chain_test() */
+
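
The daisy-chain test above is built on the file-image API (H5Fget_file_image / H5Pset_file_image) combined with the core VFD. The sketch below shows that round trip in a serial program, without the MPI transfer; the file names and 64 KB core-driver increment are illustrative assumptions, not values required by the test.

/* Minimal sketch of the file-image round trip: serialize an open HDF5 file
 * into a memory buffer, then reopen that buffer through the core VFD. */
#include <stdlib.h>
#include "hdf5.h"

int
main(void)
{
    hid_t    fapl, fid;
    ssize_t  image_len;
    void    *image_ptr;

    /* Create a small file with the core (in-memory) driver. */
    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_core(fapl, (size_t)(64 * 1024), 0 /* no backing store */);
    fid = H5Fcreate("image_example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    /* ... create and write datasets here ... */
    H5Fflush(fid, H5F_SCOPE_GLOBAL);

    /* A first call with a NULL buffer returns the required image size. */
    image_len = H5Fget_file_image(fid, NULL, 0);
    image_ptr = malloc((size_t)image_len);
    H5Fget_file_image(fid, image_ptr, (size_t)image_len);
    H5Fclose(fid);

    /* The buffer can now be shipped anywhere (the test MPI_Ssend()s it);
     * it is reopened by installing the image in a core-driver fapl. */
    H5Pset_file_image(fapl, image_ptr, (size_t)image_len);
    fid = H5Fopen("image_example.h5", H5F_ACC_RDWR, fapl);
    /* ... read or modify datasets here ... */

    H5Fclose(fid);
    H5Pclose(fapl);
    free(image_ptr);
    return 0;
}
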
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index e3c72ef..3a86642 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -502,6 +502,13 @@ int main(int argc, char **argv)
"test actual io mode proprerty",
PARATESTFILE);
+ if((mpi_size < 2) && MAINPROCESS) {
+ printf("File Image Ops daisy chain test needs at least 2 processes.\n");
+ printf("File Image Ops daisy chain test will be skipped \n");
+ }
+ AddTest((mpi_size < 2)? "-fiodc" : "fiodc", file_image_daisy_chain_test, NULL,
+ "file image ops daisy chain", NULL);
+
/* Display testing information */
TestInfo(argv[0]);
@@ -512,6 +519,18 @@ int main(int argc, char **argv)
/* Parse command line arguments */
TestParseCmdLine(argc, argv);
+ if((mpi_size < 2) && MAINPROCESS) {
+ printf("Atomicity tests need at least 2 processes to participate\n");
+ printf("8 is more recommended.. Atomicity tests will be skipped \n");
+ }
+ else if (facc_type != FACC_MPIO && MAINPROCESS) {
+ printf("Atomicity tests will not work with a non MPIO VFD\n");
+ }
+ else if(mpi_size >= 2 && facc_type == FACC_MPIO){
+ AddTest("atomicity", dataset_atomicity, NULL,
+ "dataset atomic updates", PARATESTFILE);
+ }
+
if (facc_type == FACC_MPIPOSIX && MAINPROCESS){
printf("===================================\n"
" Using MPIPOSIX driver\n"
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index 11656f9..da11c62 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -43,6 +43,8 @@ enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD,
#define DATASETNAME2 "Data2"
#define DATASETNAME3 "Data3"
#define DATASETNAME4 "Data4"
+#define DATASETNAME5 "Data5"
+#define DATASETNAME6 "Data6"
/* Hyperslab layout styles */
#define BYROW 1 /* divide into slabs of rows */
@@ -225,6 +227,7 @@ void independent_group_read(void);
void test_fapl_mpio_dup(void);
void test_fapl_mpiposix_dup(void);
void test_split_comm_access(void);
+void dataset_atomicity(void);
void dataset_writeInd(void);
void dataset_writeAll(void);
void extend_writeInd(void);
@@ -266,6 +269,7 @@ void lower_dim_size_comp_test(void);
void link_chunk_collective_io_test(void);
void contig_hyperslab_dr_pio_test(ShapeSameTestMethods sstest_type);
void checker_board_hyperslab_dr_pio_test(ShapeSameTestMethods sstest_type);
+void file_image_daisy_chain_test(void);
#ifdef H5_HAVE_FILTER_DEFLATE
void compress_readAll(void);
#endif /* H5_HAVE_FILTER_DEFLATE */