summaryrefslogtreecommitdiffstats
path: root/testpar
diff options
context:
space:
mode:
authorVailin Choi <vchoi@hdfgroup.org>2013-04-18 18:23:51 (GMT)
committerVailin Choi <vchoi@hdfgroup.org>2013-04-18 18:23:51 (GMT)
commit6ee0e05fb94445551840fcb80b9b1c254c736799 (patch)
tree0acf68cdc69dae2ff0e2a72b36e4efb6f8fbfd06 /testpar
parent94f89911545edce6fc9ebde2c83357cbda0bbd70 (diff)
downloadhdf5-6ee0e05fb94445551840fcb80b9b1c254c736799.zip
hdf5-6ee0e05fb94445551840fcb80b9b1c254c736799.tar.gz
hdf5-6ee0e05fb94445551840fcb80b9b1c254c736799.tar.bz2
[svn-r23599] Bring revisions 22802 : 23085 from trunk to revise_chunks.
h5committested.
Diffstat (limited to 'testpar')
-rw-r--r--testpar/CMakeLists.txt1
-rw-r--r--testpar/Makefile.am3
-rw-r--r--testpar/Makefile.in6
-rw-r--r--testpar/t_coll_chunk.c36
-rw-r--r--testpar/t_dset.c104
-rw-r--r--testpar/t_prop.c452
-rw-r--r--testpar/testphdf5.c5
-rw-r--r--testpar/testphdf5.h10
8 files changed, 526 insertions, 91 deletions
diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt
index 5d594a6..88c47f5 100644
--- a/testpar/CMakeLists.txt
+++ b/testpar/CMakeLists.txt
@@ -23,6 +23,7 @@ SET (testphdf5_SRCS
${HDF5_TEST_PAR_SOURCE_DIR}/t_span_tree.c
${HDF5_TEST_PAR_SOURCE_DIR}/t_chunk_alloc.c
${HDF5_TEST_PAR_SOURCE_DIR}/t_filter_read.c
+ ${HDF5_TEST_PAR_SOURCE_DIR}/t_prop.c
)
#-- Adding test for testhdf5
diff --git a/testpar/Makefile.am b/testpar/Makefile.am
index b2fb97c..e934f08 100644
--- a/testpar/Makefile.am
+++ b/testpar/Makefile.am
@@ -32,7 +32,8 @@ check_PROGRAMS = $(TEST_PROG_PARA)
check_SCRIPTS= $(TEST_SCRIPT)
testphdf5_SOURCES=testphdf5.c t_dset.c t_file.c t_file_image.c t_mdset.c \
- t_ph5basic.c t_coll_chunk.c t_span_tree.c t_chunk_alloc.c t_filter_read.c
+ t_ph5basic.c t_coll_chunk.c t_span_tree.c t_chunk_alloc.c t_filter_read.c \
+ t_prop.c
# The tests all depend on the hdf5 library and the test library
LDADD = $(LIBH5TEST) $(LIBHDF5)
diff --git a/testpar/Makefile.in b/testpar/Makefile.in
index 5b44e6e..394b3b1 100644
--- a/testpar/Makefile.in
+++ b/testpar/Makefile.in
@@ -117,7 +117,7 @@ am_testphdf5_OBJECTS = testphdf5.$(OBJEXT) t_dset.$(OBJEXT) \
t_file.$(OBJEXT) t_file_image.$(OBJEXT) t_mdset.$(OBJEXT) \
t_ph5basic.$(OBJEXT) t_coll_chunk.$(OBJEXT) \
t_span_tree.$(OBJEXT) t_chunk_alloc.$(OBJEXT) \
- t_filter_read.$(OBJEXT)
+ t_filter_read.$(OBJEXT) t_prop.$(OBJEXT)
testphdf5_OBJECTS = $(am_testphdf5_OBJECTS)
testphdf5_LDADD = $(LDADD)
testphdf5_DEPENDENCIES = $(LIBH5TEST) $(LIBHDF5)
@@ -463,7 +463,8 @@ TEST_PROG_PARA = t_mpi t_posix_compliant testphdf5 t_cache t_pflush1 t_pflush2 t
TEST_SCRIPT_PARA = testph5.sh
check_SCRIPTS = $(TEST_SCRIPT)
testphdf5_SOURCES = testphdf5.c t_dset.c t_file.c t_file_image.c t_mdset.c \
- t_ph5basic.c t_coll_chunk.c t_span_tree.c t_chunk_alloc.c t_filter_read.c
+ t_ph5basic.c t_coll_chunk.c t_span_tree.c t_chunk_alloc.c t_filter_read.c \
+ t_prop.c
# The tests all depend on the hdf5 library and the test library
@@ -576,6 +577,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_pflush2.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_ph5basic.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_posix_compliant.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_prop.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_shapesame.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_span_tree.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/testphdf5.Po@am__quote@
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index 61e7bfd..73e7f09 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -258,8 +258,10 @@ coll_chunk5(void)
/*-------------------------------------------------------------------------
* Function: coll_chunk6
*
- * Purpose: Wrapper to test the collective chunk IO for regular JOINT
- selection with at least number of 2*mpi_size chunks
+ * Purpose: Test direct request for multi-chunk-io.
+ * Wrapper to test the collective chunk IO for regular JOINT
+ * selection with at least number of 2*mpi_size chunks
+ * Test for direct to Multi Chunk I/O.
*
* Return: Success: 0
*
@@ -489,6 +491,12 @@ coll_chunk10(void)
*
* Failure: -1
*
+ * Modifications:
+ * Remove invalid temporary property checkings for API_LINK_HARD and
+ * API_LINK_TRUE cases.
+ * Programmer: Jonathan Kim
+ * Date: 2012-10-10
+ *
* Programmer: Unknown
* July 12th, 2004
*
@@ -634,11 +642,6 @@ coll_chunktest(const char* filename,
NULL, NULL, NULL, NULL, NULL, NULL);
VRFY((status >= 0),"testing property list inserted succeeded");
- prop_value = H5D_XFER_COLL_CHUNK_FIX;
- status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_TO_MULTI, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
- NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0),"testing property list inserted succeeded");
-
break;
case API_MULTI_HARD:
@@ -654,11 +657,6 @@ coll_chunktest(const char* filename,
NULL, NULL, NULL, NULL, NULL, NULL);
VRFY((status >= 0),"testing property list inserted succeeded");
- prop_value = H5D_XFER_COLL_CHUNK_FIX;
- status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_TO_MULTI_OPT, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
- NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0),"testing property list inserted succeeded");
-
break;
case API_LINK_FALSE:
@@ -699,25 +697,17 @@ coll_chunktest(const char* filename,
case API_LINK_HARD:
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_HARD_NAME,&prop_value);
VRFY((status >= 0),"testing property list get succeeded");
- if(prop_value !=0){/*double check if the option is switched to multiple chunk internally.*/
- status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_TO_MULTI, &prop_value);
- VRFY((status >= 0),"testing property list get succeeded");
- VRFY((prop_value == 1),"API to set LINK COLLECTIVE IO without optimization succeeded");
- }
+ VRFY((prop_value == 0),"API to set LINK COLLECTIVE IO directly succeeded");
break;
case API_MULTI_HARD:
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME,&prop_value);
VRFY((status >= 0),"testing property list get succeeded");
- VRFY((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO without optimization succeeded");
+ VRFY((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded");
break;
case API_LINK_TRUE:
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME,&prop_value);
VRFY((status >= 0),"testing property list get succeeded");
- if(prop_value !=0){/*double check if the option is switched to multiple chunk internally.*/
- status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_TO_MULTI_OPT, &prop_value);
- VRFY((status >= 0),"testing property list get succeeded");
- VRFY((prop_value == 1),"API to set LINK COLLECTIVE IO without optimization succeeded");
- }
+ VRFY((prop_value == 0),"API to set LINK COLLECTIVE IO succeeded");
break;
case API_LINK_FALSE:
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME,&prop_value);
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index 311d4be..1410824 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -2541,12 +2541,12 @@ none_selection_chunk(void)
* H5D_mpi_chunk_collective_io, processes disagree. The root reports
* collective, the rest report independent I/O
*
- * TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_COL:
- * H5D_mpi_chunk_collective_io_no_opt, each process reports collective I/O
- *
- * TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX_DISAGREE:
- * H5D_mpi_chunk_collective_io_no_opt, processes disagree
- * (collective and mixed I/O)
+ * TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND:
+ * Same as test TEST_ACTUAL_IO_MULTI_CHUNK_IND, but set to go
+ * directly to multi-chunk-io without the num threshold calc.
+ * TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL:
+ * Same as test TEST_ACTUAL_IO_MULTI_CHUNK_COL, but set to go
+ * directly to multi-chunk-io without the num threshold calc.
*
* TEST_ACTUAL_IO_LINK_CHUNK:
* H5D_link_chunk_collective_io, processes report linked chunk I/O
@@ -2565,10 +2565,17 @@ none_selection_chunk(void)
* (The most complex case that works on all builds) and then performs
* an independent read and write with the same dxpls.
*
- * It may seem like TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_IND and
- * TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX have been accidentally
- * left out. This is intentional; the other test cases sufficiently
- * cover all cases for Multi Chunk No Opt I/O.
+ * Note: DIRECT_MULTI_CHUNK_MIX and DIRECT_MULTI_CHUNK_MIX_DISAGREE
+ * are not needed as they are covered by the MULTI_CHUNK_MIX and
+ * MULTI_CHUNK_MIX_DISAGREE cases. _DIRECT_ cases are only for testing
+ * the pathway to multi-chunk-io by H5FD_MPIO_CHUNK_MULTI_IO instead of num-threshold.
+ *
+ * Modification:
+ * - Refactor to remove the multi-chunk-without-optimization test and update for
+ * testing direct to multi-chunk-io
+ * Programmer: Jonathan Kim
+ * Date: 2012-10-10
+ *
*
* Programmer: Jacob Gruber
* Date: 2011-04-06
@@ -2583,8 +2590,8 @@ test_actual_io_mode(int selection_mode) {
H5D_mpio_actual_io_mode_t actual_io_mode_expected = -1;
const char * filename;
const char * test_name;
- hbool_t multi_chunk_no_opt;
- hbool_t multi_chunk_with_opt;
+ hbool_t direct_multi_chunk_io;
+ hbool_t multi_chunk_io;
hbool_t is_chunked;
hbool_t is_collective;
int mpi_size = -1;
@@ -2611,18 +2618,18 @@ test_actual_io_mode(int selection_mode) {
hsize_t count[RANK];
hsize_t block[RANK];
hbool_t use_gpfs = FALSE;
+ char message[256];
herr_t ret;
/* Set up some flags to make some future if statements slightly more readable */
- multi_chunk_no_opt = (
- selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_IND ||
- selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_COL ||
- selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX_DISAGREE );
+ direct_multi_chunk_io = (
+ selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND ||
+ selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL );
/* Note: RESET performs the same tests as MULTI_CHUNK_MIX_DISAGREE and then
* tests independent I/O
*/
- multi_chunk_with_opt = (
+ multi_chunk_io = (
selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_IND ||
selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_COL ||
selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX ||
@@ -2691,6 +2698,7 @@ test_actual_io_mode(int selection_mode) {
/* Independent I/O with optimization */
case TEST_ACTUAL_IO_MULTI_CHUNK_IND:
+ case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND:
/* Since the dataset is chunked by row and each process selects a row,
* each process writes to a different chunk. This forces all I/O to be
* independent.
@@ -2704,6 +2712,7 @@ test_actual_io_mode(int selection_mode) {
/* Collective I/O with optimization */
case TEST_ACTUAL_IO_MULTI_CHUNK_COL:
+ case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL:
/* The dataset is chunked by rows, so each process takes a column which
* spans all chunks. Since the processes write non-overlapping regular
* selections to each chunk, the operation is purely collective.
@@ -2797,39 +2806,6 @@ test_actual_io_mode(int selection_mode) {
break;
- /* Collective I/O without optimization */
- case TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_COL:
- /* The dataset is chunked by rows, so when each process takes a column, its
- * selection spans all chunks. Since no process writes more chunks than any
- * other, colective I/O is never broken. */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
-
- test_name = "Multi Chunk No Opt - Collective";
- actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK_NO_OPT;
- actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
- break;
-
-
- /* Mixed I/O without optimization with disagreement */
- case TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX_DISAGREE:
- /* Each process takes a column, but the root's column is shortened so that
- * it only reads the first chunk. Since all the other processes are writing
- * to more chunks, they will break collective after the first chunk.
- */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
- if(mpi_rank == 0)
- block[0] = block[0] / mpi_size;
-
- test_name = "Multi Chunk No Opt - Mixed (Disagreement)";
- actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK_NO_OPT;
-
- if(mpi_rank == 0)
- actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
- else
- actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED;
-
- break;
-
/* Linked Chunk I/O */
case TEST_ACTUAL_IO_LINK_CHUNK:
/* Nothing special; link chunk I/O is forced in the dxpl settings. */
@@ -2905,20 +2881,25 @@ test_actual_io_mode(int selection_mode) {
ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- /* Set the threshold number of processes per chunk for link chunk I/O
- * to twice mpi_size. This will prevent the threshold from ever being
- * met, thus forcing multi chunk io instead of link chunk io.
+ /* Set the threshold number of processes per chunk to twice mpi_size.
+ * This will prevent the threshold from ever being met, thus forcing
+ * multi chunk io instead of link chunk io.
+ * This is the default behavior.
*/
- if(multi_chunk_with_opt) {
+ if(multi_chunk_io) {
+ /* force multi-chunk-io by threshold */
ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl_write, (unsigned) mpi_size*2);
VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded");
+ /* set this to manipulate the testing scenario about allocating processes
+ * to chunks */
ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl_write, (unsigned) 99);
VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded");
}
- /* Request multi chunk I/O without optimization */
- if(multi_chunk_no_opt) {
+ /* Go directly to multi-chunk-io without the threshold calc. */
+ if(direct_multi_chunk_io) {
+ /* set for multi chunk io by property*/
ret = H5Pset_dxpl_mpio_chunk_opt(dxpl_write, H5FD_MPIO_CHUNK_MULTI_IO);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
}
@@ -2961,7 +2942,6 @@ test_actual_io_mode(int selection_mode) {
/* Test values */
if(actual_chunk_opt_mode_expected != (unsigned) -1 && actual_io_mode_expected != (unsigned) -1) {
- char message[100];
sprintf(message, "Actual Chunk Opt Mode has the correct value for %s.\n",test_name);
VRFY((actual_chunk_opt_mode_write == actual_chunk_opt_mode_expected), message);
sprintf(message, "Actual IO Mode has the correct value for %s.\n",test_name);
@@ -3045,6 +3025,9 @@ actual_io_mode_tests(void) {
test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE);
+ /*
+ * Test multi-chunk-io via proc_num threshold
+ */
test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND);
test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL);
@@ -3056,8 +3039,11 @@ actual_io_mode_tests(void) {
test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE);
- test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_COL);
- test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX_DISAGREE);
+ /*
+ * Test multi-chunk-io via setting direct property
+ */
+ test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND);
+ test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL);
test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK);
test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS);
diff --git a/testpar/t_prop.c b/testpar/t_prop.c
new file mode 100644
index 0000000..4601316
--- /dev/null
+++ b/testpar/t_prop.c
@@ -0,0 +1,452 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Parallel tests for encoding/decoding plists sent between processes
+ */
+
+#include "testphdf5.h"
+#include "H5Pprivate.h"
+
+static int
+test_encode_decode(hid_t orig_pl, int mpi_rank, int recv_proc)
+{
+ MPI_Request req[2];
+ MPI_Status status;
+ hid_t pl; /* Decoded property list */
+ void *buf = NULL;
+ size_t buf_size = 0;
+ herr_t ret; /* Generic return value */
+
+ if(mpi_rank == 0) {
+ /* first call to encode returns only the size of the buffer needed */
+ ret = H5Pencode(orig_pl, NULL, &buf_size);
+ VRFY((ret >= 0), "H5Pencode succeeded");
+
+ buf = (uint8_t *)HDmalloc(buf_size);
+
+ ret = H5Pencode(orig_pl, buf, &buf_size);
+ VRFY((ret >= 0), "H5Pencode succeeded");
+
+ MPI_Isend(&buf_size, 1, MPI_INT, recv_proc, 123, MPI_COMM_WORLD, &req[0]);
+ MPI_Isend(buf, (int)buf_size, MPI_BYTE, recv_proc, 124, MPI_COMM_WORLD, &req[1]);
+ } /* end if */
+ if(mpi_rank == recv_proc) {
+ MPI_Recv(&buf_size, 1, MPI_INT, 0, 123, MPI_COMM_WORLD, &status);
+ buf = (uint8_t *)HDmalloc(buf_size);
+ MPI_Recv(buf, (int)buf_size, MPI_BYTE, 0, 124, MPI_COMM_WORLD, &status);
+
+ pl = H5Pdecode(buf);
+ VRFY((pl >= 0), "H5Pdecode succeeded");
+
+ VRFY(H5Pequal(orig_pl, pl), "Property List Equal Succeeded");
+
+ ret = H5Pclose(pl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ } /* end if */
+
+ if(0 == mpi_rank)
+ MPI_Waitall(2, req, MPI_STATUSES_IGNORE);
+
+ if(NULL != buf)
+ HDfree(buf);
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ return(0);
+}
+
+
+
+void
+test_plist_ed(void)
+{
+ hid_t dcpl; /* dataset create prop. list */
+ hid_t dapl; /* dataset access prop. list */
+ hid_t dxpl; /* dataset transfer prop. list */
+ hid_t gcpl; /* group create prop. list */
+ hid_t lcpl; /* link create prop. list */
+ hid_t lapl; /* link access prop. list */
+ hid_t ocpypl; /* object copy prop. list */
+ hid_t ocpl; /* object create prop. list */
+ hid_t fapl; /* file access prop. list */
+ hid_t fcpl; /* file create prop. list */
+ hid_t strcpl; /* string create prop. list */
+ hid_t acpl; /* attribute create prop. list */
+
+ int mpi_size, mpi_rank, recv_proc;
+
+ hsize_t chunk_size = 16384; /* chunk size */
+ double fill = 2.7f; /* Fill value */
+ size_t nslots = 521*2;
+ size_t nbytes = 1048576 * 10;
+ double w0 = 0.5f;
+ unsigned max_compact;
+ unsigned min_dense;
+ hsize_t max_size[1]; /*data space maximum size */
+ const char* c_to_f = "x+32";
+ H5AC_cache_config_t my_cache_config = {
+ H5AC__CURR_CACHE_CONFIG_VERSION,
+ TRUE,
+ FALSE,
+ FALSE,
+ "temp",
+ TRUE,
+ FALSE,
+ ( 2 * 2048 * 1024),
+ 0.3f,
+ (64 * 1024 * 1024),
+ (4 * 1024 * 1024),
+ 60000,
+ H5C_incr__threshold,
+ 0.8f,
+ 3.0f,
+ TRUE,
+ (8 * 1024 * 1024),
+ H5C_flash_incr__add_space,
+ 2.0f,
+ 0.25f,
+ H5C_decr__age_out_with_threshold,
+ 0.997f,
+ 0.8f,
+ TRUE,
+ (3 * 1024 * 1024),
+ 3,
+ FALSE,
+ 0.2f,
+ (256 * 2048),
+ H5AC__DEFAULT_METADATA_WRITE_STRATEGY};
+
+ herr_t ret; /* Generic return value */
+
+
+ if(VERBOSE_MED)
+ printf("Encode/Decode DCPLs\n");
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ if(mpi_size == 1)
+ recv_proc = 0;
+ else
+ recv_proc = 1;
+
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_chunk(dcpl, 1, &chunk_size);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE);
+ VRFY((ret >= 0), "H5Pset_alloc_time succeeded");
+
+ ret = H5Pset_fill_value(dcpl, H5T_NATIVE_DOUBLE, &fill);
+ VRFY((ret>=0), "set fill-value succeeded");
+
+ max_size[0] = 100;
+ ret = H5Pset_external(dcpl, "ext1.data", (off_t)0,
+ (hsize_t)(max_size[0] * sizeof(int)/4));
+ VRFY((ret>=0), "set external succeeded");
+ ret = H5Pset_external(dcpl, "ext2.data", (off_t)0,
+ (hsize_t)(max_size[0] * sizeof(int)/4));
+ VRFY((ret>=0), "set external succeeded");
+ ret = H5Pset_external(dcpl, "ext3.data", (off_t)0,
+ (hsize_t)(max_size[0] * sizeof(int)/4));
+ VRFY((ret>=0), "set external succeeded");
+ ret = H5Pset_external(dcpl, "ext4.data", (off_t)0,
+ (hsize_t)(max_size[0] * sizeof(int)/4));
+ VRFY((ret>=0), "set external succeeded");
+
+ ret = test_encode_decode(dcpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(dcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+
+ /******* ENCODE/DECODE DAPLS *****/
+ dapl = H5Pcreate(H5P_DATASET_ACCESS);
+ VRFY((dapl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_chunk_cache(dapl, nslots, nbytes, w0);
+ VRFY((ret >= 0), "H5Pset_chunk_cache succeeded");
+
+ ret = test_encode_decode(dapl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(dapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+
+ /******* ENCODE/DECODE OCPLS *****/
+ ocpl = H5Pcreate(H5P_OBJECT_CREATE);
+ VRFY((ocpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_attr_creation_order(ocpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED));
+ VRFY((ret >= 0), "H5Pset_attr_creation_order succeeded");
+
+ ret = H5Pset_attr_phase_change(ocpl, 110, 105);
+ VRFY((ret >= 0), "H5Pset_attr_phase_change succeeded");
+
+ ret = H5Pset_filter(ocpl, H5Z_FILTER_FLETCHER32, 0, (size_t)0, NULL);
+ VRFY((ret >= 0), "H5Pset_filter succeeded");
+
+ ret = test_encode_decode(ocpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(ocpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+
+ /******* ENCODE/DECODE DXPLS *****/
+ dxpl = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_btree_ratios(dxpl, 0.2f, 0.6f, 0.2f);
+ VRFY((ret >= 0), "H5Pset_btree_ratios succeeded");
+
+ ret = H5Pset_hyper_vector_size(dxpl, 5);
+ VRFY((ret >= 0), "H5Pset_hyper_vector_size succeeded");
+
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio_collective_opt succeeded");
+
+ ret = H5Pset_dxpl_mpio_chunk_opt(dxpl, H5FD_MPIO_CHUNK_MULTI_IO);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt succeeded");
+
+ ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl, 30);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded");
+
+ ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl, 40);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded");
+
+ ret = H5Pset_edc_check(dxpl, H5Z_DISABLE_EDC);
+ VRFY((ret >= 0), "H5Pset_edc_check succeeded");
+
+ ret = H5Pset_data_transform(dxpl, c_to_f);
+ VRFY((ret >= 0), "H5Pset_data_transform succeeded");
+
+ ret = test_encode_decode(dxpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(dxpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+
+ /******* ENCODE/DECODE GCPLS *****/
+ gcpl = H5Pcreate(H5P_GROUP_CREATE);
+ VRFY((gcpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_local_heap_size_hint(gcpl, 256);
+ VRFY((ret >= 0), "H5Pset_local_heap_size_hint succeeded");
+
+ ret = H5Pset_link_phase_change(gcpl, 2, 2);
+ VRFY((ret >= 0), "H5Pset_link_phase_change succeeded");
+
+ /* Query the group creation properties */
+ ret = H5Pget_link_phase_change(gcpl, &max_compact, &min_dense);
+ VRFY((ret >= 0), "H5Pget_est_link_info succeeded");
+
+ ret = H5Pset_est_link_info(gcpl, 3, 9);
+ VRFY((ret >= 0), "H5Pset_est_link_info succeeded");
+
+ ret = H5Pset_link_creation_order(gcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED));
+ VRFY((ret >= 0), "H5Pset_link_creation_order succeeded");
+
+ ret = test_encode_decode(gcpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(gcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+
+ /******* ENCODE/DECODE LCPLS *****/
+ lcpl = H5Pcreate(H5P_LINK_CREATE);
+ VRFY((lcpl >= 0), "H5Pcreate succeeded");
+
+ ret= H5Pset_create_intermediate_group(lcpl, TRUE);
+ VRFY((ret >= 0), "H5Pset_create_intermediate_group succeeded");
+
+ ret = test_encode_decode(lcpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(lcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+
+ /******* ENCODE/DECODE LAPLS *****/
+ lapl = H5Pcreate(H5P_LINK_ACCESS);
+ VRFY((lapl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_nlinks(lapl, (size_t)134);
+ VRFY((ret >= 0), "H5Pset_nlinks succeeded");
+
+ ret = H5Pset_elink_acc_flags(lapl, H5F_ACC_RDONLY);
+ VRFY((ret >= 0), "H5Pset_elink_acc_flags succeeded");
+
+ ret = H5Pset_elink_prefix(lapl, "/tmpasodiasod");
+ VRFY((ret >= 0), "H5Pset_nlinks succeeded");
+
+ /* Create FAPL for the elink FAPL */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl >= 0), "H5Pcreate succeeded");
+ ret = H5Pset_alignment(fapl, 2, 1024);
+ VRFY((ret >= 0), "H5Pset_alignment succeeded");
+
+ ret = H5Pset_elink_fapl(lapl, fapl);
+ VRFY((ret >= 0), "H5Pset_elink_fapl succeeded");
+
+ /* Close the elink's FAPL */
+ ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ ret = test_encode_decode(lapl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(lapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+
+ /******* ENCODE/DECODE OCPYPLS *****/
+ ocpypl = H5Pcreate(H5P_OBJECT_COPY);
+ VRFY((ocpypl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_copy_object(ocpypl, H5O_COPY_EXPAND_EXT_LINK_FLAG);
+ VRFY((ret >= 0), "H5Pset_copy_object succeeded");
+
+ ret = H5Padd_merge_committed_dtype_path(ocpypl, "foo");
+ VRFY((ret >= 0), "H5Padd_merge_committed_dtype_path succeeded");
+
+ ret = H5Padd_merge_committed_dtype_path(ocpypl, "bar");
+ VRFY((ret >= 0), "H5Padd_merge_committed_dtype_path succeeded");
+
+ ret = test_encode_decode(ocpypl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(ocpypl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+
+ /******* ENCODE/DECODE FAPLS *****/
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_family_offset(fapl, 1024);
+ VRFY((ret >= 0), "H5Pset_family_offset succeeded");
+
+ ret = H5Pset_meta_block_size(fapl, 2098452);
+ VRFY((ret >= 0), "H5Pset_meta_block_size succeeded");
+
+ ret = H5Pset_sieve_buf_size(fapl, 1048576);
+ VRFY((ret >= 0), "H5Pset_sieve_buf_size succeeded");
+
+ ret = H5Pset_alignment(fapl, 2, 1024);
+ VRFY((ret >= 0), "H5Pset_alignment succeeded");
+
+ ret = H5Pset_cache(fapl, 1024, 128, 10485760, 0.3f);
+ VRFY((ret >= 0), "H5Pset_cache succeeded");
+
+ ret = H5Pset_elink_file_cache_size(fapl, 10485760);
+ VRFY((ret >= 0), "H5Pset_elink_file_cache_size succeeded");
+
+ ret = H5Pset_gc_references(fapl, 1);
+ VRFY((ret >= 0), "H5Pset_gc_references succeeded");
+
+ ret = H5Pset_small_data_block_size(fapl, 2048);
+ VRFY((ret >= 0), "H5Pset_small_data_block_size succeeded");
+
+ ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ VRFY((ret >= 0), "H5Pset_libver_bounds succeeded");
+
+ ret = H5Pset_fclose_degree(fapl, H5F_CLOSE_WEAK);
+ VRFY((ret >= 0), "H5Pset_fclose_degree succeeded");
+
+ ret = H5Pset_multi_type(fapl, H5FD_MEM_GHEAP);
+ VRFY((ret >= 0), "H5Pset_multi_type succeeded");
+
+ ret = H5Pset_mdc_config(fapl, &my_cache_config);
+ VRFY((ret >= 0), "H5Pset_mdc_config succeeded");
+
+ ret = test_encode_decode(fapl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+
+ /******* ENCODE/DECODE FCPLS *****/
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ VRFY((fcpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_userblock(fcpl, 1024);
+ VRFY((ret >= 0), "H5Pset_userblock succeeded");
+
+ ret = H5Pset_istore_k(fcpl, 3);
+ VRFY((ret >= 0), "H5Pset_istore_k succeeded");
+
+ ret = H5Pset_sym_k(fcpl, 4, 5);
+ VRFY((ret >= 0), "H5Pset_sym_k succeeded");
+
+ ret = H5Pset_shared_mesg_nindexes(fcpl, 8);
+ VRFY((ret >= 0), "H5Pset_shared_mesg_nindexes succeeded");
+
+ ret = H5Pset_shared_mesg_index(fcpl, 1, H5O_SHMESG_SDSPACE_FLAG, 32);
+ VRFY((ret >= 0), "H5Pset_shared_mesg_index succeeded");
+
+ ret = H5Pset_shared_mesg_phase_change(fcpl, 60, 20);
+ VRFY((ret >= 0), "H5Pset_shared_mesg_phase_change succeeded");
+
+ ret = H5Pset_sizes(fcpl, 8, 4);
+ VRFY((ret >= 0), "H5Pset_sizes succeeded");
+
+ ret = test_encode_decode(fcpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(fcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+
+ /******* ENCODE/DECODE STRCPLS *****/
+ strcpl = H5Pcreate(H5P_STRING_CREATE);
+ VRFY((strcpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_char_encoding(strcpl, H5T_CSET_UTF8);
+ VRFY((ret >= 0), "H5Pset_char_encoding succeeded");
+
+ ret = test_encode_decode(strcpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(strcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+
+ /******* ENCODE/DECODE ACPLS *****/
+ acpl = H5Pcreate(H5P_ATTRIBUTE_CREATE);
+ VRFY((acpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_char_encoding(acpl, H5T_CSET_UTF8);
+ VRFY((ret >= 0), "H5Pset_char_encoding succeeded");
+
+ ret = test_encode_decode(acpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(acpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+}
+
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index a4df46e..784892a 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -440,7 +440,7 @@ int main(int argc, char **argv)
"linked chunk collective IO without optimization",PARATESTFILE);
AddTest((mpi_size < 3)? "-cchunk6" : "cchunk6",
coll_chunk6,NULL,
- "multi-chunk collective IO without optimization",PARATESTFILE);
+ "multi-chunk collective IO with direct request",PARATESTFILE);
AddTest((mpi_size < 3)? "-cchunk7" : "cchunk7",
coll_chunk7,NULL,
"linked chunk collective IO with optimization",PARATESTFILE);
@@ -510,6 +510,9 @@ int main(int argc, char **argv)
"test cause for broken collective io",
PARATESTFILE);
+ AddTest("edpl", test_plist_ed, NULL,
+ "encode/decode Property Lists", NULL);
+
if((mpi_size < 2) && MAINPROCESS) {
printf("File Image Ops daisy chain test needs at least 2 processes.\n");
printf("File Image Ops daisy chain test will be skipped \n");
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index 29ad411..fa83697 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -169,11 +169,10 @@ enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD,
#define TEST_ACTUAL_IO_MULTI_CHUNK_COL 3
#define TEST_ACTUAL_IO_MULTI_CHUNK_MIX 4
#define TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE 5
-#define TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_IND 6
-#define TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_COL 7
-#define TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX_DISAGREE 8
-#define TEST_ACTUAL_IO_LINK_CHUNK 9
-#define TEST_ACTUAL_IO_CONTIGUOUS 10
+#define TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND 6
+#define TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL 7
+#define TEST_ACTUAL_IO_LINK_CHUNK 8
+#define TEST_ACTUAL_IO_CONTIGUOUS 9
/* Definitions of the selection mode for the no_collective_cause_tests function. */
#define TEST_COLLECTIVE 0x001
@@ -234,6 +233,7 @@ extern int facc_type; /*Test file access type */
extern int dxfer_coll_type;
/* Test program prototypes */
+void test_plist_ed(void);
void multiple_dset_write(void);
void multiple_group_write(void);
void multiple_group_read(void);